Dataset schema:
    code        stringlengths   6 .. 250k
    repo_name   stringlengths   5 .. 70
    path        stringlengths   3 .. 177
    language    stringclasses   1 value
    license     stringclasses   15 values
    size        int64           6 .. 250k
/*
 * Only eth0 supported for now
 *
 * (C) Copyright 2003
 * Thomas.Lange@corelatus.se
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
#include <config.h>

#if defined(CONFIG_SYS_DISCOVER_PHY)
#error "PHY not supported yet"
/* We just assume that we are running 100FD for now */
/* We all use switches, right? ;-) */
#endif

/* I assume ethernet behaves like au1000 */

#ifdef CONFIG_SOC_AU1000
/* Base address differ between cpu:s */
#define ETH0_BASE	AU1000_ETH0_BASE
#define MAC0_ENABLE	AU1000_MAC0_ENABLE
#else
#ifdef CONFIG_SOC_AU1100
#define ETH0_BASE	AU1100_ETH0_BASE
#define MAC0_ENABLE	AU1100_MAC0_ENABLE
#else
#ifdef CONFIG_SOC_AU1500
#define ETH0_BASE	AU1500_ETH0_BASE
#define MAC0_ENABLE	AU1500_MAC0_ENABLE
#else
#ifdef CONFIG_SOC_AU1550
#define ETH0_BASE	AU1550_ETH0_BASE
#define MAC0_ENABLE	AU1550_MAC0_ENABLE
#else
#error "No valid cpu set"
#endif
#endif
#endif
#endif

#include <common.h>
#include <malloc.h>
#include <net.h>
#include <command.h>
#include <asm/io.h>
#include <mach/au1x00.h>
#if defined(CONFIG_CMD_MII)
#include <miiphy.h>
#endif

/* Ethernet Transmit and Receive Buffers */
#define DBUF_LENGTH	1520
#define PKT_MAXBUF_SIZE	1518

static char txbuf[DBUF_LENGTH];

static int next_tx;
static int next_rx;

/* 4 rx and 4 tx fifos */
#define NO_OF_FIFOS	4

typedef struct {
	u32 status;
	u32 addr;
	u32 len;	/* Only used for tx */
	u32 not_used;
} mac_fifo_t;

mac_fifo_t mac_fifo[NO_OF_FIFOS];

#define MAX_WAIT 1000

#if defined(CONFIG_CMD_MII)
int au1x00_miiphy_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	unsigned short value = 0;
	volatile u32 *mii_control_reg = (volatile u32 *)(ETH0_BASE + MAC_MII_CNTRL);
	volatile u32 *mii_data_reg = (volatile u32 *)(ETH0_BASE + MAC_MII_DATA);
	u32 mii_control;
	unsigned int timedout = 20;

	while (*mii_control_reg & MAC_MII_BUSY) {
		udelay(1000);
		if (--timedout == 0) {
			printf("au1x00_eth: miiphy_read busy timeout!!\n");
			return -1;
		}
	}

	mii_control = MAC_SET_MII_SELECT_REG(reg) |
		MAC_SET_MII_SELECT_PHY(addr) | MAC_MII_READ;

	*mii_control_reg = mii_control;

	timedout = 20;
	while (*mii_control_reg & MAC_MII_BUSY) {
		udelay(1000);
		if (--timedout == 0) {
			printf("au1x00_eth: miiphy_read busy timeout!!\n");
			return -1;
		}
	}
	value = *mii_data_reg;
	return value;
}

int au1x00_miiphy_write(struct mii_dev *bus, int addr, int devad, int reg,
			u16 value)
{
	volatile u32 *mii_control_reg = (volatile u32 *)(ETH0_BASE + MAC_MII_CNTRL);
	volatile u32 *mii_data_reg = (volatile u32 *)(ETH0_BASE + MAC_MII_DATA);
	u32 mii_control;
	unsigned int timedout = 20;

	while (*mii_control_reg & MAC_MII_BUSY) {
		udelay(1000);
		if (--timedout == 0) {
			printf("au1x00_eth: miiphy_write busy timeout!!\n");
			return -1;
		}
	}

	mii_control = MAC_SET_MII_SELECT_REG(reg) |
		MAC_SET_MII_SELECT_PHY(addr) | MAC_MII_WRITE;

	*mii_data_reg = value;
	*mii_control_reg = mii_control;
	return 0;
}
#endif

static int au1x00_send(struct eth_device *dev, void *packet, int length)
{
	volatile mac_fifo_t *fifo_tx =
		(volatile mac_fifo_t *)(MAC0_TX_DMA_ADDR + MAC_TX_BUFF0_STATUS);
	int i;
	int res;

	/* tx fifo should always be idle */
	fifo_tx[next_tx].len = length;
	fifo_tx[next_tx].addr = (virt_to_phys(packet)) | TX_DMA_ENABLE;
	au_sync();

	udelay(1);
	i = 0;
	while (!(fifo_tx[next_tx].addr & TX_T_DONE)) {
		if (i > MAX_WAIT) {
			printf("TX timeout\n");
			break;
		}
		udelay(1);
		i++;
	}

	/* Clear done bit */
	fifo_tx[next_tx].addr = 0;
	fifo_tx[next_tx].len = 0;
	au_sync();

	res = fifo_tx[next_tx].status;

	next_tx++;
	if (next_tx >= NO_OF_FIFOS)
		next_tx = 0;
	return res;
}

static int au1x00_recv(struct eth_device *dev)
{
	volatile mac_fifo_t *fifo_rx =
		(volatile mac_fifo_t *)(MAC0_RX_DMA_ADDR + MAC_RX_BUFF0_STATUS);
	int length;
	u32 status;

	for (;;) {
		if (!(fifo_rx[next_rx].addr & RX_T_DONE)) {
			/* Nothing has been received */
			return -1;
		}

		status = fifo_rx[next_rx].status;
		length = status & 0x3FFF;

		if (status & RX_ERROR) {
			printf("Rx error 0x%x\n", status);
		} else {
			/* Pass the packet up to the protocol layers. */
			net_process_received_packet(net_rx_packets[next_rx],
						    length - 4);
		}

		fifo_rx[next_rx].addr =
			(virt_to_phys(net_rx_packets[next_rx])) | RX_DMA_ENABLE;

		next_rx++;
		if (next_rx >= NO_OF_FIFOS)
			next_rx = 0;
	} /* for */
	return 0; /* Does anyone use this? */
}

static int au1x00_init(struct eth_device *dev, bd_t *bd)
{
	volatile u32 *macen = (volatile u32 *)MAC0_ENABLE;
	volatile u32 *mac_ctrl = (volatile u32 *)(ETH0_BASE + MAC_CONTROL);
	volatile u32 *mac_addr_high = (volatile u32 *)(ETH0_BASE + MAC_ADDRESS_HIGH);
	volatile u32 *mac_addr_low = (volatile u32 *)(ETH0_BASE + MAC_ADDRESS_LOW);
	volatile u32 *mac_mcast_high = (volatile u32 *)(ETH0_BASE + MAC_MCAST_HIGH);
	volatile u32 *mac_mcast_low = (volatile u32 *)(ETH0_BASE + MAC_MCAST_LOW);
	volatile mac_fifo_t *fifo_tx =
		(volatile mac_fifo_t *)(MAC0_TX_DMA_ADDR + MAC_TX_BUFF0_STATUS);
	volatile mac_fifo_t *fifo_rx =
		(volatile mac_fifo_t *)(MAC0_RX_DMA_ADDR + MAC_RX_BUFF0_STATUS);
	int i;

	next_tx = TX_GET_DMA_BUFFER(fifo_tx[0].addr);
	next_rx = RX_GET_DMA_BUFFER(fifo_rx[0].addr);

	/* We have to enable clocks before releasing reset */
	*macen = MAC_EN_CLOCK_ENABLE;
	udelay(10);

	/* Enable MAC0 */
	/* We have to release reset before accessing registers */
	*macen = MAC_EN_CLOCK_ENABLE | MAC_EN_RESET0 |
		MAC_EN_RESET1 | MAC_EN_RESET2;
	udelay(10);

	for (i = 0; i < NO_OF_FIFOS; i++) {
		fifo_tx[i].len = 0;
		fifo_tx[i].addr = virt_to_phys(&txbuf[0]);
		fifo_rx[i].addr = (virt_to_phys(net_rx_packets[i])) | RX_DMA_ENABLE;
	}

	/* Put mac addr in little endian */
#define ea eth_get_ethaddr()
	*mac_addr_high = (ea[5] << 8) | (ea[4]);
	*mac_addr_low = (ea[3] << 24) | (ea[2] << 16) | (ea[1] << 8) | (ea[0]);
#undef ea
	*mac_mcast_low = 0;
	*mac_mcast_high = 0;

	/* Make sure the MAC buffer is in the correct endian mode */
#ifdef __LITTLE_ENDIAN
	*mac_ctrl = MAC_FULL_DUPLEX;
	udelay(1);
	*mac_ctrl = MAC_FULL_DUPLEX | MAC_RX_ENABLE | MAC_TX_ENABLE;
#else
	*mac_ctrl = MAC_BIG_ENDIAN | MAC_FULL_DUPLEX;
	udelay(1);
	*mac_ctrl = MAC_BIG_ENDIAN | MAC_FULL_DUPLEX | MAC_RX_ENABLE | MAC_TX_ENABLE;
#endif

	return 1;
}

static void au1x00_halt(struct eth_device *dev)
{
	volatile u32 *macen = (volatile u32 *)MAC0_ENABLE;

	/* Put MAC0 in reset */
	*macen = 0;
}

int au1x00_enet_initialize(bd_t *bis)
{
	struct eth_device *dev;

	if ((dev = (struct eth_device *)malloc(sizeof *dev)) == NULL) {
		puts("malloc failed\n");
		return -1;
	}

	memset(dev, 0, sizeof *dev);

	strcpy(dev->name, "Au1X00 ethernet");
	dev->iobase = 0;
	dev->priv = 0;
	dev->init = au1x00_init;
	dev->halt = au1x00_halt;
	dev->send = au1x00_send;
	dev->recv = au1x00_recv;

	eth_register(dev);

#if defined(CONFIG_CMD_MII)
	int retval;
	struct mii_dev *mdiodev = mdio_alloc();
	if (!mdiodev)
		return -ENOMEM;
	strncpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
	mdiodev->read = au1x00_miiphy_read;
	mdiodev->write = au1x00_miiphy_write;

	retval = mdio_register(mdiodev);
	if (retval < 0)
		return retval;
#endif

	return 1;
}

int cpu_eth_init(bd_t *bis)
{
	au1x00_enet_initialize(bis);
	return 0;
}
repo: guileschool/beagleboard | path: u-boot/arch/mips/mach-au1x00/au1x00_eth.c | language: C | license: mit | size: 7,093
/******************************************************************************
 * Spine Runtimes Software License
 * Version 2
 *
 * Copyright (c) 2013, Esoteric Software
 * All rights reserved.
 *
 * You are granted a perpetual, non-exclusive, non-sublicensable and
 * non-transferable license to install, execute and perform the Spine Runtimes
 * Software (the "Software") solely for internal use. Without the written
 * permission of Esoteric Software, you may not (a) modify, translate, adapt or
 * otherwise create derivative works, improvements of the Software or develop
 * new applications using the Software or (b) remove, delete, alter or obscure
 * any trademarks or any copyright, trademark, patent or other intellectual
 * property or proprietary rights notices on or in the Software, including
 * any copy thereof. Redistributions in binary or source form must include
 * this license and terms. THIS SOFTWARE IS PROVIDED BY ESOTERIC SOFTWARE
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ESOTERIC SOFTARE BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/

#include <spine/EventData.h>
#include <spine/extension.h>

spEventData* spEventData_create (const char* name) {
	spEventData* self = NEW(spEventData);
	MALLOC_STR(self->name, name);
	return self;
}

void spEventData_dispose (spEventData* self) {
	FREE(self->stringValue);
	FREE(self->name);
	FREE(self);
}
repo: RichardRanft/Torque6 | path: src/spine/EventData.c | language: C | license: mit | size: 2,042
/*
 * $Id:$
 *
 * Copyright (C) 2012 Piotr Esden-Tempski <piotr@esden.net>
 *
 * This file is part of paparazzi.
 *
 * paparazzi is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * paparazzi is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with paparazzi; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include "mcu.h"
#include "mcu_periph/sys_time.h"
#include "led.h"
#include "subsystems/datalink/downlink.h"
#include "mcu_periph/uart.h"
#include "mcu_periph/can.h"

static inline void main_init(void);
static inline void main_periodic_task(void);
static inline void main_event_task(void);

void main_on_can_msg(uint32_t id, uint8_t *data, int len);

uint8_t tx_data[8];
uint8_t rx_data[8];
bool new_can_data = false;

int main(void) {
  main_init();

  tx_data[0] = 0;
  tx_data[1] = 0;
  tx_data[2] = 0;
  tx_data[3] = 0;
  tx_data[4] = 0;
  tx_data[5] = 0;
  tx_data[6] = 0;
  tx_data[7] = 0;

  new_can_data = false;

  while (1) {
    if (sys_time_check_and_ack_timer(0))
      main_periodic_task();
    main_event_task();
  }

  return 0;
}

static inline void main_init(void) {
  mcu_init();
  sys_time_register_timer((0.5 / PERIODIC_FREQUENCY), NULL);
  ppz_can_init(main_on_can_msg);
}

static inline void main_periodic_task(void) {
  tx_data[0] += 1;
  ppz_can_transmit(0, tx_data, 8);
  LED_PERIODIC();
  DOWNLINK_SEND_ALIVE(DefaultChannel, DefaultDevice, 16, MD5SUM);
}

static inline void main_event_task(void) {
  if (new_can_data) {
    if (rx_data[0] & 0x10) { LED_ON(2); } else { LED_OFF(2); }
  }
  if (new_can_data) {
    if (rx_data[0] & 0x20) { LED_ON(3); } else { LED_OFF(3); }
  }
  if (new_can_data) {
    if (rx_data[0] & 0x40) { LED_ON(4); } else { LED_OFF(4); }
  }
  if (new_can_data) {
    if (rx_data[0] & 0x80) { LED_ON(5); } else { LED_OFF(5); }
  }
}

void main_on_can_msg(uint32_t id, uint8_t *data, int len) {
  for (int i = 0; i < 8; i++) {
    rx_data[i] = data[i];
  }
  new_can_data = true;
}
repo: arbuzarbuz/paparazzi | path: sw/airborne/lisa/test_can.c | language: C | license: gpl-2.0 | size: 2,489
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <mach/irqs.h>
#include <asm/mach-types.h>
#include <mach/gpiomux.h>
#include "gpiomux_lge_325.h"
#include "devices_lge_325.h"

#ifdef CONFIG_LGE_PM_CURRENT_CONSUMPTION_FIX
#ifdef CONFIG_MACH_LGE_325_BOARD_VZW
static struct gpiomux_setting msm_gpio81_cfg_suspend2 = /* BOOT_CONFIG_0 */
	{ .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, };

static struct msm_gpiomux_config msm8x60_current_configs[] __initdata = {
	/* BOOT_CONFIG_0 */
	{ .gpio = 81, .settings = { [GPIOMUX_SUSPENDED] = &msm_gpio81_cfg_suspend2, }, },
};
#else
static struct gpiomux_setting msm_gpio81_cfg_suspend2 = /* BOOT_CONFIG_0 */
	{ .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, };

static struct gpiomux_setting msm_gpio84_cfg_suspend2 = /* BOOT_CONFIG_1 */
	{ .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, };

static struct gpiomux_setting msm_gpio76_cfg_suspend2 = /* BOOT_CONFIG_6 */
	{ .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, };

static struct msm_gpiomux_config msm8x60_current_configs[] __initdata = {
	/* BOOT_CONFIG_0 */
	{ .gpio = 81, .settings = { [GPIOMUX_SUSPENDED] = &msm_gpio81_cfg_suspend2, }, },
	/* BOOT_CONFIG_1 */
	{ .gpio = 84, .settings = { [GPIOMUX_SUSPENDED] = &msm_gpio84_cfg_suspend2, }, },
	/* BOOT_CONFIG_6 */
	{ .gpio = 76, .settings = { [GPIOMUX_SUSPENDED] = &msm_gpio76_cfg_suspend2, }, },
};
// for atnt rock_bottom end
#endif
#endif

static struct gpiomux_setting console_uart = { .func = GPIOMUX_FUNC_2, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, };

static struct gpiomux_setting wifi_active = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, };

/* The SPI configurations apply to GSBI1 and GSBI10 */
static struct gpiomux_setting spi_active = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, };
static struct gpiomux_setting spi_suspended_config = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, };
static struct gpiomux_setting spi_suspended_cs_config = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, };

/* This I2C active configuration applies to GSBI3 and GSBI4 */
static struct gpiomux_setting i2c_active = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, };

#ifdef CONFIG_LGE_FUEL_GAUGE
static struct gpiomux_setting gsbi5 = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, };
#endif

static struct gpiomux_setting i2c_active_gsbi7 = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_12MA, .pull = GPIOMUX_PULL_NONE, };

/* This I2C suspended configuration applies to GSBI3, GSBI4 and GSBI7 */
static struct gpiomux_setting i2c_suspended_config = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, };

static struct gpiomux_setting gsbi8 = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, };

#ifdef CONFIG_LGE_IRDA
static struct gpiomux_setting gsbi8_irda = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_DOWN, };
static struct gpiomux_setting gsbi8_irda_active = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_DOWN, };
static struct gpiomux_setting irda_pwdn_suspended = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_OUT_HIGH, };
/*
static struct gpiomux_setting irda_pwdn_active = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, .dir = GPIOMUX_OUT_HIGH, };
*/
#endif

#ifdef CONFIG_LGE_FELICA
static struct gpiomux_setting uart10dm_active = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_DOWN, };
#endif

static struct gpiomux_setting gsbi10 = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, };

#if defined(CONFIG_LGE_BROADCAST_DCM) || defined(CONFIG_LGE_BROADCAST_TDMB)
static struct gpiomux_setting gsbi11 = { .func = GPIOMUX_FUNC_2, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, };
static struct gpiomux_setting bcast_ctrl_pin = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, };
#if defined(CONFIG_LGE_BROADCAST_TDMB)
static struct gpiomux_setting DMB_INT = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, };
#endif
#endif

#if defined(CONFIG_LGE_WIRELESS_CHARGER_MAX8971) || defined(CONFIG_LGE_WIRELESS_CHARGER_BQ24160)
static struct gpiomux_setting gsbi11 = { .func = GPIOMUX_FUNC_2, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, };
#endif

static struct gpiomux_setting gsbi12 = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, };
static struct gpiomux_setting ps_hold = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_12MA, .pull = GPIOMUX_PULL_NONE, };

static struct gpiomux_setting msm_snddev_active_config = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, };
static struct gpiomux_setting msm_snddev_suspend_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, };

static struct gpiomux_setting sdcc1_dat_0_3_cmd_actv_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_10MA, .pull = GPIOMUX_PULL_UP, };
static struct gpiomux_setting sdcc1_dat_4_7_cmd_actv_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_10MA, .pull = GPIOMUX_PULL_UP, };
static struct gpiomux_setting sdcc1_clk_actv_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_16MA, .pull = GPIOMUX_PULL_NONE, };
static struct gpiomux_setting sdcc1_suspend_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, };

static struct gpiomux_setting sdcc2_dat_0_3_cmd_actv_cfg = { .func = GPIOMUX_FUNC_2, .drv = GPIOMUX_DRV_10MA, .pull = GPIOMUX_PULL_UP, };
static struct gpiomux_setting sdcc2_dat_4_7_cmd_actv_cfg = { .func = GPIOMUX_FUNC_2, .drv = GPIOMUX_DRV_10MA, .pull = GPIOMUX_PULL_UP, };
static struct gpiomux_setting sdcc2_clk_actv_cfg = { .func = GPIOMUX_FUNC_2, .drv = GPIOMUX_DRV_16MA, .pull = GPIOMUX_PULL_NONE, };
static struct gpiomux_setting sdcc2_suspend_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, };

static struct gpiomux_setting sdcc5_dat_0_3_cmd_actv_cfg = { .func = GPIOMUX_FUNC_2, .drv = GPIOMUX_DRV_10MA, .pull = GPIOMUX_PULL_UP, };
static struct gpiomux_setting sdcc5_clk_actv_cfg = { .func = GPIOMUX_FUNC_2, .drv = GPIOMUX_DRV_16MA, .pull = GPIOMUX_PULL_NONE, };
static struct gpiomux_setting sdcc5_suspend_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, };

static struct gpiomux_setting aux_pcm_active_config = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, };
static struct gpiomux_setting aux_pcm_suspend_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, };

static struct gpiomux_setting uart1dm_active = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, };
static struct gpiomux_setting uart1dm_suspended = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, };

static struct gpiomux_setting mdp_vsync_suspend_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, };
static struct gpiomux_setting hdmi_suspend_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, };

static struct gpiomux_setting mdm2ap_status_active_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, };
static struct gpiomux_setting mdm2ap_status_suspend_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, };
static struct gpiomux_setting mdm2ap_sync_active_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, };
static struct gpiomux_setting mdm2ap_sync_suspend_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, };

static struct gpiomux_setting mdp_vsync_active_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, };

static struct gpiomux_setting hdmi_active_1_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, };
static struct gpiomux_setting hdmi_active_2_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_16MA, .pull = GPIOMUX_PULL_NONE, };
static struct gpiomux_setting hdmi_active_3_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_16MA, .pull = GPIOMUX_PULL_DOWN, };

static struct gpiomux_setting pmic_suspended_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, };

#ifdef CONFIG_MSM_GSBI9_UART
static struct gpiomux_setting uart9dm_active = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_DOWN, };
static struct gpiomux_setting gsbi9 = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, };
#endif

static struct gpiomux_setting ap2mdm_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_DOWN, };
static struct gpiomux_setting mdm2ap_status_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, };
static struct gpiomux_setting mdm2ap_vfr_active_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, };
static struct gpiomux_setting mdm2ap_vfr_suspend_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_DOWN, };
static struct gpiomux_setting mdm2ap_errfatal_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_16MA, .pull = GPIOMUX_PULL_DOWN, };
static struct gpiomux_setting ap2mdm_kpdpwr_n_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, };

#ifdef CONFIG_LGE_AUDIO
#if 0 /* error CAMCORDER_MIC_EN180 greater than max:173 */
static struct gpiomux_setting camcorder_mic_en_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_OUT_LOW, };
#endif
static struct gpiomux_setting motor_en_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_OUT_LOW, };
static struct gpiomux_setting lin_motor_pwm_cfg = { .func = GPIOMUX_FUNC_2, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, /* .dir = GPIOMUX_OUT_LOW, */ };
#ifdef CONFIG_LGE_HEADSET_DETECTION_FSA8008
static struct gpiomux_setting ear_mic_en_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_OUT_LOW, };
static struct gpiomux_setting earpole_detect_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_IN, };
#endif
#endif

static struct gpiomux_setting mdm2ap_vddmin_active_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, };
static struct gpiomux_setting mdm2ap_vddmin_suspend_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, };

#ifdef CONFIG_LGE_FELICA
static struct gpiomux_setting felica_pon_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, };
static struct gpiomux_setting felica_lockcont_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, };
#endif

#if defined(CONFIG_LGE_NFC_PN544_C2)
static struct gpiomux_setting nfc_pn544pn65n_ven_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_DOWN, .dir = GPIOMUX_OUT_LOW, };
static struct gpiomux_setting nfc_pn544pn65n_irq_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_DOWN, .dir = GPIOMUX_IN, };
static struct gpiomux_setting nfc_pn544pn65n_firm_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_DOWN, .dir = GPIOMUX_OUT_LOW, };
#endif

#ifdef CONFIG_LGE_MHL_SII9244
static struct gpiomux_setting mhl_detect_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_IN, };
static struct gpiomux_setting mhl_reset_n_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_OUT_LOW, };

static struct msm_gpiomux_config msm8x60_mhl_configs[] __initdata = {
	/* MHL_INT */
	{ .gpio = 30, .settings = { [GPIOMUX_SUSPENDED] = &mhl_detect_cfg, }, },
	/* USB_MHL_SEL */
#if defined(CONFIG_MACH_LGE_325_BOARD_LGU) || defined(CONFIG_MACH_LGE_325_BOARD_VZW)
	{ .gpio = 33, .settings = { [GPIOMUX_SUSPENDED] = &mhl_reset_n_cfg, }, },
#else
	{ .gpio = 139, .settings = { [GPIOMUX_SUSPENDED] = &mhl_reset_n_cfg, }, },
#endif
	/* MHL_RESET_N */
	{ .gpio = 142, .settings = { [GPIOMUX_SUSPENDED] = &mhl_reset_n_cfg, }, },
	/* MHL_WAKE_UP */
	{ .gpio = 153, .settings = { [GPIOMUX_SUSPENDED] = &mhl_reset_n_cfg, }, },
};
#endif

static struct msm_gpiomux_config msm8x60_gsbi_configs[] __initdata = {
#if defined(CONFIG_MACH_LGE_325_BOARD_LGU) || defined(CONFIG_MACH_LGE_325_BOARD_VZW)
#ifndef CONFIG_LGE_MHL_SII9244
	{ .gpio = 33, .settings = { [GPIOMUX_SUSPENDED] = &spi_suspended_config, [GPIOMUX_ACTIVE] = &spi_active, }, },
#endif
#else
	{ .gpio = 33, .settings = { [GPIOMUX_SUSPENDED] = &spi_suspended_config, [GPIOMUX_ACTIVE] = &spi_active, }, },
#endif
	{ .gpio = 34, .settings = {
		/* [GPIOMUX_SUSPENDED] = &spi_suspended_config, */
		/* [GPIOMUX_ACTIVE] = &spi_active, */
		[GPIOMUX_ACTIVE] = &wifi_active,
		[GPIOMUX_SUSPENDED] = &wifi_active,
	}, },
	{ .gpio = 35, .settings = { [GPIOMUX_SUSPENDED] = &spi_suspended_cs_config, [GPIOMUX_ACTIVE] = &spi_active, }, },
	{ .gpio = 36, .settings = { [GPIOMUX_SUSPENDED] = &spi_suspended_config, [GPIOMUX_ACTIVE] = &spi_active, }, },
	{ .gpio = 43, .settings = { [GPIOMUX_SUSPENDED] = &i2c_suspended_config, [GPIOMUX_ACTIVE] = &i2c_active, }, },
	{ .gpio = 44, .settings = { [GPIOMUX_SUSPENDED] = &i2c_suspended_config, [GPIOMUX_ACTIVE] = &i2c_active, }, },
#if defined(CONFIG_LGE_NFC_PN544_C2)
	{ .gpio = 46, .settings = { [GPIOMUX_SUSPENDED] = &nfc_pn544pn65n_firm_cfg, }, },
#endif
	{ .gpio = 47, .settings = { [GPIOMUX_SUSPENDED] = &i2c_suspended_config, [GPIOMUX_ACTIVE] = &i2c_active, }, },
	{ .gpio = 48, .settings = { [GPIOMUX_SUSPENDED] = &i2c_suspended_config, [GPIOMUX_ACTIVE] = &i2c_active, }, },
#ifdef CONFIG_LGE_FUEL_GAUGE
	{ .gpio = 51, .settings = { [GPIOMUX_SUSPENDED] = &gsbi5, }, },
	{ .gpio = 52, .settings = { [GPIOMUX_SUSPENDED] = &gsbi5, }, },
#endif
	{ .gpio = 59, .settings = { [GPIOMUX_SUSPENDED] = &i2c_suspended_config, [GPIOMUX_ACTIVE] = &i2c_active_gsbi7, }, },
	{ .gpio = 60, .settings = { [GPIOMUX_SUSPENDED] = &i2c_suspended_config, [GPIOMUX_ACTIVE] = &i2c_active_gsbi7, }, },
	{ .gpio = 64, .settings = { [GPIOMUX_SUSPENDED] = &gsbi8, }, },
	{ .gpio = 65, .settings = { [GPIOMUX_SUSPENDED] = &gsbi8, }, },
#ifdef CONFIG_LGE_SENSOR_ACCELEROMETER
	{ .gpio = 72, .settings = { [GPIOMUX_SUSPENDED] = &gsbi10, }, },
	{ .gpio = 73, .settings = { [GPIOMUX_SUSPENDED] = &gsbi10, }, },
#endif
	// 2011-05-05 ella.hwang added for 1Seg Driver SPI porting - [Ends]
	{ .gpio = 115, .settings = { [GPIOMUX_SUSPENDED] = &gsbi12, }, },
	{ .gpio = 116, .settings = { [GPIOMUX_SUSPENDED] = &gsbi12, }, },
#ifdef CONFIG_LGE_FELICA
	/* FELICA PON */
	{ .gpio = 107, .settings = { [GPIOMUX_SUSPENDED] = &felica_pon_cfg, }, },
	/* FELICA RFS */
	{ .gpio = 128, .settings = { [GPIOMUX_SUSPENDED] = &felica_pon_cfg, }, },
	/* FELICA INT */
	{ .gpio = 125, .settings = { [GPIOMUX_SUSPENDED] = &felica_pon_cfg, }, },
	/* FELICA CEN */
	{ .gpio = 123, .settings = { [GPIOMUX_SUSPENDED] = &felica_lockcont_cfg, }, },
#endif
	// 2011-05-05 ella.hwang added for 1Seg Driver SPI porting - [Begins]
#if defined(CONFIG_LGE_BROADCAST_DCM) || defined(CONFIG_LGE_BROADCAST_TDMB)
	/* DTV_RESET */
	{ .gpio = 101, .settings = { [GPIOMUX_SUSPENDED] = &bcast_ctrl_pin, }, },
	/* DTV EN */
	{ .gpio = 102, .settings = { [GPIOMUX_SUSPENDED] = &bcast_ctrl_pin, }, },
	/* DTV SPI CLOCK */
	{ .gpio = 103, .settings = { [GPIOMUX_SUSPENDED] = &gsbi11, }, },
	/* DTV SPI CS */
	{ .gpio = 104, .settings = { [GPIOMUX_SUSPENDED] = &gsbi11, }, },
	/* DTV SPI MISO */
	{ .gpio = 105, .settings = { [GPIOMUX_SUSPENDED] = &gsbi11, }, },
	/* DTV SPI MOSI */
	{ .gpio = 106, .settings = { [GPIOMUX_SUSPENDED] = &gsbi11, }, },
#if defined(CONFIG_LGE_BROADCAST_TDMB)
	/* DMB_INT_N */
	{ .gpio = 107, .settings = { [GPIOMUX_SUSPENDED] = &DMB_INT, }, },
#endif
#endif
};

#if defined(CONFIG_LGE_NFC_PN544_C2)
static struct msm_gpiomux_config msm8x60_ebi2_configs[] __initdata = {
	{ .gpio = 123, .settings = { [GPIOMUX_SUSPENDED] = &nfc_pn544pn65n_irq_cfg, }, },
	{ .gpio = 130, .settings = { [GPIOMUX_SUSPENDED] = &nfc_pn544pn65n_ven_cfg, }, },
};
#endif

#if defined(CONFIG_USB_PEHCI_HCD) || defined(CONFIG_USB_PEHCI_HCD_MODULE)
#endif

static struct msm_gpiomux_config msm8x60_uart_configs[] __initdata = {
	/* UARTDM_TX */
	{ .gpio = 53, .settings = { [GPIOMUX_ACTIVE] = &uart1dm_active, [GPIOMUX_SUSPENDED] = &uart1dm_suspended, }, },
	/* UARTDM_RX */
	{ .gpio = 54, .settings = { [GPIOMUX_ACTIVE] = &uart1dm_active, [GPIOMUX_SUSPENDED] = &uart1dm_suspended, }, },
	/* UARTDM_CTS */
	{ .gpio = 55, .settings = { [GPIOMUX_ACTIVE] = &uart1dm_active, [GPIOMUX_SUSPENDED] = &uart1dm_suspended, }, },
	/* UARTDM_RFR */
	{ .gpio = 56, .settings = { [GPIOMUX_ACTIVE] = &uart1dm_active, [GPIOMUX_SUSPENDED] = &uart1dm_suspended, }, },
	{ .gpio = 115, .settings = { [GPIOMUX_SUSPENDED] = &console_uart, }, },
	{ .gpio = 116, .settings = { [GPIOMUX_SUSPENDED] = &console_uart, }, },
#if !defined(CONFIG_USB_PEHCI_HCD) && !defined(CONFIG_USB_PEHCI_HCD_MODULE)
	/* USB ISP1763 may also use 117 GPIO */
	{ .gpio = 117, .settings = { [GPIOMUX_SUSPENDED] = &console_uart, }, },
#endif
	{ .gpio = 118, .settings = { [GPIOMUX_SUSPENDED] = &console_uart, }, },
};

#ifdef CONFIG_MSM_GSBI9_UART
static struct msm_gpiomux_config msm8x60_charm_uart_configs[] __initdata = {
	/* UART9DM RX */
	{ .gpio = 66, .settings = { [GPIOMUX_ACTIVE] = &uart9dm_active, [GPIOMUX_SUSPENDED] = &gsbi9, }, },
	/* UART9DM TX */
	{ .gpio = 67, .settings = { [GPIOMUX_ACTIVE] = &uart9dm_active, [GPIOMUX_SUSPENDED] = &gsbi9, }, },
};
#endif

#ifdef CONFIG_LGE_IRDA
static struct msm_gpiomux_config msm8x60_irda_uart_configs[] __initdata = {
	{ .gpio = 62, .settings = { [GPIOMUX_ACTIVE] = &gsbi8_irda_active, [GPIOMUX_SUSPENDED] = &gsbi8_irda, }, },
	{ .gpio = 63, .settings = { [GPIOMUX_ACTIVE] = &gsbi8_irda_active, [GPIOMUX_SUSPENDED] = &gsbi8_irda, }, },
	{ .gpio = 169, .settings = {
		[GPIOMUX_SUSPENDED] = &irda_pwdn_suspended,
		/* [GPIOMUX_ACTIVE] = &irda_pwdn_active, */
	}, },
};
#endif

#ifdef CONFIG_LGE_FELICA
static struct msm_gpiomux_config msm8x60_felica_uart_configs[] __initdata = {
	/* UART10DM RX */
	{ .gpio = 70, .settings = { [GPIOMUX_ACTIVE] = &uart10dm_active, [GPIOMUX_SUSPENDED] = &gsbi10, }, },
	/* UART10DM TX */
	{ .gpio = 71, .settings = { [GPIOMUX_ACTIVE] = &uart10dm_active, [GPIOMUX_SUSPENDED] = &gsbi10, }, },
};
#endif

static struct msm_gpiomux_config msm8x60_aux_pcm_configs[] __initdata = {
	{ .gpio = 111, .settings = { [GPIOMUX_ACTIVE] = &aux_pcm_active_config, [GPIOMUX_SUSPENDED] = &aux_pcm_suspend_config, }, },
	{ .gpio = 112, .settings = { [GPIOMUX_ACTIVE] = &aux_pcm_active_config, [GPIOMUX_SUSPENDED] = &aux_pcm_suspend_config, }, },
	{ .gpio = 113, .settings = { [GPIOMUX_ACTIVE] = &aux_pcm_active_config, [GPIOMUX_SUSPENDED] = &aux_pcm_suspend_config, }, },
	{ .gpio = 114, .settings = { [GPIOMUX_ACTIVE] = &aux_pcm_active_config, [GPIOMUX_SUSPENDED] = &aux_pcm_suspend_config, }, },
};

static struct msm_gpiomux_config msm8x60_sdc_configs[] __initdata = {
	/* SDCC1 data[0] */ { .gpio = 159, .settings = { [GPIOMUX_ACTIVE] = &sdcc1_dat_0_3_cmd_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc1_suspend_config, }, },
	/* SDCC1 data[1] */ { .gpio = 160, .settings = { [GPIOMUX_ACTIVE] = &sdcc1_dat_0_3_cmd_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc1_suspend_config, }, },
	/* SDCC1 data[2] */ { .gpio = 161, .settings = { [GPIOMUX_ACTIVE] = &sdcc1_dat_0_3_cmd_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc1_suspend_config, }, },
	/* SDCC1 data[3] */ { .gpio = 162, .settings = { [GPIOMUX_ACTIVE] = &sdcc1_dat_0_3_cmd_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc1_suspend_config, }, },
	/* SDCC1 data[4] */ { .gpio = 163, .settings = { [GPIOMUX_ACTIVE] = &sdcc1_dat_4_7_cmd_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc1_suspend_config, }, },
	/* SDCC1 data[5] */ { .gpio = 164, .settings = { [GPIOMUX_ACTIVE] = &sdcc1_dat_4_7_cmd_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc1_suspend_config, }, },
	/* SDCC1 data[6] */ { .gpio = 165, .settings = { [GPIOMUX_ACTIVE] = &sdcc1_dat_4_7_cmd_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc1_suspend_config, }, },
	/* SDCC1 data[7] */ { .gpio = 166, .settings = { [GPIOMUX_ACTIVE] = &sdcc1_dat_4_7_cmd_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc1_suspend_config, }, },
	/* SDCC1 CLK */ { .gpio = 167, .settings = { [GPIOMUX_ACTIVE] = &sdcc1_clk_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc1_suspend_config, }, },
	/* SDCC1 CMD */ { .gpio = 168, .settings = { [GPIOMUX_ACTIVE] = &sdcc1_dat_0_3_cmd_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc1_suspend_config, }, },
};

static struct msm_gpiomux_config msm8x60_charm_sdc_configs[] __initdata = {
	/* SDCC5 cmd */ { .gpio = 95, .settings = { [GPIOMUX_ACTIVE] = &sdcc5_dat_0_3_cmd_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc5_suspend_config, }, },
	/* SDCC5 data[3] */ { .gpio = 96, .settings = { [GPIOMUX_ACTIVE] = &sdcc5_dat_0_3_cmd_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc5_suspend_config, }, },
	/* SDCC5 clk */ { .gpio = 97, .settings = { [GPIOMUX_ACTIVE] = &sdcc5_clk_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc5_suspend_config, }, },
	/* SDCC5 data[2] */ { .gpio = 98, .settings = { [GPIOMUX_ACTIVE] = &sdcc5_dat_0_3_cmd_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc5_suspend_config, }, },
	/* SDCC5 data[1] */ { .gpio = 99, .settings = { [GPIOMUX_ACTIVE] = &sdcc5_dat_0_3_cmd_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc5_suspend_config, }, },
	/* SDCC5 data[0] */ { .gpio = 100, .settings = { [GPIOMUX_ACTIVE] = &sdcc5_dat_0_3_cmd_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc5_suspend_config, }, },
	/* MDM2AP_SYNC */ { .gpio = 129, .settings = { [GPIOMUX_ACTIVE] = &mdm2ap_sync_active_cfg, [GPIOMUX_SUSPENDED] = &mdm2ap_sync_suspend_cfg, }, },
	/* MDM2AP_VDDMIN */ { .gpio = 140, .settings = { [GPIOMUX_ACTIVE] = &mdm2ap_vddmin_active_cfg, [GPIOMUX_SUSPENDED] = &mdm2ap_vddmin_suspend_cfg, }, },
	/* SDCC2 data[0] */ { .gpio = 143, .settings = { [GPIOMUX_ACTIVE] = &sdcc2_dat_0_3_cmd_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc2_suspend_config, }, },
	/* SDCC2 data[1] */ { .gpio = 144, .settings = { [GPIOMUX_ACTIVE] = &sdcc2_dat_0_3_cmd_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc2_suspend_config, }, },
	/* SDCC2 data[2] */ { .gpio = 145, .settings = { [GPIOMUX_ACTIVE] = &sdcc2_dat_0_3_cmd_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc2_suspend_config, }, },
	/* SDCC2 data[3] */ { .gpio = 146, .settings = { [GPIOMUX_ACTIVE] = &sdcc2_dat_0_3_cmd_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc2_suspend_config, }, },
	/* SDCC2 data[4] */ { .gpio = 147, .settings = { [GPIOMUX_ACTIVE] = &sdcc2_dat_4_7_cmd_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc2_suspend_config, }, },
	/* SDCC2 data[5] */ { .gpio = 148, .settings = { [GPIOMUX_ACTIVE] = &sdcc2_dat_4_7_cmd_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc2_suspend_config, }, },
	/* SDCC2 data[6] */ { .gpio = 149, .settings = { [GPIOMUX_ACTIVE] = &sdcc2_dat_4_7_cmd_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc2_suspend_config, }, },
	/* SDCC2 data[7] */ { .gpio = 150, .settings = { [GPIOMUX_ACTIVE] = &sdcc2_dat_4_7_cmd_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc2_suspend_config, }, },
	/* SDCC2 CMD */ { .gpio = 151, .settings = { [GPIOMUX_ACTIVE] = &sdcc2_dat_0_3_cmd_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc2_suspend_config, }, },
	/* SDCC2 CLK */ { .gpio = 152, .settings = { [GPIOMUX_ACTIVE] = &sdcc2_clk_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc2_suspend_config, }, },
};

static struct msm_gpiomux_config msm8x60_snd_configs[] __initdata = {
	{ .gpio = 108, .settings = { [GPIOMUX_ACTIVE] = &msm_snddev_active_config, [GPIOMUX_SUSPENDED] = &msm_snddev_suspend_config, }, },
	{ .gpio = 109, .settings = { [GPIOMUX_ACTIVE] = &msm_snddev_active_config, [GPIOMUX_SUSPENDED] = &msm_snddev_suspend_config, }, },
};

static struct msm_gpiomux_config msm8x60_mdp_vsync_configs[] __initdata = {
	{ .gpio = 28, .settings = { [GPIOMUX_ACTIVE] = &mdp_vsync_active_cfg, [GPIOMUX_SUSPENDED] = &mdp_vsync_suspend_cfg, }, },
};

static struct msm_gpiomux_config msm8x60_hdmi_configs[] __initdata = {
	{ .gpio = 169, .settings = { [GPIOMUX_ACTIVE] = &hdmi_active_1_cfg, [GPIOMUX_SUSPENDED] = &hdmi_suspend_cfg, }, },
	{ .gpio = 170, .settings = { [GPIOMUX_ACTIVE] = &hdmi_active_2_cfg, [GPIOMUX_SUSPENDED] = &hdmi_suspend_cfg, }, },
	{ .gpio = 171, .settings = { [GPIOMUX_ACTIVE] = &hdmi_active_2_cfg, [GPIOMUX_SUSPENDED] = &hdmi_suspend_cfg, }, },
	{ .gpio = 172, .settings = { [GPIOMUX_ACTIVE] = &hdmi_active_3_cfg, [GPIOMUX_SUSPENDED] = &hdmi_suspend_cfg, }, },
};

/* Because PMIC drivers do not use gpio-management routines and PMIC
 * gpios must never sleep, a "good enough" config is obtained by placing
 * the active config in the 'suspended' slot and leaving the active
 * config invalid: the suspended config will be installed at boot
 * and never replaced.
 */
static struct msm_gpiomux_config msm8x60_pmic_configs[] __initdata = {
	{ .gpio = 88, .settings = { [GPIOMUX_SUSPENDED] = &pmic_suspended_cfg, }, },
	{ .gpio = 91, .settings = { [GPIOMUX_SUSPENDED] = &pmic_suspended_cfg, }, },
};

static struct msm_gpiomux_config msm8x60_common_configs[] __initdata = {
	/* MDM2AP_STATUS */
	{ .gpio = 77, .settings = { [GPIOMUX_ACTIVE] = &mdm2ap_status_active_cfg, [GPIOMUX_SUSPENDED] = &mdm2ap_status_suspend_cfg, }, },
	/* PS_HOLD */
	{ .gpio = 92, .settings = { [GPIOMUX_SUSPENDED] = &ps_hold, }, },
};

static struct msm_gpiomux_config msm8x60_charm_configs[] __initdata = {
	/* AP2MDM_WAKEUP */
	{ .gpio = 135, .settings = { [GPIOMUX_SUSPENDED] = &ap2mdm_cfg, } },
	/* MDM2AP_VFR */
	{ .gpio = 94, .settings = { [GPIOMUX_ACTIVE] = &mdm2ap_vfr_active_cfg, [GPIOMUX_SUSPENDED] = &mdm2ap_vfr_suspend_cfg, } },
	/* AP2MDM_STATUS */
	{ .gpio = 136, .settings = { [GPIOMUX_SUSPENDED] = &ap2mdm_cfg, } },
	/* MDM2AP_STATUS */
	{ .gpio = 134, .settings = { [GPIOMUX_SUSPENDED] = &mdm2ap_status_cfg, } },
	/* MDM2AP_WAKEUP */
	{ .gpio = 40, .settings = { [GPIOMUX_SUSPENDED] = &ap2mdm_cfg, } },
	/* MDM2AP_ERRFATAL */
	{ .gpio = 133, .settings = { [GPIOMUX_SUSPENDED] = &mdm2ap_errfatal_cfg, } },
	/* AP2MDM_ERRFATAL */
	{ .gpio = 93, .settings = { [GPIOMUX_SUSPENDED] = &ap2mdm_cfg, } },
	/* AP2MDM_KPDPWR_N */
	{ .gpio = 132, .settings = { [GPIOMUX_SUSPENDED] = &ap2mdm_kpdpwr_n_cfg, } },
	/* AP2MDM_PMIC_RESET_N */
	{ .gpio = 131, .settings = { [GPIOMUX_SUSPENDED] = &ap2mdm_kpdpwr_n_cfg, } }
};

#ifdef CONFIG_LGE_AUDIO
static struct msm_gpiomux_config msm8x60_audio_configs[] __initdata = {
#if 0 /* error CAMCORDER_MIC_EN180 greater than max:173 */
	/* CAMCORDER_MIC_EN */
	{ .gpio = GPIO_CAMCORDER_MIC_EN, .settings = { [GPIOMUX_SUSPENDED] = &camcorder_mic_en_cfg, } },
#endif
	/* MOTOR_EN */
	{ .gpio = GPIO_LIN_MOTOR_EN, .settings = { [GPIOMUX_SUSPENDED] = &motor_en_cfg, } },
	/* LIN_MOTOR_PWM */
	{ .gpio = GPIO_LIN_MOTOR_PWM, .settings = { [GPIOMUX_SUSPENDED] = &lin_motor_pwm_cfg, } },
#ifdef CONFIG_LGE_HEADSET_DETECTION_FSA8008
	/* EAR_MIC_EN */
	{ .gpio = GPIO_EAR_MIC_EN, .settings = { [GPIOMUX_SUSPENDED] = &ear_mic_en_cfg, } },
	/* EARPOL_DETECT */
	{ .gpio = GPIO_EARPOL_DETECT, .settings = { [GPIOMUX_SUSPENDED] = &earpole_detect_cfg, } },
#endif
};
#endif

#if 0
struct msm_gpiomux_configs msm8x60_charm_gpiomux_cfgs[] __initdata = {
	{msm8x60_gsbi_configs, ARRAY_SIZE(msm8x60_gsbi_configs)},
	{msm8x60_uart_configs, ARRAY_SIZE(msm8x60_uart_configs)},
#ifdef CONFIG_MSM_GSBI9_UART
	{msm8x60_charm_uart_configs, ARRAY_SIZE(msm8x60_charm_uart_configs)},
#endif
#ifdef CONFIG_LGE_FELICA
	{msm8x60_felica_uart_configs, ARRAY_SIZE(msm8x60_felica_uart_configs)},
#endif
	{msm8x60_ts_configs, ARRAY_SIZE(msm8x60_ts_configs)},
	{msm8x60_aux_pcm_configs, ARRAY_SIZE(msm8x60_aux_pcm_configs)},
	{msm8x60_sdc_configs, ARRAY_SIZE(msm8x60_sdc_configs)},
	{msm8x60_snd_configs, ARRAY_SIZE(msm8x60_snd_configs)},
	{msm8x60_mi2s_configs, ARRAY_SIZE(msm8x60_mi2s_configs)},
	{msm8x60_lcdc_configs, ARRAY_SIZE(msm8x60_lcdc_configs)},
	{msm8x60_mdp_vsync_configs, ARRAY_SIZE(msm8x60_mdp_vsync_configs)},
	{msm8x60_hdmi_configs, ARRAY_SIZE(msm8x60_hdmi_configs)},
	{msm8x60_pmic_configs, ARRAY_SIZE(msm8x60_pmic_configs)},
	{msm8x60_common_configs, ARRAY_SIZE(msm8x60_common_configs)},
	{msm8x60_cam_configs, ARRAY_SIZE(msm8x60_cam_configs)},
	{msm8x60_tmg200_configs, ARRAY_SIZE(msm8x60_tmg200_configs)},
	{msm8x60_charm_sdc_configs, ARRAY_SIZE(msm8x60_charm_sdc_configs)},
	{msm8x60_charm_configs, ARRAY_SIZE(msm8x60_charm_configs)},
	{NULL, 0},
};
#endif

struct msm_gpiomux_configs msm8x60_lge_325_gpiomux_cfgs[] __initdata = {
#ifdef CONFIG_LGE_PM_CURRENT_CONSUMPTION_FIX
	{msm8x60_current_configs, ARRAY_SIZE(msm8x60_current_configs)}, // for rock_bottom
#endif
#ifdef CONFIG_LGE_MHL_SII9244
	{msm8x60_mhl_configs, ARRAY_SIZE(msm8x60_mhl_configs)},
#endif
	{msm8x60_gsbi_configs, ARRAY_SIZE(msm8x60_gsbi_configs)},
	{msm8x60_uart_configs, ARRAY_SIZE(msm8x60_uart_configs)},
#ifdef CONFIG_MSM_GSBI9_UART
	{msm8x60_charm_uart_configs, ARRAY_SIZE(msm8x60_charm_uart_configs)},
#endif
#ifdef CONFIG_LGE_IRDA
	{msm8x60_irda_uart_configs, ARRAY_SIZE(msm8x60_irda_uart_configs)},
#endif
	{msm8x60_aux_pcm_configs, ARRAY_SIZE(msm8x60_aux_pcm_configs)},
	{msm8x60_sdc_configs, ARRAY_SIZE(msm8x60_sdc_configs)},
	{msm8x60_snd_configs, ARRAY_SIZE(msm8x60_snd_configs)},
	{msm8x60_mdp_vsync_configs, ARRAY_SIZE(msm8x60_mdp_vsync_configs)},
	{msm8x60_hdmi_configs, ARRAY_SIZE(msm8x60_hdmi_configs)},
	{msm8x60_pmic_configs, ARRAY_SIZE(msm8x60_pmic_configs)},
	{msm8x60_common_configs, ARRAY_SIZE(msm8x60_common_configs)},
	{msm8x60_charm_sdc_configs, ARRAY_SIZE(msm8x60_charm_sdc_configs)},
	{msm8x60_charm_configs, ARRAY_SIZE(msm8x60_charm_configs)},
#ifdef CONFIG_LGE_AUDIO
	{msm8x60_audio_configs, ARRAY_SIZE(msm8x60_audio_configs)},
#endif
#if defined(CONFIG_LGE_FELICA)
	{msm8x60_felica_uart_configs, ARRAY_SIZE(msm8x60_felica_uart_configs)},
#endif
#if defined(CONFIG_LGE_NFC_PN544_C2)
	{msm8x60_ebi2_configs, ARRAY_SIZE(msm8x60_ebi2_configs)},
#endif
	{NULL, 0},
};

void __init msm8x60_init_gpiomux(struct msm_gpiomux_configs *cfgs)
{
	int rc;

	rc = msm_gpiomux_init(NR_GPIO_IRQS);
	if (rc) {
		pr_err("%s failure: %d\n", __func__, rc);
		return;
	}

	while (cfgs->cfg) {
		msm_gpiomux_install(cfgs->cfg, cfgs->ncfg);
		++cfgs;
	}
}
repo: lyfkevin/Wind_iproj_JB_kernel_old | path: lge/lge_board/batman/gpiomux_lge_325.c | language: C | license: gpl-2.0 | size: 33,789
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */

#include "test_sve_acle.h"

/*
** qdmlalt_lane_0_s64_tied1:
**	sqdmlalt	z0\.d, z4\.s, z5\.s\[0\]
**	ret
*/
TEST_DUAL_Z (qdmlalt_lane_0_s64_tied1, svint64_t, svint32_t,
	     z0 = svqdmlalt_lane_s64 (z0, z4, z5, 0),
	     z0 = svqdmlalt_lane (z0, z4, z5, 0))

/*
** qdmlalt_lane_0_s64_tied2:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z4
**	sqdmlalt	z0\.d, \1\.s, z1\.s\[0\]
**	ret
*/
TEST_DUAL_Z_REV (qdmlalt_lane_0_s64_tied2, svint64_t, svint32_t,
		 z0_res = svqdmlalt_lane_s64 (z4, z0, z1, 0),
		 z0_res = svqdmlalt_lane (z4, z0, z1, 0))

/*
** qdmlalt_lane_0_s64_tied3:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z4
**	sqdmlalt	z0\.d, z1\.s, \1\.s\[0\]
**	ret
*/
TEST_DUAL_Z_REV (qdmlalt_lane_0_s64_tied3, svint64_t, svint32_t,
		 z0_res = svqdmlalt_lane_s64 (z4, z1, z0, 0),
		 z0_res = svqdmlalt_lane (z4, z1, z0, 0))

/*
** qdmlalt_lane_0_s64_untied:
**	movprfx	z0, z1
**	sqdmlalt	z0\.d, z4\.s, z5\.s\[0\]
**	ret
*/
TEST_DUAL_Z (qdmlalt_lane_0_s64_untied, svint64_t, svint32_t,
	     z0 = svqdmlalt_lane_s64 (z1, z4, z5, 0),
	     z0 = svqdmlalt_lane (z1, z4, z5, 0))

/*
** qdmlalt_lane_z15_s64:
**	str	d15, \[sp, -16\]!
**	sqdmlalt	z0\.d, z1\.s, z15\.s\[1\]
**	ldr	d15, \[sp\], 16
**	ret
*/
TEST_DUAL_LANE_REG (qdmlalt_lane_z15_s64, svint64_t, svint32_t, z15,
		    z0 = svqdmlalt_lane_s64 (z0, z1, z15, 1),
		    z0 = svqdmlalt_lane (z0, z1, z15, 1))

/*
** qdmlalt_lane_z16_s64:
**	mov	(z[0-9]|z1[0-5])\.d, z16\.d
**	sqdmlalt	z0\.d, z1\.s, \1\.s\[1\]
**	ret
*/
TEST_DUAL_LANE_REG (qdmlalt_lane_z16_s64, svint64_t, svint32_t, z16,
		    z0 = svqdmlalt_lane_s64 (z0, z1, z16, 1),
		    z0 = svqdmlalt_lane (z0, z1, z16, 1))
repo: Gurgel100/gcc | path: gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/qdmlalt_lane_s64.c | language: C | license: gpl-2.0 | size: 1,699
/* RxRPC point-to-point transport session management
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

static void rxrpc_transport_reaper(struct work_struct *work);

static LIST_HEAD(rxrpc_transports);
static DEFINE_RWLOCK(rxrpc_transport_lock);
static unsigned long rxrpc_transport_timeout = 3600 * 24;
static DECLARE_DELAYED_WORK(rxrpc_transport_reap, rxrpc_transport_reaper);

/*
 * allocate a new transport session manager
 */
static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
						     struct rxrpc_peer *peer,
						     gfp_t gfp)
{
	struct rxrpc_transport *trans;

	_enter("");

	trans = kzalloc(sizeof(struct rxrpc_transport), gfp);
	if (trans) {
		trans->local = local;
		trans->peer = peer;
		INIT_LIST_HEAD(&trans->link);
		trans->bundles = RB_ROOT;
		trans->client_conns = RB_ROOT;
		trans->server_conns = RB_ROOT;
		skb_queue_head_init(&trans->error_queue);
		spin_lock_init(&trans->client_lock);
		rwlock_init(&trans->conn_lock);
		atomic_set(&trans->usage, 1);
		trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);

		if (peer->srx.transport.family == AF_INET) {
			switch (peer->srx.transport_type) {
			case SOCK_DGRAM:
				INIT_WORK(&trans->error_handler,
					  rxrpc_UDP_error_handler);
				break;
			default:
				BUG();
				break;
			}
		} else {
			BUG();
		}
	}

	_leave(" = %p", trans);
	return trans;
}

/*
 * obtain a transport session for the nominated endpoints
 */
struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *local,
					    struct rxrpc_peer *peer,
					    gfp_t gfp)
{
	struct rxrpc_transport *trans, *candidate;
	const char *new = "old";
	int usage;

	_enter("{%pI4+%hu},{%pI4+%hu},",
	       &local->srx.transport.sin.sin_addr,
	       ntohs(local->srx.transport.sin.sin_port),
	       &peer->srx.transport.sin.sin_addr,
	       ntohs(peer->srx.transport.sin.sin_port));

	/* search the transport list first */
	read_lock_bh(&rxrpc_transport_lock);
	list_for_each_entry(trans, &rxrpc_transports, link) {
		if (trans->local == local && trans->peer == peer)
			goto found_extant_transport;
	}
	read_unlock_bh(&rxrpc_transport_lock);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_transport(local, peer, gfp);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	write_lock_bh(&rxrpc_transport_lock);

	list_for_each_entry(trans, &rxrpc_transports, link) {
		if (trans->local == local && trans->peer == peer)
			goto found_extant_second;
	}

	/* we can now add the new candidate to the list */
	trans = candidate;
	candidate = NULL;
	usage = atomic_read(&trans->usage);

	rxrpc_get_local(trans->local);
	atomic_inc(&trans->peer->usage);
	list_add_tail(&trans->link, &rxrpc_transports);
	write_unlock_bh(&rxrpc_transport_lock);
	new = "new";

success:
	_net("TRANSPORT %s %d local %d -> peer %d",
	     new, trans->debug_id, trans->local->debug_id,
	     trans->peer->debug_id);

	_leave(" = %p {u=%d}", trans, usage);
	return trans;

	/* we found the transport in the list immediately */
found_extant_transport:
	usage = atomic_inc_return(&trans->usage);
	read_unlock_bh(&rxrpc_transport_lock);
	goto success;

	/* we found the transport on the second time through the list */
found_extant_second:
	usage = atomic_inc_return(&trans->usage);
	write_unlock_bh(&rxrpc_transport_lock);
	kfree(candidate);
	goto success;
}

/*
 * find the transport connecting two endpoints
 */
struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *local,
					     struct rxrpc_peer *peer)
{
	struct rxrpc_transport *trans;

	_enter("{%pI4+%hu},{%pI4+%hu},",
	       &local->srx.transport.sin.sin_addr,
	       ntohs(local->srx.transport.sin.sin_port),
	       &peer->srx.transport.sin.sin_addr,
	       ntohs(peer->srx.transport.sin.sin_port));

	/* search the transport list */
	read_lock_bh(&rxrpc_transport_lock);
	list_for_each_entry(trans, &rxrpc_transports, link) {
		if (trans->local == local && trans->peer == peer)
			goto found_extant_transport;
	}
	read_unlock_bh(&rxrpc_transport_lock);
	_leave(" = NULL");
	return NULL;

found_extant_transport:
	atomic_inc(&trans->usage);
	read_unlock_bh(&rxrpc_transport_lock);
	_leave(" = %p", trans);
	return trans;
}

/*
 * release a transport session
 */
void rxrpc_put_transport(struct rxrpc_transport *trans)
{
	_enter("%p{u=%d}", trans, atomic_read(&trans->usage));

	ASSERTCMP(atomic_read(&trans->usage), >, 0);

	trans->put_time = get_seconds();
	if (unlikely(atomic_dec_and_test(&trans->usage))) {
		_debug("zombie");
		/* let the reaper determine the timeout to avoid a race with
		 * overextending the timeout if the reaper is running at the
		 * same time */
		rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0);
	}
	_leave("");
}

/*
 * clean up a transport session
 */
static void rxrpc_cleanup_transport(struct rxrpc_transport *trans)
{
	_net("DESTROY TRANS %d", trans->debug_id);

	rxrpc_purge_queue(&trans->error_queue);

	rxrpc_put_local(trans->local);
	rxrpc_put_peer(trans->peer);
	kfree(trans);
}

/*
 * reap dead transports that have passed their expiry date
 */
static void rxrpc_transport_reaper(struct work_struct *work)
{
	struct rxrpc_transport *trans, *_p;
	unsigned long now, earliest, reap_time;

	LIST_HEAD(graveyard);

	_enter("");

	now = get_seconds();
	earliest = ULONG_MAX;

	/* extract all the transports that have been dead too long */
	write_lock_bh(&rxrpc_transport_lock);
	list_for_each_entry_safe(trans, _p, &rxrpc_transports, link) {
		_debug("reap TRANS %d { u=%d t=%ld }",
		       trans->debug_id, atomic_read(&trans->usage),
		       (long) now - (long) trans->put_time);

		if (likely(atomic_read(&trans->usage) > 0))
			continue;

		reap_time = trans->put_time + rxrpc_transport_timeout;
		if (reap_time <= now)
			list_move_tail(&trans->link, &graveyard);
		else if (reap_time < earliest)
			earliest = reap_time;
	}
	write_unlock_bh(&rxrpc_transport_lock);

	if (earliest != ULONG_MAX) {
		_debug("reschedule reaper %ld", (long) earliest - now);
		ASSERTCMP(earliest, >, now);
		rxrpc_queue_delayed_work(&rxrpc_transport_reap,
					 (earliest - now) * HZ);
	}

	/* then destroy all those pulled out */
	while (!list_empty(&graveyard)) {
		trans = list_entry(graveyard.next, struct rxrpc_transport,
				   link);
		list_del_init(&trans->link);

		ASSERTCMP(atomic_read(&trans->usage), ==, 0);
		rxrpc_cleanup_transport(trans);
	}

	_leave("");
}

/*
 * preemptively destroy all the transport session records rather than waiting
 * for them to time out
 */
void __exit rxrpc_destroy_all_transports(void)
{
	_enter("");

	rxrpc_transport_timeout = 0;
	cancel_delayed_work(&rxrpc_transport_reap);
	rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0);

	_leave("");
}
repo: ircncl/linux-grsec-incremental | path: net/rxrpc/ar-transport.c | language: C | license: gpl-2.0 | size: 7,246
/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * HTC: elite machine driver which defines board-specific data
 * Copy from sound/soc/msm/msm8960.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/mfd/pm8xxx/pm8921.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/pcm.h>
#include <sound/jack.h>
#include <asm/mach-types.h>
#include <mach/socinfo.h>
#include <linux/mfd/wcd9xxx/core.h>
#include "../../../sound/soc/codecs/wcd9310.h"
#include "../sound/soc/msm/msm-pcm-routing.h"
#include "board-elite.h"
#include <mach/cable_detect.h>
#include <mach/board.h>

#define MSM_SPK_ON	1
#define MSM_SPK_OFF	0

#define MSM_SLIM_0_RX_MAX_CHANNELS	2
#define MSM_SLIM_0_TX_MAX_CHANNELS	4

#define SAMPLE_RATE_8KHZ	8000
#define SAMPLE_RATE_16KHZ	16000

#define BOTTOM_SPK_AMP_POS	0x1
#define BOTTOM_SPK_AMP_NEG	0x2
#define TOP_SPK_AMP_POS		0x4
#define TOP_SPK_AMP_NEG		0x8
#define DOCK_SPK_AMP_POS	0x10
#define DOCK_SPK_AMP_NEG	0x20

#define GPIO_AUX_PCM_DOUT	63
#define GPIO_AUX_PCM_DIN	64
#define GPIO_AUX_PCM_SYNC	65
#define GPIO_AUX_PCM_CLK	66

#define TABLA_EXT_CLK_RATE	12288000

#define ELITE_AUD_STEREO_REC	(PM8921_GPIO_PM_TO_SYS(3))
#define top_spk_pamp_gpio	(PM8921_GPIO_PM_TO_SYS(19))
#define bottom_spk_pamp_gpio	(PM8921_GPIO_PM_TO_SYS(18))
#define DOCK_SPK_PAMP_GPIO	(PM8921_GPIO_PM_TO_SYS(1))
#define USB_ID_ADC_GPIO		(PM8921_GPIO_PM_TO_SYS(4))

static int msm_spk_control;
static int msm_ext_bottom_spk_pamp;
static int msm_ext_top_spk_pamp;
static int msm_ext_dock_spk_pamp;
static int msm_slim_0_rx_ch = 1;
static int msm_slim_0_tx_ch = 1;

static int msm_btsco_rate = SAMPLE_RATE_8KHZ;
static int msm_btsco_ch = 1;
static int msm_auxpcm_rate = SAMPLE_RATE_8KHZ;
static int elite_stereo_control;

static struct clk *codec_clk;
static int clk_users;

extern void msm_release_audio_dock_lock(void);

static struct snd_soc_jack hs_jack;
static struct snd_soc_jack button_jack;

static int msm_enable_codec_ext_clk(struct snd_soc_codec *codec, int enable,
				    bool dapm);
extern void release_audio_dock_lock(void);
static struct mutex cdc_mclk_mutex;
static void msm_ext_spk_power_amp_off(u32);

static void audio_dock_notifier_func(enum usb_connect_type online)
{
	if (cable_get_accessory_type() != DOCK_STATE_AUDIO_DOCK) {
		pr_debug("accessory is not AUDIO_DOCK\n");
		return;
	}

	switch (online) {
	case CONNECT_TYPE_NONE:
		pr_debug("%s, VBUS is removed\n", __func__);
		msm_ext_spk_power_amp_off(DOCK_SPK_AMP_POS | DOCK_SPK_AMP_NEG);
		release_audio_dock_lock();
		break;
	default:
		break;
	}
	return;
}

static struct mutex audio_notifier_lock;

static struct t_cable_status_notifier audio_dock_notifier = {
	.name = "elite_audio_8960",
	.func = audio_dock_notifier_func,
};

static void msm_enable_ext_spk_amp_gpio(u32 spk_amp_gpio)
{
	int ret = 0;

	struct pm_gpio param = {
		.direction = PM_GPIO_DIR_OUT,
		.output_buffer = PM_GPIO_OUT_BUF_CMOS,
		.output_value = 1,
		.pull = PM_GPIO_PULL_NO,
		.vin_sel = PM_GPIO_VIN_S4,
		.out_strength = PM_GPIO_STRENGTH_MED,
		.function = PM_GPIO_FUNC_NORMAL,
	};

	if (spk_amp_gpio == bottom_spk_pamp_gpio) {
		ret = gpio_request(bottom_spk_pamp_gpio, "BOTTOM_SPK_AMP");
		if (ret) {
			pr_err("%s: Error requesting BOTTOM SPK AMP GPIO %u\n",
			       __func__, bottom_spk_pamp_gpio);
			return;
		}
		ret = pm8xxx_gpio_config(bottom_spk_pamp_gpio, &param);
		if (ret)
			pr_err("%s: Failed to configure Bottom Spk Ampl gpio %u\n",
			       __func__, bottom_spk_pamp_gpio);
		else {
			pr_debug("%s: enable Bottom spkr amp gpio\n", __func__);
			gpio_direction_output(bottom_spk_pamp_gpio, 1);
		}
	} else if (spk_amp_gpio == top_spk_pamp_gpio) {
		ret = gpio_request(top_spk_pamp_gpio, "TOP_SPK_AMP");
		if (ret) {
			pr_err("%s: Error requesting GPIO %d\n", __func__,
			       top_spk_pamp_gpio);
			return;
		}
		ret = pm8xxx_gpio_config(top_spk_pamp_gpio, &param);
		if (ret)
			pr_err("%s: Failed to configure Top Spk Ampl gpio %u\n",
			       __func__, top_spk_pamp_gpio);
		else {
			pr_debug("%s: enable Top spkr amp gpio\n", __func__);
			gpio_direction_output(top_spk_pamp_gpio, 1);
		}
	} else if (spk_amp_gpio == DOCK_SPK_PAMP_GPIO) {
		ret = gpio_request(DOCK_SPK_PAMP_GPIO, "DOCK_SPK_AMP");
		if (ret) {
			pr_err("%s: Error requesting GPIO %d\n", __func__,
			       DOCK_SPK_PAMP_GPIO);
			return;
		}
		ret = pm8xxx_gpio_config(DOCK_SPK_PAMP_GPIO, &param);
		if (ret)
			pr_err("%s: Failed to configure Dock Spk Ampl gpio %u\n",
			       __func__, DOCK_SPK_PAMP_GPIO);
		else {
			pr_debug("%s: enable dock amp gpio\n", __func__);
			gpio_direction_output(DOCK_SPK_PAMP_GPIO, 1);
		}
		ret = gpio_request(USB_ID_ADC_GPIO, "USB_ID_ADC");
		if (ret) {
			pr_err("%s: Error requesting USB_ID_ADC PMIC GPIO %u\n",
			       __func__, USB_ID_ADC_GPIO);
			return;
		}
		ret = pm8xxx_gpio_config(USB_ID_ADC_GPIO, &param);
		if (ret)
			pr_err("%s: Failed to configure USB_ID_ADC PMIC gpio %u\n",
			       __func__, USB_ID_ADC_GPIO);
	} else {
		pr_err("%s: ERROR : Invalid External Speaker Ampl GPIO. gpio = %u\n",
		       __func__, spk_amp_gpio);
		return;
	}
}

static void msm_ext_spk_power_amp_on(u32 spk)
{
	if (spk & (BOTTOM_SPK_AMP_POS | BOTTOM_SPK_AMP_NEG)) {
		if ((msm_ext_bottom_spk_pamp & BOTTOM_SPK_AMP_POS) &&
		    (msm_ext_bottom_spk_pamp & BOTTOM_SPK_AMP_NEG)) {
			pr_debug("%s() External Bottom Speaker Ampl already turned on. spk = 0x%08x\n",
				 __func__, spk);
			return;
		}

		msm_ext_bottom_spk_pamp |= spk;

		if ((msm_ext_bottom_spk_pamp & BOTTOM_SPK_AMP_POS) &&
		    (msm_ext_bottom_spk_pamp & BOTTOM_SPK_AMP_NEG)) {
			msm_enable_ext_spk_amp_gpio(bottom_spk_pamp_gpio);
			pr_debug("%s: sleeping 4 ms after turning on external Bottom Speaker Ampl\n",
				 __func__);
			usleep_range(4000, 4000);
		}
	} else if (spk & (TOP_SPK_AMP_POS | TOP_SPK_AMP_NEG)) {
		if ((msm_ext_top_spk_pamp & TOP_SPK_AMP_POS) &&
		    (msm_ext_top_spk_pamp & TOP_SPK_AMP_NEG)) {
			pr_debug("%s() External Top Speaker Ampl already turned on. spk = 0x%08x\n",
				 __func__, spk);
			return;
		}

		msm_ext_top_spk_pamp |= spk;

		if ((msm_ext_top_spk_pamp & TOP_SPK_AMP_POS) &&
		    (msm_ext_top_spk_pamp & TOP_SPK_AMP_NEG)) {
			msm_enable_ext_spk_amp_gpio(top_spk_pamp_gpio);
			pr_debug("%s: sleeping 4 ms after turning on external HAC Ampl\n",
				 __func__);
			usleep_range(4000, 4000);
		}
	} else if (spk & (DOCK_SPK_AMP_POS | DOCK_SPK_AMP_NEG)) {
		mutex_lock(&audio_notifier_lock);
		if ((msm_ext_dock_spk_pamp & DOCK_SPK_AMP_POS) &&
		    (msm_ext_dock_spk_pamp & DOCK_SPK_AMP_NEG)) {
			pr_debug("%s() External Dock Speaker Ampl already turned on. spk = 0x%08x\n",
				 __func__, spk);
			/* don't leak the lock on the early return */
			mutex_unlock(&audio_notifier_lock);
			return;
		}

		msm_ext_dock_spk_pamp |= spk;

		if ((msm_ext_dock_spk_pamp & DOCK_SPK_AMP_POS) &&
		    (msm_ext_dock_spk_pamp & DOCK_SPK_AMP_NEG)) {
			msm_enable_ext_spk_amp_gpio(DOCK_SPK_PAMP_GPIO);
			pr_debug("%s: sleeping 4 ms after turning on external DOCK Ampl\n",
				 __func__);
			usleep_range(4000, 4000);
		}
		mutex_unlock(&audio_notifier_lock);
	} else {
		pr_err("%s: ERROR : Invalid External Speaker Ampl. spk = 0x%08x\n",
		       __func__, spk);
		return;
	}
}

static void msm_ext_spk_power_amp_off(u32 spk)
{
	struct pm_gpio param = {
		.direction = PM_GPIO_DIR_IN,
		.output_buffer = PM_GPIO_OUT_BUF_CMOS,
		.pull = PM_GPIO_PULL_NO,
		.vin_sel = PM_GPIO_VIN_S4,
		.out_strength = PM_GPIO_STRENGTH_MED,
		.function = PM_GPIO_FUNC_NORMAL,
	};

	pr_debug("%s, spk = %d\n", __func__, spk);
	if (spk & (BOTTOM_SPK_AMP_POS | BOTTOM_SPK_AMP_NEG)) {
		if (!msm_ext_bottom_spk_pamp)
			return;

		gpio_direction_output(bottom_spk_pamp_gpio, 0);
		gpio_free(bottom_spk_pamp_gpio);
		msm_ext_bottom_spk_pamp = 0;

		pr_debug("%s: sleeping 4 ms after turning off external Bottom Speaker Ampl\n",
			 __func__);
		usleep_range(4000, 4000);
	} else if (spk & (TOP_SPK_AMP_POS | TOP_SPK_AMP_NEG)) {
		if (!msm_ext_top_spk_pamp)
			return;

		gpio_direction_output(top_spk_pamp_gpio, 0);
		gpio_free(top_spk_pamp_gpio);
		msm_ext_top_spk_pamp = 0;

		pr_debug("%s: sleeping 4 ms after turning off external HAC Ampl\n",
			 __func__);
		usleep_range(4000, 4000);
	} else if (spk & (DOCK_SPK_AMP_POS | DOCK_SPK_AMP_NEG)) {
		mutex_lock(&audio_notifier_lock);
		if (!msm_ext_dock_spk_pamp) {
			mutex_unlock(&audio_notifier_lock);
			return;
		}

		gpio_direction_input(DOCK_SPK_PAMP_GPIO);
		gpio_free(DOCK_SPK_PAMP_GPIO);

		gpio_direction_input(USB_ID_ADC_GPIO);
		if (pm8xxx_gpio_config(USB_ID_ADC_GPIO, &param))
			pr_err("%s: Failed to configure USB_ID_ADC PMIC gpio %u\n",
			       __func__, USB_ID_ADC_GPIO);
		gpio_free(USB_ID_ADC_GPIO);

		msm_ext_dock_spk_pamp = 0;
		mutex_unlock(&audio_notifier_lock);

		pr_debug("%s: sleeping 4 ms after turning off external DOCK Ampl\n",
			 __func__);
		usleep_range(4000, 4000);
	} else {
		pr_err("%s: ERROR : Invalid Ext Spk Ampl. spk = 0x%08x\n",
		       __func__, spk);
		return;
	}
}

static void msm_ext_control(struct snd_soc_codec *codec)
{
	struct snd_soc_dapm_context *dapm = &codec->dapm;

	mutex_lock(&dapm->codec->mutex);

	pr_debug("%s: msm_spk_control = %d", __func__, msm_spk_control);
	if (msm_spk_control == MSM_SPK_ON) {
		snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Pos");
		snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Neg");
		snd_soc_dapm_enable_pin(dapm, "Ext Spk Top Pos");
		snd_soc_dapm_enable_pin(dapm, "Ext Spk Top Neg");
		snd_soc_dapm_enable_pin(dapm, "Dock Spk Pos");
		snd_soc_dapm_enable_pin(dapm, "Dock Spk Neg");
	} else {
		snd_soc_dapm_disable_pin(dapm, "Ext Spk Bottom Pos");
		snd_soc_dapm_disable_pin(dapm, "Ext Spk Bottom Neg");
		snd_soc_dapm_disable_pin(dapm, "Ext Spk Top Pos");
		snd_soc_dapm_disable_pin(dapm, "Ext Spk Top Neg");
		snd_soc_dapm_disable_pin(dapm, "Dock Spk Pos");
		snd_soc_dapm_disable_pin(dapm, "Dock Spk Neg");
	}

	snd_soc_dapm_sync(dapm);
	mutex_unlock(&dapm->codec->mutex);
}

static int msm_get_spk(struct snd_kcontrol *kcontrol,
		       struct snd_ctl_elem_value *ucontrol)
{
	pr_debug("%s: msm_spk_control = %d", __func__, msm_spk_control);
	ucontrol->value.integer.value[0] = msm_spk_control;
	return 0;
}

static int msm_set_spk(struct snd_kcontrol *kcontrol,
		       struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);

	pr_debug("%s()\n", __func__);
	if (msm_spk_control == ucontrol->value.integer.value[0])
		return 0;

	msm_spk_control = ucontrol->value.integer.value[0];
	msm_ext_control(codec);
	return 1;
}

static int msm_spkramp_event(struct snd_soc_dapm_widget *w,
			     struct snd_kcontrol *k, int event)
{
	pr_debug("%s() %x\n", __func__, SND_SOC_DAPM_EVENT_ON(event));

	if (SND_SOC_DAPM_EVENT_ON(event)) {
		if (!strncmp(w->name, "Ext Spk Bottom Pos", 18))
			msm_ext_spk_power_amp_on(BOTTOM_SPK_AMP_POS);
		else if (!strncmp(w->name, "Ext Spk Bottom Neg", 18))
			msm_ext_spk_power_amp_on(BOTTOM_SPK_AMP_NEG);
		else if (!strncmp(w->name, "Ext Spk Top Pos", 15))
			msm_ext_spk_power_amp_on(TOP_SPK_AMP_POS);
		else if (!strncmp(w->name, "Ext Spk Top Neg", 15))
			msm_ext_spk_power_amp_on(TOP_SPK_AMP_NEG);
		else if (!strncmp(w->name, "Dock Spk Pos", 12))
			msm_ext_spk_power_amp_on(DOCK_SPK_AMP_POS);
		else if (!strncmp(w->name, "Dock Spk Neg", 12))
			msm_ext_spk_power_amp_on(DOCK_SPK_AMP_NEG);
		else {
			pr_err("%s() Invalid Speaker Widget = %s\n",
			       __func__, w->name);
			return -EINVAL;
		}
	} else {
		if (!strncmp(w->name, "Ext Spk Bottom Pos", 18))
			msm_ext_spk_power_amp_off(BOTTOM_SPK_AMP_POS);
		else if (!strncmp(w->name, "Ext Spk Bottom Neg", 18))
			msm_ext_spk_power_amp_off(BOTTOM_SPK_AMP_NEG);
		else if (!strncmp(w->name, "Ext Spk Top Pos", 15))
			msm_ext_spk_power_amp_off(TOP_SPK_AMP_POS);
		else if (!strncmp(w->name, "Ext Spk Top Neg", 15))
			msm_ext_spk_power_amp_off(TOP_SPK_AMP_NEG);
		else if (!strncmp(w->name, "Dock Spk Pos", 12))
			msm_ext_spk_power_amp_off(DOCK_SPK_AMP_POS);
		else if (!strncmp(w->name, "Dock Spk Neg", 12))
			msm_ext_spk_power_amp_off(DOCK_SPK_AMP_NEG);
		else {
			pr_err("%s() Invalid Speaker Widget = %s\n",
			       __func__, w->name);
			return -EINVAL;
		}
	}
	return 0;
}

static int msm_enable_codec_ext_clk(struct snd_soc_codec *codec, int enable,
				    bool dapm)
{
	int r = 0;

	pr_debug("%s: enable = %d\n", __func__, enable);

	mutex_lock(&cdc_mclk_mutex);
	if (enable) {
		clk_users++;
		pr_debug("%s: clk_users = %d\n", __func__, clk_users);
		if (clk_users == 1) {
			if (codec_clk) {
				clk_set_rate(codec_clk, TABLA_EXT_CLK_RATE);
				clk_prepare_enable(codec_clk);
				tabla_mclk_enable(codec, 1, dapm);
			} else {
				pr_err("%s: Error setting Tabla MCLK\n",
				       __func__);
				clk_users--;
				r =
-EINVAL; } } } else { if (clk_users > 0) { clk_users--; pr_debug("%s: clk_users = %d\n", __func__, clk_users); if (clk_users == 0) { pr_debug("%s: disabling MCLK. clk_users = %d\n", __func__, clk_users); tabla_mclk_enable(codec, 0, dapm); clk_disable_unprepare(codec_clk); } } else { pr_err("%s: Error releasing Tabla MCLK\n", __func__); r = -EINVAL; } } mutex_unlock(&cdc_mclk_mutex); return r; } static int msm_mclk_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { pr_debug("%s: event = %d\n", __func__, event); switch (event) { case SND_SOC_DAPM_PRE_PMU: return msm_enable_codec_ext_clk(w->codec, 1, true); case SND_SOC_DAPM_POST_PMD: return msm_enable_codec_ext_clk(w->codec, 0, true); } return 0; } enum { RX_SWITCH_INDEX = 0, TX_SWITCH_INDEX, SWITCH_MAX, }; static const struct snd_kcontrol_new extspk_switch_controls = SOC_DAPM_SINGLE("Switch", RX_SWITCH_INDEX, 0, 1, 0); static const struct snd_kcontrol_new earamp_switch_controls = SOC_DAPM_SINGLE("Switch", RX_SWITCH_INDEX, 0, 1, 0); static const struct snd_kcontrol_new spkamp_switch_controls = SOC_DAPM_SINGLE("Switch", RX_SWITCH_INDEX, 0, 1, 0); static const struct snd_kcontrol_new micbias3_switch_controls = SOC_DAPM_SINGLE("Switch", TX_SWITCH_INDEX, 0, 1, 0); static const struct snd_soc_dapm_widget elite_dapm_widgets[] = { SND_SOC_DAPM_MIXER("Lineout Mixer", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_MIXER("SPK AMP EN", SND_SOC_NOPM, 0, 0, &spkamp_switch_controls, 1), SND_SOC_DAPM_MIXER("HAC AMP EN", SND_SOC_NOPM, 0, 0, &earamp_switch_controls, 1), SND_SOC_DAPM_MIXER("DOCK AMP EN", SND_SOC_NOPM, 0, 0, &extspk_switch_controls, 1), SND_SOC_DAPM_MIXER("DUAL MICBIAS", SND_SOC_NOPM, 0, 0, &micbias3_switch_controls, 1), }; static const struct snd_soc_dapm_widget msm_dapm_widgets[] = { SND_SOC_DAPM_SUPPLY("MCLK", SND_SOC_NOPM, 0, 0, msm_mclk_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_SPK("Ext Spk Bottom Pos", msm_spkramp_event), SND_SOC_DAPM_SPK("Ext Spk Bottom Neg", msm_spkramp_event), SND_SOC_DAPM_SPK("Ext Spk Top Pos", msm_spkramp_event), SND_SOC_DAPM_SPK("Ext Spk Top Neg", msm_spkramp_event), SND_SOC_DAPM_SPK("Dock Spk Pos", msm_spkramp_event), SND_SOC_DAPM_SPK("Dock Spk Neg", msm_spkramp_event), SND_SOC_DAPM_MIC("Handset Mic", NULL), SND_SOC_DAPM_MIC("Headset Mic", NULL), SND_SOC_DAPM_MIC("Back Mic", NULL), SND_SOC_DAPM_MIC("Digital Mic1", NULL), SND_SOC_DAPM_MIC("ANCRight Headset Mic", NULL), SND_SOC_DAPM_MIC("ANCLeft Headset Mic", NULL), SND_SOC_DAPM_MIC("Digital Mic1", NULL), SND_SOC_DAPM_MIC("Digital Mic2", NULL), SND_SOC_DAPM_MIC("Digital Mic3", NULL), SND_SOC_DAPM_MIC("Digital Mic4", NULL), SND_SOC_DAPM_MIC("Digital Mic5", NULL), SND_SOC_DAPM_MIC("Digital Mic6", NULL), }; static const struct snd_soc_dapm_route tabla_1_x_audio_map[] = { {"Lineout Mixer", NULL, "LINEOUT2"}, {"Lineout Mixer", NULL, "LINEOUT1"}, }; static const struct snd_soc_dapm_route tabla_2_x_audio_map[] = { {"Lineout Mixer", NULL, "LINEOUT3"}, {"Lineout Mixer", NULL, "LINEOUT1"}, }; static const struct snd_soc_dapm_route common_audio_map[] = { {"RX_BIAS", NULL, "MCLK"}, {"LDO_H", NULL, "MCLK"}, {"Ext Spk Bottom Pos", NULL, "SPK AMP EN"}, {"Ext Spk Bottom Neg", NULL, "SPK AMP EN"}, {"SPK AMP EN", "Switch", "Lineout Mixer"}, {"Ext Spk Top Pos", NULL, "HAC AMP EN"}, {"Ext Spk Top Neg", NULL, "HAC AMP EN"}, {"HAC AMP EN", "Switch", "Lineout Mixer"}, {"Dock Spk Pos", NULL, "DOCK AMP EN"}, {"Dock Spk Neg", NULL, "DOCK AMP EN"}, {"DOCK AMP EN", "Switch", "Lineout Mixer"}, {"AMIC1", NULL, "DUAL MICBIAS"}, {"DUAL MICBIAS", 
NULL, "MIC BIAS1 External"}, {"MIC BIAS1 External", NULL, "Handset Mic"}, {"DUAL MICBIAS", "Switch", "MIC BIAS3 External"}, {"AMIC2", NULL, "MIC BIAS2 External"}, {"MIC BIAS2 External", NULL, "Headset Mic"}, {"AMIC3", NULL, "MIC BIAS3 External"}, {"MIC BIAS3 External", NULL, "Back Mic"}, {"HEADPHONE", NULL, "LDO_H"}, }; static const char *spk_function[] = {"Off", "On"}; static const char *slim0_rx_ch_text[] = {"One", "Two"}; static const char *slim0_tx_ch_text[] = {"One", "Two", "Three", "Four"}; static const struct soc_enum msm_enum[] = { SOC_ENUM_SINGLE_EXT(2, spk_function), SOC_ENUM_SINGLE_EXT(2, slim0_rx_ch_text), SOC_ENUM_SINGLE_EXT(4, slim0_tx_ch_text), }; static const char *stereo_mic_voice[] = {"Off", "On"}; static const struct soc_enum elite_msm_enum[] = { SOC_ENUM_SINGLE_EXT(2, stereo_mic_voice), }; static const char *btsco_rate_text[] = {"8000", "16000"}; static const struct soc_enum msm_btsco_enum[] = { SOC_ENUM_SINGLE_EXT(2, btsco_rate_text), }; static const char *auxpcm_rate_text[] = {"rate_8000", "rate_16000"}; static const struct soc_enum msm_auxpcm_enum[] = { SOC_ENUM_SINGLE_EXT(2, auxpcm_rate_text), }; static int msm_slim_0_rx_ch_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { pr_debug("%s: msm_slim_0_rx_ch = %d\n", __func__, msm_slim_0_rx_ch); ucontrol->value.integer.value[0] = msm_slim_0_rx_ch - 1; return 0; } static int msm_slim_0_rx_ch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { msm_slim_0_rx_ch = ucontrol->value.integer.value[0] + 1; pr_debug("%s: msm_slim_0_rx_ch = %d\n", __func__, msm_slim_0_rx_ch); return 1; } static int msm_slim_0_tx_ch_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { pr_debug("%s: msm_slim_0_tx_ch = %d\n", __func__, msm_slim_0_tx_ch); ucontrol->value.integer.value[0] = msm_slim_0_tx_ch - 1; return 0; } static int msm_slim_0_tx_ch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { msm_slim_0_tx_ch = ucontrol->value.integer.value[0] + 1; pr_debug("%s: msm_slim_0_tx_ch = %d\n", __func__, msm_slim_0_tx_ch); return 1; } static int elite_stereo_voice_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { pr_debug("%s: elite_stereo_control = %d\n", __func__, elite_stereo_control); ucontrol->value.integer.value[0] = elite_stereo_control; return 0; } static int elite_stereo_voice_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int ret = 0; struct pm_gpio param = { .direction = PM_GPIO_DIR_OUT, .output_buffer = PM_GPIO_OUT_BUF_CMOS, .output_value = 1, .pull = PM_GPIO_PULL_NO, .vin_sel = PM_GPIO_VIN_L17, .out_strength = PM_GPIO_STRENGTH_MED, .function = PM_GPIO_FUNC_NORMAL, }; if (elite_stereo_control == ucontrol->value.integer.value[0]) return 0; elite_stereo_control = ucontrol->value.integer.value[0]; pr_debug("%s: elite_stereo_control = %d\n", __func__, elite_stereo_control); switch (ucontrol->value.integer.value[0]) { case 0: gpio_direction_output(ELITE_AUD_STEREO_REC, 1); gpio_free(ELITE_AUD_STEREO_REC); break; case 1: ret = gpio_request(ELITE_AUD_STEREO_REC, "A1028_SWITCH"); if (ret) { pr_err("%s: Failed to request gpio %d\n", __func__, ELITE_AUD_STEREO_REC); return ret; } ret = pm8xxx_gpio_config(ELITE_AUD_STEREO_REC, &param); if (ret) pr_err("%s: Failed to configure gpio %d\n", __func__, ELITE_AUD_STEREO_REC); else gpio_direction_output(ELITE_AUD_STEREO_REC, 0); break; } return ret; } static int msm_btsco_rate_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { pr_debug("%s: 
msm_btsco_rate = %d", __func__, msm_btsco_rate); ucontrol->value.integer.value[0] = msm_btsco_rate; return 0; } static int msm_btsco_rate_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { switch (ucontrol->value.integer.value[0]) { case 0: msm_btsco_rate = SAMPLE_RATE_8KHZ; break; case 1: msm_btsco_rate = SAMPLE_RATE_16KHZ; break; default: msm_btsco_rate = SAMPLE_RATE_8KHZ; break; } pr_debug("%s: msm_btsco_rate = %d\n", __func__, msm_btsco_rate); return 0; } static int msm_auxpcm_rate_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { pr_debug("%s: msm_auxpcm_rate = %d", __func__, msm_auxpcm_rate); ucontrol->value.integer.value[0] = msm_auxpcm_rate; return 0; } static int msm_auxpcm_rate_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { switch (ucontrol->value.integer.value[0]) { case 0: msm_auxpcm_rate = SAMPLE_RATE_8KHZ; break; case 1: msm_auxpcm_rate = SAMPLE_RATE_16KHZ; break; default: msm_auxpcm_rate = SAMPLE_RATE_8KHZ; break; } pr_debug("%s: msm_auxpcm_rate = %d" "ucontrol->value.integer.value[0] = %d\n", __func__, msm_auxpcm_rate, (int)ucontrol->value.integer.value[0]); return 0; } static const struct snd_kcontrol_new tabla_msm_controls[] = { SOC_ENUM_EXT("Speaker Function", msm_enum[0], msm_get_spk, msm_set_spk), SOC_ENUM_EXT("SLIM_0_RX Channels", msm_enum[1], msm_slim_0_rx_ch_get, msm_slim_0_rx_ch_put), SOC_ENUM_EXT("SLIM_0_TX Channels", msm_enum[2], msm_slim_0_tx_ch_get, msm_slim_0_tx_ch_put), SOC_ENUM_EXT("Internal BTSCO SampleRate", msm_btsco_enum[0], msm_btsco_rate_get, msm_btsco_rate_put), SOC_ENUM_EXT("AUX PCM SampleRate", msm_auxpcm_enum[0], msm_auxpcm_rate_get, msm_auxpcm_rate_put), SOC_ENUM_EXT("Stereo Selection", elite_msm_enum[0], elite_stereo_voice_get, elite_stereo_voice_put), }; static int msm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; int ret = 0; unsigned int rx_ch[SLIM_MAX_RX_PORTS], tx_ch[SLIM_MAX_TX_PORTS]; unsigned int rx_ch_cnt = 0, tx_ch_cnt = 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { pr_debug("%s: %s rx_dai_id = %d num_ch = %d\n", __func__, codec_dai->name, codec_dai->id, msm_slim_0_rx_ch); ret = snd_soc_dai_get_channel_map(codec_dai, &tx_ch_cnt, tx_ch, &rx_ch_cnt , rx_ch); if (ret < 0) { pr_err("%s: failed to get codec chan map\n", __func__); goto end; } ret = snd_soc_dai_set_channel_map(cpu_dai, 0, 0, msm_slim_0_rx_ch, rx_ch); if (ret < 0) { pr_err("%s: failed to set cpu chan map\n", __func__); goto end; } ret = snd_soc_dai_set_channel_map(codec_dai, 0, 0, msm_slim_0_rx_ch, rx_ch); if (ret < 0) { pr_err("%s: failed to set codec channel map\n", __func__); goto end; } } else { pr_debug("%s: %s tx_dai_id = %d num_ch = %d\n", __func__, codec_dai->name, codec_dai->id, msm_slim_0_tx_ch); ret = snd_soc_dai_get_channel_map(codec_dai, &tx_ch_cnt, tx_ch, &rx_ch_cnt , rx_ch); if (ret < 0) { pr_err("%s: failed to get codec chan map\n", __func__); goto end; } ret = snd_soc_dai_set_channel_map(cpu_dai, msm_slim_0_tx_ch, tx_ch, 0 , 0); if (ret < 0) { pr_err("%s: failed to set cpu chan map\n", __func__); goto end; } ret = snd_soc_dai_set_channel_map(codec_dai, msm_slim_0_tx_ch, tx_ch, 0, 0); if (ret < 0) { pr_err("%s: failed to set codec channel map\n", __func__); goto end; } } end: return ret; } static int msm_slimbus_2_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) 
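/*
 * Unlike msm_hw_params() above, which sizes the channel maps from the
 * "SLIM_0_RX Channels"/"SLIM_0_TX Channels" mixer controls, this SLIMBUS_2
 * variant takes the channel count straight from params_channels(), i.e.
 * from whatever the hostless stream was opened with.
 */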
{ struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; int ret = 0; unsigned int rx_ch[SLIM_MAX_RX_PORTS], tx_ch[SLIM_MAX_TX_PORTS]; unsigned int rx_ch_cnt = 0, tx_ch_cnt = 0; unsigned int num_tx_ch = 0; unsigned int num_rx_ch = 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { num_rx_ch = params_channels(params); pr_debug("%s: %s rx_dai_id = %d num_ch = %d\n", __func__, codec_dai->name, codec_dai->id, num_rx_ch); ret = snd_soc_dai_get_channel_map(codec_dai, &tx_ch_cnt, tx_ch, &rx_ch_cnt , rx_ch); if (ret < 0) { pr_err("%s: failed to get codec chan map\n", __func__); goto end; } ret = snd_soc_dai_set_channel_map(cpu_dai, 0, 0, num_rx_ch, rx_ch); if (ret < 0) { pr_err("%s: failed to set cpu chan map\n", __func__); goto end; } ret = snd_soc_dai_set_channel_map(codec_dai, 0, 0, num_rx_ch, rx_ch); if (ret < 0) { pr_err("%s: failed to set codec channel map\n", __func__); goto end; } } else { num_tx_ch = params_channels(params); pr_debug("%s: %s tx_dai_id = %d num_ch = %d\n", __func__, codec_dai->name, codec_dai->id, num_tx_ch); ret = snd_soc_dai_get_channel_map(codec_dai, &tx_ch_cnt, tx_ch, &rx_ch_cnt , rx_ch); if (ret < 0) { pr_err("%s: failed to get codec chan map\n", __func__); goto end; } ret = snd_soc_dai_set_channel_map(cpu_dai, num_tx_ch, tx_ch, 0 , 0); if (ret < 0) { pr_err("%s: failed to set cpu chan map\n", __func__); goto end; } ret = snd_soc_dai_set_channel_map(codec_dai, num_tx_ch, tx_ch, 0, 0); if (ret < 0) { pr_err("%s: failed to set codec channel map\n", __func__); goto end; } } end: return ret; } static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd) { int err; struct snd_soc_codec *codec = rtd->codec; struct snd_soc_dapm_context *dapm = &codec->dapm; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; pr_debug("%s(), dev_name: %s\n", __func__, dev_name(cpu_dai->dev)); snd_soc_dapm_new_controls(dapm, msm_dapm_widgets, ARRAY_SIZE(msm_dapm_widgets)); snd_soc_dapm_new_controls(dapm, elite_dapm_widgets, ARRAY_SIZE(elite_dapm_widgets)); snd_soc_dapm_add_routes(dapm, common_audio_map, ARRAY_SIZE(common_audio_map)); pr_debug("%s(), %s\n", __func__, codec->name); if (!strncmp(codec->name, "tabla1x_codec", 13)) snd_soc_dapm_add_routes(dapm, tabla_1_x_audio_map, ARRAY_SIZE(tabla_1_x_audio_map)); else snd_soc_dapm_add_routes(dapm, tabla_2_x_audio_map, ARRAY_SIZE(tabla_2_x_audio_map)); snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Pos"); snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Neg"); snd_soc_dapm_enable_pin(dapm, "Ext Spk Top Pos"); snd_soc_dapm_enable_pin(dapm, "Ext Spk Top Neg"); snd_soc_dapm_enable_pin(dapm, "Dock Spk Pos"); snd_soc_dapm_enable_pin(dapm, "Dock Spk Neg"); snd_soc_dapm_sync(dapm); err = snd_soc_jack_new(codec, "Headset Jack", (SND_JACK_HEADSET | SND_JACK_OC_HPHL | SND_JACK_OC_HPHR | SND_JACK_UNSUPPORTED), &hs_jack); if (err) { pr_err("failed to create new jack\n"); return err; } err = snd_soc_jack_new(codec, "Button Jack", TABLA_JACK_BUTTON_MASK, &button_jack); if (err) { pr_err("failed to create new jack\n"); return err; } codec_clk = clk_get(cpu_dai->dev, "osr_clk"); return err; } static int msm_slim_0_rx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params) { struct snd_interval *rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); struct snd_interval *channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); pr_debug("%s()\n", __func__); rate->min = rate->max = 48000; channels->min = channels->max = msm_slim_0_rx_ch; 
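/*
 * Collapsing min == max above leaves ASoC no room to renegotiate: the
 * SLIMBUS_0_RX back end is pinned to 48 kHz and to the channel count last
 * selected through the "SLIM_0_RX Channels" control.
 */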
return 0; } static int msm_slim_0_tx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params) { struct snd_interval *rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); struct snd_interval *channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); pr_debug("%s()\n", __func__); rate->min = rate->max = 48000; channels->min = channels->max = msm_slim_0_tx_ch; return 0; } static int msm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params) { struct snd_interval *rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); pr_debug("%s()\n", __func__); rate->min = rate->max = 48000; return 0; } static int msm_hdmi_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params) { struct snd_interval *rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); struct snd_interval *channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); pr_debug("%s channels->min %u channels->max %u ()\n", __func__, channels->min, channels->max); rate->min = rate->max = 48000; return 0; } static int msm_btsco_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params) { struct snd_interval *rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); struct snd_interval *channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); rate->min = rate->max = msm_btsco_rate; channels->min = channels->max = msm_btsco_ch; return 0; } static int msm_auxpcm_be_params_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params) { struct snd_interval *rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); struct snd_interval *channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); rate->min = rate->max = msm_auxpcm_rate; channels->min = channels->max = 1; return 0; } static int msm_aux_pcm_get_gpios(void) { int ret = 0; pr_debug("%s\n", __func__); ret = gpio_request(GPIO_AUX_PCM_DOUT, "AUX PCM DOUT"); if (ret < 0) { pr_err("%s: Failed to request gpio(%d): AUX PCM DOUT", __func__, GPIO_AUX_PCM_DOUT); goto fail_dout; } ret = gpio_request(GPIO_AUX_PCM_DIN, "AUX PCM DIN"); if (ret < 0) { pr_err("%s: Failed to request gpio(%d): AUX PCM DIN", __func__, GPIO_AUX_PCM_DIN); goto fail_din; } ret = gpio_request(GPIO_AUX_PCM_SYNC, "AUX PCM SYNC"); if (ret < 0) { pr_err("%s: Failed to request gpio(%d): AUX PCM SYNC", __func__, GPIO_AUX_PCM_SYNC); goto fail_sync; } ret = gpio_request(GPIO_AUX_PCM_CLK, "AUX PCM CLK"); if (ret < 0) { pr_err("%s: Failed to request gpio(%d): AUX PCM CLK", __func__, GPIO_AUX_PCM_CLK); goto fail_clk; } return 0; fail_clk: gpio_free(GPIO_AUX_PCM_SYNC); fail_sync: gpio_free(GPIO_AUX_PCM_DIN); fail_din: gpio_free(GPIO_AUX_PCM_DOUT); fail_dout: return ret; } static int msm_aux_pcm_free_gpios(void) { gpio_free(GPIO_AUX_PCM_DIN); gpio_free(GPIO_AUX_PCM_DOUT); gpio_free(GPIO_AUX_PCM_SYNC); gpio_free(GPIO_AUX_PCM_CLK); return 0; } static int msm_startup(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; pr_debug("%s(): dai_link_str_name = %s cpu_dai = %s codec_dai = %s\n", __func__, rtd->dai_link->stream_name, rtd->dai_link->cpu_dai_name, rtd->dai_link->codec_dai_name); return 0; } static int msm_auxpcm_startup(struct snd_pcm_substream *substream) { int ret = 0; pr_debug("%s(): substream = %s\n", __func__, substream->name); ret = msm_aux_pcm_get_gpios(); if (ret < 0) { pr_err("%s: Aux PCM GPIO request failed\n", __func__); return -EINVAL; } return 0; } static void msm_auxpcm_shutdown(struct snd_pcm_substream 
*substream) { pr_debug("%s(): substream = %s\n", __func__, substream->name); msm_aux_pcm_free_gpios(); } static void msm_shutdown(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; pr_debug("%s(): dai_link str_name = %s cpu_dai = %s codec_dai = %s\n", __func__, rtd->dai_link->stream_name, rtd->dai_link->cpu_dai_name, rtd->dai_link->codec_dai_name); } static struct snd_soc_ops msm_be_ops = { .startup = msm_startup, .hw_params = msm_hw_params, .shutdown = msm_shutdown, }; static struct snd_soc_ops msm_auxpcm_be_ops = { .startup = msm_auxpcm_startup, .shutdown = msm_auxpcm_shutdown, }; static struct snd_soc_ops msm_slimbus_2_be_ops = { .startup = msm_startup, .hw_params = msm_slimbus_2_hw_params, .shutdown = msm_shutdown, }; static struct snd_soc_dai_link msm_dai_common[] = { { .name = "MSM8960 Media1", .stream_name = "MultiMedia1", .cpu_dai_name = "MultiMedia1", .platform_name = "msm-pcm-dsp", .dynamic = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .ignore_suspend = 1, .ignore_pmdown_time = 1, .be_id = MSM_FRONTEND_DAI_MULTIMEDIA1 }, { .name = "MSM8960 Media2", .stream_name = "MultiMedia2", .cpu_dai_name = "MultiMedia2", .platform_name = "msm-multi-ch-pcm-dsp", .dynamic = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .ignore_suspend = 1, .ignore_pmdown_time = 1, .be_id = MSM_FRONTEND_DAI_MULTIMEDIA2, }, { .name = "Circuit-Switch Voice", .stream_name = "CS-Voice", .cpu_dai_name = "CS-VOICE", .platform_name = "msm-pcm-voice", .dynamic = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, .ignore_suspend = 1, .ignore_pmdown_time = 1, .be_id = MSM_FRONTEND_DAI_CS_VOICE, }, { .name = "MSM VoIP", .stream_name = "VoIP", .cpu_dai_name = "VoIP", .platform_name = "msm-voip-dsp", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", .ignore_suspend = 1, .ignore_pmdown_time = 1, .be_id = MSM_FRONTEND_DAI_VOIP, }, { .name = "MSM8960 LPA", .stream_name = "LPA", .cpu_dai_name = "MultiMedia3", .platform_name = "msm-pcm-lpa", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", .ignore_suspend = 1, .ignore_pmdown_time = 1, .be_id = MSM_FRONTEND_DAI_MULTIMEDIA3, }, { .name = "SLIMBUS_0 Hostless", .stream_name = "SLIMBUS_0 Hostless", .cpu_dai_name = "SLIMBUS0_HOSTLESS", .platform_name = "msm-pcm-hostless", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, .ignore_suspend = 1, .ignore_pmdown_time = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", }, { .name = "INT_FM Hostless", .stream_name = "INT_FM Hostless", .cpu_dai_name = "INT_FM_HOSTLESS", .platform_name = "msm-pcm-hostless", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, .ignore_suspend = 1, .ignore_pmdown_time = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", }, { .name = "MSM AFE-PCM RX", .stream_name = "AFE-PROXY RX", .cpu_dai_name = "msm-dai-q6.241", .codec_name = "msm-stub-codec.1", .codec_dai_name = 
"msm-stub-rx", .platform_name = "msm-pcm-afe", .ignore_suspend = 1, .ignore_pmdown_time = 1, }, { .name = "MSM AFE-PCM TX", .stream_name = "AFE-PROXY TX", .cpu_dai_name = "msm-dai-q6.240", .codec_name = "msm-stub-codec.1", .codec_dai_name = "msm-stub-tx", .platform_name = "msm-pcm-afe", .ignore_suspend = 1, }, { .name = "MSM8960 Compr", .stream_name = "COMPR", .cpu_dai_name = "MultiMedia4", .platform_name = "msm-compr-dsp", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", .ignore_suspend = 1, .ignore_pmdown_time = 1, .be_id = MSM_FRONTEND_DAI_MULTIMEDIA4, }, { .name = "AUXPCM Hostless", .stream_name = "AUXPCM Hostless", .cpu_dai_name = "AUXPCM_HOSTLESS", .platform_name = "msm-pcm-hostless", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, .ignore_suspend = 1, .ignore_pmdown_time = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", }, { .name = "HDMI_RX_HOSTLESS", .stream_name = "HDMI_RX_HOSTLESS", .cpu_dai_name = "HDMI_HOSTLESS", .platform_name = "msm-pcm-hostless", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, .ignore_suspend = 1, .ignore_pmdown_time = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", }, { .name = "VoLTE", .stream_name = "VoLTE", .cpu_dai_name = "VoLTE", .platform_name = "msm-pcm-voice", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, .ignore_suspend = 1, .ignore_pmdown_time = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", .be_id = MSM_FRONTEND_DAI_VOLTE, }, { .name = "Voice2", .stream_name = "Voice2", .cpu_dai_name = "Voice2", .platform_name = "msm-pcm-voice", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, .ignore_suspend = 1, .ignore_pmdown_time = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", .be_id = MSM_FRONTEND_DAI_VOICE2, }, { .name = "MSM8960 LowLatency", .stream_name = "MultiMedia5", .cpu_dai_name = "MultiMedia5", .platform_name = "msm-lowlatency-pcm-dsp", .dynamic = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .ignore_suspend = 1, /* this dainlink has playback support */ .ignore_pmdown_time = 1, .be_id = MSM_FRONTEND_DAI_MULTIMEDIA5, }, { .name = LPASS_BE_INT_BT_SCO_RX, .stream_name = "Internal BT-SCO Playback", .cpu_dai_name = "msm-dai-q6.12288", .platform_name = "msm-pcm-routing", .codec_name = "msm-stub-codec.1", .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .be_id = MSM_BACKEND_DAI_INT_BT_SCO_RX, .be_hw_params_fixup = msm_btsco_be_hw_params_fixup, .ignore_pmdown_time = 1, }, { .name = LPASS_BE_INT_BT_SCO_TX, .stream_name = "Internal BT-SCO Capture", .cpu_dai_name = "msm-dai-q6.12289", .platform_name = "msm-pcm-routing", .codec_name = "msm-stub-codec.1", .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .be_id = MSM_BACKEND_DAI_INT_BT_SCO_TX, .be_hw_params_fixup = msm_btsco_be_hw_params_fixup, }, { .name = LPASS_BE_INT_FM_RX, .stream_name = "Internal FM Playback", .cpu_dai_name = "msm-dai-q6.12292", .platform_name = "msm-pcm-routing", .codec_name = "msm-stub-codec.1", .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .be_id = MSM_BACKEND_DAI_INT_FM_RX, .be_hw_params_fixup = 
msm_be_hw_params_fixup, .ignore_pmdown_time = 1, }, { .name = LPASS_BE_INT_FM_TX, .stream_name = "Internal FM Capture", .cpu_dai_name = "msm-dai-q6.12293", .platform_name = "msm-pcm-routing", .codec_name = "msm-stub-codec.1", .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .be_id = MSM_BACKEND_DAI_INT_FM_TX, .be_hw_params_fixup = msm_be_hw_params_fixup, }, { .name = LPASS_BE_HDMI, .stream_name = "HDMI Playback", .cpu_dai_name = "msm-dai-q6-hdmi.8", .platform_name = "msm-pcm-routing", .codec_name = "msm-stub-codec.1", .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .be_id = MSM_BACKEND_DAI_HDMI_RX, .be_hw_params_fixup = msm_hdmi_be_hw_params_fixup, .ignore_pmdown_time = 1, }, { .name = LPASS_BE_AFE_PCM_RX, .stream_name = "AFE Playback", .cpu_dai_name = "msm-dai-q6.224", .platform_name = "msm-pcm-routing", .codec_name = "msm-stub-codec.1", .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .be_id = MSM_BACKEND_DAI_AFE_PCM_RX, .ignore_pmdown_time = 1, }, { .name = LPASS_BE_AFE_PCM_TX, .stream_name = "AFE Capture", .cpu_dai_name = "msm-dai-q6.225", .platform_name = "msm-pcm-routing", .codec_name = "msm-stub-codec.1", .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .be_id = MSM_BACKEND_DAI_AFE_PCM_TX, }, { .name = LPASS_BE_AUXPCM_RX, .stream_name = "AUX PCM Playback", .cpu_dai_name = "msm-dai-q6.2", .platform_name = "msm-pcm-routing", .codec_name = "msm-stub-codec.1", .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .be_id = MSM_BACKEND_DAI_AUXPCM_RX, .be_hw_params_fixup = msm_auxpcm_be_params_fixup, .ops = &msm_auxpcm_be_ops, .ignore_pmdown_time = 1, }, { .name = LPASS_BE_AUXPCM_TX, .stream_name = "AUX PCM Capture", .cpu_dai_name = "msm-dai-q6.3", .platform_name = "msm-pcm-routing", .codec_name = "msm-stub-codec.1", .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .be_id = MSM_BACKEND_DAI_AUXPCM_TX, .be_hw_params_fixup = msm_auxpcm_be_params_fixup, }, { .name = LPASS_BE_VOICE_PLAYBACK_TX, .stream_name = "Voice Farend Playback", .cpu_dai_name = "msm-dai-q6.32773", .platform_name = "msm-pcm-routing", .codec_name = "msm-stub-codec.1", .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .be_id = MSM_BACKEND_DAI_VOICE_PLAYBACK_TX, .be_hw_params_fixup = msm_be_hw_params_fixup, }, { .name = LPASS_BE_INCALL_RECORD_TX, .stream_name = "Voice Uplink Capture", .cpu_dai_name = "msm-dai-q6.32772", .platform_name = "msm-pcm-routing", .codec_name = "msm-stub-codec.1", .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .be_id = MSM_BACKEND_DAI_INCALL_RECORD_TX, .be_hw_params_fixup = msm_be_hw_params_fixup, }, { .name = LPASS_BE_INCALL_RECORD_RX, .stream_name = "Voice Downlink Capture", .cpu_dai_name = "msm-dai-q6.32771", .platform_name = "msm-pcm-routing", .codec_name = "msm-stub-codec.1", .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .be_id = MSM_BACKEND_DAI_INCALL_RECORD_RX, .be_hw_params_fixup = msm_be_hw_params_fixup, .ignore_pmdown_time = 1, }, { .name = "MSM8960 Media6", .stream_name = "MultiMedia6", .cpu_dai_name = "MultiMedia6", .platform_name = "msm-multi-ch-pcm-dsp", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", .ignore_suspend = 1, .ignore_pmdown_time = 1, .be_id = MSM_FRONTEND_DAI_MULTIMEDIA6 }, { .name = "MSM8960 Compr2", .stream_name = "COMPR2", .cpu_dai_name = "MultiMedia7", .platform_name = "msm-compr-dsp", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", .ignore_suspend = 1, .ignore_pmdown_time = 1, .be_id = 
MSM_FRONTEND_DAI_MULTIMEDIA7, }, { .name = "MSM8960 Compr3", .stream_name = "COMPR3", .cpu_dai_name = "MultiMedia8", .platform_name = "msm-compr-dsp", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", .ignore_suspend = 1, .ignore_pmdown_time = 1, .be_id = MSM_FRONTEND_DAI_MULTIMEDIA8, }, }; static struct snd_soc_dai_link msm_dai_delta_tabla1x[] = { { .name = LPASS_BE_SLIMBUS_0_RX, .stream_name = "Slimbus Playback", .cpu_dai_name = "msm-dai-q6.16384", .platform_name = "msm-pcm-routing", .codec_name = "tabla1x_codec", .codec_dai_name = "tabla_rx1", .no_pcm = 1, .be_id = MSM_BACKEND_DAI_SLIMBUS_0_RX, .init = &msm_audrx_init, .be_hw_params_fixup = msm_slim_0_rx_be_hw_params_fixup, .ops = &msm_be_ops, .ignore_pmdown_time = 1, }, { .name = LPASS_BE_SLIMBUS_0_TX, .stream_name = "Slimbus Capture", .cpu_dai_name = "msm-dai-q6.16385", .platform_name = "msm-pcm-routing", .codec_name = "tabla1x_codec", .codec_dai_name = "tabla_tx1", .no_pcm = 1, .be_id = MSM_BACKEND_DAI_SLIMBUS_0_TX, .be_hw_params_fixup = msm_slim_0_tx_be_hw_params_fixup, .ops = &msm_be_ops, }, { .name = "SLIMBUS_2 Hostless Capture", .stream_name = "SLIMBUS_2 Hostless Capture", .cpu_dai_name = "msm-dai-q6.16389", .platform_name = "msm-pcm-hostless", .codec_name = "tabla1x_codec", .codec_dai_name = "tabla_tx2", .ignore_suspend = 1, .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, .ops = &msm_slimbus_2_be_ops, }, { .name = "SLIMBUS_2 Hostless Playback", .stream_name = "SLIMBUS_2 Hostless Playback", .cpu_dai_name = "msm-dai-q6.16388", .platform_name = "msm-pcm-hostless", .codec_name = "tabla1x_codec", .codec_dai_name = "tabla_rx3", .ignore_suspend = 1, .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, .ops = &msm_slimbus_2_be_ops, }, }; static struct snd_soc_dai_link msm_dai_delta_tabla2x[] = { { .name = LPASS_BE_SLIMBUS_0_RX, .stream_name = "Slimbus Playback", .cpu_dai_name = "msm-dai-q6.16384", .platform_name = "msm-pcm-routing", .codec_name = "tabla_codec", .codec_dai_name = "tabla_rx1", .no_pcm = 1, .be_id = MSM_BACKEND_DAI_SLIMBUS_0_RX, .init = &msm_audrx_init, .be_hw_params_fixup = msm_slim_0_rx_be_hw_params_fixup, .ops = &msm_be_ops, .ignore_pmdown_time = 1, }, { .name = LPASS_BE_SLIMBUS_0_TX, .stream_name = "Slimbus Capture", .cpu_dai_name = "msm-dai-q6.16385", .platform_name = "msm-pcm-routing", .codec_name = "tabla_codec", .codec_dai_name = "tabla_tx1", .no_pcm = 1, .be_id = MSM_BACKEND_DAI_SLIMBUS_0_TX, .be_hw_params_fixup = msm_slim_0_tx_be_hw_params_fixup, .ops = &msm_be_ops, }, { .name = "SLIMBUS_2 Hostless Capture", .stream_name = "SLIMBUS_2 Hostless Capture", .cpu_dai_name = "msm-dai-q6.16389", .platform_name = "msm-pcm-hostless", .codec_name = "tabla_codec", .codec_dai_name = "tabla_tx2", .ignore_suspend = 1, .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, .ops = &msm_slimbus_2_be_ops, }, { .name = "SLIMBUS_2 Hostless Playback", .stream_name = "SLIMBUS_2 Hostless Playback", .cpu_dai_name = "msm-dai-q6.16388", .platform_name = "msm-pcm-hostless", .codec_name = "tabla_codec", .codec_dai_name = "tabla_rx3", .ignore_suspend = 1, .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, .ops = &msm_slimbus_2_be_ops, }, }; static struct snd_soc_dai_link msm_tabla1x_dai[ ARRAY_SIZE(msm_dai_common) + ARRAY_SIZE(msm_dai_delta_tabla1x)]; static struct snd_soc_dai_link msm_dai[ ARRAY_SIZE(msm_dai_common) + ARRAY_SIZE(msm_dai_delta_tabla2x)]; static struct snd_soc_card snd_soc_tabla1x_card_msm = { .name = "msm-tabla1x-snd-card", .dai_link = msm_tabla1x_dai, .num_links = 
ARRAY_SIZE(msm_tabla1x_dai), .controls = tabla_msm_controls, .num_controls = ARRAY_SIZE(tabla_msm_controls), }; static struct snd_soc_card snd_soc_card_msm = { .name = "msm-snd-card", .dai_link = msm_dai, .num_links = ARRAY_SIZE(msm_dai), .controls = tabla_msm_controls, .num_controls = ARRAY_SIZE(tabla_msm_controls), }; static struct platform_device *msm_snd_device; static struct platform_device *msm_snd_tabla1x_device; static int __init elite_audio_init(void) { int ret; if (!cpu_is_msm8960()) { pr_err("%s: Not the right machine type\n", __func__); return -ENODEV; } pr_debug("%s", __func__); msm_snd_device = platform_device_alloc("soc-audio", 0); if (!msm_snd_device) { pr_err("Platform device allocation failed\n"); return -ENOMEM; } memcpy(msm_dai, msm_dai_common, sizeof(msm_dai_common)); memcpy(msm_dai + ARRAY_SIZE(msm_dai_common), msm_dai_delta_tabla2x, sizeof(msm_dai_delta_tabla2x)); platform_set_drvdata(msm_snd_device, &snd_soc_card_msm); ret = platform_device_add(msm_snd_device); if (ret) { platform_device_put(msm_snd_device); return ret; } msm_snd_tabla1x_device = platform_device_alloc("soc-audio", 1); if (!msm_snd_tabla1x_device) { pr_err("Platform device allocation failed\n"); return -ENOMEM; } memcpy(msm_tabla1x_dai, msm_dai_common, sizeof(msm_dai_common)); memcpy(msm_tabla1x_dai + ARRAY_SIZE(msm_dai_common), msm_dai_delta_tabla1x, sizeof(msm_dai_delta_tabla1x)); platform_set_drvdata(msm_snd_tabla1x_device, &snd_soc_tabla1x_card_msm); ret = platform_device_add(msm_snd_tabla1x_device); if (ret) { platform_device_put(msm_snd_tabla1x_device); return ret; } mutex_init(&audio_notifier_lock); pr_debug("%s: register cable detect func for dock", __func__); ret = cable_detect_register_notifier(&audio_dock_notifier); mutex_init(&cdc_mclk_mutex); return ret; } late_initcall(elite_audio_init); static void __exit elite_audio_exit(void) { if (!cpu_is_msm8960()) { pr_err("%s: Not the right machine type\n", __func__); return; } pr_debug("%s", __func__); platform_device_unregister(msm_snd_device); platform_device_unregister(msm_snd_tabla1x_device); mutex_destroy(&audio_notifier_lock); mutex_destroy(&cdc_mclk_mutex); } module_exit(elite_audio_exit); MODULE_DESCRIPTION("ALSA Platform Elite"); MODULE_LICENSE("GPL v2");
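/*
 * Illustrative sketch (not part of the original driver): the mixer controls
 * above ("Speaker Function", "SLIM_0_RX Channels", ...) all follow the same
 * ASoC get/put pattern shown here. All demo_* names are hypothetical; the
 * headers are the ones this file already includes.
 */
static int demo_state;

static int demo_ctl_get(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	/* report the cached state back to user space (amixer/tinymix) */
	ucontrol->value.integer.value[0] = demo_state;
	return 0;
}

static int demo_ctl_put(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	if (demo_state == ucontrol->value.integer.value[0])
		return 0; /* unchanged: no event sent to user space */
	demo_state = ucontrol->value.integer.value[0];
	/* apply the new state to the hardware here */
	return 1; /* changed: control subscribers get a notification */
}

static const char *demo_text[] = {"Off", "On"};
static const struct soc_enum demo_enum = SOC_ENUM_SINGLE_EXT(2, demo_text);

/* registered the same way tabla_msm_controls is: via snd_soc_card.controls */
static const struct snd_kcontrol_new demo_controls[] = {
	SOC_ENUM_EXT("Demo Function", demo_enum, demo_ctl_get, demo_ctl_put),
};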
kbc-developers/android_kernel_htc_msm8960
arch/arm/mach-msm/htc/elite/board-elite-audio.c
C
gpl-2.0
49,618
/* * Common code for the NVMe target. * Copyright (c) 2015-2016 HGST, a Western Digital Company. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/random.h> #include <linux/rculist.h> #include "nvmet.h" static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX]; static DEFINE_IDA(cntlid_ida); /* * This read/write semaphore is used to synchronize access to configuration * information on a target system that will result in discovery log page * information change for at least one host. * The full list of resources to be protected by this semaphore is: * * - subsystems list * - per-subsystem allowed hosts list * - allow_any_host subsystem attribute * - nvmet_genctr * - the nvmet_transports array * * When updating any of those lists/structures write lock should be obtained, * while when reading (populating discovery log page or checking host-subsystem * link) read lock is obtained to allow concurrent reads. */ DECLARE_RWSEM(nvmet_config_sem); static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port, const char *subsysnqn); u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf, size_t len) { if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR; return 0; } u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len) { if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR; return 0; } static u32 nvmet_async_event_result(struct nvmet_async_event *aen) { return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16); } static void nvmet_async_events_free(struct nvmet_ctrl *ctrl) { struct nvmet_req *req; while (1) { mutex_lock(&ctrl->lock); if (!ctrl->nr_async_event_cmds) { mutex_unlock(&ctrl->lock); return; } req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds]; mutex_unlock(&ctrl->lock); nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR); } } static void nvmet_async_event_work(struct work_struct *work) { struct nvmet_ctrl *ctrl = container_of(work, struct nvmet_ctrl, async_event_work); struct nvmet_async_event *aen; struct nvmet_req *req; while (1) { mutex_lock(&ctrl->lock); aen = list_first_entry_or_null(&ctrl->async_events, struct nvmet_async_event, entry); if (!aen || !ctrl->nr_async_event_cmds) { mutex_unlock(&ctrl->lock); return; } req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds]; nvmet_set_result(req, nvmet_async_event_result(aen)); list_del(&aen->entry); kfree(aen); mutex_unlock(&ctrl->lock); nvmet_req_complete(req, 0); } } static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type, u8 event_info, u8 log_page) { struct nvmet_async_event *aen; aen = kmalloc(sizeof(*aen), GFP_KERNEL); if (!aen) return; aen->event_type = event_type; aen->event_info = event_info; aen->log_page = log_page; mutex_lock(&ctrl->lock); list_add_tail(&aen->entry, &ctrl->async_events); mutex_unlock(&ctrl->lock); schedule_work(&ctrl->async_event_work); } int nvmet_register_transport(struct nvmet_fabrics_ops *ops) { int ret = 0; 
down_write(&nvmet_config_sem); if (nvmet_transports[ops->type]) ret = -EINVAL; else nvmet_transports[ops->type] = ops; up_write(&nvmet_config_sem); return ret; } EXPORT_SYMBOL_GPL(nvmet_register_transport); void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops) { down_write(&nvmet_config_sem); nvmet_transports[ops->type] = NULL; up_write(&nvmet_config_sem); } EXPORT_SYMBOL_GPL(nvmet_unregister_transport); int nvmet_enable_port(struct nvmet_port *port) { struct nvmet_fabrics_ops *ops; int ret; lockdep_assert_held(&nvmet_config_sem); ops = nvmet_transports[port->disc_addr.trtype]; if (!ops) { up_write(&nvmet_config_sem); request_module("nvmet-transport-%d", port->disc_addr.trtype); down_write(&nvmet_config_sem); ops = nvmet_transports[port->disc_addr.trtype]; if (!ops) { pr_err("transport type %d not supported\n", port->disc_addr.trtype); return -EINVAL; } } if (!try_module_get(ops->owner)) return -EINVAL; ret = ops->add_port(port); if (ret) { module_put(ops->owner); return ret; } port->enabled = true; return 0; } void nvmet_disable_port(struct nvmet_port *port) { struct nvmet_fabrics_ops *ops; lockdep_assert_held(&nvmet_config_sem); port->enabled = false; ops = nvmet_transports[port->disc_addr.trtype]; ops->remove_port(port); module_put(ops->owner); } static void nvmet_keep_alive_timer(struct work_struct *work) { struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work), struct nvmet_ctrl, ka_work); pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n", ctrl->cntlid, ctrl->kato); nvmet_ctrl_fatal_error(ctrl); } static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl) { pr_debug("ctrl %d start keep-alive timer for %d secs\n", ctrl->cntlid, ctrl->kato); INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer); schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); } static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl) { pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid); cancel_delayed_work_sync(&ctrl->ka_work); } static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid) { struct nvmet_ns *ns; list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) { if (ns->nsid == le32_to_cpu(nsid)) return ns; } return NULL; } struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid) { struct nvmet_ns *ns; rcu_read_lock(); ns = __nvmet_find_namespace(ctrl, nsid); if (ns) percpu_ref_get(&ns->ref); rcu_read_unlock(); return ns; } static void nvmet_destroy_namespace(struct percpu_ref *ref) { struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref); complete(&ns->disable_done); } void nvmet_put_namespace(struct nvmet_ns *ns) { percpu_ref_put(&ns->ref); } int nvmet_ns_enable(struct nvmet_ns *ns) { struct nvmet_subsys *subsys = ns->subsys; struct nvmet_ctrl *ctrl; int ret = 0; mutex_lock(&subsys->lock); if (ns->enabled) goto out_unlock; ns->bdev = blkdev_get_by_path(ns->device_path, FMODE_READ | FMODE_WRITE, NULL); if (IS_ERR(ns->bdev)) { pr_err("failed to open block device %s: (%ld)\n", ns->device_path, PTR_ERR(ns->bdev)); ret = PTR_ERR(ns->bdev); ns->bdev = NULL; goto out_unlock; } ns->size = i_size_read(ns->bdev->bd_inode); ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev)); ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace, 0, GFP_KERNEL); if (ret) goto out_blkdev_put; if (ns->nsid > subsys->max_nsid) subsys->max_nsid = ns->nsid; /* * The namespaces list needs to be sorted to simplify the implementation * of the Identify Namespace List subcommand.
*/ if (list_empty(&subsys->namespaces)) { list_add_tail_rcu(&ns->dev_link, &subsys->namespaces); } else { struct nvmet_ns *old; list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) { BUG_ON(ns->nsid == old->nsid); if (ns->nsid < old->nsid) break; } list_add_tail_rcu(&ns->dev_link, &old->dev_link); } list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0); ns->enabled = true; ret = 0; out_unlock: mutex_unlock(&subsys->lock); return ret; out_blkdev_put: blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ); ns->bdev = NULL; goto out_unlock; } void nvmet_ns_disable(struct nvmet_ns *ns) { struct nvmet_subsys *subsys = ns->subsys; struct nvmet_ctrl *ctrl; mutex_lock(&subsys->lock); if (!ns->enabled) goto out_unlock; ns->enabled = false; list_del_rcu(&ns->dev_link); mutex_unlock(&subsys->lock); /* * Now that we removed the namespaces from the lookup list, we * can kill the per_cpu ref and wait for any remaining references * to be dropped, as well as an RCU grace period for anyone only * using the namespace under rcu_read_lock(). Note that we can't * use call_rcu here as we need to ensure the namespaces have * been fully destroyed before unloading the module. */ percpu_ref_kill(&ns->ref); synchronize_rcu(); wait_for_completion(&ns->disable_done); percpu_ref_exit(&ns->ref); mutex_lock(&subsys->lock); list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0); if (ns->bdev) blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ); out_unlock: mutex_unlock(&subsys->lock); } void nvmet_ns_free(struct nvmet_ns *ns) { nvmet_ns_disable(ns); kfree(ns->device_path); kfree(ns); } struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid) { struct nvmet_ns *ns; ns = kzalloc(sizeof(*ns), GFP_KERNEL); if (!ns) return NULL; INIT_LIST_HEAD(&ns->dev_link); init_completion(&ns->disable_done); ns->nsid = nsid; ns->subsys = subsys; uuid_gen(&ns->uuid); return ns; } static void __nvmet_req_complete(struct nvmet_req *req, u16 status) { if (status) nvmet_set_status(req, status); if (req->sq->size) req->sq->sqhd = (req->sq->sqhd + 1) % req->sq->size; req->rsp->sq_head = cpu_to_le16(req->sq->sqhd); req->rsp->sq_id = cpu_to_le16(req->sq->qid); req->rsp->command_id = req->cmd->common.command_id; if (req->ns) nvmet_put_namespace(req->ns); req->ops->queue_response(req); } void nvmet_req_complete(struct nvmet_req *req, u16 status) { __nvmet_req_complete(req, status); percpu_ref_put(&req->sq->ref); } EXPORT_SYMBOL_GPL(nvmet_req_complete); void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid, u16 size) { cq->qid = qid; cq->size = size; ctrl->cqs[qid] = cq; } void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid, u16 size) { sq->sqhd = 0; sq->qid = qid; sq->size = size; ctrl->sqs[qid] = sq; } static void nvmet_confirm_sq(struct percpu_ref *ref) { struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref); complete(&sq->confirm_done); } void nvmet_sq_destroy(struct nvmet_sq *sq) { /* * If this is the admin queue, complete all AERs so that our * queue doesn't have outstanding requests on it. 
*/ if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq) nvmet_async_events_free(sq->ctrl); percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq); wait_for_completion(&sq->confirm_done); wait_for_completion(&sq->free_done); percpu_ref_exit(&sq->ref); if (sq->ctrl) { nvmet_ctrl_put(sq->ctrl); sq->ctrl = NULL; /* allows reusing the queue later */ } } EXPORT_SYMBOL_GPL(nvmet_sq_destroy); static void nvmet_sq_free(struct percpu_ref *ref) { struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref); complete(&sq->free_done); } int nvmet_sq_init(struct nvmet_sq *sq) { int ret; ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL); if (ret) { pr_err("percpu_ref init failed!\n"); return ret; } init_completion(&sq->free_done); init_completion(&sq->confirm_done); return 0; } EXPORT_SYMBOL_GPL(nvmet_sq_init); bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops) { u8 flags = req->cmd->common.flags; u16 status; req->cq = cq; req->sq = sq; req->ops = ops; req->sg = NULL; req->sg_cnt = 0; req->rsp->status = 0; /* no support for fused commands yet */ if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) { status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; goto fail; } /* either variant of SGLs is fine, as we don't support metadata */ if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF && (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) { status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; goto fail; } if (unlikely(!req->sq->ctrl)) /* will return an error for any Non-connect command: */ status = nvmet_parse_connect_cmd(req); else if (likely(req->sq->qid != 0)) status = nvmet_parse_io_cmd(req); else if (req->cmd->common.opcode == nvme_fabrics_command) status = nvmet_parse_fabrics_cmd(req); else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC) status = nvmet_parse_discovery_cmd(req); else status = nvmet_parse_admin_cmd(req); if (status) goto fail; if (unlikely(!percpu_ref_tryget_live(&sq->ref))) { status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; goto fail; } return true; fail: __nvmet_req_complete(req, status); return false; } EXPORT_SYMBOL_GPL(nvmet_req_init); void nvmet_req_uninit(struct nvmet_req *req) { percpu_ref_put(&req->sq->ref); } EXPORT_SYMBOL_GPL(nvmet_req_uninit); static inline bool nvmet_cc_en(u32 cc) { return (cc >> NVME_CC_EN_SHIFT) & 0x1; } static inline u8 nvmet_cc_css(u32 cc) { return (cc >> NVME_CC_CSS_SHIFT) & 0x7; } static inline u8 nvmet_cc_mps(u32 cc) { return (cc >> NVME_CC_MPS_SHIFT) & 0xf; } static inline u8 nvmet_cc_ams(u32 cc) { return (cc >> NVME_CC_AMS_SHIFT) & 0x7; } static inline u8 nvmet_cc_shn(u32 cc) { return (cc >> NVME_CC_SHN_SHIFT) & 0x3; } static inline u8 nvmet_cc_iosqes(u32 cc) { return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf; } static inline u8 nvmet_cc_iocqes(u32 cc) { return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf; } static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl) { lockdep_assert_held(&ctrl->lock); if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES || nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES || nvmet_cc_mps(ctrl->cc) != 0 || nvmet_cc_ams(ctrl->cc) != 0 || nvmet_cc_css(ctrl->cc) != 0) { ctrl->csts = NVME_CSTS_CFS; return; } ctrl->csts = NVME_CSTS_RDY; } static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl) { lockdep_assert_held(&ctrl->lock); /* XXX: tear down queues? 
*/ ctrl->csts &= ~NVME_CSTS_RDY; ctrl->cc = 0; } void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new) { u32 old; mutex_lock(&ctrl->lock); old = ctrl->cc; ctrl->cc = new; if (nvmet_cc_en(new) && !nvmet_cc_en(old)) nvmet_start_ctrl(ctrl); if (!nvmet_cc_en(new) && nvmet_cc_en(old)) nvmet_clear_ctrl(ctrl); if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) { nvmet_clear_ctrl(ctrl); ctrl->csts |= NVME_CSTS_SHST_CMPLT; } if (!nvmet_cc_shn(new) && nvmet_cc_shn(old)) ctrl->csts &= ~NVME_CSTS_SHST_CMPLT; mutex_unlock(&ctrl->lock); } static void nvmet_init_cap(struct nvmet_ctrl *ctrl) { /* command sets supported: NVMe command set: */ ctrl->cap = (1ULL << 37); /* CC.EN timeout in 500msec units: */ ctrl->cap |= (15ULL << 24); /* maximum queue entries supported: */ ctrl->cap |= NVMET_QUEUE_SIZE - 1; } u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid, struct nvmet_req *req, struct nvmet_ctrl **ret) { struct nvmet_subsys *subsys; struct nvmet_ctrl *ctrl; u16 status = 0; subsys = nvmet_find_get_subsys(req->port, subsysnqn); if (!subsys) { pr_warn("connect request for invalid subsystem %s!\n", subsysnqn); req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn); return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; } mutex_lock(&subsys->lock); list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { if (ctrl->cntlid == cntlid) { if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) { pr_warn("hostnqn mismatch.\n"); continue; } if (!kref_get_unless_zero(&ctrl->ref)) continue; *ret = ctrl; goto out; } } pr_warn("could not find controller %d for subsys %s / host %s\n", cntlid, subsysnqn, hostnqn); req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid); status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; out: mutex_unlock(&subsys->lock); nvmet_subsys_put(subsys); return status; } u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd) { if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) { pr_err("got io cmd %d while CC.EN == 0 on qid = %d\n", cmd->common.opcode, req->sq->qid); return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; } if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) { pr_err("got io cmd %d while CSTS.RDY == 0 on qid = %d\n", cmd->common.opcode, req->sq->qid); req->ns = NULL; return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; } return 0; } static bool __nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn) { struct nvmet_host_link *p; if (subsys->allow_any_host) return true; list_for_each_entry(p, &subsys->hosts, entry) { if (!strcmp(nvmet_host_name(p->host), hostnqn)) return true; } return false; } static bool nvmet_host_discovery_allowed(struct nvmet_req *req, const char *hostnqn) { struct nvmet_subsys_link *s; list_for_each_entry(s, &req->port->subsystems, entry) { if (__nvmet_host_allowed(s->subsys, hostnqn)) return true; } return false; } bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys, const char *hostnqn) { lockdep_assert_held(&nvmet_config_sem); if (subsys->type == NVME_NQN_DISC) return nvmet_host_discovery_allowed(req, hostnqn); else return __nvmet_host_allowed(subsys, hostnqn); } u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp) { struct nvmet_subsys *subsys; struct nvmet_ctrl *ctrl; int ret; u16 status; status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; subsys = nvmet_find_get_subsys(req->port, subsysnqn); if (!subsys) { pr_warn("connect request for invalid subsystem %s!\n", subsysnqn); req->rsp->result.u32 = 
IPO_IATTR_CONNECT_DATA(subsysnqn); goto out; } status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; down_read(&nvmet_config_sem); if (!nvmet_host_allowed(req, subsys, hostnqn)) { pr_info("connect by host %s for subsystem %s not allowed\n", hostnqn, subsysnqn); req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn); up_read(&nvmet_config_sem); status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR; goto out_put_subsystem; } up_read(&nvmet_config_sem); status = NVME_SC_INTERNAL; ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); if (!ctrl) goto out_put_subsystem; mutex_init(&ctrl->lock); nvmet_init_cap(ctrl); INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work); INIT_LIST_HEAD(&ctrl->async_events); memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE); memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE); kref_init(&ctrl->ref); ctrl->subsys = subsys; ctrl->cqs = kcalloc(subsys->max_qid + 1, sizeof(struct nvmet_cq *), GFP_KERNEL); if (!ctrl->cqs) goto out_free_ctrl; ctrl->sqs = kcalloc(subsys->max_qid + 1, sizeof(struct nvmet_sq *), GFP_KERNEL); if (!ctrl->sqs) goto out_free_cqs; ret = ida_simple_get(&cntlid_ida, NVME_CNTLID_MIN, NVME_CNTLID_MAX, GFP_KERNEL); if (ret < 0) { status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR; goto out_free_sqs; } ctrl->cntlid = ret; ctrl->ops = req->ops; if (ctrl->subsys->type == NVME_NQN_DISC) { /* Don't accept keep-alive timeout for discovery controllers */ if (kato) { status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; goto out_free_sqs; } /* * Discovery controllers use some arbitrary high value in order * to cleanup stale discovery sessions * * From the latest base diff RC: * "The Keep Alive command is not supported by * Discovery controllers. A transport may specify a * fixed Discovery controller activity timeout value * (e.g., 2 minutes). If no commands are received * by a Discovery controller within that time * period, the controller may perform the * actions for Keep Alive Timer expiration". 
*/ ctrl->kato = NVMET_DISC_KATO; } else { /* keep-alive timeout in seconds */ ctrl->kato = DIV_ROUND_UP(kato, 1000); } nvmet_start_keep_alive_timer(ctrl); mutex_lock(&subsys->lock); list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); mutex_unlock(&subsys->lock); *ctrlp = ctrl; return 0; out_free_sqs: kfree(ctrl->sqs); out_free_cqs: kfree(ctrl->cqs); out_free_ctrl: kfree(ctrl); out_put_subsystem: nvmet_subsys_put(subsys); out: return status; } static void nvmet_ctrl_free(struct kref *ref) { struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref); struct nvmet_subsys *subsys = ctrl->subsys; nvmet_stop_keep_alive_timer(ctrl); mutex_lock(&subsys->lock); list_del(&ctrl->subsys_entry); mutex_unlock(&subsys->lock); flush_work(&ctrl->async_event_work); cancel_work_sync(&ctrl->fatal_err_work); ida_simple_remove(&cntlid_ida, ctrl->cntlid); nvmet_subsys_put(subsys); kfree(ctrl->sqs); kfree(ctrl->cqs); kfree(ctrl); } void nvmet_ctrl_put(struct nvmet_ctrl *ctrl) { kref_put(&ctrl->ref, nvmet_ctrl_free); } static void nvmet_fatal_error_handler(struct work_struct *work) { struct nvmet_ctrl *ctrl = container_of(work, struct nvmet_ctrl, fatal_err_work); pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid); ctrl->ops->delete_ctrl(ctrl); } void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl) { mutex_lock(&ctrl->lock); if (!(ctrl->csts & NVME_CSTS_CFS)) { ctrl->csts |= NVME_CSTS_CFS; INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); schedule_work(&ctrl->fatal_err_work); } mutex_unlock(&ctrl->lock); } EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error); static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port, const char *subsysnqn) { struct nvmet_subsys_link *p; if (!port) return NULL; if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn, NVMF_NQN_SIZE)) { if (!kref_get_unless_zero(&nvmet_disc_subsys->ref)) return NULL; return nvmet_disc_subsys; } down_read(&nvmet_config_sem); list_for_each_entry(p, &port->subsystems, entry) { if (!strncmp(p->subsys->subsysnqn, subsysnqn, NVMF_NQN_SIZE)) { if (!kref_get_unless_zero(&p->subsys->ref)) break; up_read(&nvmet_config_sem); return p->subsys; } } up_read(&nvmet_config_sem); return NULL; } struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn, enum nvme_subsys_type type) { struct nvmet_subsys *subsys; subsys = kzalloc(sizeof(*subsys), GFP_KERNEL); if (!subsys) return NULL; subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */ /* generate a random serial number as our controllers are ephemeral: */ get_random_bytes(&subsys->serial, sizeof(subsys->serial)); switch (type) { case NVME_NQN_NVME: subsys->max_qid = NVMET_NR_QUEUES; break; case NVME_NQN_DISC: subsys->max_qid = 0; break; default: pr_err("%s: Unknown Subsystem type - %d\n", __func__, type); kfree(subsys); return NULL; } subsys->type = type; subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE, GFP_KERNEL); if (!subsys->subsysnqn) { kfree(subsys); return NULL; } kref_init(&subsys->ref); mutex_init(&subsys->lock); INIT_LIST_HEAD(&subsys->namespaces); INIT_LIST_HEAD(&subsys->ctrls); INIT_LIST_HEAD(&subsys->hosts); return subsys; } static void nvmet_subsys_free(struct kref *ref) { struct nvmet_subsys *subsys = container_of(ref, struct nvmet_subsys, ref); WARN_ON_ONCE(!list_empty(&subsys->namespaces)); kfree(subsys->subsysnqn); kfree(subsys); } void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys) { struct nvmet_ctrl *ctrl; mutex_lock(&subsys->lock); list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) ctrl->ops->delete_ctrl(ctrl); mutex_unlock(&subsys->lock); } void 
nvmet_subsys_put(struct nvmet_subsys *subsys) { kref_put(&subsys->ref, nvmet_subsys_free); } static int __init nvmet_init(void) { int error; error = nvmet_init_discovery(); if (error) goto out; error = nvmet_init_configfs(); if (error) goto out_exit_discovery; return 0; out_exit_discovery: nvmet_exit_discovery(); out: return error; } static void __exit nvmet_exit(void) { nvmet_exit_configfs(); nvmet_exit_discovery(); ida_destroy(&cntlid_ida); BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024); BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024); } module_init(nvmet_init); module_exit(nvmet_exit); MODULE_LICENSE("GPL v2");
paulluo/linux
drivers/nvme/target/core.c
C
gpl-2.0
24,383
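A standalone sketch tied to the drivers/nvme/target/core.c record above: the nvmet_cc_*() helpers decode bit fields of the NVMe Controller Configuration (CC) register, and nvmet_start_ctrl() then insists on IOSQES/IOCQES matching the NVM command set entry sizes while MPS, AMS and CSS stay zero. The shift constants below mirror what the kernel defines as NVME_CC_*_SHIFT in <linux/nvme.h> and follow the NVMe 1.x CC layout; treat the exact values as an assumption of this illustration, not a restatement of the driver.

#include <stdio.h>
#include <stdint.h>

/* Assumed CC field positions (NVMe 1.x layout, cf. NVME_CC_*_SHIFT) */
#define CC_EN_SHIFT      0
#define CC_CSS_SHIFT     4
#define CC_MPS_SHIFT     7
#define CC_AMS_SHIFT    11
#define CC_SHN_SHIFT    14
#define CC_IOSQES_SHIFT 16
#define CC_IOCQES_SHIFT 20

static unsigned cc_field(uint32_t cc, unsigned shift, uint32_t mask)
{
	return (cc >> shift) & mask;	/* same shape as nvmet_cc_en() etc. */
}

int main(void)
{
	/* A typical "enable" value: EN=1, 64-byte SQEs (2^6), 16-byte CQEs (2^4) */
	uint32_t cc = (1u << CC_EN_SHIFT) | (6u << CC_IOSQES_SHIFT) |
		      (4u << CC_IOCQES_SHIFT);

	printf("EN=%u CSS=%u MPS=%u AMS=%u SHN=%u IOSQES=%u IOCQES=%u\n",
	       cc_field(cc, CC_EN_SHIFT, 0x1), cc_field(cc, CC_CSS_SHIFT, 0x7),
	       cc_field(cc, CC_MPS_SHIFT, 0xf), cc_field(cc, CC_AMS_SHIFT, 0x7),
	       cc_field(cc, CC_SHN_SHIFT, 0x3), cc_field(cc, CC_IOSQES_SHIFT, 0xf),
	       cc_field(cc, CC_IOCQES_SHIFT, 0xf));
	return 0;
}

Compiled as plain userspace C, this prints EN=1 with IOSQES=6 and IOCQES=4 and everything else zero, i.e. the field combination that nvmet_start_ctrl() accepts before raising CSTS.RDY.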
/* Copyright (C) 2008 Rodolfo Giometti <giometti@linux.it> * Copyright (C) 2008 Eurotech S.p.A. <info@eurotech.it> * Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc. * * Copyright (c) 2011, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/param.h> #include <linux/jiffies.h> #include <linux/workqueue.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/power_supply.h> #include <linux/idr.h> #include <linux/i2c.h> #include <linux/slab.h> #include <asm/unaligned.h> #include <linux/time.h> #include <linux/mfd/pmic8058.h> #include <linux/regulator/pmic8058-regulator.h> #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/regulator/machine.h> #include <linux/err.h> #include <linux/qpnp-charger.h> #include <linux/i2c/bq27520.h> /* use the same platform data as bq27520 */ #include <linux/of_gpio.h> //sjc0623 add #ifdef CONFIG_MACH_MSM8974_14001 /*OPPO 2013-09-22 liaofuchun add for bq27541 encryption*/ #include <linux/random.h> #include <linux/rtc.h> extern char *BQ27541_HMACSHA1_authenticate(char *Message,char *Key,char *result); #endif //CONFIG_MACH_MSM8974_14001 #ifdef CONFIG_OPPO_MSM_14021 /* OPPO 2014-06-23 sjc Add begin for 14021 */ static int mcu_en_gpio = 0; void mcu_en_gpio_set(int value) { if (value) { if (gpio_is_valid(mcu_en_gpio)) gpio_set_value(mcu_en_gpio, 0);///1); } else { if (gpio_is_valid(mcu_en_gpio)) { gpio_set_value(mcu_en_gpio, 1); usleep_range(10000, 10000); gpio_set_value(mcu_en_gpio, 0); } } } #else void mcu_en_gpio_set(int value) { return; } #endif //CONFIG_OPPO_MSM_14021 extern int load_soc(void);//sjc1121 extern void backup_soc_ex(int soc); /* yangfangbiao@oneplus.cn, 2015/01/19 Add for sync with android 4.4 */ /* OPPO 2013-12-20 liaofuchun add for fastchg firmware update */ #ifdef CONFIG_PIC1503_FASTCG extern unsigned char Pic16F_firmware_data[]; extern int pic_fw_ver_count; extern int pic_need_to_up_fw; extern int pic_have_updated; extern int pic16f_fw_update(bool pull96); #endif /* OPPO 2013-12-20 liaofuchun add end */ #define DRIVER_VERSION "1.1.0" /* Bq27541 standard data commands */ #define BQ27541_REG_CNTL 0x00 #define BQ27541_REG_AR 0x02 #define BQ27541_REG_ARTTE 0x04 #define BQ27541_REG_TEMP 0x06 #define BQ27541_REG_VOLT 0x08 #define BQ27541_REG_FLAGS 0x0A #define BQ27541_REG_NAC 0x0C #define BQ27541_REG_FAC 0x0e #define BQ27541_REG_RM 0x10 #define BQ27541_REG_FCC 0x12 #define BQ27541_REG_AI 0x14 #define BQ27541_REG_TTE 0x16 #define BQ27541_REG_TTF 0x18 #define BQ27541_REG_SI 0x1a #define BQ27541_REG_STTE 0x1c #define BQ27541_REG_MLI 0x1e #define BQ27541_REG_MLTTE 0x20 #define BQ27541_REG_AE 0x22 #define BQ27541_REG_AP 0x24 #define BQ27541_REG_TTECP 0x26 #define BQ27541_REG_SOH 0x28 #define BQ27541_REG_SOC 0x2c #define BQ27541_REG_NIC 0x2e #define BQ27541_REG_ICR 0x30 #define BQ27541_REG_LOGIDX 0x32 #define BQ27541_REG_LOGBUF 0x34 #define BQ27541_FLAG_DSC BIT(0) #define BQ27541_FLAG_FC BIT(9) #define BQ27541_CS_DLOGEN BIT(15) #define BQ27541_CS_SS BIT(13) /* Control subcommands */ #define 
BQ27541_SUBCMD_CTNL_STATUS 0x0000 #define BQ27541_SUBCMD_DEVCIE_TYPE 0x0001 #define BQ27541_SUBCMD_FW_VER 0x0002 #define BQ27541_SUBCMD_HW_VER 0x0003 #define BQ27541_SUBCMD_DF_CSUM 0x0004 #define BQ27541_SUBCMD_PREV_MACW 0x0007 #define BQ27541_SUBCMD_CHEM_ID 0x0008 #define BQ27541_SUBCMD_BD_OFFSET 0x0009 #define BQ27541_SUBCMD_INT_OFFSET 0x000a #define BQ27541_SUBCMD_CC_VER 0x000b #define BQ27541_SUBCMD_OCV 0x000c #define BQ27541_SUBCMD_BAT_INS 0x000d #define BQ27541_SUBCMD_BAT_REM 0x000e #define BQ27541_SUBCMD_SET_HIB 0x0011 #define BQ27541_SUBCMD_CLR_HIB 0x0012 #define BQ27541_SUBCMD_SET_SLP 0x0013 #define BQ27541_SUBCMD_CLR_SLP 0x0014 #define BQ27541_SUBCMD_FCT_RES 0x0015 #define BQ27541_SUBCMD_ENABLE_DLOG 0x0018 #define BQ27541_SUBCMD_DISABLE_DLOG 0x0019 #define BQ27541_SUBCMD_SEALED 0x0020 #define BQ27541_SUBCMD_ENABLE_IT 0x0021 #define BQ27541_SUBCMD_DISABLE_IT 0x0023 #define BQ27541_SUBCMD_CAL_MODE 0x0040 #define BQ27541_SUBCMD_RESET 0x0041 #define ZERO_DEGREE_CELSIUS_IN_TENTH_KELVIN (-2731) #define BQ27541_INIT_DELAY ((HZ)*1) /* OPPO 2013-08-24 wangjc Add begin for filter soc. */ #ifdef CONFIG_MACH_MSM8974_14001 #define CAPACITY_SALTATE_COUNTER 4 #define CAPACITY_SALTATE_COUNTER_NOT_CHARGING 13//40sec #ifdef CONFIG_MACH_MSM8974_14001 /* yangfangbiao@oneplus.cn, 2015/01/06 Add for sync with KK charge standard */ #define CAPACITY_SALTATE_COUNTER_60 20//40 1min #define CAPACITY_SALTATE_COUNTER_95 50//60 2.5min #define CAPACITY_SALTATE_COUNTER_FULL 100//120 5min #define CAPACITY_SALTATE_COUNTER_CHARGING_TERM 20//30 1min #endif /*CONFIG_MACH_MSM8974_14001*/ #define SOC_SHUTDOWN_VALID_LIMITS 20 /* yangfangbiao@oneplus.cn, 2015/01/06 Add for sync with KK charge standard */ #define TEN_MINUTES 600 #endif /* OPPO 2013-08-24 wangjc Add end */ /* If the system has several batteries we need a different name for each * of them... */ static DEFINE_IDR(battery_id); static DEFINE_MUTEX(battery_mutex); struct bq27541_device_info; struct bq27541_access_methods { int (*read)(u8 reg, int *rt_value, int b_single, struct bq27541_device_info *di); }; struct bq27541_device_info { struct device *dev; int id; struct bq27541_access_methods *bus; struct i2c_client *client; struct work_struct counter; /* 300ms delay is needed after bq27541 is powered up * and before any successful I2C transaction */ struct delayed_work hw_config; /* OPPO 2013-08-24 wangjc Add begin for filter soc. 
*/ #ifdef CONFIG_MACH_MSM8974_14001 int cc_pre; int fcc_pre; int soc_pre; int temp_pre; int batt_vol_pre; int current_pre; int saltate_counter; int report_count; bool is_authenticated; //wangjc add for authentication bool fast_chg_started; bool fast_switch_to_normal; bool fast_normal_to_warm; //lfc add for fastchg over temp int battery_type; //lfc add for battery type struct power_supply *batt_psy; int irq; struct work_struct fastcg_work; bool alow_reading; struct timer_list watchdog; struct wake_lock fastchg_wake_lock; bool fast_chg_allow; bool fast_low_temp_full; /* jingchun.wang@Onlinerd.Driver, 2014/02/12 Add for retry when config fail */ int retry_count; /* jingchun.wang@Onlinerd.Driver, 2014/02/27 Add for get right soc when sleep long time */ unsigned long rtc_resume_time; unsigned long rtc_suspend_time; atomic_t suspended; #endif bool fast_chg_ing; /* OPPO 2013-08-24 wangjc Add end */ }; static int coulomb_counter; static spinlock_t lock; /* protect access to coulomb_counter */ static struct bq27541_device_info *bq27541_di; static int bq27541_i2c_txsubcmd(u8 reg, unsigned short subcmd, struct bq27541_device_info *di); static int bq27541_read(u8 reg, int *rt_value, int b_single, struct bq27541_device_info *di) { return di->bus->read(reg, rt_value, b_single, di); } /* * Return the battery temperature in tenths of degree Celsius * Or < 0 if something fails. */ static int bq27541_battery_temperature(struct bq27541_device_info *di) { int ret; int temp = 0; static int count = 0; #ifdef CONFIG_MACH_MSM8974_14001 /* jingchun.wang@Onlinerd.Driver, 2014/02/27 Add for get right soc when sleep long time */ if(atomic_read(&di->suspended) == 1) { return di->temp_pre + ZERO_DEGREE_CELSIUS_IN_TENTH_KELVIN; } #endif /*CONFIG_MACH_MSM8974_14001*/ if(di->alow_reading == true) { ret = bq27541_read(BQ27541_REG_TEMP, &temp, 0, di); #ifdef CONFIG_MACH_MSM8974_14001 /* jingchun.wang@Onlinerd.Driver, 2014/01/08 Add for don't report battery not connect when reading error once. */ if (ret) { count++; dev_err(di->dev, "error reading temperature\n"); if(count > 1) { count = 0; /* jingchun.wang@Onlinerd.Driver, 2014/01/22 Add for it report bad status when plug out battery */ di->temp_pre = -400 - ZERO_DEGREE_CELSIUS_IN_TENTH_KELVIN; return -400; } else { return di->temp_pre + ZERO_DEGREE_CELSIUS_IN_TENTH_KELVIN; } } count = 0; #endif /*CONFIG_MACH_MSM8974_14001*/ } else { return di->temp_pre + ZERO_DEGREE_CELSIUS_IN_TENTH_KELVIN; } di->temp_pre = temp; return temp + ZERO_DEGREE_CELSIUS_IN_TENTH_KELVIN; } /* OPPO 2013-08-24 wangjc Add begin for add adc interface. 
*/ #ifdef CONFIG_MACH_MSM8974_14001 #define BQ27541_REG_CC 0x2a static int bq27541_battery_cc(struct bq27541_device_info *di)/* yangfangbiao@oneplus.cn, 2015/02/13 Add cc interface */ { int ret; int cc = 0; if (atomic_read(&di->suspended) == 1) return di->cc_pre; if (di->alow_reading == true) { ret = bq27541_read(BQ27541_REG_CC, &cc, 0, di); if (ret) { dev_err(di->dev, "error reading cc.\n"); return ret; } } else { return di->cc_pre; } di->cc_pre = cc; return cc; } static int bq27541_battery_fcc(struct bq27541_device_info *di)//sjc20150105 { int ret; int fcc = 0; if (di->alow_reading == true) { ret = bq27541_read(BQ27541_REG_FCC, &fcc, 0, di); if (ret) { dev_err(di->dev, "error reading fcc.\n"); return ret; } } return fcc; } static int bq27541_remaining_capacity(struct bq27541_device_info *di) { int ret; int cap = 0; if(di->alow_reading == true) { ret = bq27541_read(BQ27541_REG_RM, &cap, 0, di); if (ret) { dev_err(di->dev, "error reading capacity.\n"); return ret; } } return cap; } static int bq27541_battery_voltage(struct bq27541_device_info *di); extern int get_charging_status(void); extern int fuelgauge_battery_temp_region_get(void); static int bq27541_soc_calibrate(struct bq27541_device_info *di, int soc) { union power_supply_propval ret = {0,}; unsigned int soc_calib; int counter_temp = 0; static int charging_status = 0;//sjc1121 static int charging_status_pre = 0; /* yangfangbiao@oneplus.cn, 2015/01/06 Modify for sync with KK charge standard */ int soc_load;//sjc1121 int soc_temp; if(!di->batt_psy){ di->batt_psy = power_supply_get_by_name("battery"); //get the soc before reboot soc_load = load_soc(); if (soc_load == -1) { //get last soc error di->soc_pre = soc; } else if(abs(soc - soc_load) > SOC_SHUTDOWN_VALID_LIMITS) { //the battery maybe changed di->soc_pre = soc; } else { //compare the soc and the last soc if(soc_load > soc) { di->soc_pre = soc_load -1; } else { di->soc_pre = soc_load; } } #ifdef CONFIG_MACH_MSM8974_14001 /* yangfangbiao@oneplus.cn, 2015/02/3 Modify for V2.4 charge standard */ if (!di->batt_psy) { return di->soc_pre; } #endif /*CONFIG_MACH_MSM8974_14001*/ //store the soc when boot first time backup_soc_ex(di->soc_pre); } soc_temp = di->soc_pre; if(di->batt_psy){ ret.intval = get_charging_status();//sjc20150104 if(ret.intval == POWER_SUPPLY_STATUS_CHARGING || ret.intval == POWER_SUPPLY_STATUS_FULL) { // is charging charging_status = 1; } else { charging_status = 0; } if (charging_status ^ charging_status_pre) { charging_status_pre = charging_status; di->saltate_counter = 0; } if (charging_status) { // is charging /* yangfangbiao@oneplus.cn, 2015/01/06 Modify begin for sync with KK charge standard */ if (ret.intval == POWER_SUPPLY_STATUS_FULL) { soc_calib = di->soc_pre; if (di->soc_pre < 100 && (fuelgauge_battery_temp_region_get() == CV_BATTERY_TEMP_REGION__LITTLE_COOL || fuelgauge_battery_temp_region_get() == CV_BATTERY_TEMP_REGION__NORMAL)) {//sjc20150104 if (di->saltate_counter < CAPACITY_SALTATE_COUNTER_CHARGING_TERM) { di->saltate_counter++; } else { soc_calib = di->soc_pre + 1; di->saltate_counter = 0; } } } else { /* yangfangbiao@oneplus.cn, 2015/01/06 Modify end for sync with KK charge standard */ if(abs(soc - di->soc_pre) > 0) { di->saltate_counter++; if(di->saltate_counter < CAPACITY_SALTATE_COUNTER) return di->soc_pre; else di->saltate_counter = 0; } else di->saltate_counter = 0; if(soc > di->soc_pre) { soc_calib = di->soc_pre + 1; } else if(soc < (di->soc_pre - 2)) { /* jingchun.wang@Onlinerd.Driver, 2013/04/14 Add for allow soc fail when charging. 
*/ soc_calib = di->soc_pre - 1; } else { soc_calib = di->soc_pre; } /* jingchun.wang@Onlinerd.Driver, 2013/12/12 Add for set capacity to 100 when full in normal temp */ if(ret.intval == POWER_SUPPLY_STATUS_FULL) { if(soc > 94) { soc_calib = 100; } } } } else { // not charging if ((abs(soc - di->soc_pre) > 0) || (di->batt_vol_pre <= 3300 * 1000 && di->batt_vol_pre > 2500 * 1000)) {//sjc1118 add for batt_vol is too low but soc is not jumping di->saltate_counter++; if(di->soc_pre == 100) { counter_temp = CAPACITY_SALTATE_COUNTER_FULL;//t>=5min } else if (di->soc_pre > 95) { counter_temp = CAPACITY_SALTATE_COUNTER_95;///t>=2.5min } else if (di->soc_pre > 60) { counter_temp = CAPACITY_SALTATE_COUNTER_60;//t>=1min } else { counter_temp = CAPACITY_SALTATE_COUNTER_NOT_CHARGING;//t>=40sec } /* sjc1020, when batt_vol is too low(and soc is jumping), decrease faster to avoid dead battery shutdown */ if (di->batt_vol_pre <= 3300 * 1000 && di->batt_vol_pre > 2500 * 1000 && di->soc_pre <= 10) { if (bq27541_battery_voltage(di) <= 3300 * 1000 && bq27541_battery_voltage(di) > 2500 * 1000) {//check again counter_temp = CAPACITY_SALTATE_COUNTER - 1;//about 9s } } if(di->saltate_counter < counter_temp) return di->soc_pre; else di->saltate_counter = 0; } else di->saltate_counter = 0; if(soc < di->soc_pre) soc_calib = di->soc_pre - 1; else if (di->batt_vol_pre <= 3300 * 1000 && di->batt_vol_pre > 2500 * 1000 && di->soc_pre > 0)//sjc1118 add for batt_vol is too low but soc is not jumping soc_calib = di->soc_pre - 1; else soc_calib = di->soc_pre; } } else { soc_calib = soc; } if(soc_calib > 100) soc_calib = 100; di->soc_pre = soc_calib; if(soc_temp != soc_calib) { //store when soc changed backup_soc_ex(soc_calib); pr_info("soc:%d, soc_calib:%d\n", soc, soc_calib); } return soc_calib; } static int bq27541_battery_soc(struct bq27541_device_info *di, bool raw) { int ret; int soc = 0; #ifdef CONFIG_MACH_MSM8974_14001 /* jingchun.wang@Onlinerd.Driver, 2014/02/27 Add for get right soc when sleep long time */ if(atomic_read(&di->suspended) == 1) { return di->soc_pre; } #endif /*CONFIG_MACH_MSM8974_14001*/ if(di->alow_reading == true) { ret = bq27541_read(BQ27541_REG_SOC, &soc, 0, di); if (ret) { dev_err(di->dev, "error reading soc.ret:%d\n",ret); goto read_soc_err; } } else { if(di->soc_pre) return di->soc_pre; else return 0; } if (raw == true) { if(soc > 90) { soc += 2; } if(soc <= di->soc_pre) { di->soc_pre = soc; } } soc = bq27541_soc_calibrate(di,soc); return soc; read_soc_err: if(di->soc_pre) return di->soc_pre; else return 0; } static int bq27541_average_current(struct bq27541_device_info *di) { int ret; int curr = 0; #ifdef CONFIG_MACH_MSM8974_14001 /* jingchun.wang@Onlinerd.Driver, 2014/02/27 Add for get right soc when sleep long time */ if(atomic_read(&di->suspended) == 1) { return -di->current_pre; } #endif /*CONFIG_MACH_MSM8974_14001*/ if(di->alow_reading == true) { ret = bq27541_read(BQ27541_REG_AI, &curr, 0, di); if (ret) { dev_err(di->dev, "error reading current.\n"); return ret; } } else { return -di->current_pre; } // negative current if(curr&0x8000) curr = -((~(curr-1))&0xFFFF); di->current_pre = curr; return -curr; } #endif /* OPPO 2013-08-24 wangjc Add end */ /* * Return the battery Voltage in milivolts * Or < 0 if something fails. 
*/ static int bq27541_battery_voltage(struct bq27541_device_info *di) { int ret; int volt = 0; #ifdef CONFIG_MACH_MSM8974_14001 /* jingchun.wang@Onlinerd.Driver, 2014/02/27 Add for get right soc when sleep long time */ if(atomic_read(&di->suspended) == 1) { return di->batt_vol_pre; } #endif /*CONFIG_MACH_MSM8974_14001*/ if(di->alow_reading == true) { ret = bq27541_read(BQ27541_REG_VOLT, &volt, 0, di); if (ret) { dev_err(di->dev, "error reading voltage,ret:%d\n",ret); return ret; } } else { return di->batt_vol_pre; } di->batt_vol_pre = volt * 1000; return volt * 1000; } static void bq27541_cntl_cmd(struct bq27541_device_info *di, int subcmd) { bq27541_i2c_txsubcmd(BQ27541_REG_CNTL, subcmd, di); } /* * i2c specific code */ static int bq27541_i2c_txsubcmd(u8 reg, unsigned short subcmd, struct bq27541_device_info *di) { struct i2c_msg msg; unsigned char data[3]; int ret; if (!di->client) return -ENODEV; memset(data, 0, sizeof(data)); data[0] = reg; data[1] = subcmd & 0x00FF; data[2] = (subcmd & 0xFF00) >> 8; msg.addr = di->client->addr; msg.flags = 0; msg.len = 3; msg.buf = data; ret = i2c_transfer(di->client->adapter, &msg, 1); if (ret < 0) return -EIO; return 0; } static int bq27541_chip_config(struct bq27541_device_info *di) { int flags = 0, ret = 0; bq27541_cntl_cmd(di, BQ27541_SUBCMD_CTNL_STATUS); udelay(66); ret = bq27541_read(BQ27541_REG_CNTL, &flags, 0, di); if (ret < 0) { dev_err(di->dev, "error reading register %02x ret = %d\n", BQ27541_REG_CNTL, ret); return ret; } udelay(66); bq27541_cntl_cmd(di, BQ27541_SUBCMD_ENABLE_IT); udelay(66); if (!(flags & BQ27541_CS_DLOGEN)) { bq27541_cntl_cmd(di, BQ27541_SUBCMD_ENABLE_DLOG); udelay(66); } return 0; } static void bq27541_coulomb_counter_work(struct work_struct *work) { int value = 0, temp = 0, index = 0, ret = 0; struct bq27541_device_info *di; unsigned long flags; int count = 0; di = container_of(work, struct bq27541_device_info, counter); /* retrieve 30 values from FIFO of coulomb data logging buffer * and average over time */ do { ret = bq27541_read(BQ27541_REG_LOGBUF, &temp, 0, di); if (ret < 0) break; if (temp != 0x7FFF) { ++count; value += temp; } /* delay 66uS, waiting time between continuous reading * results */ udelay(66); ret = bq27541_read(BQ27541_REG_LOGIDX, &index, 0, di); if (ret < 0) break; udelay(66); } while (index != 0 || temp != 0x7FFF); if (ret < 0) { dev_err(di->dev, "Error reading datalog register\n"); return; } if (count) { spin_lock_irqsave(&lock, flags); coulomb_counter = value/count; spin_unlock_irqrestore(&lock, flags); } } static int bq27541_get_battery_mvolts(void) { return bq27541_battery_voltage(bq27541_di); } static int bq27541_get_battery_temperature(void) { return bq27541_battery_temperature(bq27541_di); } static int bq27541_is_battery_present(void) { return 1; } static int bq27541_is_battery_temp_within_range(void) { return 1; } static int bq27541_is_battery_id_valid(void) { return 1; } /* OPPO 2013-08-24 wangjc Add begin for add adc interface. 
*/ #ifdef CONFIG_MACH_MSM8974_14001 static int bq27541_get_batt_cc(void)/* yangfangbiao@oneplus.cn, 2015/02/13 Add cc interface */ { return bq27541_battery_cc(bq27541_di); } static int bq27541_get_batt_fcc(void)//sjc20150105 { return bq27541_battery_fcc(bq27541_di); } static int bq27541_get_batt_remaining_capacity(void) { return bq27541_remaining_capacity(bq27541_di); } static int bq27541_get_battery_soc(void) { return bq27541_battery_soc(bq27541_di, false); } static int bq27541_get_average_current(void) { return bq27541_average_current(bq27541_di); } //wangjc add for authentication static int bq27541_is_battery_authenticated(void) { if(bq27541_di) { return bq27541_di->is_authenticated; } return false; } static int bq27541_fast_chg_started(void) { if(bq27541_di) { return bq27541_di->fast_chg_started; } return false; } static int bq27541_fast_switch_to_normal(void) { if(bq27541_di) { //pr_err("%s fast_switch_to_normal:%d\n",__func__,bq27541_di->fast_switch_to_normal); return bq27541_di->fast_switch_to_normal; } return false; } static int bq27541_set_switch_to_noraml_false(void) { if(bq27541_di) { bq27541_di->fast_switch_to_normal = false; } return 0; } static int bq27541_get_fast_low_temp_full(void) { if(bq27541_di) { return bq27541_di->fast_low_temp_full; } return false; } static int bq27541_set_fast_low_temp_full_false(void) { if(bq27541_di) { return bq27541_di->fast_low_temp_full = false; } return 0; } #endif /* OPPO 2013-08-24 wangjc Add end */ /* OPPO 2013-12-12 liaofuchun add for set/get fastchg allow begin*/ static int bq27541_fast_normal_to_warm(void) { if(bq27541_di) { //pr_err("%s fast_switch_to_normal:%d\n",__func__,bq27541_di->fast_switch_to_normal); return bq27541_di->fast_normal_to_warm; } return 0; } static int bq27541_set_fast_normal_to_warm_false(void) { if(bq27541_di) { bq27541_di->fast_normal_to_warm = false; } return 0; } static int bq27541_set_fast_chg_allow(int enable) { if(bq27541_di) { bq27541_di->fast_chg_allow = enable; } return 0; } static int bq27541_get_fast_chg_allow(void) { if(bq27541_di) { return bq27541_di->fast_chg_allow; } return 0; } static int bq27541_get_fast_chg_ing(void) { if(bq27541_di) { return bq27541_di->fast_chg_ing; } return 0; } /* OPPO 2013-12-12 liaofuchun add for set/get fastchg allow end */ static struct qpnp_battery_gauge bq27541_batt_gauge = { .get_battery_mvolts = bq27541_get_battery_mvolts, .get_battery_temperature = bq27541_get_battery_temperature, .is_battery_present = bq27541_is_battery_present, .is_battery_temp_within_range = bq27541_is_battery_temp_within_range, .is_battery_id_valid = bq27541_is_battery_id_valid, /* OPPO 2013-09-30 wangjc Add begin for add new interface */ #ifdef CONFIG_MACH_MSM8974_14001 .get_batt_cc = bq27541_get_batt_cc, /* yangfangbiao@oneplus.cn, 2015/02/13 Add cc interface */ .get_batt_fcc = bq27541_get_batt_fcc, /* yangfangbiao@oneplus.cn, 2015/01/06 Add for sync with KK charge standard */ .get_batt_remaining_capacity = bq27541_get_batt_remaining_capacity, .get_battery_soc = bq27541_get_battery_soc, .get_average_current = bq27541_get_average_current, //wangjc add for authentication .is_battery_authenticated = bq27541_is_battery_authenticated, .fast_chg_started = bq27541_fast_chg_started, .fast_switch_to_normal = bq27541_fast_switch_to_normal, .set_switch_to_noraml_false = bq27541_set_switch_to_noraml_false, .set_fast_chg_allow = bq27541_set_fast_chg_allow, .get_fast_chg_allow = bq27541_get_fast_chg_allow, .fast_normal_to_warm = bq27541_fast_normal_to_warm, .set_normal_to_warm_false = 
bq27541_set_fast_normal_to_warm_false, .get_fast_chg_ing = bq27541_get_fast_chg_ing, .get_fast_low_temp_full = bq27541_get_fast_low_temp_full, .set_low_temp_full_false = bq27541_set_fast_low_temp_full_false, #endif /* OPPO 2013-09-30 wangjc Add end */ }; static bool bq27541_authenticate(struct i2c_client *client); static int bq27541_batt_type_detect(struct i2c_client *client); static void bq27541_hw_config(struct work_struct *work) { int ret = 0, flags = 0, type = 0, fw_ver = 0; struct bq27541_device_info *di; di = container_of(work, struct bq27541_device_info, hw_config.work); ret = bq27541_chip_config(di); if (ret) { dev_err(di->dev, "Failed to config Bq27541\n"); #ifdef CONFIG_MACH_MSM8974_14001 /* jingchun.wang@Onlinerd.Driver, 2014/02/12 Add for retry when config fail */ di->retry_count--; if(di->retry_count > 0) { schedule_delayed_work(&di->hw_config, HZ); } #endif /*CONFIG_MACH_MSM8974_14001*/ return; } qpnp_battery_gauge_register(&bq27541_batt_gauge); bq27541_cntl_cmd(di, BQ27541_SUBCMD_CTNL_STATUS); udelay(66); bq27541_read(BQ27541_REG_CNTL, &flags, 0, di); bq27541_cntl_cmd(di, BQ27541_SUBCMD_DEVCIE_TYPE); udelay(66); bq27541_read(BQ27541_REG_CNTL, &type, 0, di); bq27541_cntl_cmd(di, BQ27541_SUBCMD_FW_VER); udelay(66); bq27541_read(BQ27541_REG_CNTL, &fw_ver, 0, di); #ifdef CONFIG_MACH_MSM8974_14001 /*OPPO 2013-09-18 liaofuchun add begin for check authenticate data*/ di->is_authenticated = bq27541_authenticate(di->client); di->battery_type = bq27541_batt_type_detect(di->client); #endif //CONFIG_MACH_MSM8974_14001 dev_info(di->dev, "DEVICE_TYPE is 0x%02X, FIRMWARE_VERSION is 0x%02X\n", type, fw_ver); dev_info(di->dev, "Complete bq27541 configuration 0x%02X\n", flags); } static int bq27541_read_i2c(u8 reg, int *rt_value, int b_single, struct bq27541_device_info *di) { struct i2c_client *client = di->client; /* OPPO 2013-12-09 wangjc Modify begin for use standard i2c interface */ #ifndef CONFIG_MACH_MSM8974_14001 struct i2c_msg msg[1]; #else struct i2c_msg msg[2]; #endif /* OPPO 2013-12-09 wangjc Modify end */ unsigned char data[2]; int err; if (!client->adapter) return -ENODEV; /* OPPO 2013-09-30 wangjc Add begin for eliminate conflict */ #ifdef CONFIG_MACH_MSM8974_14001 mutex_lock(&battery_mutex); #endif /* OPPO 2013-09-30 wangjc Add end */ /* OPPO 2013-12-09 wangjc Modify begin for use standard i2c interface */ #ifndef CONFIG_MACH_MSM8974_14001 msg->addr = client->addr; msg->flags = 0; msg->len = 1; msg->buf = data; data[0] = reg; err = i2c_transfer(client->adapter, msg, 1); if (err >= 0) { if (!b_single) msg->len = 2; else msg->len = 1; msg->flags = I2C_M_RD; err = i2c_transfer(client->adapter, msg, 1); if (err >= 0) { if (!b_single) *rt_value = get_unaligned_le16(data); else *rt_value = data[0]; mutex_unlock(&battery_mutex); return 0; } } #else /* Write register */ msg[0].addr = client->addr; msg[0].flags = 0; msg[0].len = 1; msg[0].buf = data; data[0] = reg; /* Read data */ msg[1].addr = client->addr; msg[1].flags = I2C_M_RD; if (!b_single) msg[1].len = 2; else msg[1].len = 1; msg[1].buf = data; err = i2c_transfer(client->adapter, msg, 2); if (err >= 0) { if (!b_single) *rt_value = get_unaligned_le16(data); else *rt_value = data[0]; mutex_unlock(&battery_mutex); return 0; } #endif /* OPPO 2013-12-09 wangjc Modify end */ /* OPPO 2013-09-30 wangjc Add begin for eliminate conflict */ #ifdef CONFIG_MACH_MSM8974_14001 mutex_unlock(&battery_mutex); #endif /* OPPO 2013-09-30 wangjc Add end */ return err; } #ifdef CONFIG_BQ27541_TEST_ENABLE static int reg; static int subcmd; static 
ssize_t bq27541_read_stdcmd(struct device *dev, struct device_attribute *attr, char *buf) { int ret; int temp = 0; struct platform_device *client; struct bq27541_device_info *di; client = to_platform_device(dev); di = platform_get_drvdata(client); if (reg <= BQ27541_REG_ICR && reg > 0x00) { ret = bq27541_read(reg, &temp, 0, di); if (ret) ret = snprintf(buf, PAGE_SIZE, "Read Error!\n"); else ret = snprintf(buf, PAGE_SIZE, "0x%02x\n", temp); } else ret = snprintf(buf, PAGE_SIZE, "Register Error!\n"); return ret; } static ssize_t bq27541_write_stdcmd(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { ssize_t ret = strnlen(buf, PAGE_SIZE); int cmd; sscanf(buf, "%x", &cmd); reg = cmd; return ret; } static ssize_t bq27541_read_subcmd(struct device *dev, struct device_attribute *attr, char *buf) { int ret; int temp = 0; struct platform_device *client; struct bq27541_device_info *di; client = to_platform_device(dev); di = platform_get_drvdata(client); if (subcmd == BQ27541_SUBCMD_DEVCIE_TYPE || subcmd == BQ27541_SUBCMD_FW_VER || subcmd == BQ27541_SUBCMD_HW_VER || subcmd == BQ27541_SUBCMD_CHEM_ID) { bq27541_cntl_cmd(di, subcmd); /* Retrieve Chip status */ udelay(66); ret = bq27541_read(BQ27541_REG_CNTL, &temp, 0, di); if (ret) ret = snprintf(buf, PAGE_SIZE, "Read Error!\n"); else ret = snprintf(buf, PAGE_SIZE, "0x%02x\n", temp); } else ret = snprintf(buf, PAGE_SIZE, "Register Error!\n"); return ret; } static ssize_t bq27541_write_subcmd(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { ssize_t ret = strnlen(buf, PAGE_SIZE); int cmd; sscanf(buf, "%x", &cmd); subcmd = cmd; return ret; } static DEVICE_ATTR(std_cmd, S_IRUGO|S_IWUGO, bq27541_read_stdcmd, bq27541_write_stdcmd); static DEVICE_ATTR(sub_cmd, S_IRUGO|S_IWUGO, bq27541_read_subcmd, bq27541_write_subcmd); static struct attribute *fs_attrs[] = { &dev_attr_std_cmd.attr, &dev_attr_sub_cmd.attr, NULL, }; static struct attribute_group fs_attr_group = { .attrs = fs_attrs, }; static struct platform_device this_device = { .name = "bq27541-test", .id = -1, .dev.platform_data = NULL, }; #endif #ifdef CONFIG_MACH_MSM8974_14001 /*OPPO 2013-09-18 liaofuchun add begin for bq27541 authenticate */ #define BLOCKDATACTRL 0X61 #define DATAFLASHBLOCK 0X3F #define AUTHENDATA 0X40 #define AUTHENCHECKSUM 0X54 #define MESSAGE_LEN 20 #define KEY_LEN 16 /* OPPO 2014-02-25 sjc Modify begin for FIND7OP not use authenticate */ #if defined (CONFIG_MACH_MSM8974_14001) || defined (CONFIG_OPPO_MSM_14021) static bool bq27541_authenticate(struct i2c_client *client) { return true; } #else static bool bq27541_authenticate(struct i2c_client *client) { char recv_buf[MESSAGE_LEN]={0x0}; char send_buf[MESSAGE_LEN]={0x0}; char result[MESSAGE_LEN]={0x0}; char Key[KEY_LEN]={0x77,0x30,0xa1,0x28,0x0a,0xa1,0x13,0x20,0xef,0xcd,0xab,0x89,0x67,0x45,0x23,0x01}; char checksum_buf[1] ={0x0}; char authen_cmd_buf[1] = {0x00}; int i,rc; pr_info("%s Enter\n",__func__); // step 0: produce 20 bytes random data and checksum get_random_bytes(send_buf,20); for(i = 0;i < 20;i++){ checksum_buf[0] = checksum_buf[0] + send_buf[i]; } checksum_buf[0] = 0xff - (checksum_buf[0]&0xff); /* step 1: unseal mode->write 0x01 to blockdatactrl authen_cmd_buf[0] = 0x01; rc = i2c_smbus_write_i2c_block_data(client,BLOCKDATACTRL,1,&authen_cmd_buf[0]); } */ // step 1: seal mode->write 0x00 to dataflashblock rc = i2c_smbus_write_i2c_block_data(client,DATAFLASHBLOCK,1,&authen_cmd_buf[0]); if( rc < 0 ){ pr_info("%s i2c write error\n",__func__); return false; } // 
step 2: write 20 bytes to authendata_reg i2c_smbus_write_i2c_block_data(client,AUTHENDATA,MESSAGE_LEN,&send_buf[0]); msleep(1); // step 3: write checksum to authenchecksum_reg for compute i2c_smbus_write_i2c_block_data(client,AUTHENCHECKSUM,1,&checksum_buf[0]); msleep(50); // step 4: read authendata i2c_smbus_read_i2c_block_data(client,AUTHENDATA,MESSAGE_LEN,&recv_buf[0]); // step 5: phone do hmac(sha1-generic) algorithm BQ27541_HMACSHA1_authenticate(send_buf,Key,result); // step 6: compare recv_buf from bq27541 and result by phone rc = strncmp(recv_buf,result,MESSAGE_LEN); if(rc == 0){ pr_info("bq27541_authenticate success\n"); return true; } pr_info("bq27541_authenticate error,dump buf:\n"); for(i = 0;i < 20;i++){ pr_info("send_buf[%d]:0x%x,recv_buf[%d]:0x%x ?= result[%d]:0x%x\n",i,send_buf[i],i,recv_buf[i],i,result[i]); } return false; } #endif //CONFIG_MACH_MSM8974_14001 /* OPPO 2014-02-25 sjc Modify end */ #endif //CONFIG_MACH_MSM8974_14001 #ifdef CONFIG_MACH_MSM8974_14001 //Fuchun.Liao@EXP.Driver,2014/01/10 add for check battery type #define BATTERY_2700MA 0 #define BATTERY_3000MA 1 #define TYPE_INFO_LEN 8 #if defined (CONFIG_MACH_MSM8974_14001) || defined (CONFIG_OPPO_MSM_14021) static int bq27541_batt_type_detect(struct i2c_client *client) { return BATTERY_3000MA; } #else //defined (CONFIG_MACH_MSM8974_14001) || defined (CONFIG_OPPO_MSM_14021) /* jingchun.wang@Onlinerd.Driver, 2014/03/10 Modify for 14001 */ static int bq27541_batt_type_detect(struct i2c_client *client) { char blockA_cmd_buf[1] = {0x01}; char rc = 0; char recv_buf[TYPE_INFO_LEN] = {0x0}; int i = 0; rc = i2c_smbus_write_i2c_block_data(client,DATAFLASHBLOCK,1,&blockA_cmd_buf[0]); if( rc < 0 ){ pr_info("%s i2c write error\n",__func__); return 0; } msleep(30); //it is needed i2c_smbus_read_i2c_block_data(client,AUTHENDATA,TYPE_INFO_LEN,&recv_buf[0]); if((recv_buf[0] == 0x01) && (recv_buf[1] == 0x09) && (recv_buf[2] == 0x08) && (recv_buf[3] == 0x06)) rc = BATTERY_2700MA; else if((recv_buf[0] == 0x02) && (recv_buf[1] == 0x00) && (recv_buf[2] == 0x01) && (recv_buf[3] == 0x03)) rc = BATTERY_3000MA; else { for(i = 0;i < TYPE_INFO_LEN;i++) pr_info("%s error,recv_buf[%d]:0x%x\n",__func__,i,recv_buf[i]); rc = BATTERY_2700MA; } pr_info("%s battery_type:%d\n",__func__,rc); return rc; } #endif //defined (CONFIG_MACH_MSM8974_14001) || defined (CONFIG_OPPO_MSM_14021) #endif //CONFIG_MACH_MSM8974_14001 /* OPPO 2013-12-12 liaofuchun add for fastchg */ #ifdef CONFIG_PIC1503_FASTCG #define AP_TX_EN GPIO_CFG(0, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA) #define AP_TX_DIS GPIO_CFG(0, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA) static irqreturn_t irq_rx_handler(int irq, void *dev_id) { struct bq27541_device_info *di = dev_id; //pr_info("%s\n", __func__); schedule_work(&di->fastcg_work); return IRQ_HANDLED; } #define AP_SWITCH_USB GPIO_CFG(96, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA) static void fastcg_work_func(struct work_struct *work) { int data = 0; int i; int bit = 0; int retval = 0; int ret_info = 0; static int fw_ver_info = 0; int volt = 0; int temp = 0; int soc = 0; int current_now = 0; int remain_cap = 0; static bool isnot_power_on = 0; free_irq(bq27541_di->irq, bq27541_di); for(i = 0; i < 7; i++) { gpio_set_value(0, 0); gpio_tlmm_config(AP_TX_EN, GPIO_CFG_ENABLE); usleep_range(1000,1000); gpio_set_value(0, 1); gpio_tlmm_config(AP_TX_DIS, GPIO_CFG_ENABLE); usleep_range(19000,19000); bit = gpio_get_value(1); data |= bit<<(6-i); if((i == 2) && (data != 0x50) && (!fw_ver_info)){ //data recvd not start 
from "101" pr_err("%s data err:%d\n",__func__,data); if(bq27541_di->fast_chg_started == true) { bq27541_di->alow_reading = true; bq27541_di->fast_chg_started = false; bq27541_di->fast_chg_allow = false; bq27541_di->fast_switch_to_normal = false; bq27541_di->fast_normal_to_warm = false; bq27541_di->fast_chg_ing = false; gpio_set_value(96, 0); mcu_en_gpio_set(1);//sjc0623 add retval = gpio_tlmm_config(AP_SWITCH_USB, GPIO_CFG_ENABLE); if (retval) { pr_err("%s switch usb error %d\n", __func__, retval); } power_supply_changed(bq27541_di->batt_psy); } goto out; } } pr_err("%s recv data:0x%x\n", __func__, data); //lfc add for power_supply_changed NULL pointer when batt_psy unregistered if(bq27541_di->batt_psy == NULL){ msleep(2); gpio_tlmm_config(GPIO_CFG(1,0,GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),1); gpio_direction_output(1, 0); retval = request_irq(bq27541_di->irq, irq_rx_handler, IRQF_TRIGGER_RISING, "mcu_data", bq27541_di); //0X01:rising edge,0x02:falling edge if(retval < 0) pr_err("%s request ap rx irq failed.\n", __func__); return ; } if(data == 0x52) { //request fast charging wake_lock(&bq27541_di->fastchg_wake_lock); pic_need_to_up_fw = 0; fw_ver_info = 0; bq27541_di->alow_reading = false; bq27541_di->fast_chg_started = true; bq27541_di->fast_chg_allow = false; bq27541_di->fast_normal_to_warm = false; mod_timer(&bq27541_di->watchdog, jiffies + msecs_to_jiffies(10000)); if(!isnot_power_on){ isnot_power_on = 1; ret_info = 0x1; } else { ret_info = 0x2; } } else if(data == 0x54) { //fast charge stopped bq27541_di->alow_reading = true; bq27541_di->fast_chg_started = false; bq27541_di->fast_chg_allow = false; bq27541_di->fast_switch_to_normal = false; bq27541_di->fast_normal_to_warm = false; bq27541_di->fast_chg_ing = false; //switch off fast chg pr_info("%s fastchg stop unexpectly,switch off fastchg\n", __func__); gpio_set_value(96, 0); mcu_en_gpio_set(1);//sjc0623 add retval = gpio_tlmm_config(AP_SWITCH_USB, GPIO_CFG_ENABLE); if (retval) { pr_err("%s switch usb error %d\n", __func__, retval); } del_timer(&bq27541_di->watchdog); ret_info = 0x2; } else if(data == 0x58) { //tell ap can read i2c bq27541_di->alow_reading = true; //reading bq27541_di->fast_chg_ing = true; volt = bq27541_get_battery_mvolts(); temp = bq27541_get_battery_temperature(); remain_cap = bq27541_get_batt_remaining_capacity(); soc = bq27541_get_battery_soc(); current_now = bq27541_get_average_current(); pr_err("%s volt:%d,temp:%d,remain_cap:%d,soc:%d,current:%d\n",__func__,volt,temp, remain_cap,soc,current_now); //don't read bq27541_di->alow_reading = false; mod_timer(&bq27541_di->watchdog, jiffies + msecs_to_jiffies(10000)); ret_info = 0x2; } else if(data == 0x5a){ //fastchg full,vbatt > 4350 #if 0 //lfc modify for it(set fast_switch_to_normal ture) is earlier than usb_plugged_out irq(set it false) bq27541_di->fast_switch_to_normal = true; bq27541_di->alow_reading = true; bq27541_di->fast_chg_started = false; bq27541_di->fast_chg_allow = false; #endif //switch off fast chg pr_info("%s fastchg full,switch off fastchg,set GPIO96 0\n", __func__); gpio_set_value(96, 0); mcu_en_gpio_set(1);//sjc0623 add retval = gpio_tlmm_config(AP_SWITCH_USB, GPIO_CFG_ENABLE); if (retval) { pr_err("%s switch usb error %d\n", __func__, retval); } del_timer(&bq27541_di->watchdog); ret_info = 0x2; } else if(data == 0x53){ if (bq27541_di->battery_type == BATTERY_3000MA){ //13097 ATL battery //if temp:10~20 decigec,vddmax = 4250mv //switch off fast chg pr_info("%s fastchg low temp full,switch off fastchg,set GPIO96 0\n", __func__); 
gpio_set_value(96, 0); mcu_en_gpio_set(1);//sjc0623 add retval = gpio_tlmm_config(AP_SWITCH_USB, GPIO_CFG_ENABLE); if (retval) { pr_err("%s switch usb error %d\n", __func__, retval); } } del_timer(&bq27541_di->watchdog); ret_info = 0x2; } else if(data == 0x59){ //usb bad connected,stop fastchg #if 0 //lfc modify for it(set fast_switch_to_normal ture) is earlier than usb_plugged_out irq(set it false) bq27541_di->alow_reading = true; bq27541_di->fast_chg_started = false; bq27541_di->fast_chg_allow = false; bq27541_di->fast_switch_to_normal = true; #endif //switch off fast chg pr_info("%s usb bad connect,switch off fastchg\n", __func__); gpio_set_value(96, 0); mcu_en_gpio_set(1);//sjc0623 add retval = gpio_tlmm_config(AP_SWITCH_USB, GPIO_CFG_ENABLE); if (retval) { pr_err("%s switch usb error %d\n", __func__, retval); } del_timer(&bq27541_di->watchdog); ret_info = 0x2; } else if(data == 0x5c){ //fastchg temp over 45 or under 20 pr_info("%s fastchg temp > 45 or < 20,switch off fastchg,set GPIO96 0\n", __func__); gpio_set_value(96, 0); mcu_en_gpio_set(1);//sjc0623 add retval = gpio_tlmm_config(AP_SWITCH_USB, GPIO_CFG_ENABLE); if (retval) { pr_err("%s switch usb error %d\n", __func__, retval); } del_timer(&bq27541_di->watchdog); ret_info = 0x2; } else if(data == 0x56){ //ready to get fw_ver fw_ver_info = 1; ret_info = 0x2; } else if(fw_ver_info){ //get fw_ver //fw in local is large than mcu1503_fw_ver if((!pic_have_updated) && (Pic16F_firmware_data[pic_fw_ver_count - 4] > data)){ ret_info = 0x2; pic_need_to_up_fw = 1; //need to update fw }else{ ret_info = 0x1; pic_need_to_up_fw = 0; //fw is already new,needn't to up } pr_info("local_fw:0x%x,need_to_up_fw:%d\n",Pic16F_firmware_data[pic_fw_ver_count - 4],pic_need_to_up_fw); fw_ver_info = 0; } else { gpio_set_value(96, 0); mcu_en_gpio_set(1);//sjc0623 add retval = gpio_tlmm_config(AP_SWITCH_USB, GPIO_CFG_ENABLE); if (retval) { pr_err("%s data err(101xxxx) switch usb error %d\n", __func__, retval); goto out; //avoid i2c conflict } msleep(500); //avoid i2c conflict //data err bq27541_di->alow_reading = true; bq27541_di->fast_chg_started = false; bq27541_di->fast_chg_allow = false; bq27541_di->fast_switch_to_normal = false; bq27541_di->fast_normal_to_warm = false; bq27541_di->fast_chg_ing = false; //data err pr_info("%s data err(101xxxx),switch off fastchg\n", __func__); power_supply_changed(bq27541_di->batt_psy); goto out; } msleep(2); gpio_tlmm_config(GPIO_CFG(1,0,GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),1); gpio_direction_output(1, 0); for(i = 0; i < 3; i++) { if(i == 0){ //tell mcu1503 battery_type gpio_set_value(1, ret_info >> 1); } else if(i == 1){ gpio_set_value(1, ret_info & 0x1); } else { gpio_set_value(1,bq27541_di->battery_type); } gpio_set_value(0, 0); gpio_tlmm_config(AP_TX_EN, GPIO_CFG_ENABLE); usleep_range(1000,1000); gpio_set_value(0, 1); gpio_tlmm_config(AP_TX_DIS, GPIO_CFG_ENABLE); usleep_range(19000,19000); } out: gpio_tlmm_config(GPIO_CFG(1,0,GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),1); gpio_direction_input(1); //lfc add for it is faster than usb_plugged_out irq to send 0x5a(fast_chg full and usb bad connected) to AP if(data == 0x5a || data == 0x59){ usleep_range(180000,180000); bq27541_di->fast_switch_to_normal = true; bq27541_di->alow_reading = true; bq27541_di->fast_chg_started = false; bq27541_di->fast_chg_allow = false; bq27541_di->fast_chg_ing = false; } //fastchg temp over( > 45 or < 20) //lfc add to set fastchg vddmax = 4250mv during 10 ~ 20 decigec for ATL 3000mAH battery if(data == 0x53){ 
if(bq27541_di->battery_type == BATTERY_3000MA){ //13097 ATL battery usleep_range(180000,180000); bq27541_di->fast_low_temp_full = true; bq27541_di->alow_reading = true; bq27541_di->fast_chg_started = false; bq27541_di->fast_chg_allow = false; bq27541_di->fast_chg_ing = false; } } //lfc add to set fastchg vddmax = 4250mv end if(data == 0x5c){ usleep_range(180000,180000); bq27541_di->fast_normal_to_warm = true; bq27541_di->alow_reading = true; bq27541_di->fast_chg_started = false; bq27541_di->fast_chg_allow = false; bq27541_di->fast_chg_ing = false; } if(pic_need_to_up_fw){ msleep(500); del_timer(&bq27541_di->watchdog); pic16f_fw_update(false); pic_need_to_up_fw = 0; mod_timer(&bq27541_di->watchdog, jiffies + msecs_to_jiffies(10000)); } retval = request_irq(bq27541_di->irq, irq_rx_handler, IRQF_TRIGGER_RISING, "mcu_data", bq27541_di); //0X01:rising edge,0x02:falling edge if(retval < 0) { pr_err("%s request ap rx irq failed.\n", __func__); } if((data == 0x52) || (data == 0x58)){ power_supply_changed(bq27541_di->batt_psy); } if(data == 0x53){ if(bq27541_di->battery_type == BATTERY_3000MA){ power_supply_changed(bq27541_di->batt_psy); wake_unlock(&bq27541_di->fastchg_wake_lock); } } if((data == 0x54) || (data == 0x5a) || (data == 0x59) || (data == 0x5c)){ power_supply_changed(bq27541_di->batt_psy); wake_unlock(&bq27541_di->fastchg_wake_lock); } } void di_watchdog(unsigned long data) { struct bq27541_device_info *di = (struct bq27541_device_info *)data; int ret = 0; pr_err("di_watchdog can't receive mcu data\n"); di->alow_reading = true; di->fast_chg_started = false; di->fast_switch_to_normal = false; di->fast_low_temp_full = false; di->fast_chg_allow = false; di->fast_normal_to_warm = false; di->fast_chg_ing = false; //switch off fast chg pr_info("%s switch off fastchg\n", __func__); gpio_set_value(96, 0); mcu_en_gpio_set(1);//sjc0623 add ret = gpio_tlmm_config(AP_SWITCH_USB, GPIO_CFG_ENABLE); if (ret) { pr_info("%s switch usb error %d\n", __func__, ret); } wake_unlock(&bq27541_di->fastchg_wake_lock); } #endif /* OPPO 2013-12-12 liaofuchun add for fastchg */ #define MAX_RETRY_COUNT 5 static int bq27541_battery_probe(struct i2c_client *client, const struct i2c_device_id *id) { char *name; struct bq27541_device_info *di; struct bq27541_access_methods *bus; int num; int retval = 0; #ifdef CONFIG_OPPO_MSM_14021 /* OPPO 2014-06-23 sjc Add begin for 14021 */ struct device_node *dev_node = client->dev.of_node; int ret; if (dev_node) { mcu_en_gpio = of_get_named_gpio(dev_node, "microchip,mcu-en-gpio", 0); } else { mcu_en_gpio = 0; printk(KERN_ERR "%s: mcu_en_gpio failed\n", __func__); } if (gpio_is_valid(mcu_en_gpio)) { ret = gpio_request(mcu_en_gpio, "mcu_en_gpio"); if (ret) { printk(KERN_ERR "%s: gpio_request failed for %d ret=%d\n", __func__, mcu_en_gpio, ret); } else { gpio_set_value(mcu_en_gpio, 0); } } #endif //CONFIG_OPPO_MSM_14021 pr_info("%s\n", __func__); if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) return -ENODEV; /* Get new ID for the new battery device */ retval = idr_pre_get(&battery_id, GFP_KERNEL); if (retval == 0) return -ENOMEM; mutex_lock(&battery_mutex); retval = idr_get_new(&battery_id, client, &num); mutex_unlock(&battery_mutex); if (retval < 0) return retval; name = kasprintf(GFP_KERNEL, "%s-%d", id->name, num); if (!name) { dev_err(&client->dev, "failed to allocate device name\n"); retval = -ENOMEM; goto batt_failed_1; } di = kzalloc(sizeof(*di), GFP_KERNEL); if (!di) { dev_err(&client->dev, "failed to allocate device info data\n"); retval = -ENOMEM; goto 
batt_failed_2; } di->id = num; bus = kzalloc(sizeof(*bus), GFP_KERNEL); if (!bus) { dev_err(&client->dev, "failed to allocate access method " "data\n"); retval = -ENOMEM; goto batt_failed_3; } i2c_set_clientdata(client, di); di->dev = &client->dev; bus->read = &bq27541_read_i2c; di->bus = bus; di->client = client; /* OPPO 2013-08-19 wangjc Add begin for error temp */ #ifdef CONFIG_MACH_MSM8974_14001 di->temp_pre = 0; #endif /* OPPO 2013-08-19 wangjc Add end */ di->alow_reading = true; di->fast_chg_ing = false; di->fast_low_temp_full = false; #ifdef CONFIG_MACH_MSM8974_14001 /* jingchun.wang@Onlinerd.Driver, 2014/02/12 Add for retry when config fail */ di->retry_count = MAX_RETRY_COUNT; atomic_set(&di->suspended, 0); #endif /*CONFIG_MACH_MSM8974_14001*/ #ifdef CONFIG_BQ27541_TEST_ENABLE platform_set_drvdata(&this_device, di); retval = platform_device_register(&this_device); if (!retval) { retval = sysfs_create_group(&this_device.dev.kobj, &fs_attr_group); if (retval) goto batt_failed_4; } else goto batt_failed_4; #endif if (retval) { dev_err(&client->dev, "failed to setup bq27541\n"); goto batt_failed_4; } if (retval) { dev_err(&client->dev, "failed to powerup bq27541\n"); goto batt_failed_4; } spin_lock_init(&lock); bq27541_di = di; INIT_WORK(&di->counter, bq27541_coulomb_counter_work); INIT_DELAYED_WORK(&di->hw_config, bq27541_hw_config); schedule_delayed_work(&di->hw_config, 0); /* OPPO 2013-12-22 wangjc add for fastchg*/ #ifdef CONFIG_PIC1503_FASTCG init_timer(&di->watchdog); di->watchdog.data = (unsigned long)di; di->watchdog.function = di_watchdog; wake_lock_init(&di->fastchg_wake_lock, WAKE_LOCK_SUSPEND, "fastcg_wake_lock"); INIT_WORK(&di->fastcg_work,fastcg_work_func); gpio_request(1, "mcu_clk"); gpio_tlmm_config(GPIO_CFG(1,0,GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),1); gpio_direction_input(1); di->irq = gpio_to_irq(1); retval = request_irq(di->irq, irq_rx_handler, IRQF_TRIGGER_RISING, "mcu_data", di); //0X01:rising edge,0x02:falling edge if(retval < 0) { pr_err("%s request ap rx irq failed.\n", __func__); } #endif /* OPPO 2013-12-22 wangjc add end*/ return 0; batt_failed_4: kfree(bus); batt_failed_3: kfree(di); batt_failed_2: kfree(name); batt_failed_1: mutex_lock(&battery_mutex); idr_remove(&battery_id, num); mutex_unlock(&battery_mutex); return retval; } static int bq27541_battery_remove(struct i2c_client *client) { struct bq27541_device_info *di = i2c_get_clientdata(client); #ifdef CONFIG_OPPO_MSM_14021 /* OPPO 2014-06-23 sjc Add begin for 14021 */ if (gpio_is_valid(mcu_en_gpio))//sjc0623 add gpio_free(mcu_en_gpio); #endif qpnp_battery_gauge_unregister(&bq27541_batt_gauge); bq27541_cntl_cmd(di, BQ27541_SUBCMD_DISABLE_DLOG); udelay(66); bq27541_cntl_cmd(di, BQ27541_SUBCMD_DISABLE_IT); cancel_delayed_work_sync(&di->hw_config); kfree(di->bus); mutex_lock(&battery_mutex); idr_remove(&battery_id, di->id); mutex_unlock(&battery_mutex); kfree(di); return 0; } static int bq27541_battery_suspend(struct i2c_client *client, pm_message_t message) { struct bq27541_device_info *di = i2c_get_clientdata(client); atomic_set(&di->suspended, 1); return 0; } /*1 minute*/ #define RESUME_TIME 1*60 static int bq27541_battery_resume(struct i2c_client *client) { struct bq27541_device_info *di = i2c_get_clientdata(client); atomic_set(&di->suspended, 0); bq27541_battery_soc(bq27541_di, true); return 0; } static const struct of_device_id bq27541_match[] = { { .compatible = "ti,bq27541-battery" }, { }, }; static const struct i2c_device_id bq27541_id[] = { { "bq27541-battery", 1 }, {}, }; 
MODULE_DEVICE_TABLE(i2c, bq27541_id); static struct i2c_driver bq27541_battery_driver = { .driver = { .name = "bq27541-battery", .owner = THIS_MODULE, .of_match_table = bq27541_match, }, .probe = bq27541_battery_probe, .remove = bq27541_battery_remove, .suspend = bq27541_battery_suspend, .resume = bq27541_battery_resume, .id_table = bq27541_id, }; static int __init bq27541_battery_init(void) { int ret; ret = i2c_add_driver(&bq27541_battery_driver); if (ret) printk(KERN_ERR "Unable to register BQ27541 driver\n"); return ret; } module_init(bq27541_battery_init); static void __exit bq27541_battery_exit(void) { i2c_del_driver(&bq27541_battery_driver); } module_exit(bq27541_battery_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Qualcomm Innovation Center, Inc."); MODULE_DESCRIPTION("BQ27541 battery monitor driver");
atila1974/AK-OnePlusOne-CAF
drivers/power/bq27541_fuelgauger.c
C
gpl-2.0
50,014
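A standalone sketch tied to the drivers/power/bq27541_fuelgauger.c record above: bq27541_average_current() converts the gauge's 16-bit two's-complement average-current register by hand with curr = -((~(curr - 1)) & 0xFFFF). The userspace snippet below (hypothetical raw values, not taken from hardware) checks that this manual conversion agrees with a plain int16_t cast, which is the idiomatic form of the same operation.

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

/* Same sign handling as bq27541_average_current() above */
static int convert_like_driver(int curr)
{
	if (curr & 0x8000)			/* negative current */
		curr = -((~(curr - 1)) & 0xFFFF);
	return curr;
}

int main(void)
{
	int raw_values[] = { 0x0000, 0x0123, 0x8000, 0xFF38, 0xFFFF };
	unsigned i;

	for (i = 0; i < sizeof(raw_values) / sizeof(raw_values[0]); i++) {
		int raw = raw_values[i];
		int a = convert_like_driver(raw);
		int b = (int16_t)raw;		/* idiomatic equivalent */

		assert(a == b);
		printf("raw=0x%04X -> %d (driver math) / %d (int16_t cast)\n",
		       raw, a, b);
	}
	return 0;
}

For example 0xFF38 decodes to -200 either way; the driver then returns the negated value, so the sign convention seen by callers is flipped relative to the raw register.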
/* * linux/fs/inode.c * * (C) 1997 Linus Torvalds */ #include <linux/fs.h> #include <linux/mm.h> #include <linux/dcache.h> #include <linux/init.h> #include <linux/quotaops.h> #include <linux/slab.h> #include <linux/writeback.h> #include <linux/module.h> #include <linux/backing-dev.h> #include <linux/wait.h> #include <linux/rwsem.h> #include <linux/hash.h> #include <linux/swap.h> #include <linux/security.h> #include <linux/pagemap.h> #include <linux/cdev.h> #include <linux/bootmem.h> #include <linux/inotify.h> #include <linux/fsnotify.h> #include <linux/mount.h> #include <linux/async.h> #include <linux/posix_acl.h> /* * This is needed for the following functions: * - inode_has_buffers * - invalidate_inode_buffers * - invalidate_bdev * * FIXME: remove all knowledge of the buffer layer from this file */ #include <linux/buffer_head.h> /* * New inode.c implementation. * * This implementation has the basic premise of trying * to be extremely low-overhead and SMP-safe, yet be * simple enough to be "obviously correct". * * Famous last words. */ /* inode dynamic allocation 1999, Andrea Arcangeli <andrea@suse.de> */ /* #define INODE_PARANOIA 1 */ /* #define INODE_DEBUG 1 */ /* * Inode lookup is no longer as critical as it used to be: * most of the lookups are going to be through the dcache. */ #define I_HASHBITS i_hash_shift #define I_HASHMASK i_hash_mask static unsigned int i_hash_mask __read_mostly; static unsigned int i_hash_shift __read_mostly; /* * Each inode can be on two separate lists. One is * the hash list of the inode, used for lookups. The * other linked list is the "type" list: * "in_use" - valid inode, i_count > 0, i_nlink > 0 * "dirty" - as "in_use" but also dirty * "unused" - valid inode, i_count = 0 * * A "dirty" list is maintained for each super block, * allowing for low-overhead inode sync() operations. */ LIST_HEAD(inode_in_use); LIST_HEAD(inode_unused); static struct hlist_head *inode_hashtable __read_mostly; /* * A simple spinlock to protect the list manipulations. * * NOTE! You also have to own the lock if you change * the i_state of an inode while it is in use.. */ DEFINE_SPINLOCK(inode_lock); /* * iprune_sem provides exclusion between the kswapd or try_to_free_pages * icache shrinking path, and the umount path. Without this exclusion, * by the time prune_icache calls iput for the inode whose pages it has * been invalidating, or by the time it calls clear_inode & destroy_inode * from its final dispose_list, the struct super_block they refer to * (for inode->i_sb->s_op) may already have been freed and reused. * * We make this an rwsem because the fastpath is icache shrinking. In * some cases a filesystem may be doing a significant amount of work in * its inode reclaim code, so this should improve parallelism. */ static DECLARE_RWSEM(iprune_sem); /* * Statistics gathering.. */ struct inodes_stat_t inodes_stat; static struct kmem_cache *inode_cachep __read_mostly; static void wake_up_inode(struct inode *inode) { /* * Prevent speculative execution through spin_unlock(&inode_lock); */ smp_mb(); wake_up_bit(&inode->i_state, __I_LOCK); } /** * inode_init_always - perform inode structure intialisation * @sb: superblock inode belongs to * @inode: inode to initialise * * These are initializations that need to be done on every inode * allocation as the fields are not initialised by slab allocation. 
*/ int inode_init_always(struct super_block *sb, struct inode *inode) { static const struct address_space_operations empty_aops; static const struct inode_operations empty_iops; static const struct file_operations empty_fops; struct address_space *const mapping = &inode->i_data; inode->i_sb = sb; inode->i_blkbits = sb->s_blocksize_bits; inode->i_flags = 0; atomic_set(&inode->i_count, 1); inode->i_op = &empty_iops; inode->i_fop = &empty_fops; inode->i_nlink = 1; inode->i_uid = 0; inode->i_gid = 0; atomic_set(&inode->i_writecount, 0); inode->i_size = 0; inode->i_blocks = 0; inode->i_bytes = 0; inode->i_generation = 0; #ifdef CONFIG_QUOTA memset(&inode->i_dquot, 0, sizeof(inode->i_dquot)); #endif inode->i_pipe = NULL; inode->i_bdev = NULL; inode->i_cdev = NULL; inode->i_rdev = 0; inode->dirtied_when = 0; if (security_inode_alloc(inode)) goto out; spin_lock_init(&inode->i_lock); lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key); mutex_init(&inode->i_mutex); lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key); init_rwsem(&inode->i_alloc_sem); lockdep_set_class(&inode->i_alloc_sem, &sb->s_type->i_alloc_sem_key); mapping->a_ops = &empty_aops; mapping->host = inode; mapping->flags = 0; mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE); mapping->assoc_mapping = NULL; mapping->backing_dev_info = &default_backing_dev_info; mapping->writeback_index = 0; /* * If the block_device provides a backing_dev_info for client * inodes then use that. Otherwise the inode shares the bdev's * backing_dev_info. */ if (sb->s_bdev) { struct backing_dev_info *bdi; bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info; mapping->backing_dev_info = bdi; } inode->i_private = NULL; inode->i_mapping = mapping; #ifdef CONFIG_FS_POSIX_ACL inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED; #endif #ifdef CONFIG_FSNOTIFY inode->i_fsnotify_mask = 0; #endif return 0; out: return -ENOMEM; } EXPORT_SYMBOL(inode_init_always); static struct inode *alloc_inode(struct super_block *sb) { struct inode *inode; if (sb->s_op->alloc_inode) inode = sb->s_op->alloc_inode(sb); else inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL); if (!inode) return NULL; if (unlikely(inode_init_always(sb, inode))) { if (inode->i_sb->s_op->destroy_inode) inode->i_sb->s_op->destroy_inode(inode); else kmem_cache_free(inode_cachep, inode); return NULL; } return inode; } void __destroy_inode(struct inode *inode) { BUG_ON(inode_has_buffers(inode)); security_inode_free(inode); fsnotify_inode_delete(inode); #ifdef CONFIG_FS_POSIX_ACL if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED) posix_acl_release(inode->i_acl); if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED) posix_acl_release(inode->i_default_acl); #endif } EXPORT_SYMBOL(__destroy_inode); void destroy_inode(struct inode *inode) { __destroy_inode(inode); if (inode->i_sb->s_op->destroy_inode) inode->i_sb->s_op->destroy_inode(inode); else kmem_cache_free(inode_cachep, inode); } /* * These are initializations that only need to be done * once, because the fields are idempotent across use * of the inode, so let the slab be aware of that.
*/ void inode_init_once(struct inode *inode) { memset(inode, 0, sizeof(*inode)); INIT_HLIST_NODE(&inode->i_hash); INIT_LIST_HEAD(&inode->i_dentry); INIT_LIST_HEAD(&inode->i_devices); INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC); spin_lock_init(&inode->i_data.tree_lock); spin_lock_init(&inode->i_data.i_mmap_lock); INIT_LIST_HEAD(&inode->i_data.private_list); spin_lock_init(&inode->i_data.private_lock); INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap); INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear); i_size_ordered_init(inode); #ifdef CONFIG_INOTIFY INIT_LIST_HEAD(&inode->inotify_watches); mutex_init(&inode->inotify_mutex); #endif #ifdef CONFIG_FSNOTIFY INIT_HLIST_HEAD(&inode->i_fsnotify_mark_entries); #endif } EXPORT_SYMBOL(inode_init_once); static void init_once(void *foo) { struct inode *inode = (struct inode *) foo; inode_init_once(inode); } /* * inode_lock must be held */ void __iget(struct inode *inode) { if (atomic_read(&inode->i_count)) { atomic_inc(&inode->i_count); return; } atomic_inc(&inode->i_count); if (!(inode->i_state & (I_DIRTY|I_SYNC))) list_move(&inode->i_list, &inode_in_use); inodes_stat.nr_unused--; } /** * clear_inode - clear an inode * @inode: inode to clear * * This is called by the filesystem to tell us * that the inode is no longer useful. We just * terminate it with extreme prejudice. */ void clear_inode(struct inode *inode) { might_sleep(); invalidate_inode_buffers(inode); BUG_ON(inode->i_data.nrpages); BUG_ON(!(inode->i_state & I_FREEING)); BUG_ON(inode->i_state & I_CLEAR); inode_sync_wait(inode); vfs_dq_drop(inode); if (inode->i_sb->s_op->clear_inode) inode->i_sb->s_op->clear_inode(inode); if (S_ISBLK(inode->i_mode) && inode->i_bdev) bd_forget(inode); if (S_ISCHR(inode->i_mode) && inode->i_cdev) cd_forget(inode); inode->i_state = I_CLEAR; } EXPORT_SYMBOL(clear_inode); /* * dispose_list - dispose of the contents of a local list * @head: the head of the list to free * * Dispose-list gets a local list with local inodes in it, so it doesn't * need to worry about list corruption and SMP locks. */ static void dispose_list(struct list_head *head) { int nr_disposed = 0; while (!list_empty(head)) { struct inode *inode; inode = list_first_entry(head, struct inode, i_list); list_del(&inode->i_list); if (inode->i_data.nrpages) truncate_inode_pages(&inode->i_data, 0); clear_inode(inode); spin_lock(&inode_lock); hlist_del_init(&inode->i_hash); list_del_init(&inode->i_sb_list); spin_unlock(&inode_lock); wake_up_inode(inode); destroy_inode(inode); nr_disposed++; } spin_lock(&inode_lock); inodes_stat.nr_inodes -= nr_disposed; spin_unlock(&inode_lock); } /* * Invalidate all inodes for a device. */ static int invalidate_list(struct list_head *head, struct list_head *dispose) { struct list_head *next; int busy = 0, count = 0; next = head->next; for (;;) { struct list_head *tmp = next; struct inode *inode; /* * We can reschedule here without worrying about the list's * consistency because the per-sb list of inodes must not * change during umount anymore, and because iprune_sem keeps * shrink_icache_memory() away. 
*/ cond_resched_lock(&inode_lock); next = next->next; if (tmp == head) break; inode = list_entry(tmp, struct inode, i_sb_list); if (inode->i_state & I_NEW) continue; invalidate_inode_buffers(inode); if (!atomic_read(&inode->i_count)) { list_move(&inode->i_list, dispose); WARN_ON(inode->i_state & I_NEW); inode->i_state |= I_FREEING; count++; continue; } busy = 1; } /* only unused inodes may be cached with i_count zero */ inodes_stat.nr_unused -= count; return busy; } /** * invalidate_inodes - discard the inodes on a device * @sb: superblock * * Discard all of the inodes for a given superblock. If the discard * fails because there are busy inodes then a non zero value is returned. * If the discard is successful all the inodes have been discarded. */ int invalidate_inodes(struct super_block *sb) { int busy; LIST_HEAD(throw_away); down_write(&iprune_sem); spin_lock(&inode_lock); inotify_unmount_inodes(&sb->s_inodes); fsnotify_unmount_inodes(&sb->s_inodes); busy = invalidate_list(&sb->s_inodes, &throw_away); spin_unlock(&inode_lock); dispose_list(&throw_away); up_write(&iprune_sem); return busy; } EXPORT_SYMBOL(invalidate_inodes); static int can_unuse(struct inode *inode) { if (inode->i_state) return 0; if (inode_has_buffers(inode)) return 0; if (atomic_read(&inode->i_count)) return 0; if (inode->i_data.nrpages) return 0; return 1; } /* * Scan `goal' inodes on the unused list for freeable ones. They are moved to * a temporary list and then are freed outside inode_lock by dispose_list(). * * Any inodes which are pinned purely because of attached pagecache have their * pagecache removed. We expect the final iput() on that inode to add it to * the front of the inode_unused list. So look for it there and if the * inode is still freeable, proceed. The right inode is found 99.9% of the * time in testing on a 4-way. * * If the inode has metadata buffers attached to mapping->private_list then * try to remove them. */ static void prune_icache(int nr_to_scan) { LIST_HEAD(freeable); int nr_pruned = 0; int nr_scanned; unsigned long reap = 0; down_read(&iprune_sem); spin_lock(&inode_lock); for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) { struct inode *inode; if (list_empty(&inode_unused)) break; inode = list_entry(inode_unused.prev, struct inode, i_list); if (inode->i_state || atomic_read(&inode->i_count)) { list_move(&inode->i_list, &inode_unused); continue; } if (inode_has_buffers(inode) || inode->i_data.nrpages) { __iget(inode); spin_unlock(&inode_lock); if (remove_inode_buffers(inode)) reap += invalidate_mapping_pages(&inode->i_data, 0, -1); iput(inode); spin_lock(&inode_lock); if (inode != list_entry(inode_unused.next, struct inode, i_list)) continue; /* wrong inode or list_empty */ if (!can_unuse(inode)) continue; } list_move(&inode->i_list, &freeable); WARN_ON(inode->i_state & I_NEW); inode->i_state |= I_FREEING; nr_pruned++; } inodes_stat.nr_unused -= nr_pruned; if (current_is_kswapd()) __count_vm_events(KSWAPD_INODESTEAL, reap); else __count_vm_events(PGINODESTEAL, reap); spin_unlock(&inode_lock); dispose_list(&freeable); up_read(&iprune_sem); } /* * shrink_icache_memory() will attempt to reclaim some unused inodes. Here, * "unused" means that no dentries are referring to the inodes: the files are * not open and the dcache references to those inodes have already been * reclaimed. * * This function is passed the number of inodes to scan, and it returns the * total number of remaining possibly-reclaimable inodes. 
*/ static int shrink_icache_memory(int nr, gfp_t gfp_mask) { if (nr) { /* * Nasty deadlock avoidance. We may hold various FS locks, * and we don't want to recurse into the FS that called us * in clear_inode() and friends.. */ if (!(gfp_mask & __GFP_FS)) return -1; prune_icache(nr); } return (inodes_stat.nr_unused / 100) * sysctl_vfs_cache_pressure; } static struct shrinker icache_shrinker = { .shrink = shrink_icache_memory, .seeks = DEFAULT_SEEKS, }; static void __wait_on_freeing_inode(struct inode *inode); /* * Called with the inode lock held. * NOTE: we are not increasing the inode-refcount, you must call __iget() * by hand after calling find_inode now! This simplifies iunique and won't * add any additional branch in the common code. */ static struct inode *find_inode(struct super_block *sb, struct hlist_head *head, int (*test)(struct inode *, void *), void *data) { struct hlist_node *node; struct inode *inode = NULL; repeat: hlist_for_each_entry(inode, node, head, i_hash) { if (inode->i_sb != sb) continue; if (!test(inode, data)) continue; if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) { __wait_on_freeing_inode(inode); goto repeat; } break; } return node ? inode : NULL; } /* * find_inode_fast is the fast path version of find_inode, see the comment at * iget_locked for details. */ static struct inode *find_inode_fast(struct super_block *sb, struct hlist_head *head, unsigned long ino) { struct hlist_node *node; struct inode *inode = NULL; repeat: hlist_for_each_entry(inode, node, head, i_hash) { if (inode->i_ino != ino) continue; if (inode->i_sb != sb) continue; if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) { __wait_on_freeing_inode(inode); goto repeat; } break; } return node ? inode : NULL; } static unsigned long hash(struct super_block *sb, unsigned long hashval) { unsigned long tmp; tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) / L1_CACHE_BYTES; tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS); return tmp & I_HASHMASK; } static inline void __inode_add_to_lists(struct super_block *sb, struct hlist_head *head, struct inode *inode) { inodes_stat.nr_inodes++; list_add(&inode->i_list, &inode_in_use); list_add(&inode->i_sb_list, &sb->s_inodes); if (head) hlist_add_head(&inode->i_hash, head); } /** * inode_add_to_lists - add a new inode to relevant lists * @sb: superblock inode belongs to * @inode: inode to mark in use * * When an inode is allocated it needs to be accounted for, added to the in use * list, the owning superblock and the inode hash. This needs to be done under * the inode_lock, so export a function to do this rather than the inode lock * itself. We calculate the hash list to add to here so it is all internal * which requires the caller to have already set up the inode number in the * inode to add. */ void inode_add_to_lists(struct super_block *sb, struct inode *inode) { struct hlist_head *head = inode_hashtable + hash(sb, inode->i_ino); spin_lock(&inode_lock); __inode_add_to_lists(sb, head, inode); spin_unlock(&inode_lock); } EXPORT_SYMBOL_GPL(inode_add_to_lists); /** * new_inode - obtain an inode * @sb: superblock * * Allocates a new inode for given superblock. The default gfp_mask * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE. 
* If HIGHMEM pages are unsuitable or it is known that pages allocated * for the page cache are not reclaimable or migratable, * mapping_set_gfp_mask() must be called with suitable flags on the * newly created inode's mapping * */ struct inode *new_inode(struct super_block *sb) { /* * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW * error if st_ino won't fit in target struct field. Use 32bit counter * here to attempt to avoid that. */ static unsigned int last_ino; struct inode *inode; spin_lock_prefetch(&inode_lock); inode = alloc_inode(sb); if (inode) { spin_lock(&inode_lock); __inode_add_to_lists(sb, NULL, inode); inode->i_ino = ++last_ino; inode->i_state = 0; spin_unlock(&inode_lock); } return inode; } EXPORT_SYMBOL(new_inode); void unlock_new_inode(struct inode *inode) { #ifdef CONFIG_DEBUG_LOCK_ALLOC if (inode->i_mode & S_IFDIR) { struct file_system_type *type = inode->i_sb->s_type; /* Set new key only if filesystem hasn't already changed it */ if (!lockdep_match_class(&inode->i_mutex, &type->i_mutex_key)) { /* * ensure nobody is actually holding i_mutex */ mutex_destroy(&inode->i_mutex); mutex_init(&inode->i_mutex); lockdep_set_class(&inode->i_mutex, &type->i_mutex_dir_key); } } #endif /* * This is special! We do not need the spinlock when clearing I_LOCK, * because we're guaranteed that nobody else tries to do anything about * the state of the inode when it is locked, as we just created it (so * there can be no old holders that haven't tested I_LOCK). * However we must emit the memory barrier so that other CPUs reliably * see the clearing of I_LOCK after the other inode initialisation has * completed. */ smp_mb(); WARN_ON((inode->i_state & (I_LOCK|I_NEW)) != (I_LOCK|I_NEW)); inode->i_state &= ~(I_LOCK|I_NEW); wake_up_inode(inode); } EXPORT_SYMBOL(unlock_new_inode); /* * This is called without the inode lock held.. Be careful. * * We no longer cache the sb_flags in i_flags - see fs.h * -- rmk@arm.uk.linux.org */ static struct inode *get_new_inode(struct super_block *sb, struct hlist_head *head, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *data) { struct inode *inode; inode = alloc_inode(sb); if (inode) { struct inode *old; spin_lock(&inode_lock); /* We released the lock, so.. */ old = find_inode(sb, head, test, data); if (!old) { if (set(inode, data)) goto set_failed; __inode_add_to_lists(sb, head, inode); inode->i_state = I_LOCK|I_NEW; spin_unlock(&inode_lock); /* Return the locked inode with I_NEW set, the * caller is responsible for filling in the contents */ return inode; } /* * Uhhuh, somebody else created the same inode under * us. Use the old inode instead of the one we just * allocated. */ __iget(old); spin_unlock(&inode_lock); destroy_inode(inode); inode = old; wait_on_inode(inode); } return inode; set_failed: spin_unlock(&inode_lock); destroy_inode(inode); return NULL; } /* * get_new_inode_fast is the fast path version of get_new_inode, see the * comment at iget_locked for details. */ static struct inode *get_new_inode_fast(struct super_block *sb, struct hlist_head *head, unsigned long ino) { struct inode *inode; inode = alloc_inode(sb); if (inode) { struct inode *old; spin_lock(&inode_lock); /* We released the lock, so.. 
*/ old = find_inode_fast(sb, head, ino); if (!old) { inode->i_ino = ino; __inode_add_to_lists(sb, head, inode); inode->i_state = I_LOCK|I_NEW; spin_unlock(&inode_lock); /* Return the locked inode with I_NEW set, the * caller is responsible for filling in the contents */ return inode; } /* * Uhhuh, somebody else created the same inode under * us. Use the old inode instead of the one we just * allocated. */ __iget(old); spin_unlock(&inode_lock); destroy_inode(inode); inode = old; wait_on_inode(inode); } return inode; } /** * iunique - get a unique inode number * @sb: superblock * @max_reserved: highest reserved inode number * * Obtain an inode number that is unique on the system for a given * superblock. This is used by file systems that have no natural * permanent inode numbering system. An inode number is returned that * is higher than the reserved limit but unique. * * BUGS: * With a large number of inodes live on the file system this function * currently becomes quite slow. */ ino_t iunique(struct super_block *sb, ino_t max_reserved) { /* * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW * error if st_ino won't fit in target struct field. Use 32bit counter * here to attempt to avoid that. */ static unsigned int counter; struct inode *inode; struct hlist_head *head; ino_t res; spin_lock(&inode_lock); do { if (counter <= max_reserved) counter = max_reserved + 1; res = counter++; head = inode_hashtable + hash(sb, res); inode = find_inode_fast(sb, head, res); } while (inode != NULL); spin_unlock(&inode_lock); return res; } EXPORT_SYMBOL(iunique); struct inode *igrab(struct inode *inode) { spin_lock(&inode_lock); if (!(inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE))) __iget(inode); else /* * Handle the case where s_op->clear_inode has not been * called yet, and somebody is calling igrab * while the inode is getting freed. */ inode = NULL; spin_unlock(&inode_lock); return inode; } EXPORT_SYMBOL(igrab); /** * ifind - internal function, you want ilookup5() or iget5(). * @sb: super block of file system to search * @head: the head of the list to search * @test: callback used for comparisons between inodes * @data: opaque data pointer to pass to @test * @wait: if true wait for the inode to be unlocked, if false do not * * ifind() searches for the inode specified by @data in the inode * cache. This is a generalized version of ifind_fast() for file systems where * the inode number is not sufficient for unique identification of an inode. * * If the inode is in the cache, the inode is returned with an incremented * reference count. * * Otherwise NULL is returned. * * Note, @test is called with the inode_lock held, so can't sleep. */ static struct inode *ifind(struct super_block *sb, struct hlist_head *head, int (*test)(struct inode *, void *), void *data, const int wait) { struct inode *inode; spin_lock(&inode_lock); inode = find_inode(sb, head, test, data); if (inode) { __iget(inode); spin_unlock(&inode_lock); if (likely(wait)) wait_on_inode(inode); return inode; } spin_unlock(&inode_lock); return NULL; } /** * ifind_fast - internal function, you want ilookup() or iget(). * @sb: super block of file system to search * @head: head of the list to search * @ino: inode number to search for * * ifind_fast() searches for the inode @ino in the inode cache. This is for * file systems where the inode number is sufficient for unique identification * of an inode. * * If the inode is in the cache, the inode is returned with an incremented * reference count. * * Otherwise NULL is returned.
*/ static struct inode *ifind_fast(struct super_block *sb, struct hlist_head *head, unsigned long ino) { struct inode *inode; spin_lock(&inode_lock); inode = find_inode_fast(sb, head, ino); if (inode) { __iget(inode); spin_unlock(&inode_lock); wait_on_inode(inode); return inode; } spin_unlock(&inode_lock); return NULL; } /** * ilookup5_nowait - search for an inode in the inode cache * @sb: super block of file system to search * @hashval: hash value (usually inode number) to search for * @test: callback used for comparisons between inodes * @data: opaque data pointer to pass to @test * * ilookup5_nowait() uses ifind() to search for the inode specified by @hashval and * @data in the inode cache. This is a generalized version of ilookup() for * file systems where the inode number is not sufficient for unique * identification of an inode. * * If the inode is in the cache, the inode is returned with an incremented * reference count. Note, the inode lock is not waited upon so you have to be * very careful what you do with the returned inode. You probably should be * using ilookup5() instead. * * Otherwise NULL is returned. * * Note, @test is called with the inode_lock held, so can't sleep. */ struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval, int (*test)(struct inode *, void *), void *data) { struct hlist_head *head = inode_hashtable + hash(sb, hashval); return ifind(sb, head, test, data, 0); } EXPORT_SYMBOL(ilookup5_nowait); /** * ilookup5 - search for an inode in the inode cache * @sb: super block of file system to search * @hashval: hash value (usually inode number) to search for * @test: callback used for comparisons between inodes * @data: opaque data pointer to pass to @test * * ilookup5() uses ifind() to search for the inode specified by @hashval and * @data in the inode cache. This is a generalized version of ilookup() for * file systems where the inode number is not sufficient for unique * identification of an inode. * * If the inode is in the cache, the inode lock is waited upon and the inode is * returned with an incremented reference count. * * Otherwise NULL is returned. * * Note, @test is called with the inode_lock held, so can't sleep. */ struct inode *ilookup5(struct super_block *sb, unsigned long hashval, int (*test)(struct inode *, void *), void *data) { struct hlist_head *head = inode_hashtable + hash(sb, hashval); return ifind(sb, head, test, data, 1); } EXPORT_SYMBOL(ilookup5); /** * ilookup - search for an inode in the inode cache * @sb: super block of file system to search * @ino: inode number to search for * * ilookup() uses ifind_fast() to search for the inode @ino in the inode cache. * This is for file systems where the inode number is sufficient for unique * identification of an inode. * * If the inode is in the cache, the inode is returned with an incremented * reference count. * * Otherwise NULL is returned.
*/ struct inode *ilookup(struct super_block *sb, unsigned long ino) { struct hlist_head *head = inode_hashtable + hash(sb, ino); return ifind_fast(sb, head, ino); } EXPORT_SYMBOL(ilookup); /** * iget5_locked - obtain an inode from a mounted file system * @sb: super block of file system * @hashval: hash value (usually inode number) to get * @test: callback used for comparisons between inodes * @set: callback used to initialize a new struct inode * @data: opaque data pointer to pass to @test and @set * * iget5_locked() uses ifind() to search for the inode specified by @hashval * and @data in the inode cache and if present it is returned with an increased * reference count. This is a generalized version of iget_locked() for file * systems where the inode number is not sufficient for unique identification * of an inode. * * If the inode is not in cache, get_new_inode() is called to allocate a new * inode and this is returned locked, hashed, and with the I_NEW flag set. The * file system gets to fill it in before unlocking it via unlock_new_inode(). * * Note both @test and @set are called with the inode_lock held, so can't sleep. */ struct inode *iget5_locked(struct super_block *sb, unsigned long hashval, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *data) { struct hlist_head *head = inode_hashtable + hash(sb, hashval); struct inode *inode; inode = ifind(sb, head, test, data, 1); if (inode) return inode; /* * get_new_inode() will do the right thing, re-trying the search * in case it had to block at any point. */ return get_new_inode(sb, head, test, set, data); } EXPORT_SYMBOL(iget5_locked); /** * iget_locked - obtain an inode from a mounted file system * @sb: super block of file system * @ino: inode number to get * * iget_locked() uses ifind_fast() to search for the inode specified by @ino in * the inode cache and if present it is returned with an increased reference * count. This is for file systems where the inode number is sufficient for * unique identification of an inode. * * If the inode is not in cache, get_new_inode_fast() is called to allocate a * new inode and this is returned locked, hashed, and with the I_NEW flag set. * The file system gets to fill it in before unlocking it via * unlock_new_inode(). */ struct inode *iget_locked(struct super_block *sb, unsigned long ino) { struct hlist_head *head = inode_hashtable + hash(sb, ino); struct inode *inode; inode = ifind_fast(sb, head, ino); if (inode) return inode; /* * get_new_inode_fast() will do the right thing, re-trying the search * in case it had to block at any point. 
*/ return get_new_inode_fast(sb, head, ino); } EXPORT_SYMBOL(iget_locked); int insert_inode_locked(struct inode *inode) { struct super_block *sb = inode->i_sb; ino_t ino = inode->i_ino; struct hlist_head *head = inode_hashtable + hash(sb, ino); inode->i_state |= I_LOCK|I_NEW; while (1) { struct hlist_node *node; struct inode *old = NULL; spin_lock(&inode_lock); hlist_for_each_entry(old, node, head, i_hash) { if (old->i_ino != ino) continue; if (old->i_sb != sb) continue; if (old->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) continue; break; } if (likely(!node)) { hlist_add_head(&inode->i_hash, head); spin_unlock(&inode_lock); return 0; } __iget(old); spin_unlock(&inode_lock); wait_on_inode(old); if (unlikely(!hlist_unhashed(&old->i_hash))) { iput(old); return -EBUSY; } iput(old); } } EXPORT_SYMBOL(insert_inode_locked); int insert_inode_locked4(struct inode *inode, unsigned long hashval, int (*test)(struct inode *, void *), void *data) { struct super_block *sb = inode->i_sb; struct hlist_head *head = inode_hashtable + hash(sb, hashval); inode->i_state |= I_LOCK|I_NEW; while (1) { struct hlist_node *node; struct inode *old = NULL; spin_lock(&inode_lock); hlist_for_each_entry(old, node, head, i_hash) { if (old->i_sb != sb) continue; if (!test(old, data)) continue; if (old->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) continue; break; } if (likely(!node)) { hlist_add_head(&inode->i_hash, head); spin_unlock(&inode_lock); return 0; } __iget(old); spin_unlock(&inode_lock); wait_on_inode(old); if (unlikely(!hlist_unhashed(&old->i_hash))) { iput(old); return -EBUSY; } iput(old); } } EXPORT_SYMBOL(insert_inode_locked4); /** * __insert_inode_hash - hash an inode * @inode: unhashed inode * @hashval: unsigned long value used to locate this object in the * inode_hashtable. * * Add an inode to the inode hash for this superblock. */ void __insert_inode_hash(struct inode *inode, unsigned long hashval) { struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval); spin_lock(&inode_lock); hlist_add_head(&inode->i_hash, head); spin_unlock(&inode_lock); } EXPORT_SYMBOL(__insert_inode_hash); /** * remove_inode_hash - remove an inode from the hash * @inode: inode to unhash * * Remove an inode from the superblock. */ void remove_inode_hash(struct inode *inode) { spin_lock(&inode_lock); hlist_del_init(&inode->i_hash); spin_unlock(&inode_lock); } EXPORT_SYMBOL(remove_inode_hash); /* * Tell the filesystem that this inode is no longer of any interest and should * be completely destroyed. * * We leave the inode in the inode hash table until *after* the filesystem's * ->delete_inode completes. This ensures that an iget (such as nfsd might * instigate) will always find up-to-date information either in the hash or on * disk. * * I_FREEING is set so that no-one will take a new reference to the inode while * it is being deleted. 
*/ void generic_delete_inode(struct inode *inode) { const struct super_operations *op = inode->i_sb->s_op; list_del_init(&inode->i_list); list_del_init(&inode->i_sb_list); WARN_ON(inode->i_state & I_NEW); inode->i_state |= I_FREEING; inodes_stat.nr_inodes--; spin_unlock(&inode_lock); security_inode_delete(inode); if (op->delete_inode) { void (*delete)(struct inode *) = op->delete_inode; if (!is_bad_inode(inode)) vfs_dq_init(inode); /* Filesystems implementing their own * s_op->delete_inode are required to call * truncate_inode_pages and clear_inode() * internally */ delete(inode); } else { truncate_inode_pages(&inode->i_data, 0); clear_inode(inode); } spin_lock(&inode_lock); hlist_del_init(&inode->i_hash); spin_unlock(&inode_lock); wake_up_inode(inode); BUG_ON(inode->i_state != I_CLEAR); destroy_inode(inode); } EXPORT_SYMBOL(generic_delete_inode); /** * generic_detach_inode - remove inode from inode lists * @inode: inode to remove * * Remove inode from inode lists, write it if it's dirty. This is just an * internal VFS helper exported for hugetlbfs. Do not use! * * Returns 1 if inode should be completely destroyed. */ int generic_detach_inode(struct inode *inode) { struct super_block *sb = inode->i_sb; if (!hlist_unhashed(&inode->i_hash)) { if (!(inode->i_state & (I_DIRTY|I_SYNC))) list_move(&inode->i_list, &inode_unused); inodes_stat.nr_unused++; if (sb->s_flags & MS_ACTIVE) { spin_unlock(&inode_lock); return 0; } WARN_ON(inode->i_state & I_NEW); inode->i_state |= I_WILL_FREE; spin_unlock(&inode_lock); write_inode_now(inode, 1); spin_lock(&inode_lock); WARN_ON(inode->i_state & I_NEW); inode->i_state &= ~I_WILL_FREE; inodes_stat.nr_unused--; hlist_del_init(&inode->i_hash); } list_del_init(&inode->i_list); list_del_init(&inode->i_sb_list); WARN_ON(inode->i_state & I_NEW); inode->i_state |= I_FREEING; inodes_stat.nr_inodes--; spin_unlock(&inode_lock); return 1; } EXPORT_SYMBOL_GPL(generic_detach_inode); static void generic_forget_inode(struct inode *inode) { if (!generic_detach_inode(inode)) return; if (inode->i_data.nrpages) truncate_inode_pages(&inode->i_data, 0); clear_inode(inode); wake_up_inode(inode); destroy_inode(inode); } /* * Normal UNIX filesystem behaviour: delete the * inode when the usage count drops to zero, and * i_nlink is zero. */ void generic_drop_inode(struct inode *inode) { if (!inode->i_nlink) generic_delete_inode(inode); else generic_forget_inode(inode); } EXPORT_SYMBOL_GPL(generic_drop_inode); /* * Called when we're dropping the last reference * to an inode. * * Call the FS "drop()" function, defaulting to * the legacy UNIX filesystem behaviour.. * * NOTE! NOTE! NOTE! We're called with the inode lock * held, and the drop function is supposed to release * the lock! */ static inline void iput_final(struct inode *inode) { const struct super_operations *op = inode->i_sb->s_op; void (*drop)(struct inode *) = generic_drop_inode; if (op && op->drop_inode) drop = op->drop_inode; drop(inode); } /** * iput - put an inode * @inode: inode to put * * Puts an inode, dropping its usage count. If the inode use count hits * zero, the inode is then freed and may also be destroyed. * * Consequently, iput() can sleep. 
*/ void iput(struct inode *inode) { if (inode) { BUG_ON(inode->i_state == I_CLEAR); if (atomic_dec_and_lock(&inode->i_count, &inode_lock)) iput_final(inode); } } EXPORT_SYMBOL(iput); /** * bmap - find a block number in a file * @inode: inode of file * @block: block to find * * Returns the block number on the device holding the inode that * is the disk block number for the block of the file requested. * That is, asked for block 4 of inode 1 the function will return the * disk block relative to the disk start that holds that block of the * file. */ sector_t bmap(struct inode *inode, sector_t block) { sector_t res = 0; if (inode->i_mapping->a_ops->bmap) res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block); return res; } EXPORT_SYMBOL(bmap); /* * With relative atime, only update atime if the previous atime is * earlier than either the ctime or mtime or if at least a day has * passed since the last atime update. */ static int relatime_need_update(struct vfsmount *mnt, struct inode *inode, struct timespec now) { if (!(mnt->mnt_flags & MNT_RELATIME)) return 1; /* * Is mtime younger than atime? If yes, update atime: */ if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0) return 1; /* * Is ctime younger than atime? If yes, update atime: */ if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0) return 1; /* * Is the previous atime value older than a day? If yes, * update atime: */ if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60) return 1; /* * Good, we can skip the atime update: */ return 0; } /** * touch_atime - update the access time * @mnt: mount the inode is accessed on * @dentry: dentry accessed * * Update the accessed time on an inode and mark it for writeback. * This function automatically handles read only file systems and media, * as well as the "noatime" flag and inode specific "noatime" markers. */ void touch_atime(struct vfsmount *mnt, struct dentry *dentry) { struct inode *inode = dentry->d_inode; struct timespec now; if (inode->i_flags & S_NOATIME) return; if (IS_NOATIME(inode)) return; if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)) return; if (mnt->mnt_flags & MNT_NOATIME) return; if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)) return; now = current_fs_time(inode->i_sb); if (!relatime_need_update(mnt, inode, now)) return; if (timespec_equal(&inode->i_atime, &now)) return; if (mnt_want_write(mnt)) return; inode->i_atime = now; mark_inode_dirty_sync(inode); mnt_drop_write(mnt); } EXPORT_SYMBOL(touch_atime); /** * file_update_time - update mtime and ctime time * @file: file accessed * * Update the mtime and ctime members of an inode and mark the inode * for writeback. Note that this function is meant exclusively for * usage in the file write path of filesystems, and filesystems may * choose to explicitly ignore update via this function with the * S_NOCMTIME inode flag, e.g. for network filesystem where these * timestamps are handled by the server. */ void file_update_time(struct file *file) { struct inode *inode = file->f_path.dentry->d_inode; struct timespec now; enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0; /* First try to exhaust all avenues to not sync */ if (IS_NOCMTIME(inode)) return; now = current_fs_time(inode->i_sb); if (!timespec_equal(&inode->i_mtime, &now)) sync_it = S_MTIME; if (!timespec_equal(&inode->i_ctime, &now)) sync_it |= S_CTIME; if (IS_I_VERSION(inode)) sync_it |= S_VERSION; if (!sync_it) return; /* Finally allowed to write? Takes lock. 
*/ if (mnt_want_write_file(file)) return; /* Only change inode inside the lock region */ if (sync_it & S_VERSION) inode_inc_iversion(inode); if (sync_it & S_CTIME) inode->i_ctime = now; if (sync_it & S_MTIME) inode->i_mtime = now; mark_inode_dirty_sync(inode); mnt_drop_write(file->f_path.mnt); } EXPORT_SYMBOL(file_update_time); int inode_needs_sync(struct inode *inode) { if (IS_SYNC(inode)) return 1; if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode)) return 1; return 0; } EXPORT_SYMBOL(inode_needs_sync); int inode_wait(void *word) { schedule(); return 0; } EXPORT_SYMBOL(inode_wait); /* * If we try to find an inode in the inode hash while it is being * deleted, we have to wait until the filesystem completes its * deletion before reporting that it isn't found. This function waits * until the deletion _might_ have completed. Callers are responsible * to recheck inode state. * * It doesn't matter if I_LOCK is not set initially, a call to * wake_up_inode() after removing from the hash list will DTRT. * * This is called with inode_lock held. */ static void __wait_on_freeing_inode(struct inode *inode) { wait_queue_head_t *wq; DEFINE_WAIT_BIT(wait, &inode->i_state, __I_LOCK); wq = bit_waitqueue(&inode->i_state, __I_LOCK); prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE); spin_unlock(&inode_lock); schedule(); finish_wait(wq, &wait.wait); spin_lock(&inode_lock); } static __initdata unsigned long ihash_entries; static int __init set_ihash_entries(char *str) { if (!str) return 0; ihash_entries = simple_strtoul(str, &str, 0); return 1; } __setup("ihash_entries=", set_ihash_entries); /* * Initialize the waitqueues and inode hash table. */ void __init inode_init_early(void) { int loop; /* If hashes are distributed across NUMA nodes, defer * hash allocation until vmalloc space is available. */ if (hashdist) return; inode_hashtable = alloc_large_system_hash("Inode-cache", sizeof(struct hlist_head), ihash_entries, 14, HASH_EARLY, &i_hash_shift, &i_hash_mask, 0); for (loop = 0; loop < (1 << i_hash_shift); loop++) INIT_HLIST_HEAD(&inode_hashtable[loop]); } void __init inode_init(void) { int loop; /* inode slab cache */ inode_cachep = kmem_cache_create("inode_cache", sizeof(struct inode), 0, (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC| SLAB_MEM_SPREAD), init_once); register_shrinker(&icache_shrinker); /* Hash may have been set up in inode_init_early */ if (!hashdist) return; inode_hashtable = alloc_large_system_hash("Inode-cache", sizeof(struct hlist_head), ihash_entries, 14, 0, &i_hash_shift, &i_hash_mask, 0); for (loop = 0; loop < (1 << i_hash_shift); loop++) INIT_HLIST_HEAD(&inode_hashtable[loop]); } void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev) { inode->i_mode = mode; if (S_ISCHR(mode)) { inode->i_fop = &def_chr_fops; inode->i_rdev = rdev; } else if (S_ISBLK(mode)) { inode->i_fop = &def_blk_fops; inode->i_rdev = rdev; } else if (S_ISFIFO(mode)) inode->i_fop = &def_fifo_fops; else if (S_ISSOCK(mode)) inode->i_fop = &bad_sock_fops; else printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for" " inode %s:%lu\n", mode, inode->i_sb->s_id, inode->i_ino); } EXPORT_SYMBOL(init_special_inode);
liquidware/liquidware_beagleboard_android_kernel
fs/inode.c
C
gpl-2.0
43,307
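/* Editor's note: a minimal usage sketch, not part of the kernel file above.
 * It illustrates the iget_locked()/unlock_new_inode() contract documented
 * in fs/inode.c: a cache miss returns a hashed, locked inode with I_NEW
 * set, which the filesystem fills in before publishing it. The myfs_iget()
 * name and the S_IFREG initialisation are hypothetical placeholders. */
#include <linux/fs.h>

static struct inode *myfs_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = iget_locked(sb, ino);

	if (!inode)
		return NULL;		/* allocation failed */
	if (!(inode->i_state & I_NEW))
		return inode;		/* found in cache, already initialised */

	/* We own the I_NEW inode: read the real on-disk data here. */
	inode->i_mode = S_IFREG | 0644;	/* placeholder */
	unlock_new_inode(inode);
	return inode;
}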
/* * drivers/i2c/busses/i2c-tegra.c * * Copyright (C) 2010 Google, Inc. * Author: Colin Cross <ccross@android.com> * * Copyright (C) 2010-2012 NVIDIA Corporation * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ /*#define DEBUG 1*/ /*#define VERBOSE_DEBUG 1*/ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/io.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/i2c-tegra.h> #include <linux/of_i2c.h> #include <linux/spinlock.h> #include <asm/unaligned.h> #include <mach/clk.h> #include <mach/pinmux.h> #define TEGRA_I2C_TIMEOUT (msecs_to_jiffies(1000)) #define TEGRA_I2C_RETRIES 3 #define BYTES_PER_FIFO_WORD 4 #define I2C_UNKNOWN_RETRY_TIME 500 #define I2C_CNFG 0x000 #define I2C_CNFG_DEBOUNCE_CNT_SHIFT 12 #define I2C_CNFG_PACKET_MODE_EN (1<<10) #define I2C_CNFG_NEW_MASTER_FSM (1<<11) #define I2C_STATUS 0x01C #define I2C_STATUS_BUSY (1<<8) #define I2C_SL_CNFG 0x020 #define I2C_SL_CNFG_NACK (1<<1) #define I2C_SL_CNFG_NEWSL (1<<2) #define I2C_SL_ADDR1 0x02c #define I2C_SL_ADDR2 0x030 #define I2C_TX_FIFO 0x050 #define I2C_RX_FIFO 0x054 #define I2C_PACKET_TRANSFER_STATUS 0x058 #define I2C_FIFO_CONTROL 0x05c #define I2C_FIFO_CONTROL_TX_FLUSH (1<<1) #define I2C_FIFO_CONTROL_RX_FLUSH (1<<0) #define I2C_FIFO_CONTROL_TX_TRIG_SHIFT 5 #define I2C_FIFO_CONTROL_RX_TRIG_SHIFT 2 #define I2C_FIFO_STATUS 0x060 #define I2C_FIFO_STATUS_TX_MASK 0xF0 #define I2C_FIFO_STATUS_TX_SHIFT 4 #define I2C_FIFO_STATUS_RX_MASK 0x0F #define I2C_FIFO_STATUS_RX_SHIFT 0 #define I2C_INT_MASK 0x064 #define I2C_INT_STATUS 0x068 #define I2C_INT_PACKET_XFER_COMPLETE (1<<7) #define I2C_INT_ALL_PACKETS_XFER_COMPLETE (1<<6) #define I2C_INT_TX_FIFO_OVERFLOW (1<<5) #define I2C_INT_RX_FIFO_UNDERFLOW (1<<4) #define I2C_INT_NO_ACK (1<<3) #define I2C_INT_ARBITRATION_LOST (1<<2) #define I2C_INT_TX_FIFO_DATA_REQ (1<<1) #define I2C_INT_RX_FIFO_DATA_REQ (1<<0) #define I2C_CLK_DIVISOR 0x06c #define DVC_CTRL_REG1 0x000 #define DVC_CTRL_REG1_INTR_EN (1<<10) #define DVC_CTRL_REG2 0x004 #define DVC_CTRL_REG3 0x008 #define DVC_CTRL_REG3_SW_PROG (1<<26) #define DVC_CTRL_REG3_I2C_DONE_INTR_EN (1<<30) #define DVC_STATUS 0x00c #define DVC_STATUS_I2C_DONE_INTR (1<<30) #define I2C_ERR_NONE 0x00 #define I2C_ERR_NO_ACK 0x01 #define I2C_ERR_ARBITRATION_LOST 0x02 #define I2C_ERR_UNKNOWN_INTERRUPT 0x04 #define I2C_ERR_UNEXPECTED_STATUS 0x08 #define PACKET_HEADER0_HEADER_SIZE_SHIFT 28 #define PACKET_HEADER0_PACKET_ID_SHIFT 16 #define PACKET_HEADER0_CONT_ID_SHIFT 12 #define PACKET_HEADER0_PROTOCOL_I2C (1<<4) #define I2C_HEADER_HIGHSPEED_MODE (1<<22) #define I2C_HEADER_CONT_ON_NAK (1<<21) #define I2C_HEADER_SEND_START_BYTE (1<<20) #define I2C_HEADER_READ (1<<19) #define I2C_HEADER_10BIT_ADDR (1<<18) #define I2C_HEADER_IE_ENABLE (1<<17) #define I2C_HEADER_REPEAT_START (1<<16) #define I2C_HEADER_CONTINUE_XFER (1<<15) #define I2C_HEADER_MASTER_ADDR_SHIFT 12 #define I2C_HEADER_SLAVE_ADDR_SHIFT 1 #define SL_ADDR1(addr) (addr & 0xff) #define SL_ADDR2(addr) ((addr >> 8) & 0xff) /* * msg_end_type: The bus control 
which needs to be sent at the end of a transfer. * @MSG_END_STOP: Send stop pulse at end of transfer. * @MSG_END_REPEAT_START: Send repeat start at end of transfer. * @MSG_END_CONTINUE: A following message is coming, so do not send * stop or repeat start. */ enum msg_end_type { MSG_END_STOP, MSG_END_REPEAT_START, MSG_END_CONTINUE, }; struct tegra_i2c_dev; struct tegra_i2c_bus { struct tegra_i2c_dev *dev; const struct tegra_pingroup_config *mux; int mux_len; unsigned long bus_clk_rate; struct i2c_adapter adapter; int scl_gpio; int sda_gpio; }; /** * struct tegra_i2c_dev - per device i2c context * @dev: device reference for power management * @adapter: core i2c layer adapter information * @clk: clock reference for i2c controller * @i2c_clk: clock reference for i2c bus * @base: ioremapped registers cookie * @cont_id: i2c controller id, used for the packet header * @irq: irq number of transfer complete interrupt * @is_dvc: identifies the DVC i2c controller, has a different register layout * @msg_complete: transfer completion notifier * @msg_err: error code for completed message * @msg_buf: pointer to current message data * @msg_buf_remaining: size of unsent data in the message buffer * @msg_read: identifies read transfers * @bus_clk_rate: current i2c bus clock rate * @is_suspended: prevents i2c controller accesses after suspend is called */ struct tegra_i2c_dev { struct device *dev; struct clk *div_clk; struct clk *fast_clk; struct rt_mutex dev_lock; spinlock_t fifo_lock; void __iomem *base; int cont_id; int irq; bool irq_disabled; int is_dvc; struct completion msg_complete; int msg_err; u8 *msg_buf; u32 packet_header; u32 payload_size; u32 io_header; size_t msg_buf_remaining; int msg_read; struct i2c_msg *msgs; int msg_add; int msgs_num; bool is_suspended; int bus_count; const struct tegra_pingroup_config *last_mux; int last_mux_len; unsigned long last_bus_clk_rate; u16 slave_addr; bool is_clkon_always; bool is_high_speed_enable; u16 hs_master_code; int (*arb_recovery)(int scl_gpio, int sda_gpio); struct tegra_i2c_bus busses[1]; }; static void dvc_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned long reg) { writel(val, i2c_dev->base + reg); } static u32 dvc_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg) { return readl(i2c_dev->base + reg); } static void dvc_i2c_mask_irq(struct tegra_i2c_dev *i2c_dev, u32 mask) { u32 int_mask = dvc_readl(i2c_dev, DVC_CTRL_REG3); int_mask &= ~mask; dvc_writel(i2c_dev, int_mask, DVC_CTRL_REG3); } static void dvc_i2c_unmask_irq(struct tegra_i2c_dev *i2c_dev, u32 mask) { u32 int_mask = dvc_readl(i2c_dev, DVC_CTRL_REG3); int_mask |= mask; dvc_writel(i2c_dev, int_mask, DVC_CTRL_REG3); } /* * i2c_writel and i2c_readl will offset the register if necessary to talk * to the I2C block inside the DVC block */ static unsigned long tegra_i2c_reg_addr(struct tegra_i2c_dev *i2c_dev, unsigned long reg) { if (i2c_dev->is_dvc) reg += (reg >= I2C_TX_FIFO) ?
0x10 : 0x40; return reg; } static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned long reg) { writel(val, i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg)); /* Read back register to make sure that register writes completed */ if (reg != I2C_TX_FIFO) readl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg)); } static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg) { return readl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg)); } static void i2c_writesl(struct tegra_i2c_dev *i2c_dev, void *data, unsigned long reg, int len) { writesl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len); } static void i2c_readsl(struct tegra_i2c_dev *i2c_dev, void *data, unsigned long reg, int len) { readsl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len); } static void tegra_i2c_mask_irq(struct tegra_i2c_dev *i2c_dev, u32 mask) { u32 int_mask = i2c_readl(i2c_dev, I2C_INT_MASK); int_mask &= ~mask; i2c_writel(i2c_dev, int_mask, I2C_INT_MASK); } static void tegra_i2c_unmask_irq(struct tegra_i2c_dev *i2c_dev, u32 mask) { u32 int_mask = i2c_readl(i2c_dev, I2C_INT_MASK); int_mask |= mask; i2c_writel(i2c_dev, int_mask, I2C_INT_MASK); } static int tegra_i2c_flush_fifos(struct tegra_i2c_dev *i2c_dev) { unsigned long timeout = jiffies + HZ; u32 val = i2c_readl(i2c_dev, I2C_FIFO_CONTROL); val |= I2C_FIFO_CONTROL_TX_FLUSH | I2C_FIFO_CONTROL_RX_FLUSH; i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL); while (i2c_readl(i2c_dev, I2C_FIFO_CONTROL) & (I2C_FIFO_CONTROL_TX_FLUSH | I2C_FIFO_CONTROL_RX_FLUSH)) { if (time_after(jiffies, timeout)) { dev_warn(i2c_dev->dev, "timeout waiting for fifo flush\n"); return -ETIMEDOUT; } msleep(1); } return 0; } static int tegra_i2c_empty_rx_fifo(struct tegra_i2c_dev *i2c_dev) { u32 val; int rx_fifo_avail; u8 *buf = i2c_dev->msg_buf; size_t buf_remaining = i2c_dev->msg_buf_remaining; int words_to_transfer; val = i2c_readl(i2c_dev, I2C_FIFO_STATUS); rx_fifo_avail = (val & I2C_FIFO_STATUS_RX_MASK) >> I2C_FIFO_STATUS_RX_SHIFT; /* Rounds down to not include partial word at the end of buf */ words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD; if (words_to_transfer > rx_fifo_avail) words_to_transfer = rx_fifo_avail; i2c_readsl(i2c_dev, buf, I2C_RX_FIFO, words_to_transfer); buf += words_to_transfer * BYTES_PER_FIFO_WORD; buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD; rx_fifo_avail -= words_to_transfer; /* * If there is a partial word at the end of buf, handle it manually to * prevent overwriting past the end of buf */ if (rx_fifo_avail > 0 && buf_remaining > 0) { BUG_ON(buf_remaining > 3); val = i2c_readl(i2c_dev, I2C_RX_FIFO); memcpy(buf, &val, buf_remaining); buf_remaining = 0; rx_fifo_avail--; } BUG_ON(rx_fifo_avail > 0 && buf_remaining > 0); i2c_dev->msg_buf_remaining = buf_remaining; i2c_dev->msg_buf = buf; return 0; } static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev) { u32 val; int tx_fifo_avail; u8 *buf; size_t buf_remaining; int words_to_transfer; unsigned long flags; spin_lock_irqsave(&i2c_dev->fifo_lock, flags); if (!i2c_dev->msg_buf_remaining) { spin_unlock_irqrestore(&i2c_dev->fifo_lock, flags); return 0; } buf = i2c_dev->msg_buf; buf_remaining = i2c_dev->msg_buf_remaining; val = i2c_readl(i2c_dev, I2C_FIFO_STATUS); tx_fifo_avail = (val & I2C_FIFO_STATUS_TX_MASK) >> I2C_FIFO_STATUS_TX_SHIFT; /* Rounds down to not include partial word at the end of buf */ words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD; /* It's very common to have < 4 bytes, so optimize that case. 
*/ if (words_to_transfer) { if (words_to_transfer > tx_fifo_avail) words_to_transfer = tx_fifo_avail; /* * Update state before writing to FIFO. If this causes us * to finish writing all bytes (AKA buf_remaining goes to 0) we * have a potential for an interrupt (PACKET_XFER_COMPLETE is * not maskable). We need to make sure that the isr sees * buf_remaining as 0 and doesn't call us back re-entrantly. */ buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD; tx_fifo_avail -= words_to_transfer; i2c_dev->msg_buf_remaining = buf_remaining; i2c_dev->msg_buf = buf + words_to_transfer * BYTES_PER_FIFO_WORD; barrier(); i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer); buf += words_to_transfer * BYTES_PER_FIFO_WORD; } /* * If there is a partial word at the end of buf, handle it manually to * prevent reading past the end of buf, which could cross a page * boundary and fault. */ if (tx_fifo_avail > 0 && buf_remaining > 0) { if (buf_remaining > 3) { dev_err(i2c_dev->dev, "Remaining buffer more than 3: %zu\n", buf_remaining); BUG(); } memcpy(&val, buf, buf_remaining); /* Again update before writing to FIFO to make sure isr sees. */ i2c_dev->msg_buf_remaining = 0; i2c_dev->msg_buf = NULL; barrier(); i2c_writel(i2c_dev, val, I2C_TX_FIFO); } spin_unlock_irqrestore(&i2c_dev->fifo_lock, flags); return 0; } /* * One of the Tegra I2C blocks is inside the DVC (Digital Voltage Controller) * block. This block is identical to the rest of the I2C blocks, except that * it only supports master mode, it has registers moved around, and it needs * some extra init to get it into I2C mode. The register moves are handled * by i2c_readl and i2c_writel */ static void tegra_dvc_init(struct tegra_i2c_dev *i2c_dev) { u32 val = 0; val = dvc_readl(i2c_dev, DVC_CTRL_REG3); val |= DVC_CTRL_REG3_SW_PROG; dvc_writel(i2c_dev, val, DVC_CTRL_REG3); val = dvc_readl(i2c_dev, DVC_CTRL_REG1); val |= DVC_CTRL_REG1_INTR_EN; dvc_writel(i2c_dev, val, DVC_CTRL_REG1); } static void tegra_i2c_slave_init(struct tegra_i2c_dev *i2c_dev) { u32 val = I2C_SL_CNFG_NEWSL | I2C_SL_CNFG_NACK; i2c_writel(i2c_dev, val, I2C_SL_CNFG); if (i2c_dev->slave_addr) { u16 addr = i2c_dev->slave_addr; i2c_writel(i2c_dev, SL_ADDR1(addr), I2C_SL_ADDR1); i2c_writel(i2c_dev, SL_ADDR2(addr), I2C_SL_ADDR2); } } static inline int tegra_i2c_clock_enable(struct tegra_i2c_dev *i2c_dev) { int ret; ret = clk_enable(i2c_dev->fast_clk); if (ret < 0) { dev_err(i2c_dev->dev, "Error in enabling fast clock err %d\n", ret); return ret; } ret = clk_enable(i2c_dev->div_clk); if (ret < 0) { dev_err(i2c_dev->dev, "Error in enabling div clock err %d\n", ret); clk_disable(i2c_dev->fast_clk); } return ret; } static inline void tegra_i2c_clock_disable(struct tegra_i2c_dev *i2c_dev) { clk_disable(i2c_dev->div_clk); clk_disable(i2c_dev->fast_clk); } static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev) { u32 val; int err = 0; tegra_i2c_clock_enable(i2c_dev); /* Interrupt is generated before sending the stop signal, so * wait for some time so that the stop signal can be sent properly */ mdelay(1); tegra_periph_reset_assert(i2c_dev->div_clk); udelay(2); tegra_periph_reset_deassert(i2c_dev->div_clk); if (i2c_dev->is_dvc) tegra_dvc_init(i2c_dev); val = I2C_CNFG_NEW_MASTER_FSM | I2C_CNFG_PACKET_MODE_EN | (0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT); i2c_writel(i2c_dev, val, I2C_CNFG); i2c_writel(i2c_dev, 0, I2C_INT_MASK); clk_set_rate(i2c_dev->div_clk, i2c_dev->last_bus_clk_rate * 8); i2c_writel(i2c_dev, 0x3, I2C_CLK_DIVISOR); if (!i2c_dev->is_dvc) { u32 sl_cfg = i2c_readl(i2c_dev, I2C_SL_CNFG); sl_cfg |=
I2C_SL_CNFG_NACK | I2C_SL_CNFG_NEWSL; i2c_writel(i2c_dev, sl_cfg, I2C_SL_CNFG); i2c_writel(i2c_dev, 0xfc, I2C_SL_ADDR1); i2c_writel(i2c_dev, 0x00, I2C_SL_ADDR2); } val = 7 << I2C_FIFO_CONTROL_TX_TRIG_SHIFT | 0 << I2C_FIFO_CONTROL_RX_TRIG_SHIFT; i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL); if (!i2c_dev->is_dvc) tegra_i2c_slave_init(i2c_dev); if (tegra_i2c_flush_fifos(i2c_dev)) err = -ETIMEDOUT; tegra_i2c_clock_disable(i2c_dev); if (i2c_dev->irq_disabled) { i2c_dev->irq_disabled = 0; enable_irq(i2c_dev->irq); } return err; } static irqreturn_t tegra_i2c_isr(int irq, void *dev_id) { u32 status; const u32 status_err = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST | I2C_INT_TX_FIFO_OVERFLOW; struct tegra_i2c_dev *i2c_dev = dev_id; int times = 0; status = i2c_readl(i2c_dev, I2C_INT_STATUS); if (status == 0) { printk(KERN_INFO "[I2C]Unknown interrupt happened\n"); /* Read I2C INT STATUS repeatedly to work around a timing issue */ while (status == 0 && times < I2C_UNKNOWN_RETRY_TIME) { status = i2c_readl(i2c_dev, I2C_INT_STATUS); times++; if (times == (I2C_UNKNOWN_RETRY_TIME - 1)) { dev_warn(i2c_dev->dev, "unknown interrupt, addr 0x%02x\n", i2c_dev->msg_add); i2c_dev->msg_err |= I2C_ERR_UNKNOWN_INTERRUPT; goto err; } } printk(KERN_INFO "[I2C]Retry for unknown interrupt succeeded\n"); } if (unlikely(status & status_err)) { dev_warn(i2c_dev->dev, "I2C error status 0x%08x\n", status); if (status & I2C_INT_NO_ACK) { i2c_dev->msg_err |= I2C_ERR_NO_ACK; dev_warn(i2c_dev->dev, "no acknowledge from address" " 0x%x\n", i2c_dev->msg_add); dev_warn(i2c_dev->dev, "Packet status 0x%08x\n", i2c_readl(i2c_dev, I2C_PACKET_TRANSFER_STATUS)); } if (status & I2C_INT_ARBITRATION_LOST) { i2c_dev->msg_err |= I2C_ERR_ARBITRATION_LOST; dev_warn(i2c_dev->dev, "arbitration lost while communicating" " with addr 0x%x\n", i2c_dev->msg_add); dev_warn(i2c_dev->dev, "Packet status 0x%08x\n", i2c_readl(i2c_dev, I2C_PACKET_TRANSFER_STATUS)); } if (status & I2C_INT_TX_FIFO_OVERFLOW) { i2c_dev->msg_err |= I2C_INT_TX_FIFO_OVERFLOW; dev_warn(i2c_dev->dev, "Tx fifo overflow while communicating" " with addr 0x%x\n", i2c_dev->msg_add); dev_warn(i2c_dev->dev, "Packet status 0x%08x\n", i2c_readl(i2c_dev, I2C_PACKET_TRANSFER_STATUS)); } goto err; } if (unlikely((i2c_readl(i2c_dev, I2C_STATUS) & I2C_STATUS_BUSY) && (status == I2C_INT_TX_FIFO_DATA_REQ) && i2c_dev->msg_read && i2c_dev->msg_buf_remaining)) { dev_warn(i2c_dev->dev, "unexpected status\n"); i2c_dev->msg_err |= I2C_ERR_UNEXPECTED_STATUS; if (!i2c_dev->irq_disabled) { disable_irq_nosync(i2c_dev->irq); i2c_dev->irq_disabled = 1; } goto err; } if (i2c_dev->msg_read && (status & I2C_INT_RX_FIFO_DATA_REQ)) { if (i2c_dev->msg_buf_remaining) tegra_i2c_empty_rx_fifo(i2c_dev); else BUG(); } if (!i2c_dev->msg_read && (status & I2C_INT_TX_FIFO_DATA_REQ)) { if (i2c_dev->msg_buf_remaining) tegra_i2c_fill_tx_fifo(i2c_dev); else tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ); } i2c_writel(i2c_dev, status, I2C_INT_STATUS); if (i2c_dev->is_dvc) dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS); if (status & I2C_INT_PACKET_XFER_COMPLETE) { BUG_ON(i2c_dev->msg_buf_remaining); complete(&i2c_dev->msg_complete); } return IRQ_HANDLED; err: dev_dbg(i2c_dev->dev, "reg: 0x%08x 0x%08x 0x%08x 0x%08x\n", i2c_readl(i2c_dev, I2C_CNFG), i2c_readl(i2c_dev, I2C_STATUS), i2c_readl(i2c_dev, I2C_INT_STATUS), i2c_readl(i2c_dev, I2C_PACKET_TRANSFER_STATUS)); dev_dbg(i2c_dev->dev, "packet: 0x%08x %u 0x%08x\n", i2c_dev->packet_header, i2c_dev->payload_size, i2c_dev->io_header); if (i2c_dev->msgs) { struct i2c_msg *msgs = i2c_dev->msgs; int i; for
(i = 0; i < i2c_dev->msgs_num; i++) dev_dbg(i2c_dev->dev, "msgs[%d] %c, addr=0x%04x, len=%d\n", i, (msgs[i].flags & I2C_M_RD) ? 'R' : 'W', msgs[i].addr, msgs[i].len); } /* An error occurred, mask all interrupts */ tegra_i2c_mask_irq(i2c_dev, I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST | I2C_INT_PACKET_XFER_COMPLETE | I2C_INT_TX_FIFO_DATA_REQ | I2C_INT_RX_FIFO_DATA_REQ | I2C_INT_TX_FIFO_OVERFLOW); i2c_writel(i2c_dev, status, I2C_INT_STATUS); /* An error occurred, mask dvc interrupt */ if (i2c_dev->is_dvc) dvc_i2c_mask_irq(i2c_dev, DVC_CTRL_REG3_I2C_DONE_INTR_EN); if (i2c_dev->is_dvc) dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS); complete(&i2c_dev->msg_complete); return IRQ_HANDLED; } static int tegra_i2c_xfer_msg(struct tegra_i2c_bus *i2c_bus, struct i2c_msg *msg, enum msg_end_type end_state) { struct tegra_i2c_dev *i2c_dev = i2c_bus->dev; u32 int_mask; int ret; int arb_stat; if (msg->len == 0) return -EINVAL; tegra_i2c_flush_fifos(i2c_dev); /* Toggle the direction flag if rev dir is selected */ if (msg->flags & I2C_M_REV_DIR_ADDR) msg->flags ^= I2C_M_RD; i2c_dev->msg_buf = msg->buf; i2c_dev->msg_buf_remaining = msg->len; i2c_dev->msg_err = I2C_ERR_NONE; i2c_dev->msg_read = (msg->flags & I2C_M_RD); INIT_COMPLETION(i2c_dev->msg_complete); i2c_dev->msg_add = msg->addr; i2c_dev->packet_header = (0 << PACKET_HEADER0_HEADER_SIZE_SHIFT) | PACKET_HEADER0_PROTOCOL_I2C | (i2c_dev->cont_id << PACKET_HEADER0_CONT_ID_SHIFT) | (1 << PACKET_HEADER0_PACKET_ID_SHIFT); i2c_writel(i2c_dev, i2c_dev->packet_header, I2C_TX_FIFO); i2c_dev->payload_size = msg->len - 1; i2c_writel(i2c_dev, i2c_dev->payload_size, I2C_TX_FIFO); i2c_dev->io_header = I2C_HEADER_IE_ENABLE; if (end_state == MSG_END_CONTINUE) i2c_dev->io_header |= I2C_HEADER_CONTINUE_XFER; else if (end_state == MSG_END_REPEAT_START) i2c_dev->io_header |= I2C_HEADER_REPEAT_START; if (msg->flags & I2C_M_TEN) { i2c_dev->io_header |= msg->addr; i2c_dev->io_header |= I2C_HEADER_10BIT_ADDR; } else { i2c_dev->io_header |= (msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT); } if (msg->flags & I2C_M_IGNORE_NAK) i2c_dev->io_header |= I2C_HEADER_CONT_ON_NAK; if (msg->flags & I2C_M_RD) i2c_dev->io_header |= I2C_HEADER_READ; if (i2c_dev->is_high_speed_enable) { i2c_dev->io_header |= I2C_HEADER_HIGHSPEED_MODE; i2c_dev->io_header |= ((i2c_dev->hs_master_code & 0x7) << I2C_HEADER_MASTER_ADDR_SHIFT); } i2c_writel(i2c_dev, i2c_dev->io_header, I2C_TX_FIFO); if (!(msg->flags & I2C_M_RD)) tegra_i2c_fill_tx_fifo(i2c_dev); if (i2c_dev->is_dvc) dvc_i2c_unmask_irq(i2c_dev, DVC_CTRL_REG3_I2C_DONE_INTR_EN); int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST | I2C_INT_TX_FIFO_OVERFLOW; if (msg->flags & I2C_M_RD) int_mask |= I2C_INT_RX_FIFO_DATA_REQ; else if (i2c_dev->msg_buf_remaining) int_mask |= I2C_INT_TX_FIFO_DATA_REQ; tegra_i2c_unmask_irq(i2c_dev, int_mask); dev_dbg(i2c_dev->dev, "unmasked irq: %02x\n", i2c_readl(i2c_dev, I2C_INT_MASK)); ret = wait_for_completion_timeout(&i2c_dev->msg_complete, TEGRA_I2C_TIMEOUT); tegra_i2c_mask_irq(i2c_dev, int_mask); if (i2c_dev->is_dvc) dvc_i2c_mask_irq(i2c_dev, DVC_CTRL_REG3_I2C_DONE_INTR_EN); /* Restore the message flag */ if (msg->flags & I2C_M_REV_DIR_ADDR) msg->flags ^= I2C_M_RD; if (WARN_ON(ret == 0)) { dev_err(i2c_dev->dev, "i2c transfer timed out, addr 0x%04x, data 0x%02x\n", msg->addr, msg->buf[0]); tegra_i2c_init(i2c_dev); return -ETIMEDOUT; } dev_dbg(i2c_dev->dev, "transfer complete: %d %d %d\n", ret, completion_done(&i2c_dev->msg_complete), i2c_dev->msg_err); if (likely(i2c_dev->msg_err == I2C_ERR_NONE)) return 0; /*
Arbitration lost occurred, start recovery */ if (i2c_dev->msg_err == I2C_ERR_ARBITRATION_LOST) { if (i2c_dev->arb_recovery) { arb_stat = i2c_dev->arb_recovery(i2c_bus->scl_gpio, i2c_bus->sda_gpio); if (!arb_stat) return -EAGAIN; } } /* * A NACK interrupt is generated before the I2C controller generates the * STOP condition on the bus. So wait for 2 clock periods before resetting * the controller so that the STOP condition has been delivered properly. */ if (i2c_dev->msg_err == I2C_ERR_NO_ACK) udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->last_bus_clk_rate)); tegra_i2c_init(i2c_dev); if (i2c_dev->msg_err == I2C_ERR_NO_ACK) { if (msg->flags & I2C_M_IGNORE_NAK) return 0; return -EREMOTEIO; } if (i2c_dev->msg_err & I2C_ERR_UNEXPECTED_STATUS) return -EAGAIN; return -EIO; } bool tegra_i2c_is_ready(struct i2c_adapter *adap) { struct tegra_i2c_bus *i2c_bus = i2c_get_adapdata(adap); struct tegra_i2c_dev *i2c_dev = i2c_bus->dev; return !(i2c_dev->is_suspended); } EXPORT_SYMBOL(tegra_i2c_is_ready); static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct tegra_i2c_bus *i2c_bus = i2c_get_adapdata(adap); struct tegra_i2c_dev *i2c_dev = i2c_bus->dev; int i; int ret = 0; rt_mutex_lock(&i2c_dev->dev_lock); if (i2c_dev->is_suspended) { rt_mutex_unlock(&i2c_dev->dev_lock); return -EBUSY; } if (i2c_dev->last_mux != i2c_bus->mux) { tegra_pinmux_set_safe_pinmux_table(i2c_dev->last_mux, i2c_dev->last_mux_len); tegra_pinmux_config_pinmux_table(i2c_bus->mux, i2c_bus->mux_len); i2c_dev->last_mux = i2c_bus->mux; i2c_dev->last_mux_len = i2c_bus->mux_len; } if (i2c_dev->last_bus_clk_rate != i2c_bus->bus_clk_rate) { clk_set_rate(i2c_dev->div_clk, i2c_bus->bus_clk_rate * 8); i2c_dev->last_bus_clk_rate = i2c_bus->bus_clk_rate; } i2c_dev->msgs = msgs; i2c_dev->msgs_num = num; tegra_i2c_clock_enable(i2c_dev); for (i = 0; i < num; i++) { enum msg_end_type end_type = MSG_END_STOP; if (i < (num - 1)) { if (msgs[i + 1].flags & I2C_M_NOSTART) end_type = MSG_END_CONTINUE; else end_type = MSG_END_REPEAT_START; } ret = tegra_i2c_xfer_msg(i2c_bus, &msgs[i], end_type); if (ret) break; } tegra_i2c_clock_disable(i2c_dev); rt_mutex_unlock(&i2c_dev->dev_lock); i2c_dev->msgs = NULL; i2c_dev->msgs_num = 0; return ret ?: i; } static u32 tegra_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING; } static const struct i2c_algorithm tegra_i2c_algo = { .master_xfer = tegra_i2c_xfer, .functionality = tegra_i2c_func, }; static int __devinit tegra_i2c_probe(struct platform_device *pdev) { struct tegra_i2c_dev *i2c_dev; struct tegra_i2c_platform_data *plat = pdev->dev.platform_data; struct resource *res; struct clk *div_clk; struct clk *fast_clk = NULL; const unsigned int *prop; void *base; int irq; int nbus; int i = 0; int ret = 0; if (!plat) { dev_err(&pdev->dev, "no platform data?\n"); return -ENODEV; } if (plat->bus_count <= 0 || plat->adapter_nr < 0) { dev_err(&pdev->dev, "invalid platform data?\n"); return -ENODEV; } WARN_ON(plat->bus_count > TEGRA_I2C_MAX_BUS); nbus = min(TEGRA_I2C_MAX_BUS, plat->bus_count); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "no mem resource\n"); return -EINVAL; } base = devm_request_and_ioremap(&pdev->dev, res); if (!base) { dev_err(&pdev->dev, "Cannot request/ioremap I2C registers\n"); return -EADDRNOTAVAIL; } res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { dev_err(&pdev->dev, "no irq resource\n"); return -EINVAL; } irq = res->start; div_clk = 
devm_clk_get(&pdev->dev, "i2c-div"); if (IS_ERR(div_clk)) { dev_err(&pdev->dev, "missing controller clock"); return PTR_ERR(div_clk); } fast_clk = devm_clk_get(&pdev->dev, "i2c-fast"); if (IS_ERR(fast_clk)) { dev_err(&pdev->dev, "missing controller fast clock"); return PTR_ERR(fast_clk); } i2c_dev = devm_kzalloc(&pdev->dev, sizeof(struct tegra_i2c_dev) + (nbus-1) * sizeof(struct tegra_i2c_bus), GFP_KERNEL); if (!i2c_dev) { dev_err(&pdev->dev, "Could not allocate struct tegra_i2c_dev"); return -ENOMEM; } i2c_dev->base = base; i2c_dev->div_clk = div_clk; i2c_dev->fast_clk = fast_clk; i2c_dev->irq = irq; i2c_dev->cont_id = pdev->id; i2c_dev->dev = &pdev->dev; i2c_dev->is_clkon_always = plat->is_clkon_always; i2c_dev->last_bus_clk_rate = 100000; /* default clock rate */ if (plat) { i2c_dev->last_bus_clk_rate = plat->bus_clk_rate[0]; } else if (i2c_dev->dev->of_node) { /* if there is a device tree node ... */ /* TODO: DAN: this doesn't work for DT */ prop = of_get_property(i2c_dev->dev->of_node, "clock-frequency", NULL); if (prop) i2c_dev->last_bus_clk_rate = be32_to_cpup(prop); } i2c_dev->is_high_speed_enable = plat->is_high_speed_enable; i2c_dev->last_bus_clk_rate = plat->bus_clk_rate[0] ?: 100000; i2c_dev->msgs = NULL; i2c_dev->msgs_num = 0; rt_mutex_init(&i2c_dev->dev_lock); spin_lock_init(&i2c_dev->fifo_lock); i2c_dev->slave_addr = plat->slave_addr; i2c_dev->hs_master_code = plat->hs_master_code; i2c_dev->is_dvc = plat->is_dvc; i2c_dev->arb_recovery = plat->arb_recovery; init_completion(&i2c_dev->msg_complete); platform_set_drvdata(pdev, i2c_dev); if (i2c_dev->is_clkon_always) tegra_i2c_clock_enable(i2c_dev); ret = tegra_i2c_init(i2c_dev); if (ret) { dev_err(&pdev->dev, "Failed to initialize i2c controller"); return ret; } ret = devm_request_irq(&pdev->dev, i2c_dev->irq, tegra_i2c_isr, IRQF_NO_SUSPEND, pdev->name, i2c_dev); if (ret) { dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq); return ret; } for (i = 0; i < nbus; i++) { struct tegra_i2c_bus *i2c_bus = &i2c_dev->busses[i]; i2c_bus->dev = i2c_dev; i2c_bus->mux = plat->bus_mux[i]; i2c_bus->mux_len = plat->bus_mux_len[i]; i2c_bus->bus_clk_rate = plat->bus_clk_rate[i] ?: 100000; i2c_bus->scl_gpio = plat->scl_gpio[i]; i2c_bus->sda_gpio = plat->sda_gpio[i]; i2c_bus->adapter.dev.of_node = pdev->dev.of_node; i2c_bus->adapter.algo = &tegra_i2c_algo; i2c_set_adapdata(&i2c_bus->adapter, i2c_bus); i2c_bus->adapter.owner = THIS_MODULE; i2c_bus->adapter.class = I2C_CLASS_HWMON; strlcpy(i2c_bus->adapter.name, "Tegra I2C adapter", sizeof(i2c_bus->adapter.name)); i2c_bus->adapter.dev.parent = &pdev->dev; i2c_bus->adapter.nr = plat->adapter_nr + i; if (plat->retries) i2c_bus->adapter.retries = plat->retries; else i2c_bus->adapter.retries = TEGRA_I2C_RETRIES; if (plat->timeout) i2c_bus->adapter.timeout = plat->timeout; ret = i2c_add_numbered_adapter(&i2c_bus->adapter); if (ret) { dev_err(&pdev->dev, "Failed to add I2C adapter\n"); goto err_del_bus; } of_i2c_register_devices(&i2c_bus->adapter); i2c_dev->bus_count++; } return 0; err_del_bus: while (i2c_dev->bus_count--) i2c_del_adapter(&i2c_dev->busses[i2c_dev->bus_count].adapter); return ret; } static int __devexit tegra_i2c_remove(struct platform_device *pdev) { struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev); while (i2c_dev->bus_count--) i2c_del_adapter(&i2c_dev->busses[i2c_dev->bus_count].adapter); if (i2c_dev->is_clkon_always) tegra_i2c_clock_disable(i2c_dev); return 0; } #ifdef CONFIG_PM static int tegra_i2c_suspend_noirq(struct device *dev) { struct platform_device *pdev = 
to_platform_device(dev); struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev); rt_mutex_lock(&i2c_dev->dev_lock); i2c_dev->is_suspended = true; if (i2c_dev->is_clkon_always) tegra_i2c_clock_disable(i2c_dev); rt_mutex_unlock(&i2c_dev->dev_lock); return 0; } static int tegra_i2c_resume_noirq(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev); int ret; rt_mutex_lock(&i2c_dev->dev_lock); if (i2c_dev->is_clkon_always) tegra_i2c_clock_enable(i2c_dev); ret = tegra_i2c_init(i2c_dev); if (ret) { rt_mutex_unlock(&i2c_dev->dev_lock); return ret; } i2c_dev->is_suspended = false; rt_mutex_unlock(&i2c_dev->dev_lock); return 0; } static const struct dev_pm_ops tegra_i2c_pm = { .suspend_noirq = tegra_i2c_suspend_noirq, .resume_noirq = tegra_i2c_resume_noirq, }; #define TEGRA_I2C_PM (&tegra_i2c_pm) #else #define TEGRA_I2C_PM NULL #endif #if defined(CONFIG_OF) /* Match table for of_platform binding */ static const struct of_device_id tegra_i2c_of_match[] __devinitconst = { { .compatible = "nvidia,tegra20-i2c", }, {}, }; MODULE_DEVICE_TABLE(of, tegra_i2c_of_match); #endif static struct platform_driver tegra_i2c_driver = { .probe = tegra_i2c_probe, .remove = __devexit_p(tegra_i2c_remove), .driver = { .name = "tegra-i2c", .owner = THIS_MODULE, .of_match_table = of_match_ptr(tegra_i2c_of_match), .pm = TEGRA_I2C_PM, }, }; static int __init tegra_i2c_init_driver(void) { return platform_driver_register(&tegra_i2c_driver); } static void __exit tegra_i2c_exit_driver(void) { platform_driver_unregister(&tegra_i2c_driver); } subsys_initcall(tegra_i2c_init_driver); module_exit(tegra_i2c_exit_driver); MODULE_DESCRIPTION("nVidia Tegra2 I2C Bus Controller driver"); MODULE_AUTHOR("Colin Cross"); MODULE_LICENSE("GPL v2");
aureljared/pulsar
drivers/i2c/busses/i2c-tegra.c
C
gpl-2.0
31,045
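For context on how the packet-based transfer path above is exercised in practice, here is a minimal client-side sketch. The helper name, slave address (0x50) and register layout are illustrative only and are not part of this driver. Passing both messages to i2c_transfer() in a single call is what makes tegra_i2c_xfer() join them with MSG_END_REPEAT_START instead of issuing a STOP between them:

#include <linux/i2c.h>

/*
 * Hypothetical client sketch: read one byte from register `reg` of a
 * device behind an adapter driven by the controller above. Two messages
 * in one i2c_transfer() call are linked by a repeated START.
 */
static int example_read_reg(struct i2c_client *client, u8 reg, u8 *val)
{
	struct i2c_msg msgs[] = {
		{
			.addr  = client->addr,	/* e.g. 0x50, illustrative */
			.flags = 0,		/* write: select register */
			.len   = 1,
			.buf   = &reg,
		},
		{
			.addr  = client->addr,
			.flags = I2C_M_RD,	/* read back one byte */
			.len   = 1,
			.buf   = val,
		},
	};
	int ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));

	/* i2c_transfer() returns the number of messages transferred */
	return ret == ARRAY_SIZE(msgs) ? 0 : (ret < 0 ? ret : -EIO);
}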
/* * Copyright (C) 2008 The Android Open Source Project * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include <pthread.h> struct user_desc { unsigned int entry_number; unsigned long base_addr; unsigned int limit; unsigned int seg_32bit:1; unsigned int contents:2; unsigned int read_exec_only:1; unsigned int limit_in_pages:1; unsigned int seg_not_present:1; unsigned int useable:1; unsigned int empty:25; }; extern int __set_thread_area(struct user_desc *u_info); /* the following can't be const, since the first call will * update the 'entry_number' field */ static struct user_desc _tls_desc = { -1, 0, 0x1000, 1, 0, 0, 1, 0, 1, 0 }; static pthread_mutex_t _tls_desc_lock = PTHREAD_MUTEX_INITIALIZER; struct _thread_area_head { void *self; }; /* we implement thread local storage through the gs: segment descriptor * we create a segment descriptor for the tls */ int __set_tls(void *ptr) { int rc, segment; pthread_mutex_lock(&_tls_desc_lock); _tls_desc.base_addr = (unsigned long)ptr; /* We also need to write the location of the tls to ptr[0] */ ((struct _thread_area_head *)ptr)->self = ptr; rc = __set_thread_area( &_tls_desc ); if (rc != 0) { /* could not set thread local area */ pthread_mutex_unlock(&_tls_desc_lock); return -1; } /* this weird computation comes from GLibc */ segment = _tls_desc.entry_number*8 + 3; asm __volatile__ ( " movw %w0, %%gs" :: "q"(segment) ); pthread_mutex_unlock(&_tls_desc_lock); return 0; }
infraredbg/Lenovo_A820_kernel_kk
bionic/libc/arch-x86/bionic/__set_tls.c
C
gpl-2.0
2,960
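The read side of the self-pointer convention established by __set_tls() is worth spelling out: because slot 0 of the TLS area holds the area's own address and %gs now selects the matching segment, the thread pointer can be recovered with a single %gs-relative load. A minimal sketch (x86-32 only; the function name is illustrative and not part of bionic):

#include <stddef.h>

/*
 * Illustrative counterpart to __set_tls(): fetch the TLS base back.
 * Works because __set_tls() wrote the area's own address into slot 0
 * (ptr[0] == ptr) before loading the segment selector into %gs.
 */
static inline void *example_get_tls(void)
{
    void *tls;
    /* read the self-pointer stored at %gs:0 */
    asm volatile("movl %%gs:0, %0" : "=r"(tls));
    return tls;
}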
/* ** I/O library. ** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h ** ** Major portions taken verbatim or adapted from the Lua interpreter. ** Copyright (C) 1994-2011 Lua.org, PUC-Rio. See Copyright Notice in lua.h */ #include <errno.h> #include <stdio.h> #define lib_io_c #define LUA_LIB #include "lua.h" #include "lauxlib.h" #include "lualib.h" #include "lj_obj.h" #include "lj_gc.h" #include "lj_err.h" #include "lj_str.h" #include "lj_state.h" #include "lj_ff.h" #include "lj_lib.h" /* Userdata payload for I/O file. */ typedef struct IOFileUD { FILE *fp; /* File handle. */ uint32_t type; /* File type. */ } IOFileUD; #define IOFILE_TYPE_FILE 0 /* Regular file. */ #define IOFILE_TYPE_PIPE 1 /* Pipe. */ #define IOFILE_TYPE_STDF 2 /* Standard file handle. */ #define IOFILE_TYPE_MASK 3 #define IOFILE_FLAG_CLOSE 4 /* Close after io.lines() iterator. */ #define IOSTDF_UD(L, id) (&gcref(G(L)->gcroot[(id)])->ud) #define IOSTDF_IOF(L, id) ((IOFileUD *)uddata(IOSTDF_UD(L, (id)))) /* -- Open/close helpers -------------------------------------------------- */ static IOFileUD *io_tofilep(lua_State *L) { if (!(L->base < L->top && tvisudata(L->base) && udataV(L->base)->udtype == UDTYPE_IO_FILE)) lj_err_argtype(L, 1, "FILE*"); return (IOFileUD *)uddata(udataV(L->base)); } static IOFileUD *io_tofile(lua_State *L) { IOFileUD *iof = io_tofilep(L); if (iof->fp == NULL) lj_err_caller(L, LJ_ERR_IOCLFL); return iof; } static FILE *io_stdfile(lua_State *L, ptrdiff_t id) { IOFileUD *iof = IOSTDF_IOF(L, id); if (iof->fp == NULL) lj_err_caller(L, LJ_ERR_IOSTDCL); return iof->fp; } static IOFileUD *io_file_new(lua_State *L) { IOFileUD *iof = (IOFileUD *)lua_newuserdata(L, sizeof(IOFileUD)); GCudata *ud = udataV(L->top-1); ud->udtype = UDTYPE_IO_FILE; /* NOBARRIER: The GCudata is new (marked white). 
*/ setgcrefr(ud->metatable, curr_func(L)->c.env); iof->fp = NULL; iof->type = IOFILE_TYPE_FILE; return iof; } static IOFileUD *io_file_open(lua_State *L, const char *mode) { const char *fname = strdata(lj_lib_checkstr(L, 1)); IOFileUD *iof = io_file_new(L); iof->fp = fopen(fname, mode); if (iof->fp == NULL) luaL_argerror(L, 1, lj_str_pushf(L, "%s: %s", fname, strerror(errno))); return iof; } static int io_file_close(lua_State *L, IOFileUD *iof) { int ok; if ((iof->type & IOFILE_TYPE_MASK) == IOFILE_TYPE_FILE) { ok = (fclose(iof->fp) == 0); } else if ((iof->type & IOFILE_TYPE_MASK) == IOFILE_TYPE_PIPE) { int stat = -1; #if LJ_TARGET_POSIX stat = pclose(iof->fp); #elif LJ_TARGET_WINDOWS stat = _pclose(iof->fp); #else lua_assert(0); return 0; #endif #if LJ_52 iof->fp = NULL; return luaL_execresult(L, stat); #else ok = (stat != -1); #endif } else { lua_assert((iof->type & IOFILE_TYPE_MASK) == IOFILE_TYPE_STDF); setnilV(L->top++); lua_pushliteral(L, "cannot close standard file"); return 2; } iof->fp = NULL; return luaL_fileresult(L, ok, NULL); } /* -- Read/write helpers -------------------------------------------------- */ static int io_file_readnum(lua_State *L, FILE *fp) { lua_Number d; if (fscanf(fp, LUA_NUMBER_SCAN, &d) == 1) { if (LJ_DUALNUM) { int32_t i = lj_num2int(d); if (d == (lua_Number)i && !tvismzero((cTValue *)&d)) { setintV(L->top++, i); return 1; } } setnumV(L->top++, d); return 1; } else { setnilV(L->top++); return 0; } } static int io_file_readline(lua_State *L, FILE *fp, MSize chop) { MSize m = LUAL_BUFFERSIZE, n = 0, ok = 0; char *buf; for (;;) { buf = lj_str_needbuf(L, &G(L)->tmpbuf, m); if (fgets(buf+n, m-n, fp) == NULL) break; n += (MSize)strlen(buf+n); ok |= n; if (n && buf[n-1] == '\n') { n -= chop; break; } if (n >= m - 64) m += m; } setstrV(L, L->top++, lj_str_new(L, buf, (size_t)n)); lj_gc_check(L); return (int)ok; } static void io_file_readall(lua_State *L, FILE *fp) { MSize m, n; for (m = LUAL_BUFFERSIZE, n = 0; ; m += m) { char *buf = lj_str_needbuf(L, &G(L)->tmpbuf, m); n += (MSize)fread(buf+n, 1, m-n, fp); if (n != m) { setstrV(L, L->top++, lj_str_new(L, buf, (size_t)n)); lj_gc_check(L); return; } } } static int io_file_readlen(lua_State *L, FILE *fp, MSize m) { if (m) { char *buf = lj_str_needbuf(L, &G(L)->tmpbuf, m); MSize n = (MSize)fread(buf, 1, m, fp); setstrV(L, L->top++, lj_str_new(L, buf, (size_t)n)); lj_gc_check(L); return (n > 0 || m == 0); } else { int c = getc(fp); ungetc(c, fp); setstrV(L, L->top++, &G(L)->strempty); return (c != EOF); } } static int io_file_read(lua_State *L, FILE *fp, int start) { int ok, n, nargs = (int)(L->top - L->base) - start; clearerr(fp); if (nargs == 0) { ok = io_file_readline(L, fp, 1); n = start+1; /* Return 1 result. */ } else { /* The results plus the buffers go on top of the args. */ luaL_checkstack(L, nargs+LUA_MINSTACK, "too many arguments"); ok = 1; for (n = start; nargs-- && ok; n++) { if (tvisstr(L->base+n)) { const char *p = strVdata(L->base+n); if (p[0] != '*') lj_err_arg(L, n+1, LJ_ERR_INVOPT); if (p[1] == 'n') ok = io_file_readnum(L, fp); else if ((p[1] & ~0x20) == 'L') ok = io_file_readline(L, fp, (p[1] == 'l')); else if (p[1] == 'a') io_file_readall(L, fp); else lj_err_arg(L, n+1, LJ_ERR_INVFMT); } else if (tvisnumber(L->base+n)) { ok = io_file_readlen(L, fp, (MSize)lj_lib_checkint(L, n+1)); } else { lj_err_arg(L, n+1, LJ_ERR_INVOPT); } } } if (ferror(fp)) return luaL_fileresult(L, 0, NULL); if (!ok) setnilV(L->top-1); /* Replace last result with nil. 
*/ return n - start; } static int io_file_write(lua_State *L, FILE *fp, int start) { cTValue *tv; int status = 1; for (tv = L->base+start; tv < L->top; tv++) { if (tvisstr(tv)) { MSize len = strV(tv)->len; status = status && (fwrite(strVdata(tv), 1, len, fp) == len); } else if (tvisint(tv)) { char buf[LJ_STR_INTBUF]; char *p = lj_str_bufint(buf, intV(tv)); size_t len = (size_t)(buf+LJ_STR_INTBUF-p); status = status && (fwrite(p, 1, len, fp) == len); } else if (tvisnum(tv)) { status = status && (fprintf(fp, LUA_NUMBER_FMT, numV(tv)) > 0); } else { lj_err_argt(L, (int)(tv - L->base) + 1, LUA_TSTRING); } } if (LJ_52 && status) { L->top = L->base+1; if (start == 0) setudataV(L, L->base, IOSTDF_UD(L, GCROOT_IO_OUTPUT)); return 1; } return luaL_fileresult(L, status, NULL); } static int io_file_iter(lua_State *L) { GCfunc *fn = curr_func(L); IOFileUD *iof = uddata(udataV(&fn->c.upvalue[0])); int n = fn->c.nupvalues - 1; if (iof->fp == NULL) lj_err_caller(L, LJ_ERR_IOCLFL); L->top = L->base; if (n) { /* Copy upvalues with options to stack. */ if (n > LUAI_MAXCSTACK) lj_err_caller(L, LJ_ERR_STKOV); lj_state_checkstack(L, (MSize)n); memcpy(L->top, &fn->c.upvalue[1], n*sizeof(TValue)); L->top += n; } n = io_file_read(L, iof->fp, 0); if (ferror(iof->fp)) lj_err_callermsg(L, strVdata(L->top-2)); if (tvisnil(L->base) && (iof->type & IOFILE_FLAG_CLOSE)) { io_file_close(L, iof); /* Return values are ignored. */ return 0; } return n; } static int io_file_lines(lua_State *L) { int n = (int)(L->top - L->base); if (n > LJ_MAX_UPVAL) lj_err_caller(L, LJ_ERR_UNPACK); lua_pushcclosure(L, io_file_iter, n); return 1; } /* -- I/O file methods ---------------------------------------------------- */ #define LJLIB_MODULE_io_method LJLIB_CF(io_method_close) { IOFileUD *iof = L->base < L->top ? 
io_tofile(L) : IOSTDF_IOF(L, GCROOT_IO_OUTPUT); return io_file_close(L, iof); } LJLIB_CF(io_method_read) { return io_file_read(L, io_tofile(L)->fp, 1); } LJLIB_CF(io_method_write) LJLIB_REC(io_write 0) { return io_file_write(L, io_tofile(L)->fp, 1); } LJLIB_CF(io_method_flush) LJLIB_REC(io_flush 0) { return luaL_fileresult(L, fflush(io_tofile(L)->fp) == 0, NULL); } LJLIB_CF(io_method_seek) { FILE *fp = io_tofile(L)->fp; int opt = lj_lib_checkopt(L, 2, 1, "\3set\3cur\3end"); int64_t ofs = 0; cTValue *o; int res; if (opt == 0) opt = SEEK_SET; else if (opt == 1) opt = SEEK_CUR; else if (opt == 2) opt = SEEK_END; o = L->base+2; if (o < L->top) { if (tvisint(o)) ofs = (int64_t)intV(o); else if (tvisnum(o)) ofs = (int64_t)numV(o); else if (!tvisnil(o)) lj_err_argt(L, 3, LUA_TNUMBER); } #if LJ_TARGET_POSIX res = fseeko(fp, ofs, opt); #elif _MSC_VER >= 1400 res = _fseeki64(fp, ofs, opt); #elif defined(__MINGW32__) res = fseeko64(fp, ofs, opt); #else res = fseek(fp, (long)ofs, opt); #endif if (res) return luaL_fileresult(L, 0, NULL); #if LJ_TARGET_POSIX ofs = ftello(fp); #elif _MSC_VER >= 1400 ofs = _ftelli64(fp); #elif defined(__MINGW32__) ofs = ftello64(fp); #else ofs = (int64_t)ftell(fp); #endif setint64V(L->top-1, ofs); return 1; } LJLIB_CF(io_method_setvbuf) { FILE *fp = io_tofile(L)->fp; int opt = lj_lib_checkopt(L, 2, -1, "\4full\4line\2no"); size_t sz = (size_t)lj_lib_optint(L, 3, LUAL_BUFFERSIZE); if (opt == 0) opt = _IOFBF; else if (opt == 1) opt = _IOLBF; else if (opt == 2) opt = _IONBF; return luaL_fileresult(L, setvbuf(fp, NULL, opt, sz) == 0, NULL); } LJLIB_CF(io_method_lines) { io_tofile(L); return io_file_lines(L); } LJLIB_CF(io_method___gc) { IOFileUD *iof = io_tofilep(L); if (iof->fp != NULL && (iof->type & IOFILE_TYPE_MASK) != IOFILE_TYPE_STDF) io_file_close(L, iof); return 0; } LJLIB_CF(io_method___tostring) { IOFileUD *iof = io_tofilep(L); if (iof->fp != NULL) lua_pushfstring(L, "file (%p)", iof->fp); else lua_pushliteral(L, "file (closed)"); return 1; } LJLIB_PUSH(top-1) LJLIB_SET(__index) #include "lj_libdef.h" /* -- I/O library functions ----------------------------------------------- */ #define LJLIB_MODULE_io LJLIB_PUSH(top-2) LJLIB_SET(!) /* Set environment. */ LJLIB_CF(io_open) { const char *fname = strdata(lj_lib_checkstr(L, 1)); GCstr *s = lj_lib_optstr(L, 2); const char *mode = s ? strdata(s) : "r"; IOFileUD *iof = io_file_new(L); iof->fp = fopen(fname, mode); return iof->fp != NULL ? 1 : luaL_fileresult(L, 0, fname); } LJLIB_CF(io_popen) { #if LJ_TARGET_POSIX || LJ_TARGET_WINDOWS const char *fname = strdata(lj_lib_checkstr(L, 1)); GCstr *s = lj_lib_optstr(L, 2); const char *mode = s ? strdata(s) : "r"; IOFileUD *iof = io_file_new(L); iof->type = IOFILE_TYPE_PIPE; #if LJ_TARGET_POSIX fflush(NULL); iof->fp = popen(fname, mode); #else iof->fp = _popen(fname, mode); #endif return iof->fp != NULL ? 1 : luaL_fileresult(L, 0, fname); #else return luaL_error(L, LUA_QL("popen") " not supported"); #endif } LJLIB_CF(io_tmpfile) { IOFileUD *iof = io_file_new(L); #if LJ_TARGET_PS3 || LJ_TARGET_PS4 || LJ_TARGET_PSVITA iof->fp = NULL; errno = ENOSYS; #else iof->fp = tmpfile(); #endif return iof->fp != NULL ? 
1 : luaL_fileresult(L, 0, NULL); } LJLIB_CF(io_close) { return lj_cf_io_method_close(L); } LJLIB_CF(io_read) { return io_file_read(L, io_stdfile(L, GCROOT_IO_INPUT), 0); } LJLIB_CF(io_write) LJLIB_REC(io_write GCROOT_IO_OUTPUT) { return io_file_write(L, io_stdfile(L, GCROOT_IO_OUTPUT), 0); } LJLIB_CF(io_flush) LJLIB_REC(io_flush GCROOT_IO_OUTPUT) { return luaL_fileresult(L, fflush(io_stdfile(L, GCROOT_IO_OUTPUT)) == 0, NULL); } static int io_std_getset(lua_State *L, ptrdiff_t id, const char *mode) { if (L->base < L->top && !tvisnil(L->base)) { if (tvisudata(L->base)) { io_tofile(L); L->top = L->base+1; } else { io_file_open(L, mode); } /* NOBARRIER: The standard I/O handles are GC roots. */ setgcref(G(L)->gcroot[id], gcV(L->top-1)); } else { setudataV(L, L->top++, IOSTDF_UD(L, id)); } return 1; } LJLIB_CF(io_input) { return io_std_getset(L, GCROOT_IO_INPUT, "r"); } LJLIB_CF(io_output) { return io_std_getset(L, GCROOT_IO_OUTPUT, "w"); } LJLIB_CF(io_lines) { if (L->base == L->top) setnilV(L->top++); if (!tvisnil(L->base)) { /* io.lines(fname) */ IOFileUD *iof = io_file_open(L, "r"); iof->type = IOFILE_TYPE_FILE|IOFILE_FLAG_CLOSE; L->top--; setudataV(L, L->base, udataV(L->top)); } else { /* io.lines() iterates over stdin. */ setudataV(L, L->base, IOSTDF_UD(L, GCROOT_IO_INPUT)); } return io_file_lines(L); } LJLIB_CF(io_type) { cTValue *o = lj_lib_checkany(L, 1); if (!(tvisudata(o) && udataV(o)->udtype == UDTYPE_IO_FILE)) setnilV(L->top++); else if (((IOFileUD *)uddata(udataV(o)))->fp != NULL) lua_pushliteral(L, "file"); else lua_pushliteral(L, "closed file"); return 1; } #include "lj_libdef.h" /* ------------------------------------------------------------------------ */ static GCobj *io_std_new(lua_State *L, FILE *fp, const char *name) { IOFileUD *iof = (IOFileUD *)lua_newuserdata(L, sizeof(IOFileUD)); GCudata *ud = udataV(L->top-1); ud->udtype = UDTYPE_IO_FILE; /* NOBARRIER: The GCudata is new (marked white). */ setgcref(ud->metatable, gcV(L->top-3)); iof->fp = fp; iof->type = IOFILE_TYPE_STDF; lua_setfield(L, -2, name); return obj2gco(ud); } LUALIB_API int luaopen_io(lua_State *L) { LJ_LIB_REG(L, NULL, io_method); copyTV(L, L->top, L->top-1); L->top++; lua_setfield(L, LUA_REGISTRYINDEX, LUA_FILEHANDLE); LJ_LIB_REG(L, LUA_IOLIBNAME, io); setgcref(G(L)->gcroot[GCROOT_IO_INPUT], io_std_new(L, stdin, "stdin")); setgcref(G(L)->gcroot[GCROOT_IO_OUTPUT], io_std_new(L, stdout, "stdout")); io_std_new(L, stderr, "stderr"); return 1; }
team-parasol/parasol
src/fluid/luajit-2.0.5/src/lib_io.c
C
lgpl-2.1
13,724
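The file-userdata lifecycle implemented above (io_file_new() through io_file_read(), io_file_close() and the __gc finalizer) is driven entirely through the standard Lua C API. A minimal host-program sketch, assuming a Lua/LuaJIT build is linked in and that the file name 'example.txt' is a placeholder:

#include <stdio.h>
#include <lua.h>
#include <lauxlib.h>
#include <lualib.h>

/* Minimal host: run a snippet that opens, reads and closes a file,
 * exercising io_file_new(), io_file_read() and io_file_close(). */
int main(void)
{
	lua_State *L = luaL_newstate();
	if (L == NULL)
		return 1;
	luaL_openlibs(L);  /* registers the io library via luaopen_io() */
	if (luaL_dostring(L,
	    "local f = assert(io.open('example.txt', 'r'))\n"
	    "print(f:read('*a'))\n"
	    "f:close()\n") != 0) {
		fprintf(stderr, "%s\n", lua_tostring(L, -1));
		lua_close(L);
		return 1;
	}
	lua_close(L);
	return 0;
}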
/** * Copyright (C) 2014 Martin Landsmann <Martin.Landsmann@HAW-Hamburg.de> * * This file is subject to the terms and conditions of the GNU Lesser * General Public License v2.1. See the file LICENSE in the top level * directory for more details. */ /** * @ingroup net_fib * @{ * * @file * @brief Functions to manage FIB entries * * @author Martin Landsmann <martin.landsmann@haw-hamburg.de> * @author Oliver Hahm <oliver.hahm@inria.fr> * * @} */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <inttypes.h> #include <errno.h> #include "thread.h" #include "mutex.h" #include "msg.h" #include "xtimer.h" #include "timex.h" #include "utlist.h" #define ENABLE_DEBUG (0) #include "debug.h" #include "net/fib.h" #include "net/fib/table.h" #ifdef MODULE_IPV6_ADDR #include "net/ipv6/addr.h" static char addr_str[IPV6_ADDR_MAX_STR_LEN]; #endif #ifdef MODULE_IPV6_ADDR #define FIB_ADDR_PRINT_LEN 39 #else #define FIB_ADDR_PRINT_LEN 32 #if FIB_ADDR_PRINT_LEN != (UNIVERSAL_ADDRESS_SIZE * 2) #error "FIB_ADDR_PRINT_LEN MUST BE (UNIVERSAL_ADDRESS_SIZE * 2)" #endif #endif #define FIB_ADDR_PRINT_LENS1(X) #X #define FIB_ADDR_PRINT_LENS2(X) FIB_ADDR_PRINT_LENS1(X) #define FIB_ADDR_PRINT_LENS FIB_ADDR_PRINT_LENS2(FIB_ADDR_PRINT_LEN) /** * @brief convert an offset given in ms to an absolute point in time in us * @param[in] ms the milliseconds to be converted * @param[out] target the converted point in time */ static void fib_lifetime_to_absolute(uint32_t ms, uint64_t *target) { *target = xtimer_now_usec64() + (ms * US_PER_MS); } /** * @brief returns pointer to the entry for the given destination address * * @param[in] table the FIB table to search in * @param[in] dst the destination address * @param[in] dst_size the destination address size * @param[out] entry_arr the array to store the found match * @param[in, out] entry_arr_size the number of entries provided by entry_arr (should always be 1) * this value is overwritten with the actual found number * * @return 0 if we found a next-hop prefix * 1 if we found the exact address next-hop * -EHOSTUNREACH if no fitting next-hop is available */ static int fib_find_entry(fib_table_t *table, uint8_t *dst, size_t dst_size, fib_entry_t **entry_arr, size_t *entry_arr_size) { uint64_t now = xtimer_now_usec64(); size_t count = 0; size_t prefix_size = 0; size_t match_size = dst_size << 3; int ret = -EHOSTUNREACH; bool is_all_zeros_addr = true; #if ENABLE_DEBUG DEBUG("[fib_find_entry] dst ="); for (size_t i = 0; i < dst_size; i++) { DEBUG(" %02x", dst[i]); } DEBUG("\n"); #endif for (size_t i = 0; i < dst_size; ++i) { if (dst[i] != 0) { is_all_zeros_addr = false; break; } } for (size_t i = 0; i < table->size; ++i) { /* auto-invalidate the entry unless its lifetime is set to never expire */ if (table->data.entries[i].lifetime != FIB_LIFETIME_NO_EXPIRE) { /* check if the lifetime expired */ if (table->data.entries[i].lifetime < now) { /* remove this entry if its lifetime expired */ table->data.entries[i].lifetime = 0; table->data.entries[i].global_flags = 0; table->data.entries[i].next_hop_flags = 0; table->data.entries[i].iface_id = KERNEL_PID_UNDEF; if (table->data.entries[i].global != NULL) { universal_address_rem(table->data.entries[i].global); table->data.entries[i].global = NULL; } if (table->data.entries[i].next_hop != NULL) { universal_address_rem(table->data.entries[i].next_hop); table->data.entries[i].next_hop = NULL; } } } if ((prefix_size < (dst_size<<3)) && (table->data.entries[i].global != NULL)) { int ret_comp = 
universal_address_compare(table->data.entries[i].global, dst, &match_size); /* If we found an exact match */ if ((ret_comp == UNIVERSAL_ADDRESS_EQUAL) || (is_all_zeros_addr && (ret_comp == UNIVERSAL_ADDRESS_IS_ALL_ZERO_ADDRESS))) { entry_arr[0] = &(table->data.entries[i]); *entry_arr_size = 1; /* we will not find a better one so we return */ return 1; } else { /* we try to find the most fitting prefix */ if (ret_comp == UNIVERSAL_ADDRESS_MATCHING_PREFIX) { if (table->data.entries[i].global_flags & FIB_FLAG_NET_PREFIX_MASK) { /* we shift the most upper flag byte back to get the number of prefix bits */ size_t global_prefix_len = (table->data.entries[i].global_flags & FIB_FLAG_NET_PREFIX_MASK) >> FIB_FLAG_NET_PREFIX_SHIFT; if ((match_size >= global_prefix_len) && ((prefix_size == 0) || (match_size > prefix_size))) { entry_arr[0] = &(table->data.entries[i]); /* we could find a better one so we move on */ ret = 0; prefix_size = match_size; count = 1; } } } else if (ret_comp == UNIVERSAL_ADDRESS_IS_ALL_ZERO_ADDRESS) { /* we found the default gateway entry, e.g. ::/0 for IPv6 * and we keep it only if there is no better one */ if (prefix_size == 0) { entry_arr[0] = &(table->data.entries[i]); /* we could find a better one so we move on */ ret = 0; count = 1; } } match_size = dst_size<<3; } } } #if ENABLE_DEBUG if (count > 0) { DEBUG("[fib_find_entry] found prefix on interface %d:", entry_arr[0]->iface_id); for (size_t i = 0; i < entry_arr[0]->global->address_size; i++) { DEBUG(" %02x", entry_arr[0]->global->address[i]); } DEBUG("\n"); } #endif *entry_arr_size = count; return ret; } /** * @brief updates the next hop the lifetime and the interface id for a given entry * * @param[in] entry the entry to be updated * @param[in] next_hop the next hop address to be updated * @param[in] next_hop_size the next hop address size * @param[in] next_hop_flags the next-hop address flags * @param[in] lifetime the lifetime in ms * * @return 0 if the entry has been updated * -ENOMEM if the entry cannot be updated due to insufficient RAM */ static int fib_upd_entry(fib_entry_t *entry, uint8_t *next_hop, size_t next_hop_size, uint32_t next_hop_flags, uint32_t lifetime) { universal_address_container_t *container = universal_address_add(next_hop, next_hop_size); if (container == NULL) { return -ENOMEM; } universal_address_rem(entry->next_hop); entry->next_hop = container; entry->next_hop_flags = next_hop_flags; if (lifetime != (uint32_t)FIB_LIFETIME_NO_EXPIRE) { fib_lifetime_to_absolute(lifetime, &entry->lifetime); } else { entry->lifetime = FIB_LIFETIME_NO_EXPIRE; } return 0; } /** * @brief creates a new FIB entry with the provided parameters * * @param[in] table the FIB table to create the entry in * @param[in] iface_id the interface ID * @param[in] dst the destination address * @param[in] dst_size the destination address size * @param[in] dst_flags the destination address flags * @param[in] next_hop the next hop address * @param[in] next_hop_size the next hop address size * @param[in] next_hop_flags the next-hop address flags * @param[in] lifetime the lifetime in ms * * @return 0 on success * -ENOMEM if no new entry can be created */ static int fib_create_entry(fib_table_t *table, kernel_pid_t iface_id, uint8_t *dst, size_t dst_size, uint32_t dst_flags, uint8_t *next_hop, size_t next_hop_size, uint32_t next_hop_flags, uint32_t lifetime) { for (size_t i = 0; i < table->size; ++i) { if (table->data.entries[i].lifetime == 0) { table->data.entries[i].global = universal_address_add(dst, dst_size); if 
(table->data.entries[i].global != NULL) { table->data.entries[i].global_flags = dst_flags; table->data.entries[i].next_hop = universal_address_add(next_hop, next_hop_size); table->data.entries[i].next_hop_flags = next_hop_flags; } if (table->data.entries[i].next_hop != NULL) { /* everything worked fine */ table->data.entries[i].iface_id = iface_id; if (lifetime != (uint32_t) FIB_LIFETIME_NO_EXPIRE) { fib_lifetime_to_absolute(lifetime, &table->data.entries[i].lifetime); } else { table->data.entries[i].lifetime = FIB_LIFETIME_NO_EXPIRE; } return 0; } } } return -ENOMEM; } /** * @brief removes the given entry * * @param[in] entry the entry to be removed * * @return 0 on success */ static int fib_remove(fib_entry_t *entry) { if (entry->global != NULL) { universal_address_rem(entry->global); } if (entry->next_hop) { universal_address_rem(entry->next_hop); } entry->global = NULL; entry->global_flags = 0; entry->next_hop = NULL; entry->next_hop_flags = 0; entry->iface_id = KERNEL_PID_UNDEF; entry->lifetime = 0; return 0; } /** * @brief signals (sends a message to) all registered routing protocols * registered with a matching prefix (usually this should be only one). * The receiver MUST copy the content, i.e. the address before reply. * * @param[in] table the fib instance to use * @param[in] type the kind of signal * @param[in] dat the data to send * @param[in] dat_size the data size in bytes * @param[in] dat_flags the data flags * * @return 0 on a new available entry, * -ENOENT if no suiting entry is provided. */ static int fib_signal_rp(fib_table_t *table, uint16_t type, uint8_t *dat, size_t dat_size, uint32_t dat_flags) { msg_t msg, reply; rp_address_msg_t rp_addr_msg; int ret = -ENOENT; void *content = NULL; if (type != FIB_MSG_RP_SIGNAL_SOURCE_ROUTE_CREATED) { /* the passed data is an address */ rp_addr_msg.address = dat; rp_addr_msg.address_size = dat_size; rp_addr_msg.address_flags = dat_flags; content = (void *)&rp_addr_msg; } else { /* the passed data is a sr head * dat_size and dat_flags are not used in this case */ content = (void *)dat; } msg.type = type; msg.content.ptr = content; for (size_t i = 0; i < FIB_MAX_REGISTERED_RP; ++i) { if (table->notify_rp[i] != KERNEL_PID_UNDEF) { DEBUG("[fib_signal_rp] send msg@: %p to pid[%d]: %d\n", \ msg.content.ptr, (int)i, (int)(table->notify_rp[i])); /* do only signal a RP if its registered prefix matches */ if (type != FIB_MSG_RP_SIGNAL_SOURCE_ROUTE_CREATED) { size_t dat_size_in_bits = dat_size<<3; if (universal_address_compare(table->prefix_rp[i], dat, &dat_size_in_bits) != -ENOENT) { /* the receiver, i.e. the RP, MUST copy the content value. * using the provided pointer after replying this message * will lead to errors */ msg_send_receive(&msg, &reply, table->notify_rp[i]); DEBUG("[fib_signal_rp] got reply.\n"); ret = 0; } } else { fib_sr_t *temp_sr = (fib_sr_t *)dat; size_t dat_size_in_bits = temp_sr->sr_dest->address->address_size << 3; if (universal_address_compare(table->prefix_rp[i], temp_sr->sr_dest->address->address, &dat_size_in_bits) != -ENOENT) { /* the receiver, i.e. the RP, MUST copy the content value. 
* using the provided pointer after replying this message * will lead to errors */ msg_send_receive(&msg, &reply, table->notify_rp[i]); DEBUG("[fib_signal_rp] got reply.\n"); ret = 0; } } } } return ret; } int fib_add_entry(fib_table_t *table, kernel_pid_t iface_id, uint8_t *dst, size_t dst_size, uint32_t dst_flags, uint8_t *next_hop, size_t next_hop_size, uint32_t next_hop_flags, uint32_t lifetime) { mutex_lock(&(table->mtx_access)); DEBUG("[fib_add_entry]\n"); size_t count = 1; fib_entry_t *entry[count]; /* check if dst and next_hop are valid pointers */ if ((dst == NULL) || (next_hop == NULL)) { mutex_unlock(&(table->mtx_access)); return -EFAULT; } int ret = fib_find_entry(table, dst, dst_size, &(entry[0]), &count); if (ret == 1) { /* we must take the according entry and update the values */ ret = fib_upd_entry(entry[0], next_hop, next_hop_size, next_hop_flags, lifetime); } else { ret = fib_create_entry(table, iface_id, dst, dst_size, dst_flags, next_hop, next_hop_size, next_hop_flags, lifetime); } mutex_unlock(&(table->mtx_access)); return ret; } int fib_update_entry(fib_table_t *table, uint8_t *dst, size_t dst_size, uint8_t *next_hop, size_t next_hop_size, uint32_t next_hop_flags, uint32_t lifetime) { mutex_lock(&(table->mtx_access)); DEBUG("[fib_update_entry]\n"); size_t count = 1; fib_entry_t *entry[count]; int ret = -ENOMEM; /* check if dst and next_hop are valid pointers */ if ((dst == NULL) || (next_hop == NULL)) { mutex_unlock(&(table->mtx_access)); return -EFAULT; } if (fib_find_entry(table, dst, dst_size, &(entry[0]), &count) == 1) { DEBUG("[fib_update_entry] found entry: %p\n", (void *)(entry[0])); /* we must take the according entry and update the values */ ret = fib_upd_entry(entry[0], next_hop, next_hop_size, next_hop_flags, lifetime); } else { /* we have ambiguous entries, i.e. count > 1 * this should never happen */ DEBUG("[fib_update_entry] ambiguous entries detected!!!\n"); } mutex_unlock(&(table->mtx_access)); return ret; } void fib_remove_entry(fib_table_t *table, uint8_t *dst, size_t dst_size) { mutex_lock(&(table->mtx_access)); DEBUG("[fib_remove_entry]\n"); size_t count = 1; fib_entry_t *entry[count]; int ret = fib_find_entry(table, dst, dst_size, &(entry[0]), &count); if (ret == 1) { /* we must take the according entry and update the values */ fib_remove(entry[0]); } else { /* we have ambiguous entries, i.e. 
count > 1 * this should never happen */ DEBUG("[fib_remove_entry] ambiguous entries detected!!!\n"); } mutex_unlock(&(table->mtx_access)); } void fib_flush(fib_table_t *table, kernel_pid_t interface) { mutex_lock(&(table->mtx_access)); DEBUG("[fib_flush]\n"); for (size_t i = 0; i < table->size; ++i) { if ((interface == KERNEL_PID_UNDEF) || (interface == table->data.entries[i].iface_id)) { fib_remove(&table->data.entries[i]); } } mutex_unlock(&(table->mtx_access)); } int fib_get_next_hop(fib_table_t *table, kernel_pid_t *iface_id, uint8_t *next_hop, size_t *next_hop_size, uint32_t *next_hop_flags, uint8_t *dst, size_t dst_size, uint32_t dst_flags) { mutex_lock(&(table->mtx_access)); DEBUG("[fib_get_next_hop]\n"); size_t count = 1; fib_entry_t *entry[count]; if ((iface_id == NULL) || (next_hop_size == NULL) || (next_hop_flags == NULL)) { mutex_unlock(&(table->mtx_access)); return -EINVAL; } if ((dst == NULL) || (next_hop == NULL)) { mutex_unlock(&(table->mtx_access)); return -EFAULT; } int ret = fib_find_entry(table, dst, dst_size, &(entry[0]), &count); if (!(ret == 0 || ret == 1)) { /* notify all responsible RPs for unknown next-hop for the destination address */ if (fib_signal_rp(table, FIB_MSG_RP_SIGNAL_UNREACHABLE_DESTINATION, dst, dst_size, dst_flags) == 0) { count = 1; /* now let's see if the RPs have found a valid next-hop */ ret = fib_find_entry(table, dst, dst_size, &(entry[0]), &count); } } if (ret == 0 || ret == 1) { uint8_t *address_ret = universal_address_get_address(entry[0]->next_hop, next_hop, next_hop_size); if (address_ret == NULL) { mutex_unlock(&(table->mtx_access)); return -ENOBUFS; } } else { mutex_unlock(&(table->mtx_access)); return -EHOSTUNREACH; } *iface_id = entry[0]->iface_id; *next_hop_flags = entry[0]->next_hop_flags; mutex_unlock(&(table->mtx_access)); return 0; } int fib_get_destination_set(fib_table_t *table, uint8_t *prefix, size_t prefix_size, fib_destination_set_entry_t *dst_set, size_t* dst_set_size) { mutex_lock(&(table->mtx_access)); int ret = -EHOSTUNREACH; size_t found_entries = 0; for (size_t i = 0; i < table->size; ++i) { if ((table->data.entries[i].global != NULL) && (universal_address_compare_prefix(table->data.entries[i].global, prefix, prefix_size<<3) >= UNIVERSAL_ADDRESS_EQUAL)) { if( (dst_set != NULL) && (found_entries < *dst_set_size) ) { /* set the size to full byte usage */ dst_set[found_entries].dest_size = sizeof(dst_set[found_entries].dest); universal_address_get_address(table->data.entries[i].global, dst_set[found_entries].dest, &dst_set[found_entries].dest_size); } found_entries++; } } if (found_entries > *dst_set_size) { ret = -ENOBUFS; } else if (found_entries > 0) { ret = 0; } *dst_set_size = found_entries; mutex_unlock(&(table->mtx_access)); return ret; } void fib_init(fib_table_t *table) { DEBUG("[fib_init] hello. Initializing some stuff.\n"); mutex_init(&(table->mtx_access)); mutex_lock(&(table->mtx_access)); for (size_t i = 0; i < FIB_MAX_REGISTERED_RP; ++i) { table->notify_rp[i] = KERNEL_PID_UNDEF; table->prefix_rp[i] = NULL; } table->notify_rp_pos = 0; if (table->table_type == FIB_TABLE_TYPE_SR) { memset(table->data.source_routes->headers, 0, sizeof(fib_sr_t) * table->size); memset(table->data.source_routes->entry_pool, 0, sizeof(fib_sr_entry_t) * table->data.source_routes->entry_pool_size); } else { memset(table->data.entries, 0, (table->size * sizeof(fib_entry_t))); } universal_address_init(); mutex_unlock(&(table->mtx_access)); } void fib_deinit(fib_table_t *table) { DEBUG("[fib_deinit] hello. 
De-Initializing stuff.\n"); mutex_lock(&(table->mtx_access)); for (size_t i = 0; i < FIB_MAX_REGISTERED_RP; ++i) { table->notify_rp[i] = KERNEL_PID_UNDEF; table->prefix_rp[i] = NULL; } table->notify_rp_pos = 0; if (table->table_type == FIB_TABLE_TYPE_SR) { memset(table->data.source_routes->headers, 0, sizeof(fib_sr_t) * table->size); memset(table->data.source_routes->entry_pool, 0, sizeof(fib_sr_entry_t) * table->data.source_routes->entry_pool_size); } else { memset(table->data.entries, 0, (table->size * sizeof(fib_entry_t))); } universal_address_reset(); mutex_unlock(&(table->mtx_access)); } int fib_register_rp(fib_table_t *table, uint8_t *prefix, size_t prefix_addr_type_size) { mutex_lock(&(table->mtx_access)); if (table->notify_rp_pos >= FIB_MAX_REGISTERED_RP) { mutex_unlock(&(table->mtx_access)); return -ENOMEM; } if ((prefix == NULL) || (prefix_addr_type_size == 0)) { mutex_unlock(&(table->mtx_access)); return -EINVAL; } if (table->notify_rp_pos < FIB_MAX_REGISTERED_RP) { table->notify_rp[table->notify_rp_pos] = sched_active_pid; universal_address_container_t *container = universal_address_add(prefix, prefix_addr_type_size); table->prefix_rp[table->notify_rp_pos] = container; table->notify_rp_pos++; } mutex_unlock(&(table->mtx_access)); return 0; } int fib_get_num_used_entries(fib_table_t *table) { mutex_lock(&(table->mtx_access)); size_t used_entries = 0; for (size_t i = 0; i < table->size; ++i) { used_entries += (size_t)(table->data.entries[i].global != NULL); } mutex_unlock(&(table->mtx_access)); return used_entries; } /* source route handling */ int fib_sr_create(fib_table_t *table, fib_sr_t **fib_sr, kernel_pid_t sr_iface_id, uint32_t sr_flags, uint32_t sr_lifetime) { mutex_lock(&(table->mtx_access)); if ((fib_sr == NULL) || (sr_lifetime == 0)) { mutex_unlock(&(table->mtx_access)); return -EFAULT; } for (size_t i = 0; i < table->size; ++i) { if (table->data.source_routes->headers[i].sr_lifetime == 0) { table->data.source_routes->headers[i].sr_iface_id = sr_iface_id; table->data.source_routes->headers[i].sr_flags = sr_flags; table->data.source_routes->headers[i].sr_path = NULL; table->data.source_routes->headers[i].sr_dest = NULL; if (sr_lifetime < (uint32_t)FIB_LIFETIME_NO_EXPIRE) { fib_lifetime_to_absolute(sr_lifetime, &table->data.source_routes->headers[i].sr_lifetime); } else { table->data.source_routes->headers[i].sr_lifetime = FIB_LIFETIME_NO_EXPIRE; } *fib_sr = &table->data.source_routes->headers[i]; mutex_unlock(&(table->mtx_access)); return 0; } } mutex_unlock(&(table->mtx_access)); return -ENOBUFS; } /** * @brief Internal function: * checks the lifetime and removes the entry in case it expired */ static int fib_sr_check_lifetime(fib_sr_t *fib_sr) { uint64_t tm = fib_sr->sr_lifetime - xtimer_now_usec64(); /* check if the lifetime expired */ if ((int64_t)tm < 0) { /* remove this sr if its lifetime expired */ fib_sr->sr_lifetime = 0; if (fib_sr->sr_path != NULL) { fib_sr_entry_t *elt = NULL; LL_FOREACH(fib_sr->sr_path, elt) { universal_address_rem(elt->address); } fib_sr->sr_path = NULL; } /* and return an errorcode */ return -ENOENT; } return 0; } /** * @brief Internal function: * creates a new entry in the table entry pool for a hop in a source route */ static int fib_sr_new_entry(fib_table_t *table, uint8_t *addr, size_t addr_size, fib_sr_entry_t **new_entry) { for (size_t i = 0; i < table->data.source_routes->entry_pool_size; ++i) { if (table->data.source_routes->entry_pool[i].address == NULL) { table->data.source_routes->entry_pool[i].address = 
universal_address_add(addr, addr_size); if (table->data.source_routes->entry_pool[i].address == NULL) { return -ENOMEM; } else { (void)new_entry; *new_entry = &table->data.source_routes->entry_pool[i]; return 0; } } } return -ENOMEM; } /** * @brief Internal function: * checks if the source route belongs to the given table */ static int fib_is_sr_in_table(fib_table_t *table, fib_sr_t *fib_sr) { for (size_t i = 0; i < table->size; ++i) { if (&(table->data.source_routes->headers[i]) == fib_sr) { return 0; } } return -ENOENT; } int fib_sr_read_head(fib_table_t *table, fib_sr_t *fib_sr, kernel_pid_t *iface_id, uint32_t *sr_flags, uint32_t *sr_lifetime) { mutex_lock(&(table->mtx_access)); if ((fib_sr == NULL) || (iface_id == NULL) || (sr_flags == NULL) || (sr_lifetime == NULL) || (fib_is_sr_in_table(table, fib_sr) == -ENOENT) ) { mutex_unlock(&(table->mtx_access)); return -EFAULT; } if (fib_sr_check_lifetime(fib_sr) == -ENOENT) { mutex_unlock(&(table->mtx_access)); return -ENOENT; } *iface_id = fib_sr->sr_iface_id; *sr_flags = fib_sr->sr_flags; *sr_lifetime = fib_sr->sr_lifetime - xtimer_now_usec64(); mutex_unlock(&(table->mtx_access)); return 0; } int fib_sr_read_destination(fib_table_t *table, fib_sr_t *fib_sr, uint8_t *dst, size_t *dst_size) { mutex_lock(&(table->mtx_access)); if ((fib_sr == NULL) || (dst == NULL) || (dst_size == NULL) || (fib_is_sr_in_table(table, fib_sr) == -ENOENT)) { mutex_unlock(&(table->mtx_access)); return -EFAULT; } if (fib_sr_check_lifetime(fib_sr) == -ENOENT) { mutex_unlock(&(table->mtx_access)); return -ENOENT; } if (fib_sr->sr_dest == NULL) { mutex_unlock(&(table->mtx_access)); return -EHOSTUNREACH; } if (universal_address_get_address(fib_sr->sr_dest->address, dst, dst_size) == NULL) { mutex_unlock(&(table->mtx_access)); return -ENOBUFS; } mutex_unlock(&(table->mtx_access)); return 0; } int fib_sr_set(fib_table_t *table, fib_sr_t *fib_sr, kernel_pid_t *sr_iface_id, uint32_t *sr_flags, uint32_t *sr_lifetime) { mutex_lock(&(table->mtx_access)); if ((fib_sr == NULL) || (fib_is_sr_in_table(table, fib_sr) == -ENOENT)) { mutex_unlock(&(table->mtx_access)); return -EFAULT; } if (fib_sr_check_lifetime(fib_sr) == -ENOENT) { mutex_unlock(&(table->mtx_access)); return -ENOENT; } if (sr_iface_id != NULL) { fib_sr->sr_iface_id = *sr_iface_id; } if (sr_flags != NULL) { fib_sr->sr_flags = *sr_flags; } if (sr_lifetime != NULL) { fib_lifetime_to_absolute(*sr_lifetime, &(fib_sr->sr_lifetime)); } mutex_unlock(&(table->mtx_access)); return 0; } int fib_sr_delete(fib_table_t *table, fib_sr_t *fib_sr) { mutex_lock(&(table->mtx_access)); if ((fib_sr == NULL) || (fib_is_sr_in_table(table, fib_sr) == -ENOENT)) { mutex_unlock(&(table->mtx_access)); return -EFAULT; } fib_sr->sr_lifetime = 0; if (fib_sr->sr_path != NULL) { fib_sr_entry_t *elt = NULL, *tmp = NULL; LL_FOREACH_SAFE(fib_sr->sr_path, elt, tmp) { universal_address_rem(elt->address); elt->address = NULL; LL_DELETE(fib_sr->sr_path, elt); } fib_sr->sr_path = NULL; } mutex_unlock(&(table->mtx_access)); return 0; } int fib_sr_next(fib_table_t *table, fib_sr_t *fib_sr, fib_sr_entry_t **sr_path_entry) { mutex_lock(&(table->mtx_access)); if ((fib_sr == NULL) || (sr_path_entry == NULL) || (fib_is_sr_in_table(table, fib_sr) == -ENOENT)) { mutex_unlock(&(table->mtx_access)); return -EFAULT; } if (fib_sr->sr_path == NULL) { mutex_unlock(&(table->mtx_access)); return -EFAULT; } if (fib_sr_check_lifetime(fib_sr) == -ENOENT) { mutex_unlock(&(table->mtx_access)); return -ENOENT; } /* if we reach the destination entry, i.e. 
the last entry we just return 1 */ if (*sr_path_entry == fib_sr->sr_dest) { mutex_unlock(&(table->mtx_access)); return 1; } /* when we start, we pass the first entry */ if (*sr_path_entry == NULL) { *sr_path_entry = fib_sr->sr_path; } else { /* in any other case we just return the next entry */ *sr_path_entry = (*sr_path_entry)->next; } mutex_unlock(&(table->mtx_access)); return 0; } int fib_sr_search(fib_table_t *table, fib_sr_t *fib_sr, uint8_t *addr, size_t addr_size, fib_sr_entry_t **sr_path_entry) { mutex_lock(&(table->mtx_access)); if ((fib_sr == NULL) || (addr == NULL) || (sr_path_entry == NULL) || (fib_is_sr_in_table(table, fib_sr) == -ENOENT)) { mutex_unlock(&(table->mtx_access)); return -EFAULT; } if (fib_sr_check_lifetime(fib_sr) == -ENOENT) { mutex_unlock(&(table->mtx_access)); return -ENOENT; } fib_sr_entry_t *elt = NULL; LL_FOREACH(fib_sr->sr_path, elt) { size_t addr_size_match = addr_size << 3; if (universal_address_compare(elt->address, addr, &addr_size_match) == UNIVERSAL_ADDRESS_EQUAL) { /* temporary workaround to calm compiler */ (void)sr_path_entry; *sr_path_entry = elt; mutex_unlock(&(table->mtx_access)); return 0; } } mutex_unlock(&(table->mtx_access)); return -EHOSTUNREACH; } int fib_sr_entry_append(fib_table_t *table, fib_sr_t *fib_sr, uint8_t *addr, size_t addr_size) { mutex_lock(&(table->mtx_access)); if ((fib_sr == NULL) || (addr == NULL) || (fib_is_sr_in_table(table, fib_sr) == -ENOENT)) { mutex_unlock(&(table->mtx_access)); return -EFAULT; } if (fib_sr_check_lifetime(fib_sr) == -ENOENT) { mutex_unlock(&(table->mtx_access)); return -ENOENT; } fib_sr_entry_t *elt = NULL; LL_FOREACH(fib_sr->sr_path, elt) { size_t addr_size_match = addr_size << 3; if (universal_address_compare(elt->address, addr, &addr_size_match) == UNIVERSAL_ADDRESS_EQUAL) { mutex_unlock(&(table->mtx_access)); return -EINVAL; } } fib_sr_entry_t *new_entry[1]; int ret = fib_sr_new_entry(table, addr, addr_size, &new_entry[0]); if (ret == 0) { fib_sr_entry_t *tmp = fib_sr->sr_dest; if (tmp != NULL) { /* we append the new entry behind the former destination */ tmp->next = new_entry[0]; } else { /* this is also our first entry */ fib_sr->sr_path = new_entry[0]; } fib_sr->sr_dest = new_entry[0]; } mutex_unlock(&(table->mtx_access)); return ret; } int fib_sr_entry_add(fib_table_t *table, fib_sr_t *fib_sr, fib_sr_entry_t *sr_path_entry, uint8_t *addr, size_t addr_size, bool keep_remaining_route) { mutex_lock(&(table->mtx_access)); if ((fib_sr == NULL) || (sr_path_entry == NULL) || (addr == NULL) || (fib_is_sr_in_table(table, fib_sr) == -ENOENT)) { mutex_unlock(&(table->mtx_access)); return -EFAULT; } if (fib_sr_check_lifetime(fib_sr) == -ENOENT) { mutex_unlock(&(table->mtx_access)); return -ENOENT; } bool found = false; fib_sr_entry_t *elt = NULL; LL_FOREACH(fib_sr->sr_path, elt) { size_t addr_size_match = addr_size << 3; if (universal_address_compare(elt->address, addr, &addr_size_match) == UNIVERSAL_ADDRESS_EQUAL) { mutex_unlock(&(table->mtx_access)); return -EINVAL; } if (sr_path_entry == elt) { found = true; break; } } int ret = -ENOENT; if (found) { fib_sr_entry_t *new_entry[1]; ret = fib_sr_new_entry(table, addr, addr_size, &new_entry[0]); if (ret == 0) { fib_sr_entry_t *remaining = sr_path_entry->next; sr_path_entry->next = new_entry[0]; if (keep_remaining_route) { new_entry[0]->next = remaining; } else { fib_sr_entry_t *elt = NULL, *tmp = NULL; LL_FOREACH_SAFE(remaining, elt, tmp) { universal_address_rem(elt->address); elt->address = NULL; LL_DELETE(remaining, elt); } new_entry[0]->next = NULL; 
fib_sr->sr_dest = new_entry[0]; } } } mutex_unlock(&(table->mtx_access)); return ret; } int fib_sr_entry_delete(fib_table_t *table, fib_sr_t *fib_sr, uint8_t *addr, size_t addr_size, bool keep_remaining_route) { mutex_lock(&(table->mtx_access)); if ((fib_sr == NULL) || (fib_is_sr_in_table(table, fib_sr) == -ENOENT)) { mutex_unlock(&(table->mtx_access)); return -EFAULT; } if (fib_sr_check_lifetime(fib_sr) == -ENOENT) { mutex_unlock(&(table->mtx_access)); return -ENOENT; } fib_sr_entry_t *elt = NULL, *tmp; tmp = fib_sr->sr_path; LL_FOREACH(fib_sr->sr_path, elt) { size_t addr_size_match = addr_size << 3; if (universal_address_compare(elt->address, addr, &addr_size_match) == UNIVERSAL_ADDRESS_EQUAL) { universal_address_rem(elt->address); if (keep_remaining_route) { tmp->next = elt->next; } else { fib_sr_entry_t *elt_del = NULL, *tmp_del = NULL; LL_FOREACH_SAFE(tmp, elt_del, tmp_del) { universal_address_rem(elt_del->address); elt_del->address = NULL; LL_DELETE(tmp, elt_del); } } if (elt == fib_sr->sr_path) { /* if we remove the first entry we must adjust the path start */ fib_sr->sr_path = elt->next; } if (elt == fib_sr->sr_dest) { /* if we remove the last entry we must adjust the destination */ fib_sr->sr_dest = tmp; } mutex_unlock(&(table->mtx_access)); return 0; } tmp = elt; } /* nothing matched: release the lock before reporting the miss */ mutex_unlock(&(table->mtx_access)); return -ENOENT; } int fib_sr_entry_overwrite(fib_table_t *table, fib_sr_t *fib_sr, uint8_t *addr_old, size_t addr_old_size, uint8_t *addr_new, size_t addr_new_size) { mutex_lock(&(table->mtx_access)); if ((fib_sr == NULL) || (addr_old == NULL) || (addr_new == NULL) || (fib_is_sr_in_table(table, fib_sr) == -ENOENT)) { mutex_unlock(&(table->mtx_access)); return -EFAULT; } if (fib_sr_check_lifetime(fib_sr) == -ENOENT) { mutex_unlock(&(table->mtx_access)); return -ENOENT; } fib_sr_entry_t *elt = NULL, *elt_repl; elt_repl = NULL; LL_FOREACH(fib_sr->sr_path, elt) { size_t addr_old_size_match = addr_old_size << 3; size_t addr_new_size_match = addr_old_size << 3; if (universal_address_compare(elt->address, addr_old, &addr_old_size_match) == UNIVERSAL_ADDRESS_EQUAL) { elt_repl = elt; } if (universal_address_compare(elt->address, addr_new, &addr_new_size_match) == UNIVERSAL_ADDRESS_EQUAL) { mutex_unlock(&(table->mtx_access)); return -EINVAL; } } if (elt_repl != NULL) { universal_address_rem(elt_repl->address); universal_address_container_t *add = universal_address_add(addr_new, addr_new_size); if (add == NULL) { /* if this happened we deleted one entry, i.e. decreased the usecount * adding a new one was not possible due to lack of memory * so we add back the old entry, i.e. 
increasing the usecount */ universal_address_add(addr_old, addr_old_size); mutex_unlock(&(table->mtx_access)); return -ENOMEM; } elt_repl->address = add; } mutex_unlock(&(table->mtx_access)); return 0; } int fib_sr_entry_get_address(fib_table_t *table, fib_sr_t *fib_sr, fib_sr_entry_t *sr_entry, uint8_t *addr, size_t *addr_size) { mutex_lock(&(table->mtx_access)); if ((fib_sr == NULL) || (fib_is_sr_in_table(table, fib_sr) == -ENOENT)) { mutex_unlock(&(table->mtx_access)); return -EFAULT; } if (fib_sr_check_lifetime(fib_sr) == -ENOENT) { mutex_unlock(&(table->mtx_access)); return -ENOENT; } fib_sr_entry_t *elt = NULL; LL_FOREACH(fib_sr->sr_path, elt) { if (elt == sr_entry) { if (universal_address_get_address(elt->address, addr, addr_size) != NULL) { mutex_unlock(&(table->mtx_access)); return 0; } else { mutex_unlock(&(table->mtx_access)); return -ENOMEM; } } } mutex_unlock(&(table->mtx_access)); return -ENOENT; } /** * @brief helper function to search a partial path to a given destination, * and iff successful to create a new source route * * @param[in] table the fib table the entry should be added to * @param[in] dst pointer to the destination address bytes * @param[in] dst_size the size in bytes of the destination address type * @param[in] check_free_entry position to start the search for a free entry * @param[out] error the state of this operation when finished * * @return pointer to the new source route on success * NULL otherwise */ static fib_sr_t* _fib_create_sr_from_partial(fib_table_t *table, uint8_t *dst, size_t dst_size, int check_free_entry, int *error) { fib_sr_t* hit = NULL; for (size_t i = 0; i < table->size; ++i) { if (table->data.source_routes->headers[i].sr_lifetime != 0) { fib_sr_entry_t *elt = NULL; LL_FOREACH(table->data.source_routes->headers[i].sr_path, elt) { size_t addr_size_match = dst_size << 3; if (universal_address_compare(elt->address, dst, &addr_size_match) == UNIVERSAL_ADDRESS_EQUAL) { /* we create a new sr */ if (check_free_entry == -1) { /* we have no room to create a new sr * so we just return and do NOT tell the RPs to find a route * since we cannot save it */ *error = -ENOBUFS; return NULL; } else { /* we check if there is a free place for the new sr */ fib_sr_t *new_sr = NULL; for (size_t j = check_free_entry; j < table->size; ++j) { if (table->data.source_routes->headers[j].sr_lifetime != 0) { /* not this one, maybe the next one */ continue; } else { /* there it is, so we copy the header */ new_sr = &table->data.source_routes->headers[j]; new_sr->sr_iface_id = table->data.source_routes->headers[i].sr_iface_id; new_sr->sr_flags = table->data.source_routes->headers[i].sr_flags; new_sr->sr_lifetime = table->data.source_routes->headers[i].sr_lifetime; new_sr->sr_path = NULL; /* and the path until the searched destination */ fib_sr_entry_t *elt_iter = NULL, *elt_add = NULL; LL_FOREACH(table->data.source_routes->headers[i].sr_path, elt_iter) { fib_sr_entry_t *new_entry; if (fib_sr_new_entry(table, elt_iter->address->address, elt_iter->address->address_size, &new_entry) != 0) { /* we could not create a new entry * so we return to clean up the partial route */ *error = -ENOBUFS; return new_sr; } if (new_sr->sr_path == NULL) { new_sr->sr_path = new_entry; elt_add = new_sr->sr_path; } else { elt_add->next = new_entry; elt_add = elt_add->next; } if (elt_iter == elt) { /* we copied until the destination */ new_sr->sr_dest = new_entry; hit = new_sr; /* tell the RPs that a new sr has been created * the size and the flags parameters are ignored */ if 
(fib_signal_rp(table, FIB_MSG_RP_SIGNAL_SOURCE_ROUTE_CREATED, (uint8_t *)new_sr, 0, 0) != 0) { /* if no RP can handle the source route * then the host is not directly reachable */ *error = -EHOSTUNREACH; } /* break from iterating for copy */ break; } } } } /* break from iterating the found path */ break; } } } if (hit != NULL) { /* break iterating all sr since we have a path now */ break; } } } return hit; } int fib_sr_get_route(fib_table_t *table, uint8_t *dst, size_t dst_size, kernel_pid_t *sr_iface_id, uint32_t *sr_flags, uint8_t *addr_list, size_t *addr_list_elements, size_t *element_size, bool reverse, fib_sr_t **fib_sr) { mutex_lock(&(table->mtx_access)); if ((dst == NULL) || (sr_iface_id == NULL) || (sr_flags == NULL) || (addr_list == NULL) || (addr_list_elements == NULL) || (element_size == NULL)) { mutex_unlock(&(table->mtx_access)); return -EFAULT; } fib_sr_t *hit = NULL; fib_sr_t *tmp_hit = NULL; int check_free_entry = -1; bool skip = (fib_sr != NULL) && (*fib_sr != NULL)?true:false; /* Case 1 - check if we know a direct route */ for (size_t i = 0; i < table->size; ++i) { if (fib_sr_check_lifetime(&table->data.source_routes->headers[i]) == -ENOENT) { /* expired, so skip this sr and remember its position */ if (check_free_entry == -1) { /* we want to fill up the source routes from the beginning */ check_free_entry = i; } continue; } if( skip ) { if(*fib_sr == &table->data.source_routes->headers[i]) { skip = false; } /* we skip all entries upon the consecutive one to start search */ continue; } size_t addr_size_match = dst_size << 3; if (universal_address_compare(table->data.source_routes->headers[i].sr_dest->address, dst, &addr_size_match) == UNIVERSAL_ADDRESS_EQUAL) { if (*sr_flags == table->data.source_routes->headers[i].sr_flags) { /* found a perfect matching sr, no need to search further */ hit = &table->data.source_routes->headers[i]; tmp_hit = NULL; if (check_free_entry == -1) { check_free_entry = i; } break; } else { /* found a sr to the destination but with different flags, * maybe we find a better one. */ tmp_hit = &table->data.source_routes->headers[i]; } } } if (hit == NULL) { /* we didn't find a perfect sr, but one with distinct flags */ hit = tmp_hit; } /* Case 2 - if no hit is found check if there is a matching entry in one sr_path * @note the first match wins, if we find one we will NOT continue searching, * since this search is very expensive in terms of compare operations */ if (hit == NULL) { int error = 0; hit = _fib_create_sr_from_partial(table, dst, dst_size, check_free_entry, &error); if ((error != 0) && (error != -EHOSTUNREACH)) { /* something went wrong, so we clean up our mess * * @note we could handle -EHOSTUNREACH differently here, * since it says that we have a partial source route but no RP * to manage it. * That's why I let it pass for now. 
*/ if (hit != NULL) { hit->sr_lifetime = 0; if (hit->sr_path != NULL) { fib_sr_entry_t *elt = NULL, *tmp = NULL; LL_FOREACH_SAFE(hit->sr_path, elt, tmp) { universal_address_rem(elt->address); elt->address = NULL; LL_DELETE(hit->sr_path, elt); } hit->sr_path = NULL; } } mutex_unlock(&(table->mtx_access)); return error; } } /* Final step - copy the list in the desired order */ if (hit != NULL) { /* store the current hit to enable consecutive searches */ if( fib_sr != NULL ) { *fib_sr = hit; } /* check the list size and if the sr entries will fit */ int count; fib_sr_entry_t *elt = NULL; LL_COUNT(hit->sr_path, elt, count); if (((size_t)count > *addr_list_elements) || (sizeof(hit->sr_path->address->address) > *element_size)) { *addr_list_elements = count; *element_size = sizeof(hit->sr_path->address->address); mutex_unlock(&(table->mtx_access)); return -ENOBUFS; } /* start copy the individual entries in the desired order */ uint8_t *next_entry = addr_list; int one_address_size = *element_size; if (reverse) { /* we move to the last list element */ next_entry += (count - 1) * sizeof(hit->sr_path->address->address); /* and set the storing direction during the iteration */ one_address_size *= -1; } elt = NULL; LL_FOREACH(hit->sr_path, elt) { size_t tmp_size = sizeof(hit->sr_path->address->address); universal_address_get_address(elt->address, next_entry, &tmp_size); next_entry += one_address_size; } *sr_iface_id = hit->sr_iface_id; *sr_flags = hit->sr_flags; *addr_list_elements = count; *element_size = sizeof(hit->sr_path->address->address); } else { /* trigger RPs for route discovery */ fib_signal_rp(table, FIB_MSG_RP_SIGNAL_UNREACHABLE_DESTINATION, dst, dst_size, *sr_flags); mutex_unlock(&(table->mtx_access)); return -EHOSTUNREACH; } mutex_unlock(&(table->mtx_access)); if (tmp_hit == NULL) { return 0; } else { return 1; } } /* print functions */ void fib_print_notify_rp(fib_table_t *table) { mutex_lock(&(table->mtx_access)); for (size_t i = 0; i < FIB_MAX_REGISTERED_RP; ++i) { printf("[fib_print_notify_rp] pid[%d]: %d\n", (int)i, (int)(table->notify_rp[i])); } mutex_unlock(&(table->mtx_access)); } void fib_print_fib_table(fib_table_t *table) { mutex_lock(&(table->mtx_access)); for (size_t i = 0; i < table->size; ++i) { printf("[fib_print_table] %d) iface_id: %d, global: %p, next hop: %p, lifetime: %"PRIu32"\n", (int)i, (int)table->data.entries[i].iface_id, (void *)table->data.entries[i].global, (void *)table->data.entries[i].next_hop, (uint32_t)(table->data.entries[i].lifetime / 1000)); } mutex_unlock(&(table->mtx_access)); } void fib_print_sr(fib_table_t *table, fib_sr_t *sr) { /* does not adjust the lifetime */ mutex_lock(&(table->mtx_access)); if ((sr == NULL) || (fib_is_sr_in_table(table, sr) == -ENOENT)) { mutex_unlock(&(table->mtx_access)); return; } printf("\n-= Source route (%p) =-\nIface: %d\nflags: %x\npath: %p\ndest: ", (void *)sr, sr->sr_iface_id, (unsigned int)sr->sr_flags, (void *)sr->sr_path); if (sr->sr_dest != NULL) { universal_address_print_entry(sr->sr_dest->address); } else { puts("Not set."); } fib_sr_entry_t *nxt = sr->sr_path; while (nxt) { universal_address_print_entry(nxt->address); nxt = nxt->next; } printf("-= END (%p) =-\n", (void *)sr); mutex_unlock(&(table->mtx_access)); } static void fib_print_address(universal_address_container_t *entry) { uint8_t address[UNIVERSAL_ADDRESS_SIZE]; size_t addr_size = UNIVERSAL_ADDRESS_SIZE; uint8_t *ret = universal_address_get_address(entry, address, &addr_size); if (ret == address) { #ifdef MODULE_IPV6_ADDR if (addr_size == 
sizeof(ipv6_addr_t)) { printf("%-" FIB_ADDR_PRINT_LENS "s", ipv6_addr_to_str(addr_str, (ipv6_addr_t *) address, sizeof(addr_str))); return; } #endif for (size_t i = 0; i < UNIVERSAL_ADDRESS_SIZE; ++i) { if (i <= addr_size) { printf("%02x", address[i]); } else { printf(" "); } } #ifdef MODULE_IPV6_ADDR /* print trailing whitespaces */ for (size_t i = 0; i < FIB_ADDR_PRINT_LEN - (UNIVERSAL_ADDRESS_SIZE * 2); ++i) { printf(" "); } #endif } } void fib_print_routes(fib_table_t *table) { mutex_lock(&(table->mtx_access)); uint64_t now = xtimer_now_usec64(); if (table->table_type == FIB_TABLE_TYPE_SH) { printf("%-" FIB_ADDR_PRINT_LENS "s %-17s %-" FIB_ADDR_PRINT_LENS "s %-10s %-16s" " Interface\n" , "Destination", "Flags", "Next Hop", "Flags", "Expires"); for (size_t i = 0; i < table->size; ++i) { if (table->data.entries[i].lifetime != 0) { fib_print_address(table->data.entries[i].global); printf(" 0x%08"PRIx32" ", table->data.entries[i].global_flags); if(table->data.entries[i].global_flags & FIB_FLAG_NET_PREFIX_MASK) { uint32_t prefix = (table->data.entries[i].global_flags & FIB_FLAG_NET_PREFIX_MASK); printf("N /%-3d ", (int)(prefix >> FIB_FLAG_NET_PREFIX_SHIFT)); } else { printf("H "); } fib_print_address(table->data.entries[i].next_hop); printf(" 0x%08"PRIx32" ", table->data.entries[i].next_hop_flags); if (table->data.entries[i].lifetime != FIB_LIFETIME_NO_EXPIRE) { uint64_t tm = table->data.entries[i].lifetime - now; /* we must interpret the values as signed */ if ((int64_t)tm < 0 ) { printf("%-16s ", "EXPIRED"); } else { printf("%"PRIu32".%05"PRIu32, (uint32_t)(tm / 1000000), (uint32_t)(tm % 1000000)); } } else { printf("%-16s ", "NEVER"); } printf("%d\n", (int)table->data.entries[i].iface_id); } } } else if (table->table_type == FIB_TABLE_TYPE_SR) { printf("%-" FIB_ADDR_PRINT_LENS "s %-" FIB_ADDR_PRINT_LENS "s %-6s %-16s Interface\n" , "SR Destination", "SR First Hop", "SR Flags", "Expires"); for (size_t i = 0; i < table->size; ++i) { if (table->data.source_routes->headers[i].sr_lifetime != 0) { fib_print_address(table->data.source_routes->headers[i].sr_dest->address); fib_print_address(table->data.source_routes->headers[i].sr_path->address); printf(" 0x%04"PRIx32" ", table->data.source_routes->headers[i].sr_flags); if (table->data.source_routes->headers[i].sr_lifetime != FIB_LIFETIME_NO_EXPIRE) { uint64_t tm = table->data.source_routes->headers[i].sr_lifetime - now; /* we must interpret the values as signed */ if ((int64_t)tm < 0 ) { printf("%-16s ", "EXPIRED"); } else { printf("%"PRIu32".%05"PRIu32, (uint32_t)(tm / 1000000), (uint32_t)(tm % 1000000)); } } else { printf("%-16s ", "NEVER"); } printf("%d\n", (int)table->data.source_routes->headers[i].sr_iface_id); } } } mutex_unlock(&(table->mtx_access)); } #if FIB_DEVEL_HELPER int fib_devel_get_lifetime(fib_table_t *table, uint64_t *lifetime, uint8_t *dst, size_t dst_size) { if (table->table_type == FIB_TABLE_TYPE_SH) { size_t count = 1; fib_entry_t *entry[count]; int ret = fib_find_entry(table, dst, dst_size, &(entry[0]), &count); if (ret == 1 ) { /* only return lifetime of exact matches */ *lifetime = entry[0]->lifetime; return 0; } return -EHOSTUNREACH; } else if (table->table_type == FIB_TABLE_TYPE_SR) { size_t addr_size_match = dst_size << 3; /* first hit wins here */ for (size_t i = 0; i < table->size; ++i) { if (universal_address_compare(table->data.source_routes->headers[i].sr_dest->address, dst, &addr_size_match) == UNIVERSAL_ADDRESS_EQUAL) { *lifetime = table->data.source_routes->headers[i].sr_lifetime; return 0; } } return 
-EHOSTUNREACH; } return -EFAULT; } #endif
yogo1212/RIOT
sys/net/network_layer/fib/fib.c
C
lgpl-2.1
56,622
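fib_sr_get_route() above copies the hop addresses of a matching source route into a caller-supplied buffer, returns 0 on a perfect flag match and 1 on a match with distinct flags, and reports the required buffer dimensions through -ENOBUFS so the caller can retry with more room. A minimal caller sketch under stated assumptions: the table, the destination buffer, and the 16-entry / 16-byte capacities are hypothetical, and the "net/fib.h" include assumes RIOT's usual header layout.

/* Sketch only: `table`, `dst` and the buffer capacities are illustrative. */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include "net/fib.h"

int get_path_to(fib_table_t *table, uint8_t *dst, size_t dst_len)
{
    uint8_t addr_list[16 * 16];   /* room for 16 hops of 16 bytes each */
    size_t elements = 16;         /* capacity in entries (in/out) */
    size_t element_size = 16;     /* capacity per entry in bytes (in/out) */
    kernel_pid_t iface;
    uint32_t flags = 0;           /* in: flags to match, out: flags of the hit */
    fib_sr_t *sr = NULL;          /* NULL starts a fresh search; on success it
                                   * points at the hit for consecutive lookups */

    int res = fib_sr_get_route(table, dst, dst_len, &iface, &flags,
                               addr_list, &elements, &element_size,
                               false, &sr);
    if (res == -ENOBUFS) {
        /* elements/element_size now hold the required dimensions */
        printf("need %u entries of %u bytes\n",
               (unsigned)elements, (unsigned)element_size);
    }
    else if (res == -EHOSTUNREACH) {
        /* no route known; the call has already signalled the RPs */
    }
    /* res == 0: perfect flag match; res == 1: match with distinct flags */
    return res;
}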
/*= -*- c-basic-offset: 4; indent-tabs-mode: nil; -*- * * librsync -- the library for network deltas * $Id: msg.c,v 1.15 2003/06/12 05:47:22 wayned Exp $ * * Copyright (C) 2000, 2001 by Martin Pool <mbp@samba.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* | Welcome to Arco AM/PM Mini-Market. We | would like to advise our customers | that any individual who offers to | pump gas, wash windows or solicit | products is not employed by or | associated with this facility. We | discourage any contact with these | individuals and ask that you report | any problems to uniformed personal | inside. Thankyou for shopping at | Arco, and have a nice day. */ #include <config.h> #include <stdlib.h> #include <stdio.h> #include "librsync.h" /* * TODO: (Suggestion by tridge) Add a function which outputs a * complete text description of a job, including only the fields * relevant to the current encoding function. */ /** \brief Translate from rs_result to human-readable messages. */ char const *rs_strerror(rs_result r) { switch (r) { case RS_DONE: return "OK"; case RS_RUNNING: return "still running"; case RS_BLOCKED: return "blocked waiting for input or output buffers"; case RS_BAD_MAGIC: return "bad magic number at start of stream"; case RS_INPUT_ENDED: return "unexpected end of input"; case RS_CORRUPT: return "stream corrupt"; case RS_UNIMPLEMENTED: return "unimplemented case"; case RS_MEM_ERROR: return "out of memory"; case RS_IO_ERROR: return "IO error"; case RS_SYNTAX_ERROR: return "bad command line syntax"; case RS_INTERNAL_ERROR: return "library internal error"; default: return "unexplained problem"; } }
kaseya/librsync
msg.c
C
lgpl-2.1
2,883
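Since rs_strerror() is a pure mapping from rs_result to a static string, reporting any librsync status takes a single call and no extra state. A minimal usage sketch; the report() helper and the hard-coded status value are illustrative only:

#include <stdio.h>
#include "librsync.h"

/* print a non-OK librsync status to stderr (illustrative helper) */
static void report(rs_result r)
{
    if (r != RS_DONE) {
        fprintf(stderr, "librsync: %s\n", rs_strerror(r));
    }
}

int main(void)
{
    report(RS_MEM_ERROR);   /* prints "librsync: out of memory" */
    return 0;
}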
#include "board.h" #include "uart.h" #include "app_util_platform.h" #include "nrf_drv_common.h" #include "nrf_systick.h" #include "nrf_rtc.h" #include "nrf_drv_clock.h" #include "softdevice_handler.h" #include "nrf_drv_uart.h" #include "nrf_gpio.h" #include <rtthread.h> #include <rthw.h> #if 0 /******************************************************************************* * Function Name : SysTick_Configuration * Description : Configures the SysTick for OS tick. * Input : None * Output : None * Return : None *******************************************************************************/ void SysTick_Configuration(void) { nrf_drv_common_irq_enable(SysTick_IRQn, APP_TIMER_CONFIG_IRQ_PRIORITY); nrf_systick_load_set(SystemCoreClock / RT_TICK_PER_SECOND); nrf_systick_val_clear(); nrf_systick_csr_set(NRF_SYSTICK_CSR_CLKSOURCE_CPU | NRF_SYSTICK_CSR_TICKINT_ENABLE | NRF_SYSTICK_CSR_ENABLE); } /** * This is the timer interrupt service routine. * */ void SysTick_Handler(void) { if (rt_thread_self() != RT_NULL) { /* enter interrupt */ rt_interrupt_enter(); rt_tick_increase(); /* leave interrupt */ rt_interrupt_leave(); } } #else #define TICK_RATE_HZ RT_TICK_PER_SECOND #define SYSTICK_CLOCK_HZ ( 32768UL ) #define NRF_RTC_REG NRF_RTC1 /* IRQn used by the selected RTC */ #define NRF_RTC_IRQn RTC1_IRQn /* Constants required to manipulate the NVIC. */ #define NRF_RTC_PRESCALER ( (uint32_t) (ROUNDED_DIV(SYSTICK_CLOCK_HZ, TICK_RATE_HZ) - 1) ) /* Maximum RTC ticks */ #define NRF_RTC_MAXTICKS ((1U<<24)-1U) static volatile uint32_t m_tick_overflow_count = 0; #define NRF_RTC_BITWIDTH 24 #define OSTick_Handler RTC1_IRQHandler #define EXPECTED_IDLE_TIME_BEFORE_SLEEP 2 void SysTick_Configuration(void) { nrf_drv_clock_lfclk_request(NULL); /* Configure SysTick to interrupt at the requested rate. 
*/ nrf_rtc_prescaler_set(NRF_RTC_REG, NRF_RTC_PRESCALER); nrf_rtc_int_enable (NRF_RTC_REG, RTC_INTENSET_TICK_Msk); nrf_rtc_task_trigger (NRF_RTC_REG, NRF_RTC_TASK_CLEAR); nrf_rtc_task_trigger (NRF_RTC_REG, NRF_RTC_TASK_START); nrf_rtc_event_enable(NRF_RTC_REG, RTC_EVTEN_OVRFLW_Msk); NVIC_SetPriority(NRF_RTC_IRQn, 0xF); NVIC_EnableIRQ(NRF_RTC_IRQn); } static rt_tick_t _tick_distance(void) { nrf_rtc_event_clear(NRF_RTC_REG, NRF_RTC_EVENT_COMPARE_0); uint32_t systick_counter = nrf_rtc_counter_get(NRF_RTC_REG); nrf_rtc_event_clear(NRF_RTC_REG, NRF_RTC_EVENT_TICK); /* check for overflow in TICK counter */ if(nrf_rtc_event_pending(NRF_RTC_REG, NRF_RTC_EVENT_OVERFLOW)) { nrf_rtc_event_clear(NRF_RTC_REG, NRF_RTC_EVENT_OVERFLOW); m_tick_overflow_count++; } return ((m_tick_overflow_count << NRF_RTC_BITWIDTH) + systick_counter) - rt_tick_get(); } void OSTick_Handler( void ) { uint32_t diff; diff = _tick_distance(); while((diff--) > 0) { if (rt_thread_self() != RT_NULL) { /* enter interrupt */ rt_interrupt_enter(); rt_tick_increase(); /* leave interrupt */ rt_interrupt_leave(); } } } static void _wakeup_tick_adjust(void) { uint32_t diff; uint32_t level; level = rt_hw_interrupt_disable(); diff = _tick_distance(); rt_tick_set(rt_tick_get() + diff); if (rt_thread_self() != RT_NULL) { struct rt_thread *thread; /* check time slice */ thread = rt_thread_self(); if (thread->remaining_tick <= diff) { /* change to initialized tick */ thread->remaining_tick = thread->init_tick; /* yield */ rt_thread_yield(); } else { thread->remaining_tick -= diff; } /* check timer */ rt_timer_check(); } rt_hw_interrupt_enable(level); } static void _sleep_ongo( uint32_t sleep_tick ) { uint32_t enterTime; uint32_t entry_tick; /* Make sure the SysTick reload value does not overflow the counter. */ if ( sleep_tick > NRF_RTC_MAXTICKS - EXPECTED_IDLE_TIME_BEFORE_SLEEP ) { sleep_tick = NRF_RTC_MAXTICKS - EXPECTED_IDLE_TIME_BEFORE_SLEEP; } rt_enter_critical(); enterTime = nrf_rtc_counter_get(NRF_RTC_REG); { uint32_t wakeupTime = (enterTime + sleep_tick) & NRF_RTC_MAXTICKS; /* Stop tick events */ nrf_rtc_int_disable(NRF_RTC_REG, NRF_RTC_INT_TICK_MASK); /* Configure CTC interrupt */ nrf_rtc_cc_set(NRF_RTC_REG, 0, wakeupTime); nrf_rtc_event_clear(NRF_RTC_REG, NRF_RTC_EVENT_COMPARE_0); nrf_rtc_int_enable(NRF_RTC_REG, NRF_RTC_INT_COMPARE0_MASK); entry_tick = rt_tick_get(); __DSB(); if ( sleep_tick > 0 ) { #ifdef SOFTDEVICE_PRESENT if (softdevice_handler_is_enabled()) { uint32_t err_code = sd_app_evt_wait(); APP_ERROR_CHECK(err_code); } else #endif { /* No SD - we would just block interrupts globally. * BASEPRI cannot be used for that because it would prevent WFE from wake up. 
*/ do{ __WFE(); } while (0 == (NVIC->ISPR[0] | NVIC->ISPR[1])); } } nrf_rtc_int_disable(NRF_RTC_REG, NRF_RTC_INT_COMPARE0_MASK); nrf_rtc_event_clear(NRF_RTC_REG, NRF_RTC_EVENT_COMPARE_0); _wakeup_tick_adjust(); /* Correct the system ticks */ { nrf_rtc_event_clear(NRF_RTC_REG, NRF_RTC_EVENT_TICK); nrf_rtc_int_enable (NRF_RTC_REG, NRF_RTC_INT_TICK_MASK); /* It is important that we clear pending here so that our corrections are latest and in sync with tick_interrupt handler */ NVIC_ClearPendingIRQ(NRF_RTC_IRQn); } rt_kprintf("entry tick:%u, expected:%u, current tick:%u\n", entry_tick, sleep_tick, rt_tick_get()); } rt_exit_critical(); } #endif void rt_hw_system_powersave(void) { uint32_t sleep_tick; sleep_tick = rt_timer_next_timeout_tick() - rt_tick_get(); if ( sleep_tick >= EXPECTED_IDLE_TIME_BEFORE_SLEEP) { // rt_kprintf("sleep entry:%u\n", rt_tick_get()); _sleep_ongo( sleep_tick ); } } void rt_hw_board_init(void) { // sd_power_dcdc_mode_set(NRF_POWER_DCDC_ENABLE); /* Activate deep sleep mode */ SCB->SCR |= SCB_SCR_SLEEPDEEP_Msk; nrf_drv_clock_init(); // nrf_drv_clock_hfclk_request(0); SysTick_Configuration(); rt_thread_idle_sethook(rt_hw_system_powersave); rt_hw_uart_init(); #ifdef RT_USING_CONSOLE rt_console_set_device(RT_CONSOLE_DEVICE_NAME); #endif #ifdef RT_USING_COMPONENTS_INIT rt_components_board_init(); #endif }
igou/rt-thread
bsp/nrf52832/board/board.c
C
apache-2.0
6,920
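The tickless setup above derives the RTC prescaler with ROUNDED_DIV(SYSTICK_CLOCK_HZ, TICK_RATE_HZ) - 1 and recovers elapsed OS ticks in _tick_distance() from the 24-bit counter plus an overflow count. The standalone sketch below reproduces that arithmetic with plain integers in place of the nrf_rtc_* accessors; the 1000 Hz tick rate and the local ROUNDED_DIV definition are assumptions (the latter matching the nRF SDK's rounding convention), not values taken from the board code.

/* Standalone sketch of the tick bookkeeping; no hardware access. */
#include <stdint.h>
#include <stdio.h>

#define SYSTICK_CLOCK_HZ  32768UL
#define TICK_RATE_HZ      1000UL          /* assumed RT_TICK_PER_SECOND */
#define RTC_BITWIDTH      24

/* assumed to match the nRF SDK's ROUNDED_DIV() */
#define ROUNDED_DIV(a, b) (((a) + (b) / 2) / (b))
#define RTC_PRESCALER     (ROUNDED_DIV(SYSTICK_CLOCK_HZ, TICK_RATE_HZ) - 1)

/* ticks elapsed since the kernel tick count was last updated,
 * mirroring _tick_distance(): (overflows << 24) + counter - kernel_tick */
static uint32_t tick_distance(uint32_t rtc_counter,
                              uint32_t overflow_count,
                              uint32_t kernel_tick)
{
    return ((overflow_count << RTC_BITWIDTH) + rtc_counter) - kernel_tick;
}

int main(void)
{
    /* 32768 / 1000 rounds to 33, so the prescaler is 32 and the RTC
     * ticks at 32768 / 33, about 993 Hz against the requested 1000 Hz */
    printf("prescaler = %lu\n", (unsigned long)RTC_PRESCALER);
    /* one overflow of the 24-bit counter plus 10 raw ticks -> 10 */
    printf("distance  = %u\n",
           (unsigned)tick_distance(10, 1, (uint32_t)1 << 24));
    return 0;
}

A design note on the formula: keeping a software overflow count widens the 24-bit hardware counter to a monotonically increasing tick value, so the subtraction stays correct across RTC wrap-arounds as long as overflows are observed at least once per wrap.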
/* * linux/mm/vmscan.c * * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * Swap reorganised 29.12.95, Stephen Tweedie. * kswapd added: 7.1.96 sct * Removed kswapd_ctl limits, and swap out as many pages as needed * to bring the system back to freepages.high: 2.4.97, Rik van Riel. * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com). * Multiqueue VM started 5.8.00, Rik van Riel. */ #include <linux/mm.h> #include <linux/module.h> #include <linux/gfp.h> #include <linux/kernel_stat.h> #include <linux/swap.h> #include <linux/pagemap.h> #include <linux/init.h> #include <linux/highmem.h> #include <linux/vmpressure.h> #include <linux/vmstat.h> #include <linux/file.h> #include <linux/writeback.h> #include <linux/blkdev.h> #include <linux/buffer_head.h> /* for try_to_release_page(), buffer_heads_over_limit */ #include <linux/mm_inline.h> #include <linux/backing-dev.h> #include <linux/rmap.h> #include <linux/topology.h> #include <linux/cpu.h> #include <linux/cpuset.h> #include <linux/compaction.h> #include <linux/notifier.h> #include <linux/rwsem.h> #include <linux/delay.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/memcontrol.h> #include <linux/delayacct.h> #include <linux/sysctl.h> #include <linux/oom.h> #include <linux/prefetch.h> #include <linux/debugfs.h> #include <asm/tlbflush.h> #include <asm/div64.h> #include <linux/swapops.h> #include <linux/balloon_compaction.h> #include "internal.h" #define CREATE_TRACE_POINTS #include <trace/events/vmscan.h> struct scan_control { /* Incremented by the number of inactive pages that were scanned */ unsigned long nr_scanned; /* Number of pages freed so far during a call to shrink_zones() */ unsigned long nr_reclaimed; /* How many pages shrink_list() should reclaim */ unsigned long nr_to_reclaim; unsigned long hibernation_mode; /* This context's GFP mask */ gfp_t gfp_mask; int may_writepage; /* Can mapped pages be reclaimed? */ int may_unmap; /* Can pages be swapped as part of reclaim? */ int may_swap; int order; /* Scan (total_size >> priority) pages at once */ int priority; /* * The memory cgroup that hit its limit and as a result is the * primary target of this reclaim invocation. */ struct mem_cgroup *target_mem_cgroup; /* * Nodemask of nodes allowed by the caller. If NULL, all nodes * are scanned. */ nodemask_t *nodemask; }; #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru)) #ifdef ARCH_HAS_PREFETCH #define prefetch_prev_lru_page(_page, _base, _field) \ do { \ if ((_page)->lru.prev != _base) { \ struct page *prev; \ \ prev = lru_to_page(&(_page->lru)); \ prefetch(&prev->_field); \ } \ } while (0) #else #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0) #endif #ifdef ARCH_HAS_PREFETCHW #define prefetchw_prev_lru_page(_page, _base, _field) \ do { \ if ((_page)->lru.prev != _base) { \ struct page *prev; \ \ prev = lru_to_page(&(_page->lru)); \ prefetchw(&prev->_field); \ } \ } while (0) #else #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0) #endif /* * From 0 .. 100. Higher means more swappy. 
*/ int vm_swappiness = 60; unsigned long vm_total_pages; /* The total number of pages which the VM controls */ static LIST_HEAD(shrinker_list); static DECLARE_RWSEM(shrinker_rwsem); #ifdef CONFIG_MEMCG static bool global_reclaim(struct scan_control *sc) { return !sc->target_mem_cgroup; } #else static bool global_reclaim(struct scan_control *sc) { return true; } #endif static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru) { if (!mem_cgroup_disabled()) return mem_cgroup_get_lru_size(lruvec, lru); return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru); } struct dentry *debug_file; static int debug_shrinker_show(struct seq_file *s, void *unused) { struct shrinker *shrinker; struct shrink_control sc; sc.gfp_mask = -1; sc.nr_to_scan = 0; down_read(&shrinker_rwsem); list_for_each_entry(shrinker, &shrinker_list, list) { int num_objs; num_objs = shrinker->shrink(shrinker, &sc); seq_printf(s, "%pf %d\n", shrinker->shrink, num_objs); } up_read(&shrinker_rwsem); return 0; } static int debug_shrinker_open(struct inode *inode, struct file *file) { return single_open(file, debug_shrinker_show, inode->i_private); } static const struct file_operations debug_shrinker_fops = { .open = debug_shrinker_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /* * Add a shrinker callback to be called from the vm */ void register_shrinker(struct shrinker *shrinker) { atomic_long_set(&shrinker->nr_in_batch, 0); down_write(&shrinker_rwsem); list_add_tail(&shrinker->list, &shrinker_list); up_write(&shrinker_rwsem); } EXPORT_SYMBOL(register_shrinker); static int __init add_shrinker_debug(void) { debugfs_create_file("shrinker", 0644, NULL, NULL, &debug_shrinker_fops); return 0; } late_initcall(add_shrinker_debug); /* * Remove one */ void unregister_shrinker(struct shrinker *shrinker) { down_write(&shrinker_rwsem); list_del(&shrinker->list); up_write(&shrinker_rwsem); } EXPORT_SYMBOL(unregister_shrinker); static inline int do_shrinker_shrink(struct shrinker *shrinker, struct shrink_control *sc, unsigned long nr_to_scan) { sc->nr_to_scan = nr_to_scan; return (*shrinker->shrink)(shrinker, sc); } #define SHRINK_BATCH 128 /* * Call the shrink functions to age shrinkable caches * * Here we assume it costs one seek to replace a lru page and that it also * takes a seek to recreate a cache object. With this in mind we age equal * percentages of the lru and ageable caches. This should balance the seeks * generated by these structures. * * If the vm encountered mapped pages on the LRU it increase the pressure on * slab to avoid swapping. * * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits. * * `lru_pages' represents the number of on-LRU pages in all the zones which * are eligible for the caller's allocation attempt. It is used for balancing * slab reclaim versus page reclaim. * * Returns the number of slab objects which we shrunk. */ unsigned long shrink_slab(struct shrink_control *shrink, unsigned long nr_pages_scanned, unsigned long lru_pages) { struct shrinker *shrinker; unsigned long ret = 0; if (nr_pages_scanned == 0) nr_pages_scanned = SWAP_CLUSTER_MAX; if (!down_read_trylock(&shrinker_rwsem)) { /* Assume we'll be able to shrink next time */ ret = 1; goto out; } list_for_each_entry(shrinker, &shrinker_list, list) { unsigned long long delta; long total_scan; long max_pass; int shrink_ret = 0; long nr; long new_nr; long batch_size = shrinker->batch ? 
shrinker->batch : SHRINK_BATCH; max_pass = do_shrinker_shrink(shrinker, shrink, 0); if (max_pass <= 0) continue; /* * copy the current shrinker scan count into a local variable * and zero it so that other concurrent shrinker invocations * don't also do this scanning work. */ nr = atomic_long_xchg(&shrinker->nr_in_batch, 0); total_scan = nr; delta = (4 * nr_pages_scanned) / shrinker->seeks; delta *= max_pass; do_div(delta, lru_pages + 1); total_scan += delta; if (total_scan < 0) { printk(KERN_ERR "shrink_slab: %pF negative objects to " "delete nr=%ld\n", shrinker->shrink, total_scan); total_scan = max_pass; } /* * We need to avoid excessive windup on filesystem shrinkers * due to large numbers of GFP_NOFS allocations causing the * shrinkers to return -1 all the time. This results in a large * nr being built up so when a shrink that can do some work * comes along it empties the entire cache due to nr >>> * max_pass. This is bad for sustaining a working set in * memory. * * Hence only allow the shrinker to scan the entire cache when * a large delta change is calculated directly. */ if (delta < max_pass / 4) total_scan = min(total_scan, max_pass / 2); /* * Avoid risking looping forever due to too large nr value: * never try to free more than twice the estimate number of * freeable entries. */ if (total_scan > max_pass * 2) total_scan = max_pass * 2; trace_mm_shrink_slab_start(shrinker, shrink, nr, nr_pages_scanned, lru_pages, max_pass, delta, total_scan); while (total_scan >= batch_size) { int nr_before; nr_before = do_shrinker_shrink(shrinker, shrink, 0); shrink_ret = do_shrinker_shrink(shrinker, shrink, batch_size); if (shrink_ret == -1) break; if (shrink_ret < nr_before) ret += nr_before - shrink_ret; count_vm_events(SLABS_SCANNED, batch_size); total_scan -= batch_size; cond_resched(); } /* * move the unused scan count back into the shrinker in a * manner that handles concurrent updates. If we exhausted the * scan, there is no need to do an update. */ if (total_scan > 0) new_nr = atomic_long_add_return(total_scan, &shrinker->nr_in_batch); else new_nr = atomic_long_read(&shrinker->nr_in_batch); trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr); } up_read(&shrinker_rwsem); out: cond_resched(); return ret; } static inline int is_page_cache_freeable(struct page *page) { /* * A freeable page cache page is referenced only by the caller * that isolated the page, the page cache radix tree and * optional buffer heads at page->private. */ return page_count(page) - page_has_private(page) == 2; } static int may_write_to_queue(struct backing_dev_info *bdi, struct scan_control *sc) { if (current->flags & PF_SWAPWRITE) return 1; if (!bdi_write_congested(bdi)) return 1; if (bdi == current->backing_dev_info) return 1; return 0; } /* * We detected a synchronous write error writing a page out. Probably * -ENOSPC. We need to propagate that into the address_space for a subsequent * fsync(), msync() or close(). * * The tricky part is that after writepage we cannot touch the mapping: nothing * prevents it from being freed up. But we have a ref on the page and once * that page is locked, the mapping is pinned. * * We're allowed to run sleeping lock_page() here because we know the caller has * __GFP_FS. 
*/ static void handle_write_error(struct address_space *mapping, struct page *page, int error) { lock_page(page); if (page_mapping(page) == mapping) mapping_set_error(mapping, error); unlock_page(page); } /* possible outcome of pageout() */ typedef enum { /* failed to write page out, page is locked */ PAGE_KEEP, /* move page to the active list, page is locked */ PAGE_ACTIVATE, /* page has been sent to the disk successfully, page is unlocked */ PAGE_SUCCESS, /* page is clean and locked */ PAGE_CLEAN, } pageout_t; /* * pageout is called by shrink_page_list() for each dirty page. * Calls ->writepage(). */ static pageout_t pageout(struct page *page, struct address_space *mapping, struct scan_control *sc) { /* * If the page is dirty, only perform writeback if that write * will be non-blocking. To prevent this allocation from being * stalled by pagecache activity. But note that there may be * stalls if we need to run get_block(). We could test * PagePrivate for that. * * If this process is currently in __generic_file_aio_write() against * this page's queue, we can perform writeback even if that * will block. * * If the page is swapcache, write it back even if that would * block, for some throttling. This happens by accident, because * swap_backing_dev_info is bust: it doesn't reflect the * congestion state of the swapdevs. Easy to fix, if needed. */ if (!is_page_cache_freeable(page)) return PAGE_KEEP; if (!mapping) { /* * Some data journaling orphaned pages can have * page->mapping == NULL while being dirty with clean buffers. */ if (page_has_private(page)) { if (try_to_free_buffers(page)) { ClearPageDirty(page); printk("%s: orphaned page\n", __func__); return PAGE_CLEAN; } } return PAGE_KEEP; } if (mapping->a_ops->writepage == NULL) return PAGE_ACTIVATE; if (!may_write_to_queue(mapping->backing_dev_info, sc)) return PAGE_KEEP; if (clear_page_dirty_for_io(page)) { int res; struct writeback_control wbc = { .sync_mode = WB_SYNC_NONE, .nr_to_write = SWAP_CLUSTER_MAX, .range_start = 0, .range_end = LLONG_MAX, .for_reclaim = 1, }; SetPageReclaim(page); res = mapping->a_ops->writepage(page, &wbc); if (res < 0) handle_write_error(mapping, page, res); if (res == AOP_WRITEPAGE_ACTIVATE) { ClearPageReclaim(page); return PAGE_ACTIVATE; } if (!PageWriteback(page)) { /* synchronous write or broken a_ops? */ ClearPageReclaim(page); } trace_mm_vmscan_writepage(page, trace_reclaim_flags(page)); inc_zone_page_state(page, NR_VMSCAN_WRITE); return PAGE_SUCCESS; } return PAGE_CLEAN; } /* * Same as remove_mapping, but if the page is removed from the mapping, it * gets returned with a refcount of 0. */ static int __remove_mapping(struct address_space *mapping, struct page *page) { BUG_ON(!PageLocked(page)); BUG_ON(mapping != page_mapping(page)); spin_lock_irq(&mapping->tree_lock); /* * The non racy check for a busy page. * * Must be careful with the order of the tests. When someone has * a ref to the page, it may be possible that they dirty it then * drop the reference. So if PageDirty is tested before page_count * here, then the following race may occur: * * get_user_pages(&page); * [user mapping goes away] * write_to(page); * !PageDirty(page) [good] * SetPageDirty(page); * put_page(page); * !page_count(page) [good, discard it] * * [oops, our write_to data is lost] * * Reversing the order of the tests ensures such a situation cannot * escape unnoticed. The smp_rmb is needed to ensure the page->flags * load is not satisfied before that of page->_count. 
* * Note that if SetPageDirty is always performed via set_page_dirty, * and thus under tree_lock, then this ordering is not required. */ if (!page_freeze_refs(page, 2)) goto cannot_free; /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */ if (unlikely(PageDirty(page))) { page_unfreeze_refs(page, 2); goto cannot_free; } if (PageSwapCache(page)) { swp_entry_t swap = { .val = page_private(page) }; __delete_from_swap_cache(page); spin_unlock_irq(&mapping->tree_lock); swapcache_free(swap, page); } else { void (*freepage)(struct page *); freepage = mapping->a_ops->freepage; __delete_from_page_cache(page); spin_unlock_irq(&mapping->tree_lock); mem_cgroup_uncharge_cache_page(page); if (freepage != NULL) freepage(page); } return 1; cannot_free: spin_unlock_irq(&mapping->tree_lock); return 0; } /* * Attempt to detach a locked page from its ->mapping. If it is dirty or if * someone else has a ref on the page, abort and return 0. If it was * successfully detached, return 1. Assumes the caller has a single ref on * this page. */ int remove_mapping(struct address_space *mapping, struct page *page) { if (__remove_mapping(mapping, page)) { /* * Unfreezing the refcount with 1 rather than 2 effectively * drops the pagecache ref for us without requiring another * atomic operation. */ page_unfreeze_refs(page, 1); return 1; } return 0; } /** * putback_lru_page - put previously isolated page onto appropriate LRU list * @page: page to be put back to appropriate lru list * * Add previously isolated @page to appropriate LRU list. * Page may still be unevictable for other reasons. * * lru_lock must not be held, interrupts must be enabled. */ void putback_lru_page(struct page *page) { int lru; int active = !!TestClearPageActive(page); int was_unevictable = PageUnevictable(page); VM_BUG_ON(PageLRU(page)); redo: ClearPageUnevictable(page); if (page_evictable(page)) { /* * For evictable pages, we can use the cache. * In event of a race, worst case is we end up with an * unevictable page on [in]active list. * We know how to handle that. */ lru = active + page_lru_base_type(page); lru_cache_add_lru(page, lru); } else { /* * Put unevictable pages directly on zone's unevictable * list. */ lru = LRU_UNEVICTABLE; add_page_to_unevictable_list(page); /* * When racing with an mlock or AS_UNEVICTABLE clearing * (page is unlocked) make sure that if the other thread * does not observe our setting of PG_lru and fails * isolation/check_move_unevictable_pages, * we see PG_mlocked/AS_UNEVICTABLE cleared below and move * the page back to the evictable list. * * The other side is TestClearPageMlocked() or shmem_lock(). */ smp_mb(); } /* * page's status can change while we move it among lru. If an evictable * page is on unevictable list, it never be freed. To avoid that, * check after we added it to the list, again. */ if (lru == LRU_UNEVICTABLE && page_evictable(page)) { if (!isolate_lru_page(page)) { put_page(page); goto redo; } /* This means someone else dropped this page from LRU * So, it will be freed or putback to LRU again. There is * nothing to do here. 
*/ } if (was_unevictable && lru != LRU_UNEVICTABLE) count_vm_event(UNEVICTABLE_PGRESCUED); else if (!was_unevictable && lru == LRU_UNEVICTABLE) count_vm_event(UNEVICTABLE_PGCULLED); put_page(page); /* drop ref from isolate */ } enum page_references { PAGEREF_RECLAIM, PAGEREF_RECLAIM_CLEAN, PAGEREF_KEEP, PAGEREF_ACTIVATE, }; static enum page_references page_check_references(struct page *page, struct scan_control *sc) { int referenced_ptes, referenced_page; unsigned long vm_flags; referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup, &vm_flags); referenced_page = TestClearPageReferenced(page); /* * Mlock lost the isolation race with us. Let try_to_unmap() * move the page to the unevictable list. */ if (vm_flags & VM_LOCKED) return PAGEREF_RECLAIM; if (referenced_ptes) { if (PageSwapBacked(page)) return PAGEREF_ACTIVATE; /* * All mapped pages start out with page table * references from the instantiating fault, so we need * to look twice if a mapped file page is used more * than once. * * Mark it and spare it for another trip around the * inactive list. Another page table reference will * lead to its activation. * * Note: the mark is set for activated pages as well * so that recently deactivated but used pages are * quickly recovered. */ SetPageReferenced(page); if (referenced_page || referenced_ptes > 1) return PAGEREF_ACTIVATE; /* * Activate file-backed executable pages after first usage. */ if (vm_flags & VM_EXEC) return PAGEREF_ACTIVATE; return PAGEREF_KEEP; } /* Reclaim if clean, defer dirty pages to writeback */ if (referenced_page && !PageSwapBacked(page)) return PAGEREF_RECLAIM_CLEAN; return PAGEREF_RECLAIM; } /* * shrink_page_list() returns the number of reclaimed pages */ static unsigned long shrink_page_list(struct list_head *page_list, struct zone *zone, struct scan_control *sc, enum ttu_flags ttu_flags, unsigned long *ret_nr_dirty, unsigned long *ret_nr_writeback, bool force_reclaim) { LIST_HEAD(ret_pages); LIST_HEAD(free_pages); int pgactivate = 0; unsigned long nr_dirty = 0; unsigned long nr_congested = 0; unsigned long nr_reclaimed = 0; unsigned long nr_writeback = 0; cond_resched(); mem_cgroup_uncharge_start(); while (!list_empty(page_list)) { struct address_space *mapping; struct page *page; int may_enter_fs; enum page_references references = PAGEREF_RECLAIM_CLEAN; cond_resched(); page = lru_to_page(page_list); list_del(&page->lru); if (!trylock_page(page)) goto keep; VM_BUG_ON(PageActive(page)); VM_BUG_ON(page_zone(page) != zone); sc->nr_scanned++; if (unlikely(!page_evictable(page))) goto cull_mlocked; if (!sc->may_unmap && page_mapped(page)) goto keep_locked; /* Double the slab pressure for mapped and swapcache pages */ if (page_mapped(page) || PageSwapCache(page)) sc->nr_scanned++; may_enter_fs = (sc->gfp_mask & __GFP_FS) || (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); if (PageWriteback(page)) { /* * memcg doesn't have any dirty pages throttling so we * could easily OOM just because too many pages are in * writeback and there is nothing else to reclaim. * * Require may_enter_fs to wait on writeback, because * fs may not have submitted IO yet. And a loop driver * thread might enter reclaim, and deadlock if it waits * on a page for which it is needed to do the write * (loop masks off __GFP_IO|__GFP_FS for this reason); * but more thought would probably show more reasons. 
*/ if (global_reclaim(sc) || !PageReclaim(page) || !may_enter_fs) { /* * This is slightly racy - end_page_writeback() * might have just cleared PageReclaim, then * setting PageReclaim here end up interpreted * as PageReadahead - but that does not matter * enough to care. What we do want is for this * page to have PageReclaim set next time memcg * reclaim reaches the tests above, so it will * then wait_on_page_writeback() to avoid OOM; * and it's also appropriate in global reclaim. */ SetPageReclaim(page); nr_writeback++; goto keep_locked; } wait_on_page_writeback(page); } if (!force_reclaim) references = page_check_references(page, sc); switch (references) { case PAGEREF_ACTIVATE: goto activate_locked; case PAGEREF_KEEP: goto keep_locked; case PAGEREF_RECLAIM: case PAGEREF_RECLAIM_CLEAN: ; /* try to reclaim the page below */ } /* * Anonymous process memory has backing store? * Try to allocate it some swap space here. */ if (PageAnon(page) && !PageSwapCache(page)) { if (!(sc->gfp_mask & __GFP_IO)) goto keep_locked; if (!add_to_swap(page, page_list)) goto activate_locked; may_enter_fs = 1; } mapping = page_mapping(page); /* * The page is mapped into the page tables of one or more * processes. Try to unmap it here. */ if (page_mapped(page) && mapping) { switch (try_to_unmap(page, ttu_flags)) { case SWAP_FAIL: goto activate_locked; case SWAP_AGAIN: goto keep_locked; case SWAP_MLOCK: goto cull_mlocked; case SWAP_SUCCESS: ; /* try to free the page below */ } } if (PageDirty(page)) { nr_dirty++; /* * Only kswapd can writeback filesystem pages to * avoid risk of stack overflow but do not writeback * unless under significant pressure. */ if (page_is_file_cache(page) && (!current_is_kswapd() || sc->priority >= DEF_PRIORITY - 2)) { /* * Immediately reclaim when written back. * Similar in principal to deactivate_page() * except we already have the page isolated * and know it's dirty */ inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE); SetPageReclaim(page); goto keep_locked; } if (references == PAGEREF_RECLAIM_CLEAN) goto keep_locked; if (!may_enter_fs) goto keep_locked; if (!sc->may_writepage) goto keep_locked; /* Page is dirty, try to write it out here */ switch (pageout(page, mapping, sc)) { case PAGE_KEEP: nr_congested++; goto keep_locked; case PAGE_ACTIVATE: goto activate_locked; case PAGE_SUCCESS: if (PageWriteback(page)) goto keep; if (PageDirty(page)) goto keep; /* * A synchronous write - probably a ramdisk. Go * ahead and try to reclaim the page. */ if (!trylock_page(page)) goto keep; if (PageDirty(page) || PageWriteback(page)) goto keep_locked; mapping = page_mapping(page); case PAGE_CLEAN: ; /* try to free the page below */ } } /* * If the page has buffers, try to free the buffer mappings * associated with this page. If we succeed we try to free * the page as well. * * We do this even if the page is PageDirty(). * try_to_release_page() does not perform I/O, but it is * possible for a page to have PageDirty set, but it is actually * clean (all its buffers are clean). This happens if the * buffers were written out directly, with submit_bh(). ext3 * will do this, as well as the blockdev mapping. * try_to_release_page() will discover that cleanness and will * drop the buffers and mark the page clean - it can be freed. * * Rarely, pages can have buffers and no ->mapping. These are * the pages which were not successfully invalidated in * truncate_complete_page(). 
We try to drop those buffers here * and if that worked, and the page is no longer mapped into * process address space (page_count == 1) it can be freed. * Otherwise, leave the page on the LRU so it is swappable. */ if (page_has_private(page)) { if (!try_to_release_page(page, sc->gfp_mask)) goto activate_locked; if (!mapping && page_count(page) == 1) { unlock_page(page); if (put_page_testzero(page)) goto free_it; else { /* * rare race with speculative reference. * the speculative reference will free * this page shortly, so we may * increment nr_reclaimed here (and * leave it off the LRU). */ nr_reclaimed++; continue; } } } if (!mapping || !__remove_mapping(mapping, page)) goto keep_locked; /* * At this point, we have no other references and there is * no way to pick any more up (removed from LRU, removed * from pagecache). Can use non-atomic bitops now (and * we obviously don't have to worry about waking up a process * waiting on the page lock, because there are no references. */ __clear_page_locked(page); free_it: nr_reclaimed++; /* * Is there need to periodically free_page_list? It would * appear not as the counts should be low */ list_add(&page->lru, &free_pages); continue; cull_mlocked: if (PageSwapCache(page)) try_to_free_swap(page); unlock_page(page); list_add(&page->lru, &ret_pages); continue; activate_locked: /* Not a candidate for swapping, so reclaim swap space. */ if (PageSwapCache(page) && vm_swap_full()) try_to_free_swap(page); VM_BUG_ON(PageActive(page)); SetPageActive(page); pgactivate++; keep_locked: unlock_page(page); keep: list_add(&page->lru, &ret_pages); VM_BUG_ON(PageLRU(page) || PageUnevictable(page)); } /* * Tag a zone as congested if all the dirty pages encountered were * backed by a congested BDI. In this case, reclaimers should just * back off and wait for congestion to clear because further reclaim * will encounter the same problem */ if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc)) zone_set_flag(zone, ZONE_CONGESTED); free_hot_cold_page_list(&free_pages, 1); list_splice(&ret_pages, page_list); count_vm_events(PGACTIVATE, pgactivate); mem_cgroup_uncharge_end(); *ret_nr_dirty += nr_dirty; *ret_nr_writeback += nr_writeback; return nr_reclaimed; } unsigned long reclaim_clean_pages_from_list(struct zone *zone, struct list_head *page_list) { struct scan_control sc = { .gfp_mask = GFP_KERNEL, .priority = DEF_PRIORITY, .may_unmap = 1, }; unsigned long ret, dummy1, dummy2; struct page *page, *next; LIST_HEAD(clean_pages); list_for_each_entry_safe(page, next, page_list, lru) { if (page_is_file_cache(page) && !PageDirty(page) && !isolated_balloon_page(page)) { ClearPageActive(page); list_move(&page->lru, &clean_pages); } } ret = shrink_page_list(&clean_pages, zone, &sc, TTU_UNMAP|TTU_IGNORE_ACCESS, &dummy1, &dummy2, true); list_splice(&clean_pages, page_list); __mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret); return ret; } /* * Attempt to remove the specified page from its LRU. Only take this page * if it is of the appropriate PageActive status. Pages which are being * freed elsewhere are also ignored. * * page: page to consider * mode: one of the LRU isolation modes defined above * * returns 0 on success, -ve errno on failure. */ int __isolate_lru_page(struct page *page, isolate_mode_t mode) { int ret = -EINVAL; /* Only take pages on the LRU. 
*/ if (!PageLRU(page)) return ret; /* Compaction should not handle unevictable pages but CMA can do so */ if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE)) return ret; ret = -EBUSY; /* * To minimise LRU disruption, the caller can indicate that it only * wants to isolate pages it will be able to operate on without * blocking - clean pages for the most part. * * ISOLATE_CLEAN means that only clean pages should be isolated. This * is used by reclaim when it is cannot write to backing storage * * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants to pages * that it is possible to migrate without blocking */ if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) { /* All the caller can do on PageWriteback is block */ if (PageWriteback(page)) return ret; if (PageDirty(page)) { struct address_space *mapping; /* ISOLATE_CLEAN means only clean pages */ if (mode & ISOLATE_CLEAN) return ret; /* * Only pages without mappings or that have a * ->migratepage callback are possible to migrate * without blocking */ mapping = page_mapping(page); if (mapping && !mapping->a_ops->migratepage) return ret; } } if ((mode & ISOLATE_UNMAPPED) && page_mapped(page)) return ret; if (likely(get_page_unless_zero(page))) { /* * Be careful not to clear PageLRU until after we're * sure the page is not being freed elsewhere -- the * page release code relies on it. */ ClearPageLRU(page); ret = 0; } return ret; } /* * zone->lru_lock is heavily contended. Some of the functions that * shrink the lists perform better by taking out a batch of pages * and working on them outside the LRU lock. * * For pagecache intensive workloads, this function is the hottest * spot in the kernel (apart from copy_*_user functions). * * Appropriate locks must be held before calling this function. * * @nr_to_scan: The number of pages to look through on the list. * @lruvec: The LRU vector to pull pages from. * @dst: The temp list to put pages on to. * @nr_scanned: The number of pages that were scanned. * @sc: The scan_control struct for this reclaim session * @mode: One of the LRU isolation modes * @lru: LRU list id for isolating * * returns how many pages were moved onto *@dst. */ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, struct lruvec *lruvec, struct list_head *dst, unsigned long *nr_scanned, struct scan_control *sc, isolate_mode_t mode, enum lru_list lru) { struct list_head *src = &lruvec->lists[lru]; unsigned long nr_taken = 0; unsigned long scan; for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) { struct page *page; int nr_pages; page = lru_to_page(src); prefetchw_prev_lru_page(page, src, flags); VM_BUG_ON(!PageLRU(page)); switch (__isolate_lru_page(page, mode)) { case 0: nr_pages = hpage_nr_pages(page); mem_cgroup_update_lru_size(lruvec, lru, -nr_pages); list_move(&page->lru, dst); nr_taken += nr_pages; break; case -EBUSY: /* else it is being freed elsewhere */ list_move(&page->lru, src); continue; default: BUG(); } } *nr_scanned = scan; trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan, nr_taken, mode, is_file_lru(lru)); return nr_taken; } /** * isolate_lru_page - tries to isolate a page from its LRU list * @page: page to isolate from its LRU list * * Isolates a @page from an LRU list, clears PageLRU and adjusts the * vmstat statistic corresponding to whatever LRU list the page was on. * * Returns 0 if the page was removed from an LRU list. * Returns -EBUSY if the page was not on an LRU list. * * The returned page will have PageLRU() cleared. 
If it was found on * the active list, it will have PageActive set. If it was found on * the unevictable list, it will have the PageUnevictable bit set. That flag * may need to be cleared by the caller before letting the page go. * * The vmstat statistic corresponding to the list on which the page was * found will be decremented. * * Restrictions: * (1) Must be called with an elevated refcount on the page. This is a * fundamentnal difference from isolate_lru_pages (which is called * without a stable reference). * (2) the lru_lock must not be held. * (3) interrupts must be enabled. */ int isolate_lru_page(struct page *page) { int ret = -EBUSY; VM_BUG_ON(!page_count(page)); if (PageLRU(page)) { struct zone *zone = page_zone(page); struct lruvec *lruvec; spin_lock_irq(&zone->lru_lock); lruvec = mem_cgroup_page_lruvec(page, zone); if (PageLRU(page)) { int lru = page_lru(page); get_page(page); ClearPageLRU(page); del_page_from_lru_list(page, lruvec, lru); ret = 0; } spin_unlock_irq(&zone->lru_lock); } return ret; } /* * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and * then get resheduled. When there are massive number of tasks doing page * allocation, such sleeping direct reclaimers may keep piling up on each CPU, * the LRU list will go small and be scanned faster than necessary, leading to * unnecessary swapping, thrashing and OOM. */ static int too_many_isolated(struct zone *zone, int file, struct scan_control *sc) { unsigned long inactive, isolated; if (current_is_kswapd()) return 0; if (!global_reclaim(sc)) return 0; if (file) { inactive = zone_page_state(zone, NR_INACTIVE_FILE); isolated = zone_page_state(zone, NR_ISOLATED_FILE); } else { inactive = zone_page_state(zone, NR_INACTIVE_ANON); isolated = zone_page_state(zone, NR_ISOLATED_ANON); } /* * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they * won't get blocked by normal direct-reclaimers, forming a circular * deadlock. */ if ((sc->gfp_mask & GFP_IOFS) == GFP_IOFS) inactive >>= 3; return isolated > inactive; } static noinline_for_stack void putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list) { struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; struct zone *zone = lruvec_zone(lruvec); LIST_HEAD(pages_to_free); /* * Put back any unfreeable pages. */ while (!list_empty(page_list)) { struct page *page = lru_to_page(page_list); int lru; VM_BUG_ON(PageLRU(page)); list_del(&page->lru); if (unlikely(!page_evictable(page))) { spin_unlock_irq(&zone->lru_lock); putback_lru_page(page); spin_lock_irq(&zone->lru_lock); continue; } lruvec = mem_cgroup_page_lruvec(page, zone); SetPageLRU(page); lru = page_lru(page); add_page_to_lru_list(page, lruvec, lru); if (is_active_lru(lru)) { int file = is_file_lru(lru); int numpages = hpage_nr_pages(page); reclaim_stat->recent_rotated[file] += numpages; } if (put_page_testzero(page)) { __ClearPageLRU(page); __ClearPageActive(page); del_page_from_lru_list(page, lruvec, lru); if (unlikely(PageCompound(page))) { spin_unlock_irq(&zone->lru_lock); (*get_compound_page_dtor(page))(page); spin_lock_irq(&zone->lru_lock); } else list_add(&page->lru, &pages_to_free); } } /* * To save our caller's stack, now use input list for pages to free. */ list_splice(&pages_to_free, page_list); } /* * shrink_inactive_list() is a helper for shrink_zone(). 
It returns the number * of reclaimed pages */ static noinline_for_stack unsigned long shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, struct scan_control *sc, enum lru_list lru) { LIST_HEAD(page_list); unsigned long nr_scanned; unsigned long nr_reclaimed = 0; unsigned long nr_taken; unsigned long nr_dirty = 0; unsigned long nr_writeback = 0; isolate_mode_t isolate_mode = 0; int file = is_file_lru(lru); struct zone *zone = lruvec_zone(lruvec); struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; while (unlikely(too_many_isolated(zone, file, sc))) { congestion_wait(BLK_RW_ASYNC, HZ/10); /* We are about to die and free our memory. Return now. */ if (fatal_signal_pending(current)) return SWAP_CLUSTER_MAX; } lru_add_drain(); if (!sc->may_unmap) isolate_mode |= ISOLATE_UNMAPPED; if (!sc->may_writepage) isolate_mode |= ISOLATE_CLEAN; spin_lock_irq(&zone->lru_lock); nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list, &nr_scanned, sc, isolate_mode, lru); __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken); __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken); if (global_reclaim(sc)) { zone->pages_scanned += nr_scanned; if (current_is_kswapd()) __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned); else __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned); } spin_unlock_irq(&zone->lru_lock); if (nr_taken == 0) return 0; nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP, &nr_dirty, &nr_writeback, false); spin_lock_irq(&zone->lru_lock); reclaim_stat->recent_scanned[file] += nr_taken; if (global_reclaim(sc)) { if (current_is_kswapd()) __count_zone_vm_events(PGSTEAL_KSWAPD, zone, nr_reclaimed); else __count_zone_vm_events(PGSTEAL_DIRECT, zone, nr_reclaimed); } putback_inactive_pages(lruvec, &page_list); __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken); spin_unlock_irq(&zone->lru_lock); free_hot_cold_page_list(&page_list, 1); /* * If reclaim is isolating dirty pages under writeback, it implies * that the long-lived page allocation rate is exceeding the page * laundering rate. Either the global limits are not being effective * at throttling processes due to the page distribution throughout * zones or there is heavy usage of a slow backing device. The * only option is to throttle from reclaim context which is not ideal * as there is no guarantee the dirtying process is throttled in the * same way balance_dirty_pages() manages. * * This scales the number of dirty pages that must be under writeback * before throttling depending on priority. It is a simple backoff * function that has the most effect in the range DEF_PRIORITY to * DEF_PRIORITY-2 which is the priority reclaim is considered to be * in trouble and reclaim is considered to be in trouble. * * DEF_PRIORITY 100% isolated pages must be PageWriteback to throttle * DEF_PRIORITY-1 50% must be PageWriteback * DEF_PRIORITY-2 25% must be PageWriteback, kswapd in trouble * ... * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any * isolated page is PageWriteback */ if (nr_writeback && nr_writeback >= (nr_taken >> (DEF_PRIORITY - sc->priority))) wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10); trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id, zone_idx(zone), nr_scanned, nr_reclaimed, sc->priority, trace_shrink_flags(file)); return nr_reclaimed; } /* * This moves pages from the active list to the inactive list. * * We move them the other way if the page is referenced by one or more * processes, from rmap. 
* * If the pages are mostly unmapped, the processing is fast and it is * appropriate to hold zone->lru_lock across the whole operation. But if * the pages are mapped, the processing is slow (page_referenced()) so we * should drop zone->lru_lock around each page. It's impossible to balance * this, so instead we remove the pages from the LRU while processing them. * It is safe to rely on PG_active against the non-LRU pages in here because * nobody will play with that bit on a non-LRU page. * * The downside is that we have to touch page->_count against each page. * But we had to alter page->flags anyway. */ static void move_active_pages_to_lru(struct lruvec *lruvec, struct list_head *list, struct list_head *pages_to_free, enum lru_list lru) { struct zone *zone = lruvec_zone(lruvec); unsigned long pgmoved = 0; struct page *page; int nr_pages; while (!list_empty(list)) { page = lru_to_page(list); lruvec = mem_cgroup_page_lruvec(page, zone); VM_BUG_ON(PageLRU(page)); SetPageLRU(page); nr_pages = hpage_nr_pages(page); mem_cgroup_update_lru_size(lruvec, lru, nr_pages); list_move(&page->lru, &lruvec->lists[lru]); pgmoved += nr_pages; if (put_page_testzero(page)) { __ClearPageLRU(page); __ClearPageActive(page); del_page_from_lru_list(page, lruvec, lru); if (unlikely(PageCompound(page))) { spin_unlock_irq(&zone->lru_lock); (*get_compound_page_dtor(page))(page); spin_lock_irq(&zone->lru_lock); } else list_add(&page->lru, pages_to_free); } } __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved); if (!is_active_lru(lru)) __count_vm_events(PGDEACTIVATE, pgmoved); } static void shrink_active_list(unsigned long nr_to_scan, struct lruvec *lruvec, struct scan_control *sc, enum lru_list lru) { unsigned long nr_taken; unsigned long nr_scanned; unsigned long vm_flags; LIST_HEAD(l_hold); /* The pages which were snipped off */ LIST_HEAD(l_active); LIST_HEAD(l_inactive); struct page *page; struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; unsigned long nr_rotated = 0; isolate_mode_t isolate_mode = 0; int file = is_file_lru(lru); struct zone *zone = lruvec_zone(lruvec); lru_add_drain(); if (!sc->may_unmap) isolate_mode |= ISOLATE_UNMAPPED; if (!sc->may_writepage) isolate_mode |= ISOLATE_CLEAN; spin_lock_irq(&zone->lru_lock); nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold, &nr_scanned, sc, isolate_mode, lru); if (global_reclaim(sc)) zone->pages_scanned += nr_scanned; reclaim_stat->recent_scanned[file] += nr_taken; __count_zone_vm_events(PGREFILL, zone, nr_scanned); __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken); __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken); spin_unlock_irq(&zone->lru_lock); while (!list_empty(&l_hold)) { cond_resched(); page = lru_to_page(&l_hold); list_del(&page->lru); if (unlikely(!page_evictable(page))) { putback_lru_page(page); continue; } if (unlikely(buffer_heads_over_limit)) { if (page_has_private(page) && trylock_page(page)) { if (page_has_private(page)) try_to_release_page(page, 0); unlock_page(page); } } if (page_referenced(page, 0, sc->target_mem_cgroup, &vm_flags)) { nr_rotated += hpage_nr_pages(page); /* * Identify referenced, file-backed active pages and * give them one more trip around the active list. So * that executable code get better chances to stay in * memory under moderate memory pressure. Anon pages * are not likely to be evicted by use-once streaming * IO, plus JVM can create lots of anon VM_EXEC pages, * so we ignore them here. 
*/ if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) { list_add(&page->lru, &l_active); continue; } } ClearPageActive(page); /* we are de-activating */ list_add(&page->lru, &l_inactive); } /* * Move pages back to the lru list. */ spin_lock_irq(&zone->lru_lock); /* * Count referenced pages from currently used mappings as rotated, * even though only some of them are actually re-activated. This * helps balance scan pressure between file and anonymous pages in * get_scan_ratio. */ reclaim_stat->recent_rotated[file] += nr_rotated; move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru); move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE); __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken); spin_unlock_irq(&zone->lru_lock); free_hot_cold_page_list(&l_hold, 1); } #ifdef CONFIG_SWAP static int inactive_anon_is_low_global(struct zone *zone) { unsigned long active, inactive; active = zone_page_state(zone, NR_ACTIVE_ANON); inactive = zone_page_state(zone, NR_INACTIVE_ANON); if (inactive * zone->inactive_ratio < active) return 1; return 0; } /** * inactive_anon_is_low - check if anonymous pages need to be deactivated * @lruvec: LRU vector to check * * Returns true if the zone does not have enough inactive anon pages, * meaning some active anon pages need to be deactivated. */ static int inactive_anon_is_low(struct lruvec *lruvec) { /* * If we don't have swap space, anonymous page deactivation * is pointless. */ if (!total_swap_pages) return 0; if (!mem_cgroup_disabled()) return mem_cgroup_inactive_anon_is_low(lruvec); return inactive_anon_is_low_global(lruvec_zone(lruvec)); } #else static inline int inactive_anon_is_low(struct lruvec *lruvec) { return 0; } #endif /** * inactive_file_is_low - check if file pages need to be deactivated * @lruvec: LRU vector to check * * When the system is doing streaming IO, memory pressure here * ensures that active file pages get deactivated, until more * than half of the file pages are on the inactive list. * * Once we get to that situation, protect the system's working * set from being evicted by disabling active file page aging. * * This uses a different ratio than the anonymous pages, because * the page cache uses a use-once replacement algorithm. */ static int inactive_file_is_low(struct lruvec *lruvec) { unsigned long inactive; unsigned long active; inactive = get_lru_size(lruvec, LRU_INACTIVE_FILE); active = get_lru_size(lruvec, LRU_ACTIVE_FILE); return active > inactive; } static int inactive_list_is_low(struct lruvec *lruvec, enum lru_list lru) { if (is_file_lru(lru)) return inactive_file_is_low(lruvec); else return inactive_anon_is_low(lruvec); } static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, struct lruvec *lruvec, struct scan_control *sc) { if (is_active_lru(lru)) { if (inactive_list_is_low(lruvec, lru)) shrink_active_list(nr_to_scan, lruvec, sc, lru); return 0; } return shrink_inactive_list(nr_to_scan, lruvec, sc, lru); } static int vmscan_swappiness(struct scan_control *sc) { if (global_reclaim(sc)) return vm_swappiness; return mem_cgroup_swappiness(sc->target_mem_cgroup); } enum scan_balance { SCAN_EQUAL, SCAN_FRACT, SCAN_ANON, SCAN_FILE, }; /* * Determine how aggressively the anon and file LRU lists should be * scanned. The relative value of each set of LRU lists is determined * by looking at the fraction of the pages scanned we did rotate back * onto the active list instead of evict. 
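 *
 * With the default vm_swappiness of 60, the baseline pressure below works
 * out to anon_prio = 60 vs file_prio = 140, i.e. file pages are scanned
 * roughly 2.3 times as aggressively as anon pages before the rotation
 * ratios are applied.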
* * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan */ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, unsigned long *nr) { struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; u64 fraction[2]; u64 denominator = 0; /* gcc */ struct zone *zone = lruvec_zone(lruvec); unsigned long anon_prio, file_prio; enum scan_balance scan_balance; unsigned long anon, file, free; bool force_scan = false; unsigned long ap, fp; enum lru_list lru; /* * If the zone or memcg is small, nr[l] can be 0. This * results in no scanning on this priority and a potential * priority drop. Global direct reclaim can go to the next * zone and tends to have no problems. Global kswapd is for * zone balancing and it needs to scan a minimum amount. When * reclaiming for a memcg, a priority drop can cause high * latencies, so it's better to scan a minimum amount there as * well. */ if (current_is_kswapd() && zone->all_unreclaimable) force_scan = true; if (!global_reclaim(sc)) force_scan = true; /* If we have no swap space, do not bother scanning anon pages. */ if (!sc->may_swap || (get_nr_swap_pages() <= 0)) { scan_balance = SCAN_FILE; goto out; } /* * Global reclaim will swap to prevent OOM even with no * swappiness, but memcg users want to use this knob to * disable swapping for individual groups completely when * using the memory controller's swap limit feature would be * too expensive. */ if (!global_reclaim(sc) && !vmscan_swappiness(sc)) { scan_balance = SCAN_FILE; goto out; } /* * Do not apply any pressure balancing cleverness when the * system is close to OOM, scan both anon and file equally * (unless the swappiness setting disagrees with swapping). */ if (!sc->priority && vmscan_swappiness(sc)) { scan_balance = SCAN_EQUAL; goto out; } anon = get_lru_size(lruvec, LRU_ACTIVE_ANON) + get_lru_size(lruvec, LRU_INACTIVE_ANON); file = get_lru_size(lruvec, LRU_ACTIVE_FILE) + get_lru_size(lruvec, LRU_INACTIVE_FILE); /* * If it's foreseeable that reclaiming the file cache won't be * enough to get the zone back into a desirable shape, we have * to swap. Better start now and leave the - probably heavily * thrashing - remaining file pages alone. */ if (global_reclaim(sc)) { free = zone_page_state(zone, NR_FREE_PAGES); if (unlikely(file + free <= high_wmark_pages(zone))) { scan_balance = SCAN_ANON; goto out; } } /* * There is enough inactive page cache, do not reclaim * anything from the anonymous working set right now. */ if (!inactive_file_is_low(lruvec)) { scan_balance = SCAN_FILE; goto out; } scan_balance = SCAN_FRACT; /* * With swappiness at 100, anonymous and file have the same priority. * This scanning priority is essentially the inverse of IO cost. */ anon_prio = vmscan_swappiness(sc); file_prio = 200 - anon_prio; /* * OK, so we have swap space and a fair amount of page cache * pages. We use the recently rotated / recently scanned * ratios to determine how valuable each cache is. * * Because workloads change over time (and to avoid overflow) * we keep these statistics as a floating average, which ends * up weighing recent references more than old ones. 
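	 *
	 * Concretely: once recent_scanned[type] exceeds a quarter of that
	 * type's LRU size, both recent_scanned[type] and
	 * recent_rotated[type] are halved, so old references decay
	 * exponentially across scan cycles.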
* * anon in [0], file in [1] */ spin_lock_irq(&zone->lru_lock); if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) { reclaim_stat->recent_scanned[0] /= 2; reclaim_stat->recent_rotated[0] /= 2; } if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) { reclaim_stat->recent_scanned[1] /= 2; reclaim_stat->recent_rotated[1] /= 2; } /* * The amount of pressure on anon vs file pages is inversely * proportional to the fraction of recently scanned pages on * each list that were recently referenced and in active use. */ ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1); ap /= reclaim_stat->recent_rotated[0] + 1; fp = file_prio * (reclaim_stat->recent_scanned[1] + 1); fp /= reclaim_stat->recent_rotated[1] + 1; spin_unlock_irq(&zone->lru_lock); fraction[0] = ap; fraction[1] = fp; denominator = ap + fp + 1; out: for_each_evictable_lru(lru) { int file = is_file_lru(lru); unsigned long size; unsigned long scan; size = get_lru_size(lruvec, lru); scan = size >> sc->priority; if (!scan && force_scan) scan = min(size, SWAP_CLUSTER_MAX); switch (scan_balance) { case SCAN_EQUAL: /* Scan lists relative to size */ break; case SCAN_FRACT: /* * Scan types proportional to swappiness and * their relative recent reclaim efficiency. */ scan = div64_u64(scan * fraction[file], denominator); break; case SCAN_FILE: case SCAN_ANON: /* Scan one type exclusively */ if ((scan_balance == SCAN_FILE) != file) scan = 0; break; default: /* Look ma, no brain */ BUG(); } nr[lru] = scan; } } /* * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. */ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) { unsigned long nr[NR_LRU_LISTS]; unsigned long nr_to_scan; enum lru_list lru; unsigned long nr_reclaimed = 0; unsigned long nr_to_reclaim = sc->nr_to_reclaim; struct blk_plug plug; get_scan_count(lruvec, sc, nr); blk_start_plug(&plug); while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || nr[LRU_INACTIVE_FILE]) { for_each_evictable_lru(lru) { if (nr[lru]) { nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX); nr[lru] -= nr_to_scan; nr_reclaimed += shrink_list(lru, nr_to_scan, lruvec, sc); } } /* * On large memory systems, scan >> priority can become * really large. This is fine for the starting priority; * we want to put equal scanning pressure on each zone. * However, if the VM has a harder time of freeing pages, * with multiple processes reclaiming pages, the total * freeing target can get unreasonably large. */ if (nr_reclaimed >= nr_to_reclaim && sc->priority < DEF_PRIORITY) break; } blk_finish_plug(&plug); sc->nr_reclaimed += nr_reclaimed; /* * Even if we did not try to evict anon pages at all, we want to * rebalance the anon lru active/inactive ratio. */ if (inactive_anon_is_low(lruvec)) shrink_active_list(SWAP_CLUSTER_MAX, lruvec, sc, LRU_ACTIVE_ANON); throttle_vm_writeout(sc->gfp_mask); } /* Use reclaim/compaction for costly allocs or under memory pressure */ static bool in_reclaim_compaction(struct scan_control *sc) { if (IS_ENABLED(CONFIG_COMPACTION) && sc->order && (sc->order > PAGE_ALLOC_COSTLY_ORDER || sc->priority < DEF_PRIORITY - 2)) return true; return false; } /* * Reclaim/compaction is used for high-order allocation requests. It reclaims * order-0 pages before compacting the zone. should_continue_reclaim() returns * true if more pages should be reclaimed such that when the page allocator * calls try_to_compact_zone() that it will have enough free pages to succeed. * It will give up earlier than that if there is difficulty reclaiming pages. 
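 *
 * As a worked example, for a THP-sized request (order-9) the
 * pages_for_compaction target below is 2UL << 9 = 1024 pages, i.e. 4MB
 * worth of order-0 pages on a system with 4K pages.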
*/ static inline bool should_continue_reclaim(struct zone *zone, unsigned long nr_reclaimed, unsigned long nr_scanned, struct scan_control *sc) { unsigned long pages_for_compaction; unsigned long inactive_lru_pages; /* If not in reclaim/compaction mode, stop */ if (!in_reclaim_compaction(sc)) return false; /* Consider stopping depending on scan and reclaim activity */ if (sc->gfp_mask & __GFP_REPEAT) { /* * For __GFP_REPEAT allocations, stop reclaiming if the * full LRU list has been scanned and we are still failing * to reclaim pages. This full LRU scan is potentially * expensive but a __GFP_REPEAT caller really wants to succeed */ if (!nr_reclaimed && !nr_scanned) return false; } else { /* * For non-__GFP_REPEAT allocations which can presumably * fail without consequence, stop if we failed to reclaim * any pages from the last SWAP_CLUSTER_MAX number of * pages that were scanned. This will return to the * caller faster at the risk reclaim/compaction and * the resulting allocation attempt fails */ if (!nr_reclaimed) return false; } /* * If we have not reclaimed enough pages for compaction and the * inactive lists are large enough, continue reclaiming */ pages_for_compaction = (2UL << sc->order); inactive_lru_pages = zone_page_state(zone, NR_INACTIVE_FILE); if (get_nr_swap_pages() > 0) inactive_lru_pages += zone_page_state(zone, NR_INACTIVE_ANON); if (sc->nr_reclaimed < pages_for_compaction && inactive_lru_pages > pages_for_compaction) return true; /* If compaction would go ahead or the allocation would succeed, stop */ switch (compaction_suitable(zone, sc->order)) { case COMPACT_PARTIAL: case COMPACT_CONTINUE: return false; default: return true; } } static void shrink_zone(struct zone *zone, struct scan_control *sc) { unsigned long nr_reclaimed, nr_scanned; do { struct mem_cgroup *root = sc->target_mem_cgroup; struct mem_cgroup_reclaim_cookie reclaim = { .zone = zone, .priority = sc->priority, }; struct mem_cgroup *memcg; nr_reclaimed = sc->nr_reclaimed; nr_scanned = sc->nr_scanned; memcg = mem_cgroup_iter(root, NULL, &reclaim); do { struct lruvec *lruvec; lruvec = mem_cgroup_zone_lruvec(zone, memcg); shrink_lruvec(lruvec, sc); /* * Direct reclaim and kswapd have to scan all memory * cgroups to fulfill the overall scan target for the * zone. * * Limit reclaim, on the other hand, only cares about * nr_to_reclaim pages to be reclaimed and it will * retry with decreasing priority if one round over the * whole hierarchy is not sufficient. */ if (!global_reclaim(sc) && sc->nr_reclaimed >= sc->nr_to_reclaim) { mem_cgroup_iter_break(root, memcg); break; } memcg = mem_cgroup_iter(root, memcg, &reclaim); } while (memcg); vmpressure(sc->gfp_mask, sc->target_mem_cgroup, sc->nr_scanned - nr_scanned, sc->nr_reclaimed - nr_reclaimed); } while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed, sc->nr_scanned - nr_scanned, sc)); } /* Returns true if compaction should go ahead for a high-order request */ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc) { unsigned long balance_gap, watermark; bool watermark_ok; /* Do not consider compaction for orders reclaim is meant to satisfy */ if (sc->order <= PAGE_ALLOC_COSTLY_ORDER) return false; /* * Compaction takes time to run and there are potentially other * callers using the pages just freed. 
Continue reclaiming until * there is a buffer of free pages available to give compaction * a reasonable chance of completing and allocating the page */ balance_gap = min(low_wmark_pages(zone), (zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) / KSWAPD_ZONE_BALANCE_GAP_RATIO); watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order); watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0); /* * If compaction is deferred, reclaim up to a point where * compaction will have a chance of success when re-enabled */ if (compaction_deferred(zone, sc->order)) return watermark_ok; /* If compaction is not ready to start, keep reclaiming */ if (!compaction_suitable(zone, sc->order)) return false; return watermark_ok; } /* * This is the direct reclaim path, for page-allocating processes. We only * try to reclaim pages from zones which will satisfy the caller's allocation * request. * * We reclaim from a zone even if that zone is over high_wmark_pages(zone). * Because: * a) The caller may be trying to free *extra* pages to satisfy a higher-order * allocation or * b) The target zone may be at high_wmark_pages(zone) but the lower zones * must go *over* high_wmark_pages(zone) to satisfy the `incremental min' * zone defense algorithm. * * If a zone is deemed to be full of pinned pages then just give it a light * scan then give up on it. * * This function returns true if a zone is being reclaimed for a costly * high-order allocation and compaction is ready to begin. This indicates to * the caller that it should consider retrying the allocation instead of * further reclaim. */ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc) { struct zoneref *z; struct zone *zone; unsigned long nr_soft_reclaimed; unsigned long nr_soft_scanned; bool aborted_reclaim = false; /* * If the number of buffer_heads in the machine exceeds the maximum * allowed level, force direct reclaim to scan the highmem zone as * highmem pages could be pinning lowmem pages storing buffer_heads */ if (buffer_heads_over_limit) sc->gfp_mask |= __GFP_HIGHMEM; for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(sc->gfp_mask), sc->nodemask) { if (!populated_zone(zone)) continue; /* * Take care memory controller reclaiming has small influence * to global LRU. */ if (global_reclaim(sc)) { if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) continue; if (zone->all_unreclaimable && sc->priority != DEF_PRIORITY) continue; /* Let kswapd poll it */ if (IS_ENABLED(CONFIG_COMPACTION)) { /* * If we already have plenty of memory free for * compaction in this zone, don't free any more. * Even though compaction is invoked for any * non-zero order, only frequent costly order * reclamation is disruptive enough to become a * noticeable problem, like transparent huge * page allocations. */ if (compaction_ready(zone, sc)) { aborted_reclaim = true; continue; } } /* * This steals pages from memory cgroups over softlimit * and returns the number of reclaimed pages and * scanned pages. This works for global memory pressure * and balancing, not for a memcg's limit. 
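			 * Both counts are added to sc->nr_reclaimed and
			 * sc->nr_scanned just below, so soft limit progress
			 * also counts toward the caller's reclaim target.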
*/ nr_soft_scanned = 0; nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, sc->order, sc->gfp_mask, &nr_soft_scanned); sc->nr_reclaimed += nr_soft_reclaimed; sc->nr_scanned += nr_soft_scanned; /* need some check for avoid more shrink_zone() */ } shrink_zone(zone, sc); } return aborted_reclaim; } static unsigned long zone_reclaimable_pages(struct zone *zone) { int nr; nr = zone_page_state(zone, NR_ACTIVE_FILE) + zone_page_state(zone, NR_INACTIVE_FILE); if (get_nr_swap_pages() > 0) nr += zone_page_state(zone, NR_ACTIVE_ANON) + zone_page_state(zone, NR_INACTIVE_ANON); return nr; } static bool zone_reclaimable(struct zone *zone) { return zone->pages_scanned < zone_reclaimable_pages(zone) * 6; } /* All zones in zonelist are unreclaimable? */ static bool all_unreclaimable(struct zonelist *zonelist, struct scan_control *sc) { struct zoneref *z; struct zone *zone; for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(sc->gfp_mask), sc->nodemask) { if (!populated_zone(zone)) continue; if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) continue; if (!zone->all_unreclaimable) return false; } return true; } /* * This is the main entry point to direct page reclaim. * * If a full scan of the inactive list fails to free enough memory then we * are "out of memory" and something needs to be killed. * * If the caller is !__GFP_FS then the probability of a failure is reasonably * high - the zone may be full of dirty or under-writeback pages, which this * caller can't do much about. We kick the writeback threads and take explicit * naps in the hope that some of these pages can be written. But if the * allocating task holds filesystem locks which prevent writeout this might not * work, and the allocation attempt will fail. * * returns: 0, if no pages reclaimed * else, the number of pages reclaimed */ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, struct scan_control *sc, struct shrink_control *shrink) { unsigned long total_scanned = 0; struct reclaim_state *reclaim_state = current->reclaim_state; struct zoneref *z; struct zone *zone; unsigned long writeback_threshold; bool aborted_reclaim; delayacct_freepages_start(); if (global_reclaim(sc)) count_vm_event(ALLOCSTALL); do { vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup, sc->priority); sc->nr_scanned = 0; aborted_reclaim = shrink_zones(zonelist, sc); /* * Don't shrink slabs when reclaiming memory from * over limit cgroups */ if (global_reclaim(sc)) { unsigned long lru_pages = 0; for_each_zone_zonelist(zone, z, zonelist, gfp_zone(sc->gfp_mask)) { if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) continue; lru_pages += zone_reclaimable_pages(zone); } shrink_slab(shrink, sc->nr_scanned, lru_pages); if (reclaim_state) { sc->nr_reclaimed += reclaim_state->reclaimed_slab; reclaim_state->reclaimed_slab = 0; } } total_scanned += sc->nr_scanned; if (sc->nr_reclaimed >= sc->nr_to_reclaim) goto out; /* * If we're getting trouble reclaiming, start doing * writepage even in laptop mode. */ if (sc->priority < DEF_PRIORITY - 2) sc->may_writepage = 1; /* * Try to write back as many pages as we just scanned. This * tends to cause slow streaming writers to write data to the * disk smoothly, at the dirtying rate, which is nice. But * that's undesirable in laptop mode, where we *want* lumpy * writeout. So in laptop mode, write out the whole world. */ writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2; if (total_scanned > writeback_threshold) { wakeup_flusher_threads(laptop_mode ? 
						0 : total_scanned,
					       WB_REASON_TRY_TO_FREE_PAGES);
			sc->may_writepage = 1;
		}

		/* Take a nap, wait for some writeback to complete */
		if (!sc->hibernation_mode && sc->nr_scanned &&
		    sc->priority < DEF_PRIORITY - 2) {
			struct zone *preferred_zone;

			first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
						&cpuset_current_mems_allowed,
						&preferred_zone);
			wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
		}
	} while (--sc->priority >= 0);

out:
	delayacct_freepages_end();

	if (sc->nr_reclaimed)
		return sc->nr_reclaimed;

	/*
	 * As hibernation is going on, kswapd is frozen so that it can't mark
	 * the zone into all_unreclaimable. Thus bypassing the
	 * all_unreclaimable check.
	 */
	if (oom_killer_disabled)
		return 0;

	/* Aborted reclaim to try compaction? don't OOM, then */
	if (aborted_reclaim)
		return 1;

	/* top priority shrink_zones still had more to do? don't OOM, then */
	if (global_reclaim(sc) && !all_unreclaimable(zonelist, sc))
		return 1;

	return 0;
}

static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
{
	struct zone *zone;
	unsigned long pfmemalloc_reserve = 0;
	unsigned long free_pages = 0;
	int i;
	bool wmark_ok;

	for (i = 0; i <= ZONE_NORMAL; i++) {
		zone = &pgdat->node_zones[i];
		if (!populated_zone(zone))
			continue;

		pfmemalloc_reserve += min_wmark_pages(zone);
		free_pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	/* If there are no reserves (unexpected config) then do not throttle */
	if (!pfmemalloc_reserve)
		return true;

	wmark_ok = free_pages > pfmemalloc_reserve / 2;

	/* kswapd must be awake if processes are being throttled */
	if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
		pgdat->classzone_idx = min(pgdat->classzone_idx,
						(enum zone_type)ZONE_NORMAL);
		wake_up_interruptible(&pgdat->kswapd_wait);
	}

	return wmark_ok;
}

/*
 * Throttle direct reclaimers if backing storage is backed by the network
 * and the PFMEMALLOC reserve for the preferred node is getting dangerously
 * depleted. kswapd will continue to make progress and wake the processes
 * when the low watermark is reached.
 *
 * Returns true if a fatal signal was delivered during throttling. If this
 * happens, the page allocator should not consider triggering the OOM killer.
 */
static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
					nodemask_t *nodemask)
{
	struct zoneref *z;
	struct zone *zone;
	pg_data_t *pgdat = NULL;

	/*
	 * Kernel threads should not be throttled as they may be indirectly
	 * responsible for cleaning pages necessary for reclaim to make forward
	 * progress. kjournald for example may enter direct reclaim while
	 * committing a transaction where throttling it could force other
	 * processes to block on log_wait_commit().
	 */
	if (current->flags & PF_KTHREAD)
		goto out;

	/*
	 * If a fatal signal is pending, this process should not throttle.
	 * It should return quickly so it can exit and free its memory
	 */
	if (fatal_signal_pending(current))
		goto out;

	/*
	 * Check if the pfmemalloc reserves are ok by finding the first node
	 * with a usable ZONE_NORMAL or lower zone. The expectation is that
	 * GFP_KERNEL will be required for allocating network buffers when
	 * swapping over the network so ZONE_HIGHMEM is unusable.
	 *
	 * Throttling is based on the first usable node and throttled processes
	 * wait on a queue until kswapd makes progress and wakes them. There
	 * is an affinity then between processes waking up and where reclaim
	 * progress has been made assuming the process wakes on the same node.
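	 * ("Progress" here means pfmemalloc_watermark_ok() above: free pages
	 * on the node's lowmem zones must climb back over half of their
	 * summed min watermarks before throttled tasks are woken.)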
* More importantly, processes running on remote nodes will not compete * for remote pfmemalloc reserves and processes on different nodes * should make reasonable progress. */ for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_mask, nodemask) { if (zone_idx(zone) > ZONE_NORMAL) continue; /* Throttle based on the first usable node */ pgdat = zone->zone_pgdat; if (pfmemalloc_watermark_ok(pgdat)) goto out; break; } /* If no zone was usable by the allocation flags then do not throttle */ if (!pgdat) goto out; /* Account for the throttling */ count_vm_event(PGSCAN_DIRECT_THROTTLE); /* * If the caller cannot enter the filesystem, it's possible that it * is due to the caller holding an FS lock or performing a journal * transaction in the case of a filesystem like ext[3|4]. In this case, * it is not safe to block on pfmemalloc_wait as kswapd could be * blocked waiting on the same lock. Instead, throttle for up to a * second before continuing. */ if (!(gfp_mask & __GFP_FS)) { wait_event_interruptible_timeout(pgdat->pfmemalloc_wait, pfmemalloc_watermark_ok(pgdat), HZ); goto check_pending; } /* Throttle until kswapd wakes the process */ wait_event_killable(zone->zone_pgdat->pfmemalloc_wait, pfmemalloc_watermark_ok(pgdat)); check_pending: if (fatal_signal_pending(current)) return true; out: return false; } unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask) { unsigned long nr_reclaimed; struct scan_control sc = { .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)), .may_writepage = !laptop_mode, .nr_to_reclaim = SWAP_CLUSTER_MAX, .may_unmap = 1, .may_swap = 1, .order = order, .priority = DEF_PRIORITY, .target_mem_cgroup = NULL, .nodemask = nodemask, }; struct shrink_control shrink = { .gfp_mask = sc.gfp_mask, }; /* * Do not enter reclaim if fatal signal was delivered while throttled. * 1 is returned so that the page allocator does not OOM kill at this * point. */ if (throttle_direct_reclaim(gfp_mask, zonelist, nodemask)) return 1; trace_mm_vmscan_direct_reclaim_begin(order, sc.may_writepage, gfp_mask); nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); trace_mm_vmscan_direct_reclaim_end(nr_reclaimed); return nr_reclaimed; } #ifdef CONFIG_MEMCG unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg, gfp_t gfp_mask, bool noswap, struct zone *zone, unsigned long *nr_scanned) { struct scan_control sc = { .nr_scanned = 0, .nr_to_reclaim = SWAP_CLUSTER_MAX, .may_writepage = !laptop_mode, .may_unmap = 1, .may_swap = !noswap, .order = 0, .priority = 0, .target_mem_cgroup = memcg, }; struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg); sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order, sc.may_writepage, sc.gfp_mask); /* * NOTE: Although we can get the priority field, using it * here is not a good idea, since it limits the pages we can scan. * if we don't reclaim here, the shrink_zone from balance_pgdat * will pick up pages from other mem cgroup's as well. We hack * the priority and make it zero. 
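	 *
	 * With priority 0, get_scan_count() targets the full LRU size
	 * (size >> 0), so a single invocation may scan the whole lruvec.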
 */
	shrink_lruvec(lruvec, &sc);

	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);

	*nr_scanned = sc.nr_scanned;
	return sc.nr_reclaimed;
}

unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
					   gfp_t gfp_mask,
					   bool noswap)
{
	struct zonelist *zonelist;
	unsigned long nr_reclaimed;
	int nid;
	struct scan_control sc = {
		.may_writepage = !laptop_mode,
		.may_unmap = 1,
		.may_swap = !noswap,
		.nr_to_reclaim = SWAP_CLUSTER_MAX,
		.order = 0,
		.priority = DEF_PRIORITY,
		.target_mem_cgroup = memcg,
		.nodemask = NULL, /* we don't care about placement */
		.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
				(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
	};
	struct shrink_control shrink = {
		.gfp_mask = sc.gfp_mask,
	};

	/*
	 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
	 * take care of from where we get pages. So the node where we start the
	 * scan does not need to be the current node.
	 */
	nid = mem_cgroup_select_victim_node(memcg);

	zonelist = NODE_DATA(nid)->node_zonelists;

	trace_mm_vmscan_memcg_reclaim_begin(0,
					    sc.may_writepage,
					    sc.gfp_mask);

	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);

	trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);

	return nr_reclaimed;
}
#endif

static void age_active_anon(struct zone *zone, struct scan_control *sc)
{
	struct mem_cgroup *memcg;

	if (!total_swap_pages)
		return;

	memcg = mem_cgroup_iter(NULL, NULL, NULL);
	do {
		struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);

		if (inactive_anon_is_low(lruvec))
			shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
					   sc, LRU_ACTIVE_ANON);

		memcg = mem_cgroup_iter(NULL, memcg, NULL);
	} while (memcg);
}

static bool zone_balanced(struct zone *zone, int order,
			  unsigned long balance_gap, int classzone_idx)
{
	if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
				    balance_gap, classzone_idx, 0))
		return false;

	if (IS_ENABLED(CONFIG_COMPACTION) && order &&
	    !compaction_suitable(zone, order))
		return false;

	return true;
}

/*
 * pgdat_balanced() is used when checking if a node is balanced.
 *
 * For order-0, all zones must be balanced!
 *
 * For high-order allocations only zones that meet watermarks and are in a
 * zone allowed by the callers classzone_idx are added to balanced_pages. The
 * total of balanced pages must be at least 25% of the zones allowed by
 * classzone_idx for the node to be considered balanced. Forcing all zones to
 * be balanced for high orders can cause excessive reclaim when there are
 * imbalanced zones.
 * The choice of 25% is due to
 *   o a 16M DMA zone that is balanced will not balance a zone on any
 *     reasonable sized machine
 *   o On all other machines, the top zone must be at least a reasonable
 *     percentage of the middle zones. For example, on 32-bit x86, highmem
 *     would need to be at least 256M for it to balance a whole node.
 *     Similarly, on x86-64 the Normal zone would need to be at least 1G
 *     to balance a node on its own. These seemed like reasonable ratios.
 */
static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
{
	unsigned long managed_pages = 0;
	unsigned long balanced_pages = 0;
	int i;

	/* Check the watermark levels */
	for (i = 0; i <= classzone_idx; i++) {
		struct zone *zone = pgdat->node_zones + i;

		if (!populated_zone(zone))
			continue;

		managed_pages += zone->managed_pages;

		/*
		 * A special case here:
		 *
		 * balance_pgdat() skips over all_unreclaimable after
		 * DEF_PRIORITY. Effectively, it considers them balanced so
		 * they must be considered balanced here as well!
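		 *
		 * (For the 25% rule applied after this loop: a node with,
		 * say, 4G of managed pages counts as balanced for order > 0
		 * once at least 1G -- managed_pages >> 2 -- sits in balanced
		 * zones.)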
*/ if (zone->all_unreclaimable) { balanced_pages += zone->managed_pages; continue; } if (zone_balanced(zone, order, 0, i)) balanced_pages += zone->managed_pages; else if (!order) return false; } if (order) return balanced_pages >= (managed_pages >> 2); else return true; } /* * Prepare kswapd for sleeping. This verifies that there are no processes * waiting in throttle_direct_reclaim() and that watermarks have been met. * * Returns true if kswapd is ready to sleep */ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining, int classzone_idx) { /* If a direct reclaimer woke kswapd within HZ/10, it's premature */ if (remaining) return false; /* * The throttled processes are normally woken up in balance_pgdat() as * soon as pfmemalloc_watermark_ok() is true. But there is a potential * race between when kswapd checks the watermarks and a process gets * throttled. There is also a potential race if processes get * throttled, kswapd wakes, a large process exits thereby balancing the * zones, which causes kswapd to exit balance_pgdat() before reaching * the wake up checks. If kswapd is going to sleep, no process should * be sleeping on pfmemalloc_wait, so wake them now if necessary. If * the wake up is premature, processes will wake kswapd and get * throttled again. The difference from wake ups in balance_pgdat() is * that here we are under prepare_to_wait(). */ if (waitqueue_active(&pgdat->pfmemalloc_wait)) wake_up_all(&pgdat->pfmemalloc_wait); return pgdat_balanced(pgdat, order, classzone_idx); } /* * For kswapd, balance_pgdat() will work across all this node's zones until * they are all at high_wmark_pages(zone). * * Returns the final order kswapd was reclaiming at * * There is special handling here for zones which are full of pinned pages. * This can happen if the pages are all mlocked, or if they are all used by * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb. * What we do is to detect the case where all pages in the zone have been * scanned twice and there has been zero successful reclaim. Mark the zone as * dead and from now on, only perform a short scan. Basically we're polling * the zone for when the problem goes away. * * kswapd scans the zones in the highmem->normal->dma direction. It skips * zones which have free_pages > high_wmark_pages(zone), but once a zone is * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the * lower zones regardless of the number of free pages in the lower zones. This * interoperates with the page allocator fallback scheme to ensure that aging * of pages is balanced across the zones. */ static unsigned long balance_pgdat(pg_data_t *pgdat, int order, int *classzone_idx) { bool pgdat_is_balanced = false; int i; int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ struct reclaim_state *reclaim_state = current->reclaim_state; unsigned long nr_soft_reclaimed; unsigned long nr_soft_scanned; struct scan_control sc = { .gfp_mask = GFP_KERNEL, .may_unmap = 1, .may_swap = 1, /* * kswapd doesn't want to be bailed out while reclaim. because * we want to put equal scanning pressure on each zone. 
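		 *
		 * ULONG_MAX effectively disables the early bailout in
		 * shrink_lruvec(), which otherwise stops once nr_to_reclaim
		 * pages have been reclaimed at lower priorities.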
*/ .nr_to_reclaim = ULONG_MAX, .order = order, .target_mem_cgroup = NULL, }; struct shrink_control shrink = { .gfp_mask = sc.gfp_mask, }; loop_again: sc.priority = DEF_PRIORITY; sc.nr_reclaimed = 0; sc.may_writepage = !laptop_mode; count_vm_event(PAGEOUTRUN); do { unsigned long lru_pages = 0; /* * Scan in the highmem->dma direction for the highest * zone which needs scanning */ for (i = pgdat->nr_zones - 1; i >= 0; i--) { struct zone *zone = pgdat->node_zones + i; if (!populated_zone(zone)) continue; if (zone->all_unreclaimable && sc.priority != DEF_PRIORITY) continue; /* * Do some background aging of the anon list, to give * pages a chance to be referenced before reclaiming. */ age_active_anon(zone, &sc); /* * If the number of buffer_heads in the machine * exceeds the maximum allowed level and this node * has a highmem zone, force kswapd to reclaim from * it to relieve lowmem pressure. */ if (buffer_heads_over_limit && is_highmem_idx(i)) { end_zone = i; break; } if (!zone_balanced(zone, order, 0, 0)) { end_zone = i; break; } else { /* If balanced, clear the congested flag */ zone_clear_flag(zone, ZONE_CONGESTED); } } if (i < 0) { pgdat_is_balanced = true; goto out; } for (i = 0; i <= end_zone; i++) { struct zone *zone = pgdat->node_zones + i; lru_pages += zone_reclaimable_pages(zone); } /* * Now scan the zone in the dma->highmem direction, stopping * at the last zone which needs scanning. * * We do this because the page allocator works in the opposite * direction. This prevents the page allocator from allocating * pages behind kswapd's direction of progress, which would * cause too much scanning of the lower zones. */ for (i = 0; i <= end_zone; i++) { struct zone *zone = pgdat->node_zones + i; int nr_slab, testorder; unsigned long balance_gap; if (!populated_zone(zone)) continue; if (zone->all_unreclaimable && sc.priority != DEF_PRIORITY) continue; sc.nr_scanned = 0; nr_soft_scanned = 0; /* * Call soft limit reclaim before calling shrink_zone. */ nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask, &nr_soft_scanned); sc.nr_reclaimed += nr_soft_reclaimed; /* * We put equal pressure on every zone, unless * one zone has way too many pages free * already. The "too many pages" is defined * as the high wmark plus a "gap" where the * gap is either the low watermark or 1% * of the zone, whichever is smaller. */ balance_gap = min(low_wmark_pages(zone), (zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) / KSWAPD_ZONE_BALANCE_GAP_RATIO); /* * Kswapd reclaims only single pages with compaction * enabled. Trying too hard to reclaim until contiguous * free pages have become available can hurt performance * by evicting too much useful data from memory. * Do not reclaim more than needed for compaction. */ testorder = order; if (IS_ENABLED(CONFIG_COMPACTION) && order && compaction_suitable(zone, order) != COMPACT_SKIPPED) testorder = 0; if ((buffer_heads_over_limit && is_highmem_idx(i)) || !zone_balanced(zone, testorder, balance_gap, end_zone)) { shrink_zone(zone, &sc); reclaim_state->reclaimed_slab = 0; nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages); sc.nr_reclaimed += reclaim_state->reclaimed_slab; if (nr_slab == 0 && !zone_reclaimable(zone)) zone->all_unreclaimable = 1; } /* * If we're getting trouble reclaiming, start doing * writepage even in laptop mode. 
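			 * With DEF_PRIORITY at 12, this kicks in once three
			 * passes (priority 12..10) have failed to balance
			 * the node.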
*/ if (sc.priority < DEF_PRIORITY - 2) sc.may_writepage = 1; if (zone->all_unreclaimable) { if (end_zone && end_zone == i) end_zone--; continue; } if (zone_balanced(zone, testorder, 0, end_zone)) /* * If a zone reaches its high watermark, * consider it to be no longer congested. It's * possible there are dirty pages backed by * congested BDIs but as pressure is relieved, * speculatively avoid congestion waits */ zone_clear_flag(zone, ZONE_CONGESTED); } /* * If the low watermark is met there is no need for processes * to be throttled on pfmemalloc_wait as they should not be * able to safely make forward progress. Wake them */ if (waitqueue_active(&pgdat->pfmemalloc_wait) && pfmemalloc_watermark_ok(pgdat)) wake_up(&pgdat->pfmemalloc_wait); if (pgdat_balanced(pgdat, order, *classzone_idx)) { pgdat_is_balanced = true; break; /* kswapd: all done */ } /* * We do this so kswapd doesn't build up large priorities for * example when it is freeing in parallel with allocators. It * matches the direct reclaim path behaviour in terms of impact * on zone->*_priority. */ if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX) break; } while (--sc.priority >= 0); out: if (!pgdat_is_balanced) { cond_resched(); try_to_freeze(); /* * Fragmentation may mean that the system cannot be * rebalanced for high-order allocations in all zones. * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX, * it means the zones have been fully scanned and are still * not balanced. For high-order allocations, there is * little point trying all over again as kswapd may * infinite loop. * * Instead, recheck all watermarks at order-0 as they * are the most important. If watermarks are ok, kswapd will go * back to sleep. High-order users can still perform direct * reclaim if they wish. */ if (sc.nr_reclaimed < SWAP_CLUSTER_MAX) order = sc.order = 0; goto loop_again; } /* * If kswapd was reclaiming at a higher order, it has the option of * sleeping without all zones being balanced. Before it does, it must * ensure that the watermarks for order-0 on *all* zones are met and * that the congestion flags are cleared. The congestion flag must * be cleared as kswapd is the only mechanism that clears the flag * and it is potentially going to sleep here. */ if (order) { int zones_need_compaction = 1; for (i = 0; i <= end_zone; i++) { struct zone *zone = pgdat->node_zones + i; if (!populated_zone(zone)) continue; /* Check if the memory needs to be defragmented. */ if (zone_watermark_ok(zone, order, low_wmark_pages(zone), *classzone_idx, 0)) zones_need_compaction = 0; } if (zones_need_compaction) compact_pgdat(pgdat, order); } /* * Return the order we were reclaiming at so prepare_kswapd_sleep() * makes a decision on the order we were last reclaiming at. However, * if another caller entered the allocator slow path while kswapd * was awake, order will remain at the higher level */ *classzone_idx = end_zone; return order; } static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx) { long remaining = 0; DEFINE_WAIT(wait); if (freezing(current) || kthread_should_stop()) return; prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); /* Try to sleep for a short interval */ if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) { remaining = schedule_timeout(HZ/10); finish_wait(&pgdat->kswapd_wait, &wait); prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); } /* * After a short sleep, check if it was a premature sleep. If not, then * go fully to sleep until explicitly woken up. 
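	 * (remaining is non-zero only if the HZ/10 nap above was cut short
	 * by a wakeup, which prepare_kswapd_sleep() treats as a sign the
	 * sleep was premature.)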
*/ if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) { trace_mm_vmscan_kswapd_sleep(pgdat->node_id); /* * vmstat counters are not perfectly accurate and the estimated * value for counters such as NR_FREE_PAGES can deviate from the * true value by nr_online_cpus * threshold. To avoid the zone * watermarks being breached while under pressure, we reduce the * per-cpu vmstat threshold while kswapd is awake and restore * them before going back to sleep. */ set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); /* * Compaction records what page blocks it recently failed to * isolate pages from and skips them in the future scanning. * When kswapd is going to sleep, it is reasonable to assume * that pages and compaction may succeed so reset the cache. */ reset_isolation_suitable(pgdat); if (!kthread_should_stop()) schedule(); set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold); } else { if (remaining) count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY); else count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY); } finish_wait(&pgdat->kswapd_wait, &wait); } /* * The background pageout daemon, started as a kernel thread * from the init process. * * This basically trickles out pages so that we have _some_ * free memory available even if there is no other activity * that frees anything up. This is needed for things like routing * etc, where we otherwise might have all activity going on in * asynchronous contexts that cannot page things out. * * If there are applications that are active memory-allocators * (most normal use), this basically shouldn't matter. */ static int kswapd(void *p) { unsigned long order, new_order; unsigned balanced_order; int classzone_idx, new_classzone_idx; int balanced_classzone_idx; pg_data_t *pgdat = (pg_data_t*)p; struct task_struct *tsk = current; struct reclaim_state reclaim_state = { .reclaimed_slab = 0, }; const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); lockdep_set_current_reclaim_state(GFP_KERNEL); if (!cpumask_empty(cpumask)) set_cpus_allowed_ptr(tsk, cpumask); current->reclaim_state = &reclaim_state; /* * Tell the memory management that we're a "memory allocator", * and that if we need more memory we should get access to it * regardless (see "__alloc_pages()"). "kswapd" should * never get caught in the normal page freeing logic. * * (Kswapd normally doesn't need memory anyway, but sometimes * you need a small amount of memory in order to be able to * page out something else, and this flag essentially protects * us from recursively trying to free more memory as we're * trying to free the first piece of memory in the first place). 
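	 *
	 * PF_MEMALLOC grants access to the memory reserves, PF_SWAPWRITE
	 * allows writing to swap, and PF_KSWAPD is what current_is_kswapd()
	 * tests.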
 */
	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
	set_freezable();

	order = new_order = 0;
	balanced_order = 0;
	classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
	balanced_classzone_idx = classzone_idx;
	for ( ; ; ) {
		bool ret;

		/*
		 * If the last balance_pgdat was unsuccessful it's unlikely a
		 * new request of a similar or harder type will succeed soon
		 * so consider going to sleep on the basis we reclaimed at
		 */
		if (balanced_classzone_idx >= new_classzone_idx &&
					balanced_order == new_order) {
			new_order = pgdat->kswapd_max_order;
			new_classzone_idx = pgdat->classzone_idx;
			pgdat->kswapd_max_order =  0;
			pgdat->classzone_idx = pgdat->nr_zones - 1;
		}

		if (order < new_order || classzone_idx > new_classzone_idx) {
			/*
			 * Don't sleep if someone wants a larger 'order'
			 * allocation or has tighter zone constraints
			 */
			order = new_order;
			classzone_idx = new_classzone_idx;
		} else {
			kswapd_try_to_sleep(pgdat, balanced_order,
						balanced_classzone_idx);
			order = pgdat->kswapd_max_order;
			classzone_idx = pgdat->classzone_idx;
			new_order = order;
			new_classzone_idx = classzone_idx;
			pgdat->kswapd_max_order = 0;
			pgdat->classzone_idx = pgdat->nr_zones - 1;
		}

		ret = try_to_freeze();
		if (kthread_should_stop())
			break;

		/*
		 * We can speed up thawing tasks if we don't call balance_pgdat
		 * after returning from the refrigerator
		 */
		if (!ret) {
			trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
			balanced_classzone_idx = classzone_idx;
			balanced_order = balance_pgdat(pgdat, order,
						&balanced_classzone_idx);
		}
	}

	tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
	current->reclaim_state = NULL;
	lockdep_clear_current_reclaim_state();

	return 0;
}

/*
 * A zone is low on free memory, so wake its kswapd task to service it.
 */
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
{
	pg_data_t *pgdat;

	if (!populated_zone(zone))
		return;

	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
		return;
	pgdat = zone->zone_pgdat;
	if (pgdat->kswapd_max_order < order) {
		pgdat->kswapd_max_order = order;
		pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx);
	}
	if (!waitqueue_active(&pgdat->kswapd_wait))
		return;
	if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0))
		return;

	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
	wake_up_interruptible(&pgdat->kswapd_wait);
}

#ifdef CONFIG_HIBERNATION
/*
 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
 * freed pages.
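 * Called from the hibernation core so that enough memory can be freed to
 * build the suspend image (hence sc.hibernation_mode below).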
* * Rather than trying to age LRUs the aim is to preserve the overall * LRU order by reclaiming preferentially * inactive > active > active referenced > active mapped */ unsigned long shrink_all_memory(unsigned long nr_to_reclaim) { struct reclaim_state reclaim_state; struct scan_control sc = { .gfp_mask = GFP_HIGHUSER_MOVABLE, .may_swap = 1, .may_unmap = 1, .may_writepage = 1, .nr_to_reclaim = nr_to_reclaim, .hibernation_mode = 1, .order = 0, .priority = DEF_PRIORITY, }; struct shrink_control shrink = { .gfp_mask = sc.gfp_mask, }; struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); struct task_struct *p = current; unsigned long nr_reclaimed; p->flags |= PF_MEMALLOC; lockdep_set_current_reclaim_state(sc.gfp_mask); reclaim_state.reclaimed_slab = 0; p->reclaim_state = &reclaim_state; nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); p->reclaim_state = NULL; lockdep_clear_current_reclaim_state(); p->flags &= ~PF_MEMALLOC; return nr_reclaimed; } #endif /* CONFIG_HIBERNATION */ /* It's optimal to keep kswapds on the same CPUs as their memory, but not required for correctness. So if the last cpu in a node goes away, we get changed to run anywhere: as the first one comes back, restore their cpu bindings. */ static int cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { int nid; if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) { for_each_node_state(nid, N_MEMORY) { pg_data_t *pgdat = NODE_DATA(nid); const struct cpumask *mask; mask = cpumask_of_node(pgdat->node_id); if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) /* One of our CPUs online: restore mask */ set_cpus_allowed_ptr(pgdat->kswapd, mask); } } return NOTIFY_OK; } /* * This kswapd start function will be called by init and node-hot-add. * On node-hot-add, kswapd will moved to proper cpus if cpus are hot-added. */ int kswapd_run(int nid) { pg_data_t *pgdat = NODE_DATA(nid); int ret = 0; if (pgdat->kswapd) return 0; pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); if (IS_ERR(pgdat->kswapd)) { /* failure at boot is fatal */ BUG_ON(system_state == SYSTEM_BOOTING); pr_err("Failed to start kswapd on node %d\n", nid); ret = PTR_ERR(pgdat->kswapd); pgdat->kswapd = NULL; } return ret; } /* * Called by memory hotplug when all memory in a node is offlined. Caller must * hold lock_memory_hotplug(). */ void kswapd_stop(int nid) { struct task_struct *kswapd = NODE_DATA(nid)->kswapd; if (kswapd) { kthread_stop(kswapd); NODE_DATA(nid)->kswapd = NULL; } } static int __init kswapd_init(void) { int nid; swap_setup(); for_each_node_state(nid, N_MEMORY) kswapd_run(nid); hotcpu_notifier(cpu_callback, 0); return 0; } module_init(kswapd_init) #ifdef CONFIG_NUMA /* * Zone reclaim mode * * If non-zero call zone_reclaim when the number of free pages falls below * the watermarks. */ int zone_reclaim_mode __read_mostly; #define RECLAIM_OFF 0 #define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */ #define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */ #define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */ /* * Priority for ZONE_RECLAIM. This determines the fraction of pages * of a node considered for each zone_reclaim. 4 scans 1/16th of * a zone. */ #define ZONE_RECLAIM_PRIORITY 4 /* * Percentage of pages in a zone that must be unmapped for zone_reclaim to * occur. */ int sysctl_min_unmapped_ratio = 1; /* * If the number of slab pages in a zone grows beyond this percentage then * slab reclaim needs to occur. 
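 * The default of 5 means a zone's reclaimable slab may grow to 5% of the
 * zone before zone_reclaim() starts shrinking it (zone->min_slab_pages is
 * derived from this ratio).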
*/ int sysctl_min_slab_ratio = 5; static inline unsigned long zone_unmapped_file_pages(struct zone *zone) { unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED); unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) + zone_page_state(zone, NR_ACTIVE_FILE); /* * It's possible for there to be more file mapped pages than * accounted for by the pages on the file LRU lists because * tmpfs pages accounted for as ANON can also be FILE_MAPPED */ return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0; } /* Work out how many page cache pages we can reclaim in this reclaim_mode */ static long zone_pagecache_reclaimable(struct zone *zone) { long nr_pagecache_reclaimable; long delta = 0; /* * If RECLAIM_SWAP is set, then all file pages are considered * potentially reclaimable. Otherwise, we have to worry about * pages like swapcache and zone_unmapped_file_pages() provides * a better estimate */ if (zone_reclaim_mode & RECLAIM_SWAP) nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES); else nr_pagecache_reclaimable = zone_unmapped_file_pages(zone); /* If we can't clean pages, remove dirty pages from consideration */ if (!(zone_reclaim_mode & RECLAIM_WRITE)) delta += zone_page_state(zone, NR_FILE_DIRTY); /* Watch for any possible underflows due to delta */ if (unlikely(delta > nr_pagecache_reclaimable)) delta = nr_pagecache_reclaimable; return nr_pagecache_reclaimable - delta; } /* * Try to free up some pages from this zone through reclaim. */ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) { /* Minimum pages needed in order to stay on node */ const unsigned long nr_pages = 1 << order; struct task_struct *p = current; struct reclaim_state reclaim_state; struct scan_control sc = { .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE), .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP), .may_swap = 1, .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)), .order = order, .priority = ZONE_RECLAIM_PRIORITY, }; struct shrink_control shrink = { .gfp_mask = sc.gfp_mask, }; unsigned long nr_slab_pages0, nr_slab_pages1; cond_resched(); /* * We need to be able to allocate from the reserves for RECLAIM_SWAP * and we also need to be able to write out pages for RECLAIM_WRITE * and RECLAIM_SWAP. */ p->flags |= PF_MEMALLOC | PF_SWAPWRITE; lockdep_set_current_reclaim_state(gfp_mask); reclaim_state.reclaimed_slab = 0; p->reclaim_state = &reclaim_state; if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) { /* * Free memory by calling shrink zone with increasing * priorities until we have enough memory freed. */ do { shrink_zone(zone, &sc); } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0); } nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE); if (nr_slab_pages0 > zone->min_slab_pages) { /* * shrink_slab() does not currently allow us to determine how * many pages were freed in this zone. So we take the current * number of slab pages and shake the slab until it is reduced * by the same nr_pages that we used for reclaiming unmapped * pages. * * Note that shrink_slab will free memory on all zones and may * take a long time. 
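		 *
		 * So loop until shrink_slab() reports no progress or the
		 * reclaimable slab count has dropped by the nr_pages we are
		 * after.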
*/ for (;;) { unsigned long lru_pages = zone_reclaimable_pages(zone); /* No reclaimable slab or very low memory pressure */ if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages)) break; /* Freed enough memory */ nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE); if (nr_slab_pages1 + nr_pages <= nr_slab_pages0) break; } /* * Update nr_reclaimed by the number of slab pages we * reclaimed from this zone. */ nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE); if (nr_slab_pages1 < nr_slab_pages0) sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1; } p->reclaim_state = NULL; current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE); lockdep_clear_current_reclaim_state(); return sc.nr_reclaimed >= nr_pages; } int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) { int node_id; int ret; /* * Zone reclaim reclaims unmapped file backed pages and * slab pages if we are over the defined limits. * * A small portion of unmapped file backed pages is needed for * file I/O otherwise pages read by file I/O will be immediately * thrown out if the zone is overallocated. So we do not reclaim * if less than a specified percentage of the zone is used by * unmapped file backed pages. */ if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages && zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages) return ZONE_RECLAIM_FULL; if (zone->all_unreclaimable) return ZONE_RECLAIM_FULL; /* * Do not scan if the allocation should not be delayed. */ if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC)) return ZONE_RECLAIM_NOSCAN; /* * Only run zone reclaim on the local zone or on zones that do not * have associated processors. This will favor the local processor * over remote processors and spread off node memory allocations * as wide as possible. */ node_id = zone_to_nid(zone); if (node_state(node_id, N_CPU) && node_id != numa_node_id()) return ZONE_RECLAIM_NOSCAN; if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED)) return ZONE_RECLAIM_NOSCAN; ret = __zone_reclaim(zone, gfp_mask, order); zone_clear_flag(zone, ZONE_RECLAIM_LOCKED); if (!ret) count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED); return ret; } #endif /* * page_evictable - test whether a page is evictable * @page: the page to test * * Test whether page is evictable--i.e., should be placed on active/inactive * lists vs unevictable list. * * Reasons page might not be evictable: * (1) page's mapping marked unevictable * (2) page is part of an mlocked VMA * */ int page_evictable(struct page *page) { return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page); } #ifdef CONFIG_SHMEM /** * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list * @pages: array of pages to check * @nr_pages: number of pages to check * * Checks pages for evictability and moves them to the appropriate lru list. * * This function is only used for SysV IPC SHM_UNLOCK. 
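 * (SHM_UNLOCK can make a large batch of previously mlocked pages evictable
 * at once; rescuing them here avoids waiting for the reclaim scanner to
 * stumble over them.)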
*/ void check_move_unevictable_pages(struct page **pages, int nr_pages) { struct lruvec *lruvec; struct zone *zone = NULL; int pgscanned = 0; int pgrescued = 0; int i; for (i = 0; i < nr_pages; i++) { struct page *page = pages[i]; struct zone *pagezone; pgscanned++; pagezone = page_zone(page); if (pagezone != zone) { if (zone) spin_unlock_irq(&zone->lru_lock); zone = pagezone; spin_lock_irq(&zone->lru_lock); } lruvec = mem_cgroup_page_lruvec(page, zone); if (!PageLRU(page) || !PageUnevictable(page)) continue; if (page_evictable(page)) { enum lru_list lru = page_lru_base_type(page); VM_BUG_ON(PageActive(page)); ClearPageUnevictable(page); del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE); add_page_to_lru_list(page, lruvec, lru); pgrescued++; } } if (zone) { __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued); __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned); spin_unlock_irq(&zone->lru_lock); } } #endif /* CONFIG_SHMEM */ static void warn_scan_unevictable_pages(void) { printk_once(KERN_WARNING "%s: The scan_unevictable_pages sysctl/node-interface has been " "disabled for lack of a legitimate use case. If you have " "one, please send an email to linux-mm@kvack.org.\n", current->comm); } /* * scan_unevictable_pages [vm] sysctl handler. On demand re-scan of * all nodes' unevictable lists for evictable pages */ unsigned long scan_unevictable_pages; int scan_unevictable_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { warn_scan_unevictable_pages(); proc_doulongvec_minmax(table, write, buffer, length, ppos); scan_unevictable_pages = 0; return 0; } #ifdef CONFIG_NUMA /* * per node 'scan_unevictable_pages' attribute. On demand re-scan of * a specified node's per zone unevictable lists for evictable pages. */ static ssize_t read_scan_unevictable_node(struct device *dev, struct device_attribute *attr, char *buf) { warn_scan_unevictable_pages(); return sprintf(buf, "0\n"); /* always zero; should fit... */ } static ssize_t write_scan_unevictable_node(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { warn_scan_unevictable_pages(); return 1; } static DEVICE_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR, read_scan_unevictable_node, write_scan_unevictable_node); int scan_unevictable_register_node(struct node *node) { return device_create_file(&node->dev, &dev_attr_scan_unevictable_pages); } void scan_unevictable_unregister_node(struct node *node) { device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages); } #endif
artefvck/X_Artefvck
mm/vmscan.c
C
gpl-2.0
104,394
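/*
 * Minimal userspace sketch (not kernel code) of the gating logic that
 * zone_reclaim() in the mm/vmscan.c entry above applies before doing any
 * work: reclaim is skipped when unmapped page cache and reclaimable slab
 * are both under their per-zone thresholds, when the zone is marked
 * all_unreclaimable, or when the caller cannot sleep. The struct, field
 * names, and numbers below are stand-ins invented for illustration.
 */
#include <stdio.h>
#include <stdbool.h>

enum { ZONE_RECLAIM_FULL = -1, ZONE_RECLAIM_NOSCAN = -2, ZONE_RECLAIM_OK = 0 };

struct fake_zone {
	unsigned long pagecache_reclaimable; /* unmapped file-backed pages */
	unsigned long min_unmapped_pages;    /* vm.min_unmapped_ratio threshold */
	unsigned long slab_reclaimable;      /* NR_SLAB_RECLAIMABLE counter */
	unsigned long min_slab_pages;        /* vm.min_slab_ratio threshold */
	bool all_unreclaimable;
};

static int zone_reclaim_gate(const struct fake_zone *z, bool may_sleep)
{
	if (z->pagecache_reclaimable <= z->min_unmapped_pages &&
	    z->slab_reclaimable <= z->min_slab_pages)
		return ZONE_RECLAIM_FULL;	/* nothing worth reclaiming */
	if (z->all_unreclaimable)
		return ZONE_RECLAIM_FULL;
	if (!may_sleep)
		return ZONE_RECLAIM_NOSCAN;	/* allocation must not be delayed */
	return ZONE_RECLAIM_OK;			/* would proceed to __zone_reclaim() */
}

int main(void)
{
	struct fake_zone z = { 4096, 1024, 512, 2048, false };
	printf("gate: %d\n", zone_reclaim_gate(&z, true)); /* prints 0 */
	return 0;
}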
/* * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/random.h> #include <linux/vmalloc.h> #include <linux/hardirq.h> #include <linux/mlx5/driver.h> #include <linux/mlx5/cmd.h> #include "mlx5_core.h" enum { MLX5_HEALTH_POLL_INTERVAL = 2 * HZ, MAX_MISSES = 3, }; enum { MLX5_HEALTH_SYNDR_FW_ERR = 0x1, MLX5_HEALTH_SYNDR_IRISC_ERR = 0x7, MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR = 0x8, MLX5_HEALTH_SYNDR_CRC_ERR = 0x9, MLX5_HEALTH_SYNDR_FETCH_PCI_ERR = 0xa, MLX5_HEALTH_SYNDR_HW_FTL_ERR = 0xb, MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR = 0xc, MLX5_HEALTH_SYNDR_EQ_ERR = 0xd, MLX5_HEALTH_SYNDR_EQ_INV = 0xe, MLX5_HEALTH_SYNDR_FFSER_ERR = 0xf, MLX5_HEALTH_SYNDR_HIGH_TEMP = 0x10 }; enum { MLX5_NIC_IFC_FULL = 0, MLX5_NIC_IFC_DISABLED = 1, MLX5_NIC_IFC_NO_DRAM_NIC = 2, MLX5_NIC_IFC_INVALID = 3 }; enum { MLX5_DROP_NEW_HEALTH_WORK, MLX5_DROP_NEW_RECOVERY_WORK, }; static u8 get_nic_state(struct mlx5_core_dev *dev) { return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3; } static void trigger_cmd_completions(struct mlx5_core_dev *dev) { unsigned long flags; u64 vector; /* wait for pending handlers to complete */ synchronize_irq(pci_irq_vector(dev->pdev, MLX5_EQ_VEC_CMD)); spin_lock_irqsave(&dev->cmd.alloc_lock, flags); vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1); if (!vector) goto no_trig; vector |= MLX5_TRIGGERED_CMD_COMP; spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); mlx5_core_dbg(dev, "vector 0x%llx\n", vector); mlx5_cmd_comp_handler(dev, vector, true); return; no_trig: spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); } static int in_fatal(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; struct health_buffer __iomem *h = health->health; if (get_nic_state(dev) == MLX5_NIC_IFC_DISABLED) return 1; if (ioread32be(&h->fw_ver) == 0xffffffff) return 1; return 0; } void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force) { mutex_lock(&dev->intf_state_mutex); if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) goto unlock; mlx5_core_err(dev, "start\n"); if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) { dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; trigger_cmd_completions(dev); } 
mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0); mlx5_core_err(dev, "end\n"); unlock: mutex_unlock(&dev->intf_state_mutex); } static void mlx5_handle_bad_state(struct mlx5_core_dev *dev) { u8 nic_interface = get_nic_state(dev); switch (nic_interface) { case MLX5_NIC_IFC_FULL: mlx5_core_warn(dev, "Expected to see disabled NIC but it is full driver\n"); break; case MLX5_NIC_IFC_DISABLED: mlx5_core_warn(dev, "starting teardown\n"); break; case MLX5_NIC_IFC_NO_DRAM_NIC: mlx5_core_warn(dev, "Expected to see disabled NIC but it is no dram nic\n"); break; default: mlx5_core_warn(dev, "Expected to see disabled NIC but it has an invalid value %d\n", nic_interface); } mlx5_disable_device(dev); } static void health_recover(struct work_struct *work) { struct mlx5_core_health *health; struct delayed_work *dwork; struct mlx5_core_dev *dev; struct mlx5_priv *priv; u8 nic_state; dwork = container_of(work, struct delayed_work, work); health = container_of(dwork, struct mlx5_core_health, recover_work); priv = container_of(health, struct mlx5_priv, health); dev = container_of(priv, struct mlx5_core_dev, priv); nic_state = get_nic_state(dev); if (nic_state == MLX5_NIC_IFC_INVALID) { dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n"); return; } dev_err(&dev->pdev->dev, "starting health recovery flow\n"); mlx5_recover_device(dev); } /* How long to wait before the health code resets the driver (in msecs) */ #define MLX5_RECOVERY_DELAY_MSECS 60000 static void health_care(struct work_struct *work) { unsigned long recover_delay = msecs_to_jiffies(MLX5_RECOVERY_DELAY_MSECS); struct mlx5_core_health *health; struct mlx5_core_dev *dev; struct mlx5_priv *priv; unsigned long flags; health = container_of(work, struct mlx5_core_health, work); priv = container_of(health, struct mlx5_priv, health); dev = container_of(priv, struct mlx5_core_dev, priv); mlx5_core_warn(dev, "handling bad device here\n"); mlx5_handle_bad_state(dev); spin_lock_irqsave(&health->wq_lock, flags); if (!test_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags)) schedule_delayed_work(&health->recover_work, recover_delay); else dev_err(&dev->pdev->dev, "new health works are not permitted at this stage\n"); spin_unlock_irqrestore(&health->wq_lock, flags); } static const char *hsynd_str(u8 synd) { switch (synd) { case MLX5_HEALTH_SYNDR_FW_ERR: return "firmware internal error"; case MLX5_HEALTH_SYNDR_IRISC_ERR: return "irisc not responding"; case MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR: return "unrecoverable hardware error"; case MLX5_HEALTH_SYNDR_CRC_ERR: return "firmware CRC error"; case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR: return "ICM fetch PCI error"; case MLX5_HEALTH_SYNDR_HW_FTL_ERR: return "HW fatal error"; case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR: return "async EQ buffer overrun"; case MLX5_HEALTH_SYNDR_EQ_ERR: return "EQ error"; case MLX5_HEALTH_SYNDR_EQ_INV: return "Invalid EQ referenced"; case MLX5_HEALTH_SYNDR_FFSER_ERR: return "FFSER error"; case MLX5_HEALTH_SYNDR_HIGH_TEMP: return "High temperature"; default: return "unrecognized error"; } } static void print_health_info(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; struct health_buffer __iomem *h = health->health; char fw_str[18]; u32 fw; int i; /* If the syndrome is 0, the device is OK and there is no need to print the buffer */ if (!ioread8(&h->synd)) return; for (i = 0; i < ARRAY_SIZE(h->assert_var); i++) dev_err(&dev->pdev->dev, "assert_var[%d] 0x%08x\n", i, ioread32be(h->assert_var + i)); dev_err(&dev->pdev->dev, "assert_exit_ptr 
0x%08x\n", ioread32be(&h->assert_exit_ptr)); dev_err(&dev->pdev->dev, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra)); sprintf(fw_str, "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev)); dev_err(&dev->pdev->dev, "fw_ver %s\n", fw_str); dev_err(&dev->pdev->dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id)); dev_err(&dev->pdev->dev, "irisc_index %d\n", ioread8(&h->irisc_index)); dev_err(&dev->pdev->dev, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd))); dev_err(&dev->pdev->dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd)); fw = ioread32be(&h->fw_ver); dev_err(&dev->pdev->dev, "raw fw_ver 0x%08x\n", fw); } static unsigned long get_next_poll_jiffies(void) { unsigned long next; get_random_bytes(&next, sizeof(next)); next %= HZ; next += jiffies + MLX5_HEALTH_POLL_INTERVAL; return next; } void mlx5_trigger_health_work(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; unsigned long flags; spin_lock_irqsave(&health->wq_lock, flags); if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) queue_work(health->wq, &health->work); else dev_err(&dev->pdev->dev, "new health works are not permitted at this stage\n"); spin_unlock_irqrestore(&health->wq_lock, flags); } static void poll_health(struct timer_list *t) { struct mlx5_core_dev *dev = from_timer(dev, t, priv.health.timer); struct mlx5_core_health *health = &dev->priv.health; u32 count; if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) goto out; count = ioread32be(health->health_counter); if (count == health->prev) ++health->miss_counter; else health->miss_counter = 0; health->prev = count; if (health->miss_counter == MAX_MISSES) { dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n"); print_health_info(dev); } if (in_fatal(dev) && !health->sick) { health->sick = true; print_health_info(dev); mlx5_trigger_health_work(dev); } out: mod_timer(&health->timer, get_next_poll_jiffies()); } void mlx5_start_health_poll(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; timer_setup(&health->timer, poll_health, 0); health->sick = 0; clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); clear_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags); health->health = &dev->iseg->health; health->health_counter = &dev->iseg->health_counter; health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL); add_timer(&health->timer); } void mlx5_stop_health_poll(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; del_timer_sync(&health->timer); } void mlx5_drain_health_wq(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; unsigned long flags; spin_lock_irqsave(&health->wq_lock, flags); set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags); spin_unlock_irqrestore(&health->wq_lock, flags); cancel_delayed_work_sync(&health->recover_work); cancel_work_sync(&health->work); } void mlx5_drain_health_recovery(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; unsigned long flags; spin_lock_irqsave(&health->wq_lock, flags); set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags); spin_unlock_irqrestore(&health->wq_lock, flags); cancel_delayed_work_sync(&dev->priv.health.recover_work); } void mlx5_health_cleanup(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; destroy_workqueue(health->wq); } int mlx5_health_init(struct mlx5_core_dev *dev) { struct mlx5_core_health *health; char 
*name; health = &dev->priv.health; name = kmalloc(64, GFP_KERNEL); if (!name) return -ENOMEM; /* bound the copy so a long device name cannot overflow the 64-byte buffer */ snprintf(name, 64, "mlx5_health%s", dev_name(&dev->pdev->dev)); health->wq = create_singlethread_workqueue(name); kfree(name); if (!health->wq) return -ENOMEM; spin_lock_init(&health->wq_lock); INIT_WORK(&health->work, health_care); INIT_DELAYED_WORK(&health->recover_work, health_recover); return 0; }
michael2012z/myKernel
drivers/net/ethernet/mellanox/mlx5/core/health.c
C
gpl-2.0
11,503
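/*
 * Userspace sketch of the jittered polling interval computed by
 * get_next_poll_jiffies() in the mlx5 health code above: the next poll
 * fires at now + interval + up to one second of random slack, so many
 * devices do not hammer the bus in lockstep. HZ_SIM and rand() are
 * illustration-only stand-ins for the kernel's HZ and get_random_bytes().
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define HZ_SIM 250			/* pretend ticks per second */
#define POLL_INTERVAL (2 * HZ_SIM)	/* mirrors MLX5_HEALTH_POLL_INTERVAL */

static unsigned long next_poll(unsigned long now_ticks)
{
	unsigned long slack = (unsigned long)rand() % HZ_SIM; /* 0..1s of jitter */
	return now_ticks + POLL_INTERVAL + slack;
}

int main(void)
{
	unsigned long now = 0;
	srand((unsigned)time(NULL));
	for (int i = 0; i < 5; i++) {
		now = next_poll(now);
		printf("poll #%d at tick %lu\n", i, now);
	}
	return 0;
}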
/* { dg-do compile } */ /* { dg-require-effective-target arm_dsp } */ /* Ensure the smlatb doesn't get generated when reading the Q flag from ACLE. */ #include <arm_acle.h> int foo (int x, int in, int32_t c) { short a = in & 0xffff; short b = (in & 0xffff0000) >> 16; int res = x + b * a + __ssat (c, 24); return res + __saturation_occurred (); } /* { dg-final { scan-assembler-not "smlatb\\t" } } */
Gurgel100/gcc
gcc/testsuite/gcc.target/arm/acle/sat_no_smlatb.c
C
gpl-2.0
421
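/*
 * Portable model of the ACLE __ssat() intrinsic exercised by the test
 * above: saturate a 32-bit signed value to a given bit width and record
 * whether clamping occurred, which is what __saturation_occurred() exposes
 * via the Q flag. This illustrates the semantics only; it is not the
 * intrinsic itself and compiles on any host.
 */
#include <stdio.h>
#include <stdint.h>

static int sat_flag; /* stand-in for the cumulative Q flag */

static int32_t ssat_model(int32_t x, unsigned bits)
{
	int32_t max = (int32_t)((1u << (bits - 1)) - 1); /* 0x7fffff for 24 */
	int32_t min = -max - 1;

	if (x > max) { sat_flag = 1; return max; }
	if (x < min) { sat_flag = 1; return min; }
	return x;
}

int main(void)
{
	printf("%d\n", ssat_model(9000000, 24));	/* clamps to 8388607 */
	printf("%d\n", ssat_model(-123, 24));		/* passes through */
	printf("saturated: %d\n", sat_flag);		/* 1 */
	return 0;
}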
/* * File : touch.c * This file is part of RT-Thread RTOS * COPYRIGHT (C) 2010 - 2012, RT-Thread Develop Team * * The license and distribution terms for this file may be * found in the file LICENSE in this distribution or at * http://www.rt-thread.org/license/LICENSE * * Change Logs: * Date Author Notes * 2010-01-01 Yi.Qiu first version */ #include <rthw.h> #include <rtthread.h> #include <s3c24x0.h> #ifdef RT_USING_RTGUI #include <rtgui/rtgui_system.h> #include <rtgui/rtgui_server.h> #include <rtgui/event.h> #endif #include "lcd.h" #include "touch.h" /* ADCCON Register Bits */ #define S3C2410_ADCCON_ECFLG (1<<15) #define S3C2410_ADCCON_PRSCEN (1<<14) #define S3C2410_ADCCON_PRSCVL(x) (((x)&0xFF)<<6) #define S3C2410_ADCCON_PRSCVLMASK (0xFF<<6) #define S3C2410_ADCCON_SELMUX(x) (((x)&0x7)<<3) #define S3C2410_ADCCON_MUXMASK (0x7<<3) #define S3C2410_ADCCON_STDBM (1<<2) #define S3C2410_ADCCON_READ_START (1<<1) #define S3C2410_ADCCON_ENABLE_START (1<<0) #define S3C2410_ADCCON_STARTMASK (0x3<<0) /* ADCTSC Register Bits */ #define S3C2410_ADCTSC_UD_SEN (1<<8) /* ghcstop add for s3c2440a */ #define S3C2410_ADCTSC_YM_SEN (1<<7) #define S3C2410_ADCTSC_YP_SEN (1<<6) #define S3C2410_ADCTSC_XM_SEN (1<<5) #define S3C2410_ADCTSC_XP_SEN (1<<4) #define S3C2410_ADCTSC_PULL_UP_DISABLE (1<<3) #define S3C2410_ADCTSC_AUTO_PST (1<<2) #define S3C2410_ADCTSC_XY_PST(x) (((x)&0x3)<<0) /* ADCDAT0 Bits */ #define S3C2410_ADCDAT0_UPDOWN (1<<15) #define S3C2410_ADCDAT0_AUTO_PST (1<<14) #define S3C2410_ADCDAT0_XY_PST (0x3<<12) #define S3C2410_ADCDAT0_XPDATA_MASK (0x03FF) /* ADCDAT1 Bits */ #define S3C2410_ADCDAT1_UPDOWN (1<<15) #define S3C2410_ADCDAT1_AUTO_PST (1<<14) #define S3C2410_ADCDAT1_XY_PST (0x3<<12) #define S3C2410_ADCDAT1_YPDATA_MASK (0x03FF) #define WAIT4INT(x) (((x)<<8) | \ S3C2410_ADCTSC_YM_SEN | S3C2410_ADCTSC_YP_SEN | S3C2410_ADCTSC_XP_SEN | \ S3C2410_ADCTSC_XY_PST(3)) #define AUTOPST (S3C2410_ADCTSC_YM_SEN | S3C2410_ADCTSC_YP_SEN | S3C2410_ADCTSC_XP_SEN | \ S3C2410_ADCTSC_AUTO_PST | S3C2410_ADCTSC_XY_PST(0)) #define X_MIN 74 #define X_MAX 934 #define Y_MIN 920 #define Y_MAX 89 struct s3c2410ts { long xp; long yp; int count; int shift; int delay; int presc; char phys[32]; }; static struct s3c2410ts ts; struct rtgui_touch_device { struct rt_device parent; rt_timer_t poll_timer; rt_uint16_t x, y; rt_bool_t calibrating; rt_touch_calibration_func_t calibration_func; rt_touch_eventpost_func_t eventpost_func; void *eventpost_param; rt_uint16_t min_x, max_x; rt_uint16_t min_y, max_y; rt_uint16_t width; rt_uint16_t height; rt_bool_t first_down_report; }; static struct rtgui_touch_device *touch = RT_NULL; #ifdef RT_USING_RTGUI static void report_touch_input(int updown) { struct rtgui_event_mouse emouse; RTGUI_EVENT_MOUSE_BUTTON_INIT(&emouse); emouse.wid = RT_NULL; /* set emouse button */ emouse.button = RTGUI_MOUSE_BUTTON_LEFT; emouse.parent.sender = RT_NULL; if (updown) { ts.xp = ts.xp / ts.count; ts.yp = ts.yp / ts.count;; if ((touch->calibrating == RT_TRUE) && (touch->calibration_func != RT_NULL)) { touch->x = ts.xp; touch->y = ts.yp; } else { if (touch->max_x > touch->min_x) { touch->x = touch->width * (ts.xp-touch->min_x)/(touch->max_x-touch->min_x); } else { touch->x = touch->width * ( touch->min_x - ts.xp ) / (touch->min_x-touch->max_x); } if (touch->max_y > touch->min_y) { touch->y = touch->height * ( ts.yp - touch->min_y ) / (touch->max_y-touch->min_y); } else { touch->y = touch->height * ( touch->min_y - ts.yp ) / (touch->min_y-touch->max_y); } } emouse.x = touch->x; emouse.y = touch->y; if 
(touch->first_down_report == RT_TRUE) { emouse.parent.type = RTGUI_EVENT_MOUSE_BUTTON; emouse.button |= RTGUI_MOUSE_BUTTON_DOWN; } else { emouse.parent.type = RTGUI_EVENT_MOUSE_MOTION; emouse.button = 0; } } else { emouse.x = touch->x; emouse.y = touch->y; emouse.parent.type = RTGUI_EVENT_MOUSE_BUTTON; emouse.button |= RTGUI_MOUSE_BUTTON_UP; if ((touch->calibrating == RT_TRUE) && (touch->calibration_func != RT_NULL)) { /* callback function */ touch->calibration_func(emouse.x, emouse.y); } } /* rt_kprintf("touch %s: ts.x: %d, ts.y: %d\n", updown? "down" : "up", touch->x, touch->y); */ /* send event to server */ if (touch->calibrating != RT_TRUE) { rtgui_server_post_event((&emouse.parent), sizeof(emouse)); } } #else static void report_touch_input(int updown) { struct rt_touch_event touch_event; if (updown) { ts.xp = ts.xp / ts.count; ts.yp = ts.yp / ts.count; if ((touch->calibrating == RT_TRUE) && (touch->calibration_func != RT_NULL)) { touch->x = ts.xp; touch->y = ts.yp; } else { if (touch->max_x > touch->min_x) { touch->x = touch->width * ( ts.xp - touch->min_x ) / (touch->max_x-touch->min_x); } else { touch->x = touch->width * ( touch->min_x - ts.xp ) / (touch->min_x-touch->max_x); } if (touch->max_y > touch->min_y) { touch->y = touch->height * ( ts.yp - touch->min_y ) / (touch->max_y-touch->min_y); } else { touch->y = touch->height * ( touch->min_y - ts.yp ) / (touch->min_y-touch->max_y); } } touch_event.x = touch->x; touch_event.y = touch->y; touch_event.pressed = 1; if (touch->first_down_report == RT_TRUE) { if (touch->calibrating != RT_TRUE && touch->eventpost_func) { touch->eventpost_func(touch->eventpost_param, &touch_event); } } } else { touch_event.x = touch->x; touch_event.y = touch->y; touch_event.pressed = 0; if ((touch->calibrating == RT_TRUE) && (touch->calibration_func != RT_NULL)) { /* callback function */ touch->calibration_func(touch_event.x, touch_event.y); } if (touch->calibrating != RT_TRUE && touch->eventpost_func) { touch->eventpost_func(touch->eventpost_param, &touch_event); } } } #endif static void touch_timer_fire(void *parameter) { rt_uint32_t data0; rt_uint32_t data1; int updown; data0 = ADCDAT0; data1 = ADCDAT1; updown = (!(data0 & S3C2410_ADCDAT0_UPDOWN)) && (!(data1 & S3C2410_ADCDAT0_UPDOWN)); if (updown) { if (ts.count != 0) { report_touch_input(updown); } ts.xp = 0; ts.yp = 0; ts.count = 0; ADCTSC = S3C2410_ADCTSC_PULL_UP_DISABLE | AUTOPST; ADCCON |= S3C2410_ADCCON_ENABLE_START; } } static void s3c2410_adc_stylus_action(void) { rt_uint32_t data0; rt_uint32_t data1; data0 = ADCDAT0; data1 = ADCDAT1; ts.xp += data0 & S3C2410_ADCDAT0_XPDATA_MASK; ts.yp += data1 & S3C2410_ADCDAT1_YPDATA_MASK; ts.count ++; if (ts.count < (1<<ts.shift)) { ADCTSC = S3C2410_ADCTSC_PULL_UP_DISABLE | AUTOPST; ADCCON |= S3C2410_ADCCON_ENABLE_START; } else { if (touch->first_down_report) { report_touch_input(1); ts.xp = 0; ts.yp = 0; ts.count = 0; touch->first_down_report = 0; } /* start timer */ rt_timer_start(touch->poll_timer); ADCTSC = WAIT4INT(1); } SUBSRCPND |= BIT_SUB_ADC; } static void s3c2410_intc_stylus_updown(void) { rt_uint32_t data0; rt_uint32_t data1; int updown; data0 = ADCDAT0; data1 = ADCDAT1; updown = (!(data0 & S3C2410_ADCDAT0_UPDOWN)) && (!(data1 & S3C2410_ADCDAT0_UPDOWN)); /* rt_kprintf("stylus: %s\n", updown? 
"down" : "up"); */ if (updown) { touch_timer_fire(0); } else { /* stop timer */ rt_timer_stop(touch->poll_timer); touch->first_down_report = RT_TRUE; if (ts.xp >= 0 && ts.yp >= 0) { report_touch_input(updown); } ts.count = 0; ADCTSC = WAIT4INT(0); } SUBSRCPND |= BIT_SUB_TC; } static void rt_touch_handler(int irqno) { if (SUBSRCPND & BIT_SUB_ADC) { /* INT_SUB_ADC */ s3c2410_adc_stylus_action(); } if (SUBSRCPND & BIT_SUB_TC) { /* INT_SUB_TC */ s3c2410_intc_stylus_updown(); } /* clear interrupt */ INTPND |= (1ul << INTADC); } /* RT-Thread Device Interface */ static rt_err_t rtgui_touch_init(rt_device_t dev) { /* init touch screen structure */ rt_memset(&ts, 0, sizeof(struct s3c2410ts)); ts.delay = 50000; ts.presc = 9; ts.shift = 2; ts.count = 0; ts.xp = ts.yp = 0; ADCCON = S3C2410_ADCCON_PRSCEN | S3C2410_ADCCON_PRSCVL(ts.presc); ADCDLY = ts.delay; ADCTSC = WAIT4INT(0); rt_hw_interrupt_install(INTADC, rt_touch_handler, RT_NULL , "INTADC"); rt_hw_interrupt_umask(INTADC); /* clear interrupt */ INTPND |= (1ul << INTADC); SUBSRCPND |= BIT_SUB_TC; SUBSRCPND |= BIT_SUB_ADC; /* install interrupt handler */ INTSUBMSK &= ~BIT_SUB_ADC; INTSUBMSK &= ~BIT_SUB_TC; touch->first_down_report = RT_TRUE; return RT_EOK; } static rt_err_t rtgui_touch_control(rt_device_t dev, rt_uint8_t cmd, void *args) { switch (cmd) { case RT_TOUCH_CALIBRATION: touch->calibrating = RT_TRUE; touch->calibration_func = (rt_touch_calibration_func_t)args; break; case RT_TOUCH_NORMAL: touch->calibrating = RT_FALSE; break; case RT_TOUCH_CALIBRATION_DATA: { struct calibration_data *data; data = (struct calibration_data *)args; /* update */ touch->min_x = data->min_x; touch->max_x = data->max_x; touch->min_y = data->min_y; touch->max_y = data->max_y; /* rt_kprintf("min_x = %d, max_x = %d, min_y = %d, max_y = %d\n", touch->min_x, touch->max_x, touch->min_y, touch->max_y); */ } break; case RT_TOUCH_EVENTPOST: touch->eventpost_func = (rt_touch_eventpost_func_t)args; break; case RT_TOUCH_EVENTPOST_PARAM: touch->eventpost_param = args; break; } return RT_EOK; } void rtgui_touch_hw_init(void) { rt_err_t result = RT_FALSE; rt_device_t device = RT_NULL; struct rt_device_graphic_info info; touch = (struct rtgui_touch_device *)rt_malloc(sizeof(struct rtgui_touch_device)); if (touch == RT_NULL) return; /* no memory yet */ /* clear device structure */ rt_memset(&(touch->parent), 0, sizeof(struct rt_device)); touch->calibrating = RT_FALSE; touch->min_x = X_MIN; touch->max_x = X_MAX; touch->min_y = Y_MIN; touch->max_y = Y_MAX; touch->eventpost_func = RT_NULL; touch->eventpost_param = RT_NULL; /* init device structure */ touch->parent.type = RT_Device_Class_Unknown; touch->parent.init = rtgui_touch_init; touch->parent.control = rtgui_touch_control; touch->parent.user_data = RT_NULL; device = rt_device_find("lcd"); if (device == RT_NULL) return; /* no this device */ /* get graphic device info */ result = rt_device_control(device, RTGRAPHIC_CTRL_GET_INFO, &info); if (result != RT_EOK) { /* get device information failed */ return; } touch->width = info.width; touch->height = info.height; /* create 1/8 second timer */ touch->poll_timer = rt_timer_create("touch", touch_timer_fire, RT_NULL, RT_TICK_PER_SECOND/8, RT_TIMER_FLAG_PERIODIC); /* register touch device to RT-Thread */ rt_device_register(&(touch->parent), "touch", RT_DEVICE_FLAG_RDWR); }
zhangzq71/rt-thread
bsp/mini2440/drivers/touch.c
C
gpl-2.0
10,961
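/*
 * Standalone sketch of the coordinate mapping performed by
 * report_touch_input() in the touch driver above: a raw ADC sample is
 * scaled into screen pixels, and an inverted axis (calibration max below
 * min, as with the driver's Y_MIN/Y_MAX defaults) is handled by flipping
 * the ratio. The sample values and panel size below are illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t scale_axis(uint16_t raw, uint16_t min, uint16_t max,
			   uint16_t screen)
{
	if (max > min)
		return (uint16_t)((uint32_t)screen * (raw - min) / (max - min));
	/* axis is inverted: larger raw readings mean smaller coordinates */
	return (uint16_t)((uint32_t)screen * (min - raw) / (min - max));
}

int main(void)
{
	/* X axis normal (74..934), Y axis inverted (920..89), 240x320 panel */
	printf("x=%u\n", scale_axis(504, 74, 934, 240));  /* 120, mid-screen */
	printf("y=%u\n", scale_axis(504, 920, 89, 320));  /* ~160, mid-screen */
	return 0;
}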
/* * Gadget Function Driver for MTP * * Copyright (C) 2010 Google, Inc. * Author: Mike Lockwood <lockwood@android.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ /* #define DEBUG */ /* #define VERBOSE_DEBUG */ #include <linux/module.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/delay.h> #include <linux/wait.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/types.h> #include <linux/file.h> #include <linux/device.h> #include <linux/miscdevice.h> #include <linux/usb.h> #include <linux/usb_usual.h> #include <linux/usb/ch9.h> #include <linux/usb/f_mtp.h> #define MTP_BULK_BUFFER_SIZE 16384 #define INTR_BUFFER_SIZE 28 /* String IDs */ #define INTERFACE_STRING_INDEX 0 /* values for mtp_dev.state */ #define STATE_OFFLINE 0 /* initial state, disconnected */ #define STATE_READY 1 /* ready for userspace calls */ #define STATE_BUSY 2 /* processing userspace calls */ #define STATE_CANCELED 3 /* transaction canceled by host */ #define STATE_ERROR 4 /* error from completion routine */ /* number of tx and rx requests to allocate */ #define TX_REQ_MAX 4 #define RX_REQ_MAX 2 #define INTR_REQ_MAX 5 /* ID for Microsoft MTP OS String */ #define MTP_OS_STRING_ID 0xEE /* MTP class requests */ #define MTP_REQ_CANCEL 0x64 #define MTP_REQ_GET_EXT_EVENT_DATA 0x65 #define MTP_REQ_RESET 0x66 #define MTP_REQ_GET_DEVICE_STATUS 0x67 /* constants for device status */ #define MTP_RESPONSE_OK 0x2001 #define MTP_RESPONSE_DEVICE_BUSY 0x2019 static const char mtp_shortname[] = "mtp_usb"; struct mtp_dev { struct usb_function function; struct usb_composite_dev *cdev; spinlock_t lock; struct usb_ep *ep_in; struct usb_ep *ep_out; struct usb_ep *ep_intr; int state; /* synchronize access to our device file */ atomic_t open_excl; /* to enforce only one ioctl at a time */ atomic_t ioctl_excl; struct list_head tx_idle; struct list_head intr_idle; wait_queue_head_t read_wq; wait_queue_head_t write_wq; wait_queue_head_t intr_wq; struct usb_request *rx_req[RX_REQ_MAX]; int rx_done; /* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue */ struct workqueue_struct *wq; struct work_struct send_file_work; struct work_struct receive_file_work; struct file *xfer_file; loff_t xfer_file_offset; int64_t xfer_file_length; unsigned xfer_send_header; uint16_t xfer_command; uint32_t xfer_transaction_id; int xfer_result; int zlp_maxpacket; }; static struct usb_interface_descriptor mtp_interface_desc = { .bLength = USB_DT_INTERFACE_SIZE, .bDescriptorType = USB_DT_INTERFACE, .bInterfaceNumber = 0, .bNumEndpoints = 3, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC, .bInterfaceProtocol = 0, }; static struct usb_interface_descriptor ptp_interface_desc = { .bLength = USB_DT_INTERFACE_SIZE, .bDescriptorType = USB_DT_INTERFACE, .bInterfaceNumber = 0, .bNumEndpoints = 3, .bInterfaceClass = USB_CLASS_STILL_IMAGE, .bInterfaceSubClass = 1, .bInterfaceProtocol = 1, }; static struct usb_endpoint_descriptor mtp_highspeed_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = __constant_cpu_to_le16(512), }; static struct usb_endpoint_descriptor mtp_highspeed_out_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = __constant_cpu_to_le16(512), }; static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, }; static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, }; static struct usb_endpoint_descriptor mtp_intr_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_INT, .wMaxPacketSize = __constant_cpu_to_le16(INTR_BUFFER_SIZE), .bInterval = 6, }; static struct usb_descriptor_header *fs_mtp_descs[] = { (struct usb_descriptor_header *) &mtp_interface_desc, (struct usb_descriptor_header *) &mtp_fullspeed_in_desc, (struct usb_descriptor_header *) &mtp_fullspeed_out_desc, (struct usb_descriptor_header *) &mtp_intr_desc, NULL, }; static struct usb_descriptor_header *hs_mtp_descs[] = { (struct usb_descriptor_header *) &mtp_interface_desc, (struct usb_descriptor_header *) &mtp_highspeed_in_desc, (struct usb_descriptor_header *) &mtp_highspeed_out_desc, (struct usb_descriptor_header *) &mtp_intr_desc, NULL, }; static struct usb_descriptor_header *fs_ptp_descs[] = { (struct usb_descriptor_header *) &ptp_interface_desc, (struct usb_descriptor_header *) &mtp_fullspeed_in_desc, (struct usb_descriptor_header *) &mtp_fullspeed_out_desc, (struct usb_descriptor_header *) &mtp_intr_desc, NULL, }; static struct usb_descriptor_header *hs_ptp_descs[] = { (struct usb_descriptor_header *) &ptp_interface_desc, (struct usb_descriptor_header *) &mtp_highspeed_in_desc, (struct usb_descriptor_header *) &mtp_highspeed_out_desc, (struct usb_descriptor_header *) &mtp_intr_desc, NULL, }; static struct usb_string mtp_string_defs[] = { /* Naming interface "MTP" so libmtp will recognize us */ [INTERFACE_STRING_INDEX].s = "MTP", { }, /* end of list */ }; static struct usb_gadget_strings mtp_string_table = { .language = 0x0409, /* en-US */ .strings = mtp_string_defs, }; static struct usb_gadget_strings *mtp_strings[] = { &mtp_string_table, NULL, }; /* Microsoft MTP OS String */ static u8 mtp_os_string[] = { 18, /* sizeof(mtp_os_string) */ USB_DT_STRING, /* Signature field: "MSFT100" */ 'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0, /* vendor code */ 1, /* padding */ 0 }; /* Microsoft Extended Configuration Descriptor Header Section */ struct mtp_ext_config_desc_header { __le32 dwLength; __u16 bcdVersion; __le16 wIndex; __u8 bCount; __u8 reserved[7]; }; /* Microsoft Extended Configuration Descriptor Function Section */ struct mtp_ext_config_desc_function { __u8 bFirstInterfaceNumber; __u8 bInterfaceCount; __u8 compatibleID[8]; __u8 subCompatibleID[8]; __u8 reserved[6]; }; /* MTP Extended Configuration Descriptor */ struct { struct mtp_ext_config_desc_header header; struct mtp_ext_config_desc_function function; } mtp_ext_config_desc = { .header = { .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)), .bcdVersion = __constant_cpu_to_le16(0x0100), .wIndex = __constant_cpu_to_le16(4), .bCount = 1, /* bCount is __u8; a le16 helper here would truncate to 0 on big-endian */ },
.function = { .bFirstInterfaceNumber = 0, .bInterfaceCount = 1, .compatibleID = { 'M', 'T', 'P' }, }, }; struct mtp_device_status { __le16 wLength; __le16 wCode; }; /* temporary variable used between mtp_open() and mtp_gadget_bind() */ static struct mtp_dev *_mtp_dev; static inline struct mtp_dev *func_to_mtp(struct usb_function *f) { return container_of(f, struct mtp_dev, function); } static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size) { struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL); if (!req) return NULL; /* now allocate buffers for the requests */ req->buf = kmalloc(buffer_size, GFP_KERNEL); if (!req->buf) { usb_ep_free_request(ep, req); return NULL; } return req; } static void mtp_request_free(struct usb_request *req, struct usb_ep *ep) { if (req) { kfree(req->buf); usb_ep_free_request(ep, req); } } static inline int mtp_lock(atomic_t *excl) { if (atomic_inc_return(excl) == 1) { return 0; } else { atomic_dec(excl); return -1; } } static inline void mtp_unlock(atomic_t *excl) { atomic_dec(excl); } /* add a request to the tail of a list */ static void mtp_req_put(struct mtp_dev *dev, struct list_head *head, struct usb_request *req) { unsigned long flags; spin_lock_irqsave(&dev->lock, flags); list_add_tail(&req->list, head); spin_unlock_irqrestore(&dev->lock, flags); } /* remove a request from the head of a list */ static struct usb_request *mtp_req_get(struct mtp_dev *dev, struct list_head *head) { unsigned long flags; struct usb_request *req; spin_lock_irqsave(&dev->lock, flags); if (list_empty(head)) { req = 0; } else { req = list_first_entry(head, struct usb_request, list); list_del(&req->list); } spin_unlock_irqrestore(&dev->lock, flags); return req; } static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req) { struct mtp_dev *dev = _mtp_dev; if (req->status != 0) dev->state = STATE_ERROR; mtp_req_put(dev, &dev->tx_idle, req); wake_up(&dev->write_wq); } static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req) { struct mtp_dev *dev = _mtp_dev; dev->rx_done = 1; if (req->status != 0) dev->state = STATE_ERROR; wake_up(&dev->read_wq); } static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req) { struct mtp_dev *dev = _mtp_dev; if (req->status != 0) dev->state = STATE_ERROR; mtp_req_put(dev, &dev->intr_idle, req); wake_up(&dev->intr_wq); } static int mtp_create_bulk_endpoints(struct mtp_dev *dev, struct usb_endpoint_descriptor *in_desc, struct usb_endpoint_descriptor *out_desc, struct usb_endpoint_descriptor *intr_desc) { struct usb_composite_dev *cdev = dev->cdev; struct usb_request *req; struct usb_ep *ep; int i; DBG(cdev, "create_bulk_endpoints dev: %p\n", dev); ep = usb_ep_autoconfig(cdev->gadget, in_desc); if (!ep) { DBG(cdev, "usb_ep_autoconfig for ep_in failed\n"); return -ENODEV; } DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name); ep->driver_data = dev; /* claim the endpoint */ dev->ep_in = ep; ep = usb_ep_autoconfig(cdev->gadget, out_desc); if (!ep) { DBG(cdev, "usb_ep_autoconfig for ep_out failed\n"); return -ENODEV; } DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name); ep->driver_data = dev; /* claim the endpoint */ dev->ep_out = ep; ep = usb_ep_autoconfig(cdev->gadget, intr_desc); if (!ep) { DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n"); return -ENODEV; } DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name); ep->driver_data = dev; /* claim the endpoint */ dev->ep_intr = ep; /* now allocate requests for our endpoints */ for (i = 0; i < 
TX_REQ_MAX; i++) { req = mtp_request_new(dev->ep_in, MTP_BULK_BUFFER_SIZE); if (!req) goto fail; req->complete = mtp_complete_in; mtp_req_put(dev, &dev->tx_idle, req); } for (i = 0; i < RX_REQ_MAX; i++) { req = mtp_request_new(dev->ep_out, MTP_BULK_BUFFER_SIZE); if (!req) goto fail; req->complete = mtp_complete_out; dev->rx_req[i] = req; } for (i = 0; i < INTR_REQ_MAX; i++) { req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE); if (!req) goto fail; req->complete = mtp_complete_intr; mtp_req_put(dev, &dev->intr_idle, req); } return 0; fail: printk(KERN_ERR "mtp_bind() could not allocate requests\n"); return -1; } static ssize_t mtp_read(struct file *fp, char __user *buf, size_t count, loff_t *pos) { struct mtp_dev *dev = fp->private_data; struct usb_composite_dev *cdev = dev->cdev; struct usb_request *req; int r = count, xfer; int ret = 0; DBG(cdev, "mtp_read(%d)\n", count); if (count > MTP_BULK_BUFFER_SIZE) return -EINVAL; /* we will block until we're online */ DBG(cdev, "mtp_read: waiting for online state\n"); ret = wait_event_interruptible(dev->read_wq, dev->state != STATE_OFFLINE); if (ret < 0) { r = ret; goto done; } spin_lock_irq(&dev->lock); if (dev->state == STATE_CANCELED) { /* report cancelation to userspace */ dev->state = STATE_READY; spin_unlock_irq(&dev->lock); return -ECANCELED; } dev->state = STATE_BUSY; spin_unlock_irq(&dev->lock); requeue_req: /* queue a request */ req = dev->rx_req[0]; req->length = count; dev->rx_done = 0; ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL); if (ret < 0) { r = -EIO; goto done; } else { DBG(cdev, "rx %p queue\n", req); } /* wait for a request to complete */ ret = wait_event_interruptible(dev->read_wq, dev->rx_done); if (ret < 0) { r = ret; usb_ep_dequeue(dev->ep_out, req); goto done; } if (dev->state == STATE_BUSY) { /* If we got a 0-len packet, throw it back and try again. */ if (req->actual == 0) goto requeue_req; DBG(cdev, "rx %p %d\n", req, req->actual); xfer = (req->actual < count) ? req->actual : count; r = xfer; if (copy_to_user(buf, req->buf, xfer)) r = -EFAULT; } else r = -EIO; done: spin_lock_irq(&dev->lock); if (dev->state == STATE_CANCELED) r = -ECANCELED; else if (dev->state != STATE_OFFLINE) dev->state = STATE_READY; spin_unlock_irq(&dev->lock); DBG(cdev, "mtp_read returning %d\n", r); return r; } static ssize_t mtp_write(struct file *fp, const char __user *buf, size_t count, loff_t *pos) { struct mtp_dev *dev = fp->private_data; struct usb_composite_dev *cdev = dev->cdev; struct usb_request *req = 0; int r = count, xfer; int sendZLP = 0; int ret; DBG(cdev, "mtp_write(%d)\n", count); spin_lock_irq(&dev->lock); if (dev->state == STATE_CANCELED) { /* report cancelation to userspace */ dev->state = STATE_READY; spin_unlock_irq(&dev->lock); return -ECANCELED; } if (dev->state == STATE_OFFLINE) { spin_unlock_irq(&dev->lock); return -ENODEV; } dev->state = STATE_BUSY; spin_unlock_irq(&dev->lock); /* we need to send a zero length packet to signal the end of transfer * if the transfer size is aligned to a packet boundary. 
*/ if ((count & (dev->zlp_maxpacket - 1)) == 0) sendZLP = 1; while (count > 0 || sendZLP) { /* so we exit after sending ZLP */ if (count == 0) sendZLP = 0; if (dev->state != STATE_BUSY) { DBG(cdev, "mtp_write dev->error\n"); r = -EIO; break; } /* get an idle tx request to use */ req = 0; ret = wait_event_interruptible(dev->write_wq, ((req = mtp_req_get(dev, &dev->tx_idle)) || dev->state != STATE_BUSY)); if (!req) { r = ret; break; } if (count > MTP_BULK_BUFFER_SIZE) xfer = MTP_BULK_BUFFER_SIZE; else xfer = count; if (xfer && copy_from_user(req->buf, buf, xfer)) { r = -EFAULT; break; } req->length = xfer; ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL); if (ret < 0) { DBG(cdev, "mtp_write: xfer error %d\n", ret); r = -EIO; break; } buf += xfer; count -= xfer; /* zero this so we don't try to free it on error exit */ req = 0; } if (req) mtp_req_put(dev, &dev->tx_idle, req); spin_lock_irq(&dev->lock); if (dev->state == STATE_CANCELED) r = -ECANCELED; else if (dev->state != STATE_OFFLINE) dev->state = STATE_READY; spin_unlock_irq(&dev->lock); DBG(cdev, "mtp_write returning %d\n", r); return r; } /* read from a local file and write to USB */ static void send_file_work(struct work_struct *data) { struct mtp_dev *dev = container_of(data, struct mtp_dev, send_file_work); struct usb_composite_dev *cdev = dev->cdev; struct usb_request *req = 0; struct mtp_data_header *header; struct file *filp; loff_t offset; int64_t count; int xfer, ret, hdr_size; int r = 0; int sendZLP = 0; /* read our parameters */ smp_rmb(); filp = dev->xfer_file; offset = dev->xfer_file_offset; count = dev->xfer_file_length; DBG(cdev, "send_file_work(%lld %lld)\n", offset, count); if (dev->xfer_send_header) { hdr_size = sizeof(struct mtp_data_header); count += hdr_size; } else { hdr_size = 0; } /* we need to send a zero length packet to signal the end of transfer * if the transfer size is aligned to a packet boundary. 
*/ if ((count & (dev->zlp_maxpacket - 1)) == 0) sendZLP = 1; while (count > 0 || sendZLP) { /* so we exit after sending ZLP */ if (count == 0) sendZLP = 0; /* get an idle tx request to use */ req = 0; ret = wait_event_interruptible(dev->write_wq, (req = mtp_req_get(dev, &dev->tx_idle)) || dev->state != STATE_BUSY); if (dev->state == STATE_CANCELED) { r = -ECANCELED; break; } if (!req) { r = ret; break; } if (count > MTP_BULK_BUFFER_SIZE) xfer = MTP_BULK_BUFFER_SIZE; else xfer = count; if (hdr_size) { /* prepend MTP data header */ header = (struct mtp_data_header *)req->buf; header->length = __cpu_to_le32(count); header->type = __cpu_to_le16(2); /* data packet */ header->command = __cpu_to_le16(dev->xfer_command); header->transaction_id = __cpu_to_le32(dev->xfer_transaction_id); } ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size, &offset); if (ret < 0) { r = ret; break; } xfer = ret + hdr_size; hdr_size = 0; req->length = xfer; ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL); if (ret < 0) { DBG(cdev, "send_file_work: xfer error %d\n", ret); dev->state = STATE_ERROR; r = -EIO; break; } count -= xfer; /* zero this so we don't try to free it on error exit */ req = 0; } if (req) mtp_req_put(dev, &dev->tx_idle, req); DBG(cdev, "send_file_work returning %d\n", r); /* write the result */ dev->xfer_result = r; smp_wmb(); } /* read from USB and write to a local file */ static void receive_file_work(struct work_struct *data) { struct mtp_dev *dev = container_of(data, struct mtp_dev, receive_file_work); struct usb_composite_dev *cdev = dev->cdev; struct usb_request *read_req = NULL, *write_req = NULL; struct file *filp; loff_t offset; int64_t count; int ret, cur_buf = 0; int r = 0; /* read our parameters */ smp_rmb(); filp = dev->xfer_file; offset = dev->xfer_file_offset; count = dev->xfer_file_length; DBG(cdev, "receive_file_work(%lld)\n", count); while (count > 0 || write_req) { if (count > 0) { /* queue a request */ read_req = dev->rx_req[cur_buf]; cur_buf = (cur_buf + 1) % RX_REQ_MAX; read_req->length = (count > MTP_BULK_BUFFER_SIZE ? 
MTP_BULK_BUFFER_SIZE : count); dev->rx_done = 0; ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL); if (ret < 0) { r = -EIO; dev->state = STATE_ERROR; break; } } if (write_req) { DBG(cdev, "rx %p %d\n", write_req, write_req->actual); ret = vfs_write(filp, write_req->buf, write_req->actual, &offset); DBG(cdev, "vfs_write %d\n", ret); if (ret != write_req->actual) { r = -EIO; dev->state = STATE_ERROR; break; } write_req = NULL; } if (read_req) { /* wait for our last read to complete */ ret = wait_event_interruptible(dev->read_wq, dev->rx_done || dev->state != STATE_BUSY); if (dev->state == STATE_CANCELED) { r = -ECANCELED; if (!dev->rx_done) usb_ep_dequeue(dev->ep_out, read_req); break; } /* if xfer_file_length is 0xFFFFFFFF, then we read until * we get a zero length packet */ if (count != 0xFFFFFFFF) count -= read_req->actual; if (read_req->actual < read_req->length) { /* short packet is used to signal EOF for sizes > 4 gig */ DBG(cdev, "got short packet\n"); count = 0; } write_req = read_req; read_req = NULL; } } DBG(cdev, "receive_file_work returning %d\n", r); /* write the result */ dev->xfer_result = r; smp_wmb(); } static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event) { struct usb_request *req= NULL; int ret; int length = event->length; DBG(dev->cdev, "mtp_send_event(%d)\n", event->length); if (length < 0 || length > INTR_BUFFER_SIZE) return -EINVAL; if (dev->state == STATE_OFFLINE) return -ENODEV; ret = wait_event_interruptible_timeout(dev->intr_wq, (req = mtp_req_get(dev, &dev->intr_idle)), msecs_to_jiffies(1000)); if (!req) return -ETIME; if (copy_from_user(req->buf, (void __user *)event->data, length)) { mtp_req_put(dev, &dev->intr_idle, req); return -EFAULT; } req->length = length; ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL); if (ret) mtp_req_put(dev, &dev->intr_idle, req); return ret; } static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value) { struct mtp_dev *dev = fp->private_data; struct file *filp = NULL; int ret = -EINVAL; if (mtp_lock(&dev->ioctl_excl)) return -EBUSY; switch (code) { case MTP_SEND_FILE: case MTP_RECEIVE_FILE: case MTP_SEND_FILE_WITH_HEADER: { struct mtp_file_range mfr; struct work_struct *work; spin_lock_irq(&dev->lock); if (dev->state == STATE_CANCELED) { /* report cancelation to userspace */ dev->state = STATE_READY; spin_unlock_irq(&dev->lock); ret = -ECANCELED; goto out; } if (dev->state == STATE_OFFLINE) { spin_unlock_irq(&dev->lock); ret = -ENODEV; goto out; } dev->state = STATE_BUSY; spin_unlock_irq(&dev->lock); if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) { ret = -EFAULT; goto fail; } /* hold a reference to the file while we are working with it */ filp = fget(mfr.fd); if (!filp) { ret = -EBADF; goto fail; } /* write the parameters */ dev->xfer_file = filp; dev->xfer_file_offset = mfr.offset; dev->xfer_file_length = mfr.length; smp_wmb(); if (code == MTP_SEND_FILE_WITH_HEADER) { work = &dev->send_file_work; dev->xfer_send_header = 1; dev->xfer_command = mfr.command; dev->xfer_transaction_id = mfr.transaction_id; } else if (code == MTP_SEND_FILE) { work = &dev->send_file_work; dev->xfer_send_header = 0; } else { work = &dev->receive_file_work; } /* We do the file transfer on a work queue so it will run * in kernel context, which is necessary for vfs_read and * vfs_write to use our buffers in the kernel address space. 
*/ queue_work(dev->wq, work); /* wait for operation to complete */ flush_workqueue(dev->wq); fput(filp); /* read the result */ smp_rmb(); ret = dev->xfer_result; break; } case MTP_SEND_EVENT: { struct mtp_event event; /* return here so we don't change dev->state below, * which would interfere with bulk transfer state. */ if (copy_from_user(&event, (void __user *)value, sizeof(event))) ret = -EFAULT; else ret = mtp_send_event(dev, &event); goto out; } } fail: spin_lock_irq(&dev->lock); if (dev->state == STATE_CANCELED) ret = -ECANCELED; else if (dev->state != STATE_OFFLINE) dev->state = STATE_READY; spin_unlock_irq(&dev->lock); out: mtp_unlock(&dev->ioctl_excl); DBG(dev->cdev, "ioctl returning %d\n", ret); return ret; } static int mtp_open(struct inode *ip, struct file *fp) { struct usb_descriptor_header **descriptors; printk(KERN_INFO "mtp_open\n"); if (!_mtp_dev->cdev) { WARN(1, "_mtp_dev->cdev is NULL in mtp_open\n"); return -ENODEV; } if (mtp_lock(&_mtp_dev->open_excl)) return -EBUSY; /* clear any error condition */ if (_mtp_dev->state != STATE_OFFLINE) _mtp_dev->state = STATE_READY; if (_mtp_dev->cdev->gadget->speed == USB_SPEED_HIGH) descriptors = _mtp_dev->function.hs_descriptors; else descriptors = _mtp_dev->function.descriptors; /* find mtp ep_in descriptor */ for (; *descriptors; ++descriptors) { struct usb_endpoint_descriptor *ep; ep = (struct usb_endpoint_descriptor *)*descriptors; if (ep->bDescriptorType == USB_DT_ENDPOINT && (ep->bEndpointAddress & USB_DIR_IN) && ep->bmAttributes == USB_ENDPOINT_XFER_BULK) { _mtp_dev->zlp_maxpacket = __le16_to_cpu(ep->wMaxPacketSize); fp->private_data = _mtp_dev; return 0; } } return -ENODEV; } static int mtp_release(struct inode *ip, struct file *fp) { printk(KERN_INFO "mtp_release\n"); mtp_unlock(&_mtp_dev->open_excl); return 0; } /* file operations for /dev/mtp_usb */ static const struct file_operations mtp_fops = { .owner = THIS_MODULE, .read = mtp_read, .write = mtp_write, .unlocked_ioctl = mtp_ioctl, .open = mtp_open, .release = mtp_release, }; static struct miscdevice mtp_device = { .minor = MISC_DYNAMIC_MINOR, .name = mtp_shortname, .fops = &mtp_fops, }; static int mtp_ctrlrequest(struct usb_composite_dev *cdev, const struct usb_ctrlrequest *ctrl) { struct mtp_dev *dev = _mtp_dev; int value = -EOPNOTSUPP; u16 w_index = le16_to_cpu(ctrl->wIndex); u16 w_value = le16_to_cpu(ctrl->wValue); u16 w_length = le16_to_cpu(ctrl->wLength); unsigned long flags; VDBG(cdev, "mtp_ctrlrequest " "%02x.%02x v%04x i%04x l%u\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); /* Handle MTP OS string */ if (ctrl->bRequestType == (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE) && ctrl->bRequest == USB_REQ_GET_DESCRIPTOR && (w_value >> 8) == USB_DT_STRING && (w_value & 0xFF) == MTP_OS_STRING_ID) { value = (w_length < sizeof(mtp_os_string) ? w_length : sizeof(mtp_os_string)); memcpy(cdev->req->buf, mtp_os_string, value); } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) { /* Handle MTP OS descriptor */ DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n", ctrl->bRequest, w_index, w_value, w_length); if (ctrl->bRequest == 1 && (ctrl->bRequestType & USB_DIR_IN) && (w_index == 4 || w_index == 5)) { value = (w_length < sizeof(mtp_ext_config_desc) ? 
w_length : sizeof(mtp_ext_config_desc)); memcpy(cdev->req->buf, &mtp_ext_config_desc, value); } } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) { DBG(cdev, "class request: %d index: %d value: %d length: %d\n", ctrl->bRequest, w_index, w_value, w_length); if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0 && w_value == 0) { DBG(cdev, "MTP_REQ_CANCEL\n"); spin_lock_irqsave(&dev->lock, flags); if (dev->state == STATE_BUSY) { dev->state = STATE_CANCELED; wake_up(&dev->read_wq); wake_up(&dev->write_wq); } spin_unlock_irqrestore(&dev->lock, flags); /* We need to queue a request to read the remaining * bytes, but we don't actually need to look at * the contents. */ value = w_length; } else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS && w_index == 0 && w_value == 0) { struct mtp_device_status *status = cdev->req->buf; status->wLength = __constant_cpu_to_le16(sizeof(*status)); DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n"); spin_lock_irqsave(&dev->lock, flags); /* device status is "busy" until we report * the cancelation to userspace */ if (dev->state == STATE_CANCELED) status->wCode = __cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY); else status->wCode = __cpu_to_le16(MTP_RESPONSE_OK); spin_unlock_irqrestore(&dev->lock, flags); value = sizeof(*status); } } /* respond with data transfer or status phase? */ if (value >= 0) { int rc; cdev->req->zero = value < w_length; cdev->req->length = value; rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC); if (rc < 0) ERROR(cdev, "%s setup response queue error\n", __func__); } return value; } static int mtp_function_bind(struct usb_configuration *c, struct usb_function *f) { struct usb_composite_dev *cdev = c->cdev; struct mtp_dev *dev = func_to_mtp(f); int id; int ret; dev->cdev = cdev; DBG(cdev, "mtp_function_bind dev: %p\n", dev); /* allocate interface ID(s) */ id = usb_interface_id(c, f); if (id < 0) return id; mtp_interface_desc.bInterfaceNumber = id; /* allocate endpoints */ ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc, &mtp_fullspeed_out_desc, &mtp_intr_desc); if (ret) return ret; /* support high speed hardware */ if (gadget_is_dualspeed(c->cdev->gadget)) { mtp_highspeed_in_desc.bEndpointAddress = mtp_fullspeed_in_desc.bEndpointAddress; mtp_highspeed_out_desc.bEndpointAddress = mtp_fullspeed_out_desc.bEndpointAddress; } DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n", gadget_is_dualspeed(c->cdev->gadget) ? 
"dual" : "full", f->name, dev->ep_in->name, dev->ep_out->name); return 0; } static void mtp_function_unbind(struct usb_configuration *c, struct usb_function *f) { struct mtp_dev *dev = func_to_mtp(f); struct usb_request *req; int i; while ((req = mtp_req_get(dev, &dev->tx_idle))) mtp_request_free(req, dev->ep_in); for (i = 0; i < RX_REQ_MAX; i++) mtp_request_free(dev->rx_req[i], dev->ep_out); while ((req = mtp_req_get(dev, &dev->intr_idle))) mtp_request_free(req, dev->ep_intr); dev->state = STATE_OFFLINE; } static int mtp_function_set_alt(struct usb_function *f, unsigned intf, unsigned alt) { struct mtp_dev *dev = func_to_mtp(f); struct usb_composite_dev *cdev = f->config->cdev; int ret; DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt); ret = usb_ep_enable(dev->ep_in, ep_choose(cdev->gadget, &mtp_highspeed_in_desc, &mtp_fullspeed_in_desc)); if (ret) return ret; ret = usb_ep_enable(dev->ep_out, ep_choose(cdev->gadget, &mtp_highspeed_out_desc, &mtp_fullspeed_out_desc)); if (ret) { usb_ep_disable(dev->ep_in); return ret; } ret = usb_ep_enable(dev->ep_intr, &mtp_intr_desc); if (ret) { usb_ep_disable(dev->ep_out); usb_ep_disable(dev->ep_in); return ret; } dev->state = STATE_READY; /* readers may be blocked waiting for us to go online */ wake_up(&dev->read_wq); return 0; } static void mtp_function_disable(struct usb_function *f) { struct mtp_dev *dev = func_to_mtp(f); struct usb_composite_dev *cdev = dev->cdev; DBG(cdev, "mtp_function_disable\n"); dev->state = STATE_OFFLINE; usb_ep_disable(dev->ep_in); usb_ep_disable(dev->ep_out); usb_ep_disable(dev->ep_intr); /* readers may be blocked waiting for us to go online */ wake_up(&dev->read_wq); VDBG(cdev, "%s disabled\n", dev->function.name); } static int mtp_bind_config(struct usb_configuration *c, bool ptp_config) { struct mtp_dev *dev = _mtp_dev; int ret = 0; printk(KERN_INFO "mtp_bind_config\n"); /* allocate a string ID for our interface */ if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) { ret = usb_string_id(c->cdev); if (ret < 0) return ret; mtp_string_defs[INTERFACE_STRING_INDEX].id = ret; mtp_interface_desc.iInterface = ret; } dev->cdev = c->cdev; dev->function.name = "mtp"; dev->function.strings = mtp_strings; if (ptp_config) { dev->function.descriptors = fs_ptp_descs; dev->function.hs_descriptors = hs_ptp_descs; } else { dev->function.descriptors = fs_mtp_descs; dev->function.hs_descriptors = hs_mtp_descs; } dev->function.bind = mtp_function_bind; dev->function.unbind = mtp_function_unbind; dev->function.set_alt = mtp_function_set_alt; dev->function.disable = mtp_function_disable; return usb_add_function(c, &dev->function); } static int mtp_setup(void) { struct mtp_dev *dev; int ret; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; spin_lock_init(&dev->lock); init_waitqueue_head(&dev->read_wq); init_waitqueue_head(&dev->write_wq); init_waitqueue_head(&dev->intr_wq); atomic_set(&dev->open_excl, 0); atomic_set(&dev->ioctl_excl, 0); INIT_LIST_HEAD(&dev->tx_idle); INIT_LIST_HEAD(&dev->intr_idle); dev->wq = create_singlethread_workqueue("f_mtp"); if (!dev->wq) { ret = -ENOMEM; goto err1; } INIT_WORK(&dev->send_file_work, send_file_work); INIT_WORK(&dev->receive_file_work, receive_file_work); _mtp_dev = dev; ret = misc_register(&mtp_device); if (ret) goto err2; return 0; err2: destroy_workqueue(dev->wq); err1: _mtp_dev = NULL; kfree(dev); printk(KERN_ERR "mtp gadget driver failed to initialize\n"); return ret; } static void mtp_cleanup(void) { struct mtp_dev *dev = _mtp_dev; if (!dev) return; 
misc_deregister(&mtp_device); destroy_workqueue(dev->wq); _mtp_dev = NULL; kfree(dev); }
transi/kernel_amazon_bowser-common
drivers/usb/gadget/f_mtp.c
C
gpl-2.0
32,603
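/*
 * Sketch of the zero-length-packet rule used by mtp_write() and
 * send_file_work() in the gadget driver above: when the total transfer is
 * an exact multiple of the endpoint's wMaxPacketSize, one extra empty
 * packet must follow so the host can detect the end of the transfer. The
 * bitmask test assumes a power-of-two packet size (512 at high speed),
 * as the driver does; the helper name is invented for illustration.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

static bool needs_zlp(uint64_t count, uint32_t maxpacket)
{
	/* equivalent to count % maxpacket == 0 when maxpacket is a power of two */
	return (count & (maxpacket - 1)) == 0;
}

int main(void)
{
	printf("%d\n", needs_zlp(16384, 512)); /* 1: multiple of 512, ZLP needed */
	printf("%d\n", needs_zlp(16385, 512)); /* 0: short final packet ends it */
	return 0;
}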
/* * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR * policies) */ #include "sched.h" #include <linux/slab.h> #include <trace/events/sched.h> int sched_rr_timeslice = RR_TIMESLICE; static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun); struct rt_bandwidth def_rt_bandwidth; static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer) { struct rt_bandwidth *rt_b = container_of(timer, struct rt_bandwidth, rt_period_timer); ktime_t now; int overrun; int idle = 0; for (;;) { now = hrtimer_cb_get_time(timer); overrun = hrtimer_forward(timer, now, rt_b->rt_period); if (!overrun) break; idle = do_sched_rt_period_timer(rt_b, overrun); } return idle ? HRTIMER_NORESTART : HRTIMER_RESTART; } void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) { rt_b->rt_period = ns_to_ktime(period); rt_b->rt_runtime = runtime; raw_spin_lock_init(&rt_b->rt_runtime_lock); hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); rt_b->rt_period_timer.function = sched_rt_period_timer; } static void start_rt_bandwidth(struct rt_bandwidth *rt_b) { if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF) return; if (hrtimer_active(&rt_b->rt_period_timer)) return; raw_spin_lock(&rt_b->rt_runtime_lock); start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period); raw_spin_unlock(&rt_b->rt_runtime_lock); } void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq) { struct rt_prio_array *array; int i; array = &rt_rq->active; for (i = 0; i < MAX_RT_PRIO; i++) { INIT_LIST_HEAD(array->queue + i); __clear_bit(i, array->bitmap); } /* delimiter for bitsearch: */ __set_bit(MAX_RT_PRIO, array->bitmap); #if defined CONFIG_SMP rt_rq->highest_prio.curr = MAX_RT_PRIO; rt_rq->highest_prio.next = MAX_RT_PRIO; rt_rq->rt_nr_migratory = 0; rt_rq->overloaded = 0; plist_head_init(&rt_rq->pushable_tasks); #endif rt_rq->rt_time = 0; rt_rq->rt_throttled = 0; rt_rq->rt_runtime = 0; raw_spin_lock_init(&rt_rq->rt_runtime_lock); } #ifdef CONFIG_RT_GROUP_SCHED static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b) { hrtimer_cancel(&rt_b->rt_period_timer); } #define rt_entity_is_task(rt_se) (!(rt_se)->my_q) static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se) { #ifdef CONFIG_SCHED_DEBUG WARN_ON_ONCE(!rt_entity_is_task(rt_se)); #endif return container_of(rt_se, struct task_struct, rt); } static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) { return rt_rq->rq; } static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) { return rt_se->rt_rq; } void free_rt_sched_group(struct task_group *tg) { int i; if (tg->rt_se) destroy_rt_bandwidth(&tg->rt_bandwidth); for_each_possible_cpu(i) { if (tg->rt_rq) kfree(tg->rt_rq[i]); if (tg->rt_se) kfree(tg->rt_se[i]); } kfree(tg->rt_rq); kfree(tg->rt_se); } void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int cpu, struct sched_rt_entity *parent) { struct rq *rq = cpu_rq(cpu); rt_rq->highest_prio.curr = MAX_RT_PRIO; rt_rq->rt_nr_boosted = 0; rt_rq->rq = rq; rt_rq->tg = tg; tg->rt_rq[cpu] = rt_rq; tg->rt_se[cpu] = rt_se; if (!rt_se) return; if (!parent) rt_se->rt_rq = &rq->rt; else rt_se->rt_rq = parent->my_q; rt_se->my_q = rt_rq; rt_se->parent = parent; INIT_LIST_HEAD(&rt_se->run_list); } int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) { struct rt_rq *rt_rq; struct sched_rt_entity *rt_se; int i; tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL); if (!tg->rt_rq) goto err; tg->rt_se = 
kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL); if (!tg->rt_se) goto err; init_rt_bandwidth(&tg->rt_bandwidth, ktime_to_ns(def_rt_bandwidth.rt_period), 0); for_each_possible_cpu(i) { rt_rq = kzalloc_node(sizeof(struct rt_rq), GFP_KERNEL, cpu_to_node(i)); if (!rt_rq) goto err; rt_se = kzalloc_node(sizeof(struct sched_rt_entity), GFP_KERNEL, cpu_to_node(i)); if (!rt_se) goto err_free_rq; init_rt_rq(rt_rq, cpu_rq(i)); rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime; init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]); } return 1; err_free_rq: kfree(rt_rq); err: return 0; } #else /* CONFIG_RT_GROUP_SCHED */ #define rt_entity_is_task(rt_se) (1) static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se) { return container_of(rt_se, struct task_struct, rt); } static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) { return container_of(rt_rq, struct rq, rt); } static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) { struct task_struct *p = rt_task_of(rt_se); struct rq *rq = task_rq(p); return &rq->rt; } void free_rt_sched_group(struct task_group *tg) { } int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) { return 1; } #endif /* CONFIG_RT_GROUP_SCHED */ #ifdef CONFIG_SMP static inline int rt_overloaded(struct rq *rq) { return atomic_read(&rq->rd->rto_count); } static inline void rt_set_overload(struct rq *rq) { if (!rq->online) return; cpumask_set_cpu(rq->cpu, rq->rd->rto_mask); /* * Make sure the mask is visible before we set * the overload count. That is checked to determine * if we should look at the mask. It would be a shame * if we looked at the mask, but the mask was not * updated yet. */ wmb(); atomic_inc(&rq->rd->rto_count); } static inline void rt_clear_overload(struct rq *rq) { if (!rq->online) return; /* the order here really doesn't matter */ atomic_dec(&rq->rd->rto_count); cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask); } static void update_rt_migration(struct rt_rq *rt_rq) { if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) { if (!rt_rq->overloaded) { rt_set_overload(rq_of_rt_rq(rt_rq)); rt_rq->overloaded = 1; } } else if (rt_rq->overloaded) { rt_clear_overload(rq_of_rt_rq(rt_rq)); rt_rq->overloaded = 0; } } static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) { struct task_struct *p; if (!rt_entity_is_task(rt_se)) return; p = rt_task_of(rt_se); rt_rq = &rq_of_rt_rq(rt_rq)->rt; rt_rq->rt_nr_total++; if (p->nr_cpus_allowed > 1) rt_rq->rt_nr_migratory++; update_rt_migration(rt_rq); } static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) { struct task_struct *p; if (!rt_entity_is_task(rt_se)) return; p = rt_task_of(rt_se); rt_rq = &rq_of_rt_rq(rt_rq)->rt; rt_rq->rt_nr_total--; if (p->nr_cpus_allowed > 1) rt_rq->rt_nr_migratory--; update_rt_migration(rt_rq); } static inline int has_pushable_tasks(struct rq *rq) { return !plist_head_empty(&rq->rt.pushable_tasks); } static void enqueue_pushable_task(struct rq *rq, struct task_struct *p) { plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); plist_node_init(&p->pushable_tasks, p->prio); plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks); /* Update the highest prio pushable task */ if (p->prio < rq->rt.highest_prio.next) rq->rt.highest_prio.next = p->prio; } static void dequeue_pushable_task(struct rq *rq, struct task_struct *p) { plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); /* Update the new highest prio pushable task */ if (has_pushable_tasks(rq)) { p = plist_first_entry(&rq->rt.pushable_tasks, struct 
task_struct, pushable_tasks); rq->rt.highest_prio.next = p->prio; } else rq->rt.highest_prio.next = MAX_RT_PRIO; } #else static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p) { } static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p) { } static inline void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) { } static inline void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) { } #endif /* CONFIG_SMP */ static inline int on_rt_rq(struct sched_rt_entity *rt_se) { return !list_empty(&rt_se->run_list); } #ifdef CONFIG_RT_GROUP_SCHED static inline u64 sched_rt_runtime(struct rt_rq *rt_rq) { if (!rt_rq->tg) return RUNTIME_INF; return rt_rq->rt_runtime; } static inline u64 sched_rt_period(struct rt_rq *rt_rq) { return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period); } typedef struct task_group *rt_rq_iter_t; static inline struct task_group *next_task_group(struct task_group *tg) { do { tg = list_entry_rcu(tg->list.next, typeof(struct task_group), list); } while (&tg->list != &task_groups && task_group_is_autogroup(tg)); if (&tg->list == &task_groups) tg = NULL; return tg; } #define for_each_rt_rq(rt_rq, iter, rq) \ for (iter = container_of(&task_groups, typeof(*iter), list); \ (iter = next_task_group(iter)) && \ (rt_rq = iter->rt_rq[cpu_of(rq)]);) static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq) { list_add_rcu(&rt_rq->leaf_rt_rq_list, &rq_of_rt_rq(rt_rq)->leaf_rt_rq_list); } static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq) { list_del_rcu(&rt_rq->leaf_rt_rq_list); } #define for_each_leaf_rt_rq(rt_rq, rq) \ list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list) #define for_each_sched_rt_entity(rt_se) \ for (; rt_se; rt_se = rt_se->parent) static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) { return rt_se->my_q; } static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head); static void dequeue_rt_entity(struct sched_rt_entity *rt_se); static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) { struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; struct sched_rt_entity *rt_se; int cpu = cpu_of(rq_of_rt_rq(rt_rq)); rt_se = rt_rq->tg->rt_se[cpu]; if (rt_rq->rt_nr_running) { if (rt_se && !on_rt_rq(rt_se)) enqueue_rt_entity(rt_se, false); if (rt_rq->highest_prio.curr < curr->prio) resched_task(curr); } } static void sched_rt_rq_dequeue(struct rt_rq *rt_rq) { struct sched_rt_entity *rt_se; int cpu = cpu_of(rq_of_rt_rq(rt_rq)); rt_se = rt_rq->tg->rt_se[cpu]; if (rt_se && on_rt_rq(rt_se)) dequeue_rt_entity(rt_se); } static inline int rt_rq_throttled(struct rt_rq *rt_rq) { return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted; } static int rt_se_boosted(struct sched_rt_entity *rt_se) { struct rt_rq *rt_rq = group_rt_rq(rt_se); struct task_struct *p; if (rt_rq) return !!rt_rq->rt_nr_boosted; p = rt_task_of(rt_se); return p->prio != p->normal_prio; } #ifdef CONFIG_SMP static inline const struct cpumask *sched_rt_period_mask(void) { return cpu_rq(smp_processor_id())->rd->span; } #else static inline const struct cpumask *sched_rt_period_mask(void) { return cpu_online_mask; } #endif static inline struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) { return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu]; } static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) { return &rt_rq->tg->rt_bandwidth; } #else /* !CONFIG_RT_GROUP_SCHED */ static inline u64 sched_rt_runtime(struct rt_rq *rt_rq) { return 
rt_rq->rt_runtime; } static inline u64 sched_rt_period(struct rt_rq *rt_rq) { return ktime_to_ns(def_rt_bandwidth.rt_period); } typedef struct rt_rq *rt_rq_iter_t; #define for_each_rt_rq(rt_rq, iter, rq) \ for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL) static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq) { } static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq) { } #define for_each_leaf_rt_rq(rt_rq, rq) \ for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL) #define for_each_sched_rt_entity(rt_se) \ for (; rt_se; rt_se = NULL) static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) { return NULL; } static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq) { if (rt_rq->rt_nr_running) resched_task(rq_of_rt_rq(rt_rq)->curr); } static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq) { } static inline int rt_rq_throttled(struct rt_rq *rt_rq) { return rt_rq->rt_throttled; } static inline const struct cpumask *sched_rt_period_mask(void) { return cpu_online_mask; } static inline struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) { return &cpu_rq(cpu)->rt; } static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) { return &def_rt_bandwidth; } #endif /* CONFIG_RT_GROUP_SCHED */ #ifdef CONFIG_SMP /* * We ran out of runtime, see if we can borrow some from our neighbours. */ static int do_balance_runtime(struct rt_rq *rt_rq) { struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd; int i, weight, more = 0; u64 rt_period; weight = cpumask_weight(rd->span); raw_spin_lock(&rt_b->rt_runtime_lock); rt_period = ktime_to_ns(rt_b->rt_period); for_each_cpu(i, rd->span) { struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); s64 diff; if (iter == rt_rq) continue; raw_spin_lock(&iter->rt_runtime_lock); /* * Either all rqs have inf runtime and there's nothing to steal * or __disable_runtime() below sets a specific rq to inf to * indicate it's been disabled and disallow stealing. */ if (iter->rt_runtime == RUNTIME_INF) goto next; /* * From runqueues with spare time, take 1/n part of their * spare time, but no more than our period. */ diff = iter->rt_runtime - iter->rt_time; if (diff > 0) { diff = div_u64((u64)diff, weight); if (rt_rq->rt_runtime + diff > rt_period) diff = rt_period - rt_rq->rt_runtime; iter->rt_runtime -= diff; rt_rq->rt_runtime += diff; more = 1; if (rt_rq->rt_runtime == rt_period) { raw_spin_unlock(&iter->rt_runtime_lock); break; } } next: raw_spin_unlock(&iter->rt_runtime_lock); } raw_spin_unlock(&rt_b->rt_runtime_lock); return more; } /* * Ensure this RQ takes back all the runtime it lent to its neighbours. */ static void __disable_runtime(struct rq *rq) { struct root_domain *rd = rq->rd; rt_rq_iter_t iter; struct rt_rq *rt_rq; if (unlikely(!scheduler_running)) return; for_each_rt_rq(rt_rq, iter, rq) { struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); s64 want; int i; raw_spin_lock(&rt_b->rt_runtime_lock); raw_spin_lock(&rt_rq->rt_runtime_lock); /* * Either we're all inf and nobody needs to borrow, or we're * already disabled and thus have nothing to do, or we have * exactly the right amount of runtime to take out. */ if (rt_rq->rt_runtime == RUNTIME_INF || rt_rq->rt_runtime == rt_b->rt_runtime) goto balanced; raw_spin_unlock(&rt_rq->rt_runtime_lock); /* * Calculate the difference between what we started out with * and what we currently have, that's the amount of runtime * we lent and now have to reclaim. 
*/ want = rt_b->rt_runtime - rt_rq->rt_runtime; /* * Greedy reclaim, take back as much as we can. */ for_each_cpu(i, rd->span) { struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); s64 diff; /* * Can't reclaim from ourselves or disabled runqueues. */ if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF) continue; raw_spin_lock(&iter->rt_runtime_lock); if (want > 0) { diff = min_t(s64, iter->rt_runtime, want); iter->rt_runtime -= diff; want -= diff; } else { iter->rt_runtime -= want; want -= want; } raw_spin_unlock(&iter->rt_runtime_lock); if (!want) break; } raw_spin_lock(&rt_rq->rt_runtime_lock); /* * We cannot be left wanting - that would mean some runtime * leaked out of the system. */ BUG_ON(want); balanced: /* * Disable all the borrow logic by pretending we have inf * runtime - in which case borrowing doesn't make sense. */ rt_rq->rt_runtime = RUNTIME_INF; rt_rq->rt_throttled = 0; raw_spin_unlock(&rt_rq->rt_runtime_lock); raw_spin_unlock(&rt_b->rt_runtime_lock); } } static void __enable_runtime(struct rq *rq) { rt_rq_iter_t iter; struct rt_rq *rt_rq; if (unlikely(!scheduler_running)) return; /* * Reset each runqueue's bandwidth settings */ for_each_rt_rq(rt_rq, iter, rq) { struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); raw_spin_lock(&rt_b->rt_runtime_lock); raw_spin_lock(&rt_rq->rt_runtime_lock); rt_rq->rt_runtime = rt_b->rt_runtime; rt_rq->rt_time = 0; rt_rq->rt_throttled = 0; raw_spin_unlock(&rt_rq->rt_runtime_lock); raw_spin_unlock(&rt_b->rt_runtime_lock); } } static int balance_runtime(struct rt_rq *rt_rq) { int more = 0; if (!sched_feat(RT_RUNTIME_SHARE)) return more; if (rt_rq->rt_time > rt_rq->rt_runtime) { raw_spin_unlock(&rt_rq->rt_runtime_lock); more = do_balance_runtime(rt_rq); raw_spin_lock(&rt_rq->rt_runtime_lock); } return more; } #else /* !CONFIG_SMP */ static inline int balance_runtime(struct rt_rq *rt_rq) { return 0; } #endif /* CONFIG_SMP */ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) { int i, idle = 1, throttled = 0; const struct cpumask *span; span = sched_rt_period_mask(); #ifdef CONFIG_RT_GROUP_SCHED /* * FIXME: isolated CPUs should really leave the root task group, * whether they are isolcpus or were isolated via cpusets, lest * the timer run on a CPU which does not service all runqueues, * potentially leaving other CPUs indefinitely throttled. If * isolation is really required, the user will turn the throttle * off to kill the perturbations it causes anyway. Meanwhile, * this maintains functionality for boot and/or troubleshooting. */ if (rt_b == &root_task_group.rt_bandwidth) span = cpu_online_mask; #endif for_each_cpu(i, span) { int enqueue = 0; struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); struct rq *rq = rq_of_rt_rq(rt_rq); raw_spin_lock(&rq->lock); if (rt_rq->rt_time) { u64 runtime; raw_spin_lock(&rt_rq->rt_runtime_lock); if (rt_rq->rt_throttled) balance_runtime(rt_rq); runtime = rt_rq->rt_runtime; rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime); if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) { rt_rq->rt_throttled = 0; enqueue = 1; /* * Force a clock update if the CPU was idle, * lest wakeup -> unthrottle time accumulate. 
*/ if (rt_rq->rt_nr_running && rq->curr == rq->idle) rq->skip_clock_update = -1; } if (rt_rq->rt_time || rt_rq->rt_nr_running) idle = 0; raw_spin_unlock(&rt_rq->rt_runtime_lock); } else if (rt_rq->rt_nr_running) { idle = 0; if (!rt_rq_throttled(rt_rq)) enqueue = 1; } if (rt_rq->rt_throttled) throttled = 1; if (enqueue) sched_rt_rq_enqueue(rt_rq); raw_spin_unlock(&rq->lock); } if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)) return 1; return idle; } static inline int rt_se_prio(struct sched_rt_entity *rt_se) { #ifdef CONFIG_RT_GROUP_SCHED struct rt_rq *rt_rq = group_rt_rq(rt_se); if (rt_rq) return rt_rq->highest_prio.curr; #endif return rt_task_of(rt_se)->prio; } static void dump_throttled_rt_tasks(struct rt_rq *rt_rq) { struct rt_prio_array *array = &rt_rq->active; struct sched_rt_entity *rt_se; char buf[500]; char *pos = buf; char *end = buf + sizeof(buf); int idx; pos += snprintf(pos, sizeof(buf), "sched: RT throttling activated for rt_rq %p (cpu %d)\n", rt_rq, cpu_of(rq_of_rt_rq(rt_rq))); if (bitmap_empty(array->bitmap, MAX_RT_PRIO)) goto out; pos += snprintf(pos, end - pos, "potential CPU hogs:\n"); idx = sched_find_first_bit(array->bitmap); while (idx < MAX_RT_PRIO) { list_for_each_entry(rt_se, array->queue + idx, run_list) { struct task_struct *p; if (!rt_entity_is_task(rt_se)) continue; p = rt_task_of(rt_se); if (pos < end) pos += snprintf(pos, end - pos, "\t%s (%d)\n", p->comm, p->pid); } idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx + 1); } out: #ifdef CONFIG_PANIC_ON_RT_THROTTLING /* * Use pr_err() in the BUG() case since printk_sched() will * not get flushed and deadlock is not a concern. */ pr_err("%s", buf); BUG(); #else printk_deferred("%s", buf); #endif } static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq) { u64 runtime = sched_rt_runtime(rt_rq); if (rt_rq->rt_throttled) return rt_rq_throttled(rt_rq); if (runtime >= sched_rt_period(rt_rq)) return 0; balance_runtime(rt_rq); runtime = sched_rt_runtime(rt_rq); if (runtime == RUNTIME_INF) return 0; if (rt_rq->rt_time > runtime) { struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); /* * Don't actually throttle groups that have no runtime assigned * but accrue some time due to boosting. */ if (likely(rt_b->rt_runtime)) { static bool once = false; rt_rq->rt_throttled = 1; if (!once) { once = true; dump_throttled_rt_tasks(rt_rq); } } else { /* * In case we did anyway, make it go away, * replenishment is a joke, since it will replenish us * with exactly 0 ns. */ rt_rq->rt_time = 0; } if (rt_rq_throttled(rt_rq)) { sched_rt_rq_dequeue(rt_rq); return 1; } } return 0; } /* * Update the current task's runtime statistics. Skip current tasks that * are not in our scheduling class. 
*/ static void update_curr_rt(struct rq *rq) { struct task_struct *curr = rq->curr; struct sched_rt_entity *rt_se = &curr->rt; struct rt_rq *rt_rq = rt_rq_of_se(rt_se); u64 delta_exec; if (curr->sched_class != &rt_sched_class) return; delta_exec = rq->clock_task - curr->se.exec_start; if (unlikely((s64)delta_exec <= 0)) return; schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec)); curr->se.sum_exec_runtime += delta_exec; account_group_exec_runtime(curr, delta_exec); curr->se.exec_start = rq->clock_task; cpuacct_charge(curr, delta_exec); sched_rt_avg_update(rq, delta_exec); if (!rt_bandwidth_enabled()) return; for_each_sched_rt_entity(rt_se) { rt_rq = rt_rq_of_se(rt_se); if (sched_rt_runtime(rt_rq) != RUNTIME_INF) { raw_spin_lock(&rt_rq->rt_runtime_lock); rt_rq->rt_time += delta_exec; if (sched_rt_runtime_exceeded(rt_rq)) resched_task(curr); raw_spin_unlock(&rt_rq->rt_runtime_lock); } } } #if defined CONFIG_SMP static void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) { struct rq *rq = rq_of_rt_rq(rt_rq); #ifdef CONFIG_RT_GROUP_SCHED /* * Change rq's cpupri only if rt_rq is the top queue. */ if (&rq->rt != rt_rq) return; #endif if (rq->online && prio < prev_prio) cpupri_set(&rq->rd->cpupri, rq->cpu, prio); } static void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) { struct rq *rq = rq_of_rt_rq(rt_rq); #ifdef CONFIG_RT_GROUP_SCHED /* * Change rq's cpupri only if rt_rq is the top queue. */ if (&rq->rt != rt_rq) return; #endif if (rq->online && rt_rq->highest_prio.curr != prev_prio) cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); } #else /* CONFIG_SMP */ static inline void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {} static inline void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {} #endif /* CONFIG_SMP */ #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED static void inc_rt_prio(struct rt_rq *rt_rq, int prio) { int prev_prio = rt_rq->highest_prio.curr; if (prio < prev_prio) rt_rq->highest_prio.curr = prio; inc_rt_prio_smp(rt_rq, prio, prev_prio); } static void dec_rt_prio(struct rt_rq *rt_rq, int prio) { int prev_prio = rt_rq->highest_prio.curr; if (rt_rq->rt_nr_running) { WARN_ON(prio < prev_prio); /* * This may have been our highest task, and therefore * we may have some recomputation to do */ if (prio == prev_prio) { struct rt_prio_array *array = &rt_rq->active; rt_rq->highest_prio.curr = sched_find_first_bit(array->bitmap); } } else rt_rq->highest_prio.curr = MAX_RT_PRIO; dec_rt_prio_smp(rt_rq, prio, prev_prio); } #else static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {} static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {} #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */ #ifdef CONFIG_RT_GROUP_SCHED static void inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) { if (rt_se_boosted(rt_se)) rt_rq->rt_nr_boosted++; if (rt_rq->tg) start_rt_bandwidth(&rt_rq->tg->rt_bandwidth); } static void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) { if (rt_se_boosted(rt_se)) rt_rq->rt_nr_boosted--; WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted); } #else /* CONFIG_RT_GROUP_SCHED */ static void inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) { start_rt_bandwidth(&def_rt_bandwidth); } static inline void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {} #endif /* CONFIG_RT_GROUP_SCHED */ #ifdef CONFIG_SCHED_HMP static void inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) 
{ inc_cumulative_runnable_avg(&rq->hmp_stats, p); } static void dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { dec_cumulative_runnable_avg(&rq->hmp_stats, p); } #else /* CONFIG_SCHED_HMP */ static inline void inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { } static inline void dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { } #endif /* CONFIG_SCHED_HMP */ static inline void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) { int prio = rt_se_prio(rt_se); WARN_ON(!rt_prio(prio)); rt_rq->rt_nr_running++; inc_rt_prio(rt_rq, prio); inc_rt_migration(rt_se, rt_rq); inc_rt_group(rt_se, rt_rq); } static inline void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) { WARN_ON(!rt_prio(rt_se_prio(rt_se))); WARN_ON(!rt_rq->rt_nr_running); rt_rq->rt_nr_running--; dec_rt_prio(rt_rq, rt_se_prio(rt_se)); dec_rt_migration(rt_se, rt_rq); dec_rt_group(rt_se, rt_rq); } static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head) { struct rt_rq *rt_rq = rt_rq_of_se(rt_se); struct rt_prio_array *array = &rt_rq->active; struct rt_rq *group_rq = group_rt_rq(rt_se); struct list_head *queue = array->queue + rt_se_prio(rt_se); /* * Don't enqueue the group if it's throttled, or when empty. * The latter is a consequence of the former when a child group * gets throttled and the current group doesn't have any other * active members. */ if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) return; if (!rt_rq->rt_nr_running) list_add_leaf_rt_rq(rt_rq); if (head) list_add(&rt_se->run_list, queue); else list_add_tail(&rt_se->run_list, queue); __set_bit(rt_se_prio(rt_se), array->bitmap); inc_rt_tasks(rt_se, rt_rq); } static void __dequeue_rt_entity(struct sched_rt_entity *rt_se) { struct rt_rq *rt_rq = rt_rq_of_se(rt_se); struct rt_prio_array *array = &rt_rq->active; list_del_init(&rt_se->run_list); if (list_empty(array->queue + rt_se_prio(rt_se))) __clear_bit(rt_se_prio(rt_se), array->bitmap); dec_rt_tasks(rt_se, rt_rq); if (!rt_rq->rt_nr_running) list_del_leaf_rt_rq(rt_rq); } /* * Because the prio of an upper entry depends on the lower * entries, we must remove entries top - down. 
*/ static void dequeue_rt_stack(struct sched_rt_entity *rt_se) { struct sched_rt_entity *back = NULL; for_each_sched_rt_entity(rt_se) { rt_se->back = back; back = rt_se; } for (rt_se = back; rt_se; rt_se = rt_se->back) { if (on_rt_rq(rt_se)) __dequeue_rt_entity(rt_se); } } static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head) { dequeue_rt_stack(rt_se); for_each_sched_rt_entity(rt_se) __enqueue_rt_entity(rt_se, head); } static void dequeue_rt_entity(struct sched_rt_entity *rt_se) { dequeue_rt_stack(rt_se); for_each_sched_rt_entity(rt_se) { struct rt_rq *rt_rq = group_rt_rq(rt_se); if (rt_rq && rt_rq->rt_nr_running) __enqueue_rt_entity(rt_se, false); } } /* * Adding/removing a task to/from a priority array: */ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) { struct sched_rt_entity *rt_se = &p->rt; if (flags & ENQUEUE_WAKEUP) rt_se->timeout = 0; enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD); if (!task_current(rq, p) && p->nr_cpus_allowed > 1) enqueue_pushable_task(rq, p); inc_nr_running(rq); inc_hmp_sched_stats_rt(rq, p); } static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) { struct sched_rt_entity *rt_se = &p->rt; update_curr_rt(rq); dequeue_rt_entity(rt_se); dequeue_pushable_task(rq, p); dec_nr_running(rq); dec_hmp_sched_stats_rt(rq, p); } /* * Put task to the head or the end of the run list without the overhead of * dequeue followed by enqueue. */ static void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head) { if (on_rt_rq(rt_se)) { struct rt_prio_array *array = &rt_rq->active; struct list_head *queue = array->queue + rt_se_prio(rt_se); if (head) list_move(&rt_se->run_list, queue); else list_move_tail(&rt_se->run_list, queue); } } static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head) { struct sched_rt_entity *rt_se = &p->rt; struct rt_rq *rt_rq; for_each_sched_rt_entity(rt_se) { rt_rq = rt_rq_of_se(rt_se); requeue_rt_entity(rt_rq, rt_se, head); } } static void yield_task_rt(struct rq *rq) { requeue_task_rt(rq, rq->curr, 0); } #ifdef CONFIG_SMP static int find_lowest_rq(struct task_struct *task); static int select_task_rq_rt_hmp(struct task_struct *p, int sd_flag, int flags) { int cpu, target; cpu = task_cpu(p); rcu_read_lock(); target = find_lowest_rq(p); if (target != -1) cpu = target; rcu_read_unlock(); return cpu; } static int select_task_rq_rt(struct task_struct *p, int sd_flag, int flags) { struct task_struct *curr; struct rq *rq; int cpu; cpu = task_cpu(p); if (p->nr_cpus_allowed == 1) goto out; if (sched_enable_hmp) return select_task_rq_rt_hmp(p, sd_flag, flags); /* For anything but wake ups, just return the task_cpu */ if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK) goto out; rq = cpu_rq(cpu); rcu_read_lock(); curr = ACCESS_ONCE(rq->curr); /* unlocked access */ /* * If the current task on @p's runqueue is an RT task, then * try to see if we can wake this RT task up on another * runqueue. Otherwise simply start this RT task * on its current runqueue. * * We want to avoid overloading runqueues. If the woken * task is a higher priority, then it will stay on this CPU * and the lower prio task should be moved to another CPU. * Even though this will probably make the lower prio task * lose its cache, we do not want to bounce a higher task * around just because it gave up its CPU, perhaps for a * lock? * * For equal prio tasks, we just let the scheduler sort it out. 
* * Otherwise, just let it ride on the affined RQ and the * post-schedule router will push the preempted task away. * * This test is optimistic; if we get it wrong the load-balancer * will have to sort it out. */ if (curr && unlikely(rt_task(curr)) && (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio) && (p->nr_cpus_allowed > 1)) { int target = find_lowest_rq(p); if (target != -1) cpu = target; } rcu_read_unlock(); out: return cpu; } static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) { if (rq->curr->nr_cpus_allowed == 1) return; if (p->nr_cpus_allowed != 1 && cpupri_find(&rq->rd->cpupri, p, NULL)) return; if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) return; /* * There appear to be other CPUs that can accept * current and none to run 'p', so let's reschedule * to try and push current away: */ requeue_task_rt(rq, p, 1); resched_task(rq->curr); } #endif /* CONFIG_SMP */ /* * Preempt the current task with a newly woken task if needed: */ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags) { if (p->prio < rq->curr->prio) { resched_task(rq->curr); return; } #ifdef CONFIG_SMP /* * If: * * - the newly woken task is of equal priority to the current task * - the newly woken task is non-migratable while current is migratable * - current will be preempted on the next reschedule * * we should check to see if current can readily move to a different * cpu. If so, we will reschedule to allow the push logic to try * to move current somewhere else, making room for our non-migratable * task. */ if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr)) check_preempt_equal_prio(rq, p); #endif } static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq, struct rt_rq *rt_rq) { struct rt_prio_array *array = &rt_rq->active; struct sched_rt_entity *next = NULL; struct list_head *queue; int idx; idx = sched_find_first_bit(array->bitmap); BUG_ON(idx >= MAX_RT_PRIO); queue = array->queue + idx; next = list_entry(queue->next, struct sched_rt_entity, run_list); return next; } static struct task_struct *_pick_next_task_rt(struct rq *rq) { struct sched_rt_entity *rt_se; struct task_struct *p; struct rt_rq *rt_rq; rt_rq = &rq->rt; if (!rt_rq->rt_nr_running) return NULL; if (rt_rq_throttled(rt_rq)) return NULL; do { rt_se = pick_next_rt_entity(rq, rt_rq); BUG_ON(!rt_se); rt_rq = group_rt_rq(rt_se); } while (rt_rq); /* * Force update of rq->clock_task in case we failed to do so in * put_prev_task. 
A stale value can cause us to over-charge execution * time to a real-time task, which could trigger throttling unnecessarily. */ if (rq->skip_clock_update > 0) rq->skip_clock_update = 0; update_rq_clock(rq); p = rt_task_of(rt_se); p->se.exec_start = rq->clock_task; return p; } static struct task_struct *pick_next_task_rt(struct rq *rq) { struct task_struct *p = _pick_next_task_rt(rq); /* The running task is never eligible for pushing */ if (p) dequeue_pushable_task(rq, p); #ifdef CONFIG_SMP /* * We detect this state here so that we can avoid taking the RQ * lock again later if there is no need to push */ rq->post_schedule = has_pushable_tasks(rq); #endif return p; } static void put_prev_task_rt(struct rq *rq, struct task_struct *p) { update_curr_rt(rq); /* * The previous task needs to be made eligible for pushing * if it is still active */ if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) enqueue_pushable_task(rq, p); } #ifdef CONFIG_SMP /* Only try algorithms three times */ #define RT_MAX_TRIES 3 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) { if (!task_running(rq, p) && cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) return 1; return 0; } /* Return the second highest RT task, NULL otherwise */ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu) { struct task_struct *next = NULL; struct sched_rt_entity *rt_se; struct rt_prio_array *array; struct rt_rq *rt_rq; int idx; for_each_leaf_rt_rq(rt_rq, rq) { array = &rt_rq->active; idx = sched_find_first_bit(array->bitmap); next_idx: if (idx >= MAX_RT_PRIO) continue; if (next && next->prio <= idx) continue; list_for_each_entry(rt_se, array->queue + idx, run_list) { struct task_struct *p; if (!rt_entity_is_task(rt_se)) continue; p = rt_task_of(rt_se); if (pick_rt_task(rq, p, cpu)) { next = p; break; } } if (!next) { idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1); goto next_idx; } } return next; } static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask); #ifdef CONFIG_SCHED_HMP static int find_lowest_rq_hmp(struct task_struct *task) { struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask); int cpu_cost, min_cost = INT_MAX; int best_cpu = -1; int i; /* Make sure the mask is initialized first */ if (unlikely(!lowest_mask)) return best_cpu; if (task->nr_cpus_allowed == 1) return best_cpu; /* No other targets possible */ if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask)) return best_cpu; /* No targets found */ /* * At this point we have built a mask of cpus representing the * lowest priority tasks in the system. Now we want to elect * the best one based on our affinity and topology. */ /* Skip performance considerations and optimize for power. * Worst case we'll be iterating over all CPUs here. CPU * online mask should be taken care of when constructing * the lowest_mask. 
*/ for_each_cpu(i, lowest_mask) { struct rq *rq = cpu_rq(i); cpu_cost = power_cost_at_freq(i, ACCESS_ONCE(rq->min_freq)); trace_sched_cpu_load(rq, idle_cpu(i), mostly_idle_cpu(i), sched_irqload(i), cpu_cost, cpu_temp(i)); if (sched_boost() && capacity(rq) != max_capacity) continue; if (cpu_cost < min_cost && !sched_cpu_high_irqload(i)) { min_cost = cpu_cost; best_cpu = i; } } return best_cpu; } #else /* CONFIG_SCHED_HMP */ static int find_lowest_rq_hmp(struct task_struct *task) { return -1; } #endif /* CONFIG_SCHED_HMP */ static int find_lowest_rq(struct task_struct *task) { struct sched_domain *sd; struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask); int this_cpu = smp_processor_id(); int cpu = task_cpu(task); if (sched_enable_hmp) return find_lowest_rq_hmp(task); /* Make sure the mask is initialized first */ if (unlikely(!lowest_mask)) return -1; if (task->nr_cpus_allowed == 1) return -1; /* No other targets possible */ if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask)) return -1; /* No targets found */ /* * At this point we have built a mask of cpus representing the * lowest priority tasks in the system. Now we want to elect * the best one based on our affinity and topology. * * We prioritize the last cpu that the task executed on since * it is most likely cache-hot in that location. */ if (cpumask_test_cpu(cpu, lowest_mask)) return cpu; /* * Otherwise, we consult the sched_domains span maps to figure * out which cpu is logically closest to our hot cache data. */ if (!cpumask_test_cpu(this_cpu, lowest_mask)) this_cpu = -1; /* Skip this_cpu opt if not among lowest */ rcu_read_lock(); for_each_domain(cpu, sd) { if (sd->flags & SD_WAKE_AFFINE) { int best_cpu; /* * "this_cpu" is cheaper to preempt than a * remote processor. */ if (this_cpu != -1 && cpumask_test_cpu(this_cpu, sched_domain_span(sd))) { rcu_read_unlock(); return this_cpu; } best_cpu = cpumask_first_and(lowest_mask, sched_domain_span(sd)); if (best_cpu < nr_cpu_ids) { rcu_read_unlock(); return best_cpu; } } } rcu_read_unlock(); /* * And finally, if there were no matches within the domains * just give the caller *something* to work with from the compatible * locations. */ if (this_cpu != -1) return this_cpu; cpu = cpumask_any(lowest_mask); if (cpu < nr_cpu_ids) return cpu; return -1; } /* Will lock the rq it finds */ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) { struct rq *lowest_rq = NULL; int tries; int cpu; for (tries = 0; tries < RT_MAX_TRIES; tries++) { cpu = find_lowest_rq(task); if ((cpu == -1) || (cpu == rq->cpu)) break; lowest_rq = cpu_rq(cpu); /* if the prio of this runqueue changed, try again */ if (double_lock_balance(rq, lowest_rq)) { /* * We had to unlock the run queue. In * the mean time, task could have * migrated already or had its affinity changed. * Also make sure that it wasn't scheduled on its rq. */ if (unlikely(task_rq(task) != rq || !cpumask_test_cpu(lowest_rq->cpu, tsk_cpus_allowed(task)) || task_running(rq, task) || !task->on_rq)) { double_unlock_balance(rq, lowest_rq); lowest_rq = NULL; break; } } /* If this rq is still suitable use it. 
*/ if (lowest_rq->rt.highest_prio.curr > task->prio) break; /* try again */ double_unlock_balance(rq, lowest_rq); lowest_rq = NULL; } return lowest_rq; } static struct task_struct *pick_next_pushable_task(struct rq *rq) { struct task_struct *p; if (!has_pushable_tasks(rq)) return NULL; p = plist_first_entry(&rq->rt.pushable_tasks, struct task_struct, pushable_tasks); BUG_ON(rq->cpu != task_cpu(p)); BUG_ON(task_current(rq, p)); BUG_ON(p->nr_cpus_allowed <= 1); BUG_ON(!p->on_rq); BUG_ON(!rt_task(p)); return p; } /* * If the current CPU has more than one RT task, see if the non * running task can migrate over to a CPU that is running a task * of lesser priority. */ static int push_rt_task(struct rq *rq) { struct task_struct *next_task; struct rq *lowest_rq; int ret = 0; if (!rq->rt.overloaded) return 0; next_task = pick_next_pushable_task(rq); if (!next_task) return 0; retry: if (unlikely(next_task == rq->curr)) { WARN_ON(1); return 0; } /* * It's possible that the next_task slipped in with * higher priority than current. If that's the case * just reschedule current. */ if (unlikely(next_task->prio < rq->curr->prio)) { resched_task(rq->curr); return 0; } /* We might release rq lock */ get_task_struct(next_task); /* find_lock_lowest_rq locks the rq if found */ lowest_rq = find_lock_lowest_rq(next_task, rq); if (!lowest_rq) { struct task_struct *task; /* * find_lock_lowest_rq releases rq->lock * so it is possible that next_task has migrated. * * We need to make sure that the task is still on the same * run-queue and is also still the next task eligible for * pushing. */ task = pick_next_pushable_task(rq); if (task_cpu(next_task) == rq->cpu && task == next_task) { /* * The task hasn't migrated, and is still the next * eligible task, but we failed to find a run-queue * to push it to. Do not retry in this case, since * other cpus will pull from us when ready. */ goto out; } if (!task) /* No more tasks, just exit */ goto out; /* * Something has shifted, try again. */ put_task_struct(next_task); next_task = task; goto retry; } deactivate_task(rq, next_task, 0); set_task_cpu(next_task, lowest_rq->cpu); activate_task(lowest_rq, next_task, 0); ret = 1; resched_task(lowest_rq->curr); double_unlock_balance(rq, lowest_rq); out: put_task_struct(next_task); return ret; } static void push_rt_tasks(struct rq *rq) { /* push_rt_task will return true if it moved an RT */ while (push_rt_task(rq)) ; } static int pull_rt_task(struct rq *this_rq) { int this_cpu = this_rq->cpu, ret = 0, cpu; struct task_struct *p; struct rq *src_rq; if (likely(!rt_overloaded(this_rq))) return 0; for_each_cpu(cpu, this_rq->rd->rto_mask) { if (this_cpu == cpu) continue; src_rq = cpu_rq(cpu); /* * Don't bother taking the src_rq->lock if the next highest * task is known to be lower-priority than our current task. * This may look racy, but if this value is about to go * logically higher, the src_rq will push this task away. * And if it's going logically lower, we do not care. */ if (src_rq->rt.highest_prio.next >= this_rq->rt.highest_prio.curr) continue; /* * We can potentially drop this_rq's lock in * double_lock_balance, and another CPU could * alter this_rq */ double_lock_balance(this_rq, src_rq); /* * Are there still pullable RT tasks? */ if (src_rq->rt.rt_nr_running <= 1) goto skip; p = pick_next_highest_task_rt(src_rq, this_cpu); /* * Do we have an RT task that preempts * the to-be-scheduled task? 
*/ if (p && (p->prio < this_rq->rt.highest_prio.curr)) { WARN_ON(p == src_rq->curr); WARN_ON(!p->on_rq); /* * There's a chance that p is higher in priority * than what's currently running on its cpu. * This is just that p is waking up and hasn't * had a chance to schedule. We only pull * p if it is lower in priority than the * current task on the run queue */ if (p->prio < src_rq->curr->prio) goto skip; ret = 1; deactivate_task(src_rq, p, 0); set_task_cpu(p, this_cpu); activate_task(this_rq, p, 0); /* * We continue with the search, just in * case there's an even higher prio task * in another runqueue. (low likelihood * but possible) */ } skip: double_unlock_balance(this_rq, src_rq); } return ret; } static void pre_schedule_rt(struct rq *rq, struct task_struct *prev) { /* Try to pull RT tasks here if we lower this rq's prio */ if (rq->rt.highest_prio.curr > prev->prio) pull_rt_task(rq); } static void post_schedule_rt(struct rq *rq) { push_rt_tasks(rq); } /* * If we are not running and we are not going to reschedule soon, we should * try to push tasks away now */ static void task_woken_rt(struct rq *rq, struct task_struct *p) { if (!task_running(rq, p) && !test_tsk_need_resched(rq->curr) && has_pushable_tasks(rq) && p->nr_cpus_allowed > 1 && rt_task(rq->curr) && (rq->curr->nr_cpus_allowed < 2 || rq->curr->prio <= p->prio)) push_rt_tasks(rq); } static void set_cpus_allowed_rt(struct task_struct *p, const struct cpumask *new_mask) { struct rq *rq; int weight; BUG_ON(!rt_task(p)); if (!p->on_rq) return; weight = cpumask_weight(new_mask); /* * Only update if the process changes whether it * can migrate or not. */ if ((p->nr_cpus_allowed > 1) == (weight > 1)) return; rq = task_rq(p); /* * The process used to be able to migrate OR it can now migrate */ if (weight <= 1) { if (!task_current(rq, p)) dequeue_pushable_task(rq, p); BUG_ON(!rq->rt.rt_nr_migratory); rq->rt.rt_nr_migratory--; } else { if (!task_current(rq, p)) enqueue_pushable_task(rq, p); rq->rt.rt_nr_migratory++; } update_rt_migration(&rq->rt); } /* Assumes rq->lock is held */ static void rq_online_rt(struct rq *rq) { if (rq->rt.overloaded) rt_set_overload(rq); __enable_runtime(rq); cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr); } /* Assumes rq->lock is held */ static void rq_offline_rt(struct rq *rq) { if (rq->rt.overloaded) rt_clear_overload(rq); __disable_runtime(rq); cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID); } /* * When switching from the rt queue, we bring ourselves to a position * that we might want to pull RT tasks from other runqueues. */ static void switched_from_rt(struct rq *rq, struct task_struct *p) { /* * If there are other RT tasks then we will reschedule * and the scheduling of the other RT tasks will handle * the balancing. But if we are the last RT task * we may need to handle the pulling of RT tasks * now. */ if (!p->on_rq || rq->rt.rt_nr_running) return; if (pull_rt_task(rq)) resched_task(rq->curr); } void init_sched_rt_class(void) { unsigned int i; for_each_possible_cpu(i) { zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i), GFP_KERNEL, cpu_to_node(i)); } } #endif /* CONFIG_SMP */ /* * When switching a task to RT, we may overload the runqueue * with RT tasks. In this case we try to push them off to * other runqueues. */ static void switched_to_rt(struct rq *rq, struct task_struct *p) { int check_resched = 1; /* * If we are already running, then there's nothing * that needs to be done. But if we are not running * we may need to preempt the current running task. 
* If that current running task is also an RT task * then see if we can move to another run queue. */ if (p->on_rq && rq->curr != p) { #ifdef CONFIG_SMP if (rq->rt.overloaded && push_rt_task(rq) && /* Don't resched if we changed runqueues */ rq != task_rq(p)) check_resched = 0; #endif /* CONFIG_SMP */ if (check_resched && p->prio < rq->curr->prio) resched_task(rq->curr); } } /* * Priority of the task has changed. This may cause * us to initiate a push or pull. */ static void prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio) { if (!p->on_rq) return; if (rq->curr == p) { #ifdef CONFIG_SMP /* * If our priority decreases while running, we * may need to pull tasks to this runqueue. */ if (oldprio < p->prio) pull_rt_task(rq); /* * If there's a higher priority task waiting to run * then reschedule. Note, the above pull_rt_task * can release the rq lock and p could migrate. * Only reschedule if p is still on the same runqueue. */ if (p->prio > rq->rt.highest_prio.curr && rq->curr == p) resched_task(p); #else /* For UP simply resched on drop of prio */ if (oldprio < p->prio) resched_task(p); #endif /* CONFIG_SMP */ } else { /* * This task is not running, but if it is * greater than the current running task * then reschedule. */ if (p->prio < rq->curr->prio) resched_task(rq->curr); } } static void watchdog(struct rq *rq, struct task_struct *p) { unsigned long soft, hard; /* max may change after cur was read, this will be fixed next tick */ soft = task_rlimit(p, RLIMIT_RTTIME); hard = task_rlimit_max(p, RLIMIT_RTTIME); if (soft != RLIM_INFINITY) { unsigned long next; if (p->rt.watchdog_stamp != jiffies) { p->rt.timeout++; p->rt.watchdog_stamp = jiffies; } next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ); if (p->rt.timeout > next) p->cputime_expires.sched_exp = p->se.sum_exec_runtime; } } static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) { struct sched_rt_entity *rt_se = &p->rt; update_curr_rt(rq); watchdog(rq, p); /* * RR tasks need a special form of timeslice management. * FIFO tasks have no timeslices. 
*/ if (p->policy != SCHED_RR) return; if (--p->rt.time_slice) return; p->rt.time_slice = sched_rr_timeslice; /* * Requeue to the end of queue if we (and all of our ancestors) are the * only element on the queue */ for_each_sched_rt_entity(rt_se) { if (rt_se->run_list.prev != rt_se->run_list.next) { requeue_task_rt(rq, p, 0); set_tsk_need_resched(p); return; } } } static void set_curr_task_rt(struct rq *rq) { struct task_struct *p = rq->curr; p->se.exec_start = rq->clock_task; /* The running task is never eligible for pushing */ dequeue_pushable_task(rq, p); } static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task) { /* * Time slice is 0 for SCHED_FIFO tasks */ if (task->policy == SCHED_RR) return sched_rr_timeslice; else return 0; } const struct sched_class rt_sched_class = { .next = &fair_sched_class, .enqueue_task = enqueue_task_rt, .dequeue_task = dequeue_task_rt, .yield_task = yield_task_rt, .check_preempt_curr = check_preempt_curr_rt, .pick_next_task = pick_next_task_rt, .put_prev_task = put_prev_task_rt, #ifdef CONFIG_SMP .select_task_rq = select_task_rq_rt, .set_cpus_allowed = set_cpus_allowed_rt, .rq_online = rq_online_rt, .rq_offline = rq_offline_rt, .pre_schedule = pre_schedule_rt, .post_schedule = post_schedule_rt, .task_woken = task_woken_rt, .switched_from = switched_from_rt, #endif .set_curr_task = set_curr_task_rt, .task_tick = task_tick_rt, .get_rr_interval = get_rr_interval_rt, .prio_changed = prio_changed_rt, .switched_to = switched_to_rt, #ifdef CONFIG_SCHED_HMP .inc_hmp_sched_stats = inc_hmp_sched_stats_rt, .dec_hmp_sched_stats = dec_hmp_sched_stats_rt, #endif }; #ifdef CONFIG_SCHED_DEBUG extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); void print_rt_stats(struct seq_file *m, int cpu) { rt_rq_iter_t iter; struct rt_rq *rt_rq; rcu_read_lock(); for_each_rt_rq(rt_rq, iter, cpu_rq(cpu)) print_rt_rq(m, cpu, rt_rq); rcu_read_unlock(); } #endif /* CONFIG_SCHED_DEBUG */
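An illustrative aside, not part of the rt.c file above: the enqueue/dequeue/pick paths all revolve around struct rt_prio_array, a bitmap with one bit per priority level plus a FIFO queue per level, so picking the next runnable entity is a find-first-set over the bitmap followed by taking the head of that queue. Below is a minimal user-space sketch of that idea; the names are hypothetical and plain counters stand in for the kernel's list_head queues.

/* Hypothetical user-space model of the rt_prio_array mechanics. */
#include <stdio.h>
#include <string.h>

#define MAX_PRIO      100	/* stand-in for MAX_RT_PRIO */
#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))
#define NWORDS        ((MAX_PRIO + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct prio_array {
	unsigned long bitmap[NWORDS];	/* bit i set => queue i is non-empty */
	int queue_len[MAX_PRIO];	/* counters stand in for per-prio FIFO lists */
};

static void enqueue(struct prio_array *a, int prio)
{
	a->queue_len[prio]++;
	a->bitmap[prio / BITS_PER_LONG] |= 1UL << (prio % BITS_PER_LONG);
}

static void dequeue(struct prio_array *a, int prio)
{
	/* like __dequeue_rt_entity(): clear the bit only when the queue empties */
	if (--a->queue_len[prio] == 0)
		a->bitmap[prio / BITS_PER_LONG] &= ~(1UL << (prio % BITS_PER_LONG));
}

/* find-first-set over the bitmap, the role sched_find_first_bit() plays */
static int first_prio(const struct prio_array *a)
{
	for (int w = 0; w < NWORDS; w++)
		if (a->bitmap[w])
			return w * BITS_PER_LONG + __builtin_ctzl(a->bitmap[w]);
	return MAX_PRIO;	/* array is empty */
}

int main(void)
{
	struct prio_array a;

	memset(&a, 0, sizeof(a));
	enqueue(&a, 50);
	enqueue(&a, 10);
	enqueue(&a, 10);
	printf("next prio: %d\n", first_prio(&a));	/* 10: lower value = higher prio */
	dequeue(&a, 10);
	dequeue(&a, 10);
	printf("next prio: %d\n", first_prio(&a));	/* 50 */
	return 0;
}

This is why both the pick and the enqueue/dequeue paths above are O(1) in the number of queued tasks: only the constant-size bitmap is ever scanned.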
BlissRoms-Kernels/kernel_motorola_BlissPure
kernel/sched/rt.c
C
gpl-2.0
51,646
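An illustrative aside on the kernel/sched/rt.c record above: when a runqueue exhausts its budget, do_balance_runtime() takes 1/n of each neighbour's spare runtime (n = CPUs in the root domain), capped so the borrower never holds more than one full period. A standalone sketch of just that arithmetic, with made-up numbers; everything here is hypothetical and in nanoseconds.

/* Hypothetical model of the RT runtime borrowing arithmetic. */
#include <stdio.h>

#define NCPUS 4

int main(void)
{
	unsigned long long period = 1000000000ULL;	/* 1 s period */
	unsigned long long runtime[NCPUS] = {		/* per-CPU budgets */
		950000000ULL, 950000000ULL, 950000000ULL, 950000000ULL
	};
	unsigned long long used[NCPUS] = {		/* time already consumed */
		950000000ULL, 100000000ULL, 400000000ULL, 950000000ULL
	};
	int borrower = 0;				/* CPU 0 ran out of budget */

	for (int i = 0; i < NCPUS; i++) {
		if (i == borrower)
			continue;
		if (used[i] >= runtime[i])		/* no spare time to take */
			continue;
		/* take 1/n of the neighbour's spare, like do_balance_runtime() */
		unsigned long long diff = (runtime[i] - used[i]) / NCPUS;
		if (runtime[borrower] + diff > period)	/* never exceed the period */
			diff = period - runtime[borrower];
		runtime[i] -= diff;
		runtime[borrower] += diff;
		printf("borrowed %llu ns from cpu%d\n", diff, i);
		if (runtime[borrower] == period)
			break;
	}
	printf("cpu%d budget now %llu ns\n", borrower, runtime[borrower]);
	return 0;
}

__disable_runtime() above is the inverse walk: it reclaims whatever was lent out until the per-rq budget matches the bandwidth default again.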
/* * (C) Copyright 2003-2006 * Wolfgang Denk, DENX Software Engineering, wd@denx.de. * * (C) Copyright 2004 * Mark Jonas, Freescale Semiconductor, mark.jonas@motorola.com. * * (C) Copyright 2004-2006 * Martin Krause, TQ-Systems GmbH, martin.krause@tqs.de * * See file CREDITS for list of people who contributed to this * project. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA */ #include <common.h> #include <mpc5xxx.h> #include <pci.h> #include <asm/processor.h> #ifdef CONFIG_VIDEO_SM501 #include <sm501.h> #endif #if defined(CONFIG_MPC5200_DDR) #include "mt46v16m16-75.h" #else #include "mt48lc16m16a2-75.h" #endif #ifdef CONFIG_PS2MULT void ps2mult_early_init(void); #endif #ifndef CFG_RAMBOOT static void sdram_start (int hi_addr) { long hi_addr_bit = hi_addr ? 0x01000000 : 0; /* unlock mode register */ *(vu_long *)MPC5XXX_SDRAM_CTRL = SDRAM_CONTROL | 0x80000000 | hi_addr_bit; __asm__ volatile ("sync"); /* precharge all banks */ *(vu_long *)MPC5XXX_SDRAM_CTRL = SDRAM_CONTROL | 0x80000002 | hi_addr_bit; __asm__ volatile ("sync"); #if SDRAM_DDR /* set mode register: extended mode */ *(vu_long *)MPC5XXX_SDRAM_MODE = SDRAM_EMODE; __asm__ volatile ("sync"); /* set mode register: reset DLL */ *(vu_long *)MPC5XXX_SDRAM_MODE = SDRAM_MODE | 0x04000000; __asm__ volatile ("sync"); #endif /* precharge all banks */ *(vu_long *)MPC5XXX_SDRAM_CTRL = SDRAM_CONTROL | 0x80000002 | hi_addr_bit; __asm__ volatile ("sync"); /* auto refresh */ *(vu_long *)MPC5XXX_SDRAM_CTRL = SDRAM_CONTROL | 0x80000004 | hi_addr_bit; __asm__ volatile ("sync"); /* set mode register */ *(vu_long *)MPC5XXX_SDRAM_MODE = SDRAM_MODE; __asm__ volatile ("sync"); /* normal operation */ *(vu_long *)MPC5XXX_SDRAM_CTRL = SDRAM_CONTROL | hi_addr_bit; __asm__ volatile ("sync"); } #endif /* * ATTENTION: Although partially referenced, initdram does NOT make real * use of CFG_SDRAM_BASE. The code does not work if CFG_SDRAM_BASE * is anything other than 0x00000000. 
*/ #if defined(CONFIG_MPC5200) long int initdram (int board_type) { ulong dramsize = 0; ulong dramsize2 = 0; uint svr, pvr; #ifndef CFG_RAMBOOT ulong test1, test2; /* setup SDRAM chip selects */ *(vu_long *)MPC5XXX_SDRAM_CS0CFG = 0x0000001c; /* 512MB at 0x0 */ *(vu_long *)MPC5XXX_SDRAM_CS1CFG = 0x40000000; /* disabled */ __asm__ volatile ("sync"); /* setup config registers */ *(vu_long *)MPC5XXX_SDRAM_CONFIG1 = SDRAM_CONFIG1; *(vu_long *)MPC5XXX_SDRAM_CONFIG2 = SDRAM_CONFIG2; __asm__ volatile ("sync"); #if SDRAM_DDR /* set tap delay */ *(vu_long *)MPC5XXX_CDM_PORCFG = SDRAM_TAPDELAY; __asm__ volatile ("sync"); #endif /* find RAM size using SDRAM CS0 only */ sdram_start(0); test1 = get_ram_size((long *)CFG_SDRAM_BASE, 0x20000000); sdram_start(1); test2 = get_ram_size((long *)CFG_SDRAM_BASE, 0x20000000); if (test1 > test2) { sdram_start(0); dramsize = test1; } else { dramsize = test2; } /* memory smaller than 1MB is impossible */ if (dramsize < (1 << 20)) { dramsize = 0; } /* set SDRAM CS0 size according to the amount of RAM found */ if (dramsize > 0) { *(vu_long *)MPC5XXX_SDRAM_CS0CFG = 0x13 + __builtin_ffs(dramsize >> 20) - 1; } else { *(vu_long *)MPC5XXX_SDRAM_CS0CFG = 0; /* disabled */ } /* let SDRAM CS1 start right after CS0 */ *(vu_long *)MPC5XXX_SDRAM_CS1CFG = dramsize + 0x0000001c; /* 512MB */ /* find RAM size using SDRAM CS1 only */ sdram_start(0); test1 = get_ram_size((long *)(CFG_SDRAM_BASE + dramsize), 0x20000000); sdram_start(1); test2 = get_ram_size((long *)(CFG_SDRAM_BASE + dramsize), 0x20000000); if (test1 > test2) { sdram_start(0); dramsize2 = test1; } else { dramsize2 = test2; } /* memory smaller than 1MB is impossible */ if (dramsize2 < (1 << 20)) { dramsize2 = 0; } /* set SDRAM CS1 size according to the amount of RAM found */ if (dramsize2 > 0) { *(vu_long *)MPC5XXX_SDRAM_CS1CFG = dramsize | (0x13 + __builtin_ffs(dramsize2 >> 20) - 1); } else { *(vu_long *)MPC5XXX_SDRAM_CS1CFG = dramsize; /* disabled */ } #else /* CFG_RAMBOOT */ /* retrieve size of memory connected to SDRAM CS0 */ dramsize = *(vu_long *)MPC5XXX_SDRAM_CS0CFG & 0xFF; if (dramsize >= 0x13) { dramsize = (1 << (dramsize - 0x13)) << 20; } else { dramsize = 0; } /* retrieve size of memory connected to SDRAM CS1 */ dramsize2 = *(vu_long *)MPC5XXX_SDRAM_CS1CFG & 0xFF; if (dramsize2 >= 0x13) { dramsize2 = (1 << (dramsize2 - 0x13)) << 20; } else { dramsize2 = 0; } #endif /* CFG_RAMBOOT */ /* * On MPC5200B we need to set the special configuration delay in the * DDR controller. Please refer to Freescale's AN3221 "MPC5200B SDRAM * Initialization and Configuration", 3.3.1 SDelay--MBAR + 0x0190: * * "The SDelay should be written to a value of 0x00000004. It is * required to account for changes caused by normal wafer processing * parameters." 
svr = get_svr(); pvr = get_pvr(); if ((SVR_MJREV(svr) >= 2) && (PVR_MAJ(pvr) == 1) && (PVR_MIN(pvr) == 4)) { *(vu_long *)MPC5XXX_SDRAM_SDELAY = 0x04; __asm__ volatile ("sync"); } #if defined(CONFIG_TQM5200_B) return dramsize + dramsize2; #else return dramsize; #endif /* CONFIG_TQM5200_B */ } #elif defined(CONFIG_MGT5100) long int initdram (int board_type) { ulong dramsize = 0; #ifndef CFG_RAMBOOT ulong test1, test2; /* setup and enable SDRAM chip selects */ *(vu_long *)MPC5XXX_SDRAM_START = 0x00000000; *(vu_long *)MPC5XXX_SDRAM_STOP = 0x0000ffff;/* 2G */ *(vu_long *)MPC5XXX_ADDECR |= (1 << 22); /* Enable SDRAM */ __asm__ volatile ("sync"); /* setup config registers */ *(vu_long *)MPC5XXX_SDRAM_CONFIG1 = SDRAM_CONFIG1; *(vu_long *)MPC5XXX_SDRAM_CONFIG2 = SDRAM_CONFIG2; /* address select register */ *(vu_long *)MPC5XXX_SDRAM_XLBSEL = SDRAM_ADDRSEL; __asm__ volatile ("sync"); /* find RAM size */ sdram_start(0); test1 = get_ram_size((ulong *)CFG_SDRAM_BASE, 0x80000000); sdram_start(1); test2 = get_ram_size((ulong *)CFG_SDRAM_BASE, 0x80000000); if (test1 > test2) { sdram_start(0); dramsize = test1; } else { dramsize = test2; } /* set SDRAM end address according to size */ *(vu_long *)MPC5XXX_SDRAM_STOP = ((dramsize - 1) >> 15); #else /* CFG_RAMBOOT */ /* Retrieve amount of SDRAM available */ dramsize = ((*(vu_long *)MPC5XXX_SDRAM_STOP + 1) << 15); #endif /* CFG_RAMBOOT */ return dramsize; } #else #error Neither CONFIG_MPC5200 nor CONFIG_MGT5100 defined #endif int checkboard (void) { #if defined(CONFIG_AEVFIFO) puts ("Board: AEVFIFO\n"); return 0; #endif #if defined(CONFIG_TQM5200S) # define MODULE_NAME "TQM5200S" #else # define MODULE_NAME "TQM5200" #endif #if defined(CONFIG_STK52XX) # define CARRIER_NAME "STK52xx" #elif defined(CONFIG_TB5200) # define CARRIER_NAME "TB5200" #elif defined(CONFIG_CAM5200) # define CARRIER_NAME "Cam5200" #else # error "Unknown carrier board" #endif puts ( "Board: " MODULE_NAME " (TQ-Components GmbH)\n" " on a " CARRIER_NAME " carrier board\n"); return 0; } #undef MODULE_NAME #undef CARRIER_NAME void flash_preinit(void) { /* * Now, when we are in RAM, enable flash write * access for the detection process. * Note that CS_BOOT cannot be cleared when * executing in flash. 
*/ #if defined(CONFIG_MGT5100) *(vu_long *)MPC5XXX_ADDECR &= ~(1 << 25); /* disable CS_BOOT */ *(vu_long *)MPC5XXX_ADDECR |= (1 << 16); /* enable CS0 */ #endif *(vu_long *)MPC5XXX_BOOTCS_CFG &= ~0x1; /* clear RO */ } #ifdef CONFIG_PCI static struct pci_controller hose; extern void pci_mpc5xxx_init(struct pci_controller *); void pci_init_board(void) { pci_mpc5xxx_init(&hose); } #endif #if defined (CFG_CMD_IDE) && defined (CONFIG_IDE_RESET) #if defined (CONFIG_MINIFAP) #define SM501_POWER_MODE0_GATE 0x00000040UL #define SM501_POWER_MODE1_GATE 0x00000048UL #define POWER_MODE_GATE_GPIO_PWM_I2C 0x00000040UL #define SM501_GPIO_DATA_DIR_HIGH 0x0001000CUL #define SM501_GPIO_DATA_HIGH 0x00010004UL #define SM501_GPIO_51 0x00080000UL #else #define GPIO_PSC1_4 0x01000000UL #endif void init_ide_reset (void) { debug ("init_ide_reset\n"); #if defined (CONFIG_MINIFAP) /* Configure GPIO_51 of the SM501 graphic controller as ATA reset */ /* enable GPIO control (in both power modes) */ *(vu_long *) (SM501_MMIO_BASE+SM501_POWER_MODE0_GATE) |= POWER_MODE_GATE_GPIO_PWM_I2C; *(vu_long *) (SM501_MMIO_BASE+SM501_POWER_MODE1_GATE) |= POWER_MODE_GATE_GPIO_PWM_I2C; /* configure GPIO51 as output */ *(vu_long *) (SM501_MMIO_BASE+SM501_GPIO_DATA_DIR_HIGH) |= SM501_GPIO_51; #else /* Configure PSC1_4 as GPIO output for ATA reset */ *(vu_long *) MPC5XXX_WU_GPIO_ENABLE |= GPIO_PSC1_4; *(vu_long *) MPC5XXX_WU_GPIO_DIR |= GPIO_PSC1_4; #endif } void ide_set_reset (int idereset) { debug ("ide_reset(%d)\n", idereset); #if defined (CONFIG_MINIFAP) if (idereset) { *(vu_long *) (SM501_MMIO_BASE+SM501_GPIO_DATA_HIGH) &= ~SM501_GPIO_51; } else { *(vu_long *) (SM501_MMIO_BASE+SM501_GPIO_DATA_HIGH) |= SM501_GPIO_51; } #else if (idereset) { *(vu_long *) MPC5XXX_WU_GPIO_DATA &= ~GPIO_PSC1_4; } else { *(vu_long *) MPC5XXX_WU_GPIO_DATA |= GPIO_PSC1_4; } #endif } #endif /* defined (CFG_CMD_IDE) && defined (CONFIG_IDE_RESET) */ #ifdef CONFIG_POST /* * Reads GPIO pin PSC6_3. A keypress is reported if PSC6_3 is low. If PSC6_3 * is left open, no keypress is detected. */ int post_hotkeys_pressed(void) { struct mpc5xxx_gpio *gpio; gpio = (struct mpc5xxx_gpio*) MPC5XXX_GPIO; /* * Configure PSC6_1 and PSC6_3 as GPIO. PSC6 then cannot be used in * CODEC or UART mode. Consumer IrDA should still be possible. */ gpio->port_config &= ~(0x07000000); gpio->port_config |= 0x03000000; /* Enable GPIO for GPIO_IRDA_1 (IR_USB_CLK pin) = PSC6_3 */ gpio->simple_gpioe |= 0x20000000; /* Configure GPIO_IRDA_1 as input */ gpio->simple_ddr &= ~(0x20000000); return ((gpio->simple_ival & 0x20000000) ? 0 : 1); } #endif #if defined(CONFIG_POST) || defined(CONFIG_LOGBUFFER) void post_word_store (ulong a) { volatile ulong *save_addr = (volatile ulong *)(MPC5XXX_SRAM + MPC5XXX_SRAM_POST_SIZE); *save_addr = a; } ulong post_word_load (void) { volatile ulong *save_addr = (volatile ulong *)(MPC5XXX_SRAM + MPC5XXX_SRAM_POST_SIZE); return *save_addr; } #endif /* CONFIG_POST || CONFIG_LOGBUFFER*/ #ifdef CONFIG_PS2MULT #ifdef CONFIG_BOARD_EARLY_INIT_R int board_early_init_r (void) { ps2mult_early_init(); return (0); } #endif #endif /* CONFIG_PS2MULT */ int last_stage_init (void) { /* * auto-scan for devices that are actually present and re-set the chip select * configuration. 
*/ u16 save, tmp; int restore; /* * Check for SRAM and SRAM size */ /* save original SRAM content */ save = *(volatile u16 *)CFG_CS2_START; restore = 1; /* write test pattern to SRAM */ *(volatile u16 *)CFG_CS2_START = 0xA5A5; __asm__ volatile ("sync"); /* * Put a different pattern on the data lines: otherwise they may float * long enough to read back what we wrote. */ tmp = *(volatile u16 *)CFG_FLASH_BASE; if (tmp == 0xA5A5) puts ("!! possible error in SRAM detection\n"); if (*(volatile u16 *)CFG_CS2_START != 0xA5A5) { /* no SRAM at all, disable cs */ *(vu_long *)MPC5XXX_ADDECR &= ~(1 << 18); *(vu_long *)MPC5XXX_CS2_START = 0x0000FFFF; *(vu_long *)MPC5XXX_CS2_STOP = 0x0000FFFF; restore = 0; __asm__ volatile ("sync"); } else if (*(volatile u16 *)(CFG_CS2_START + (1<<19)) == 0xA5A5) { /* make sure that we access a mirrored address */ *(volatile u16 *)CFG_CS2_START = 0x1111; __asm__ volatile ("sync"); if (*(volatile u16 *)(CFG_CS2_START + (1<<19)) == 0x1111) { /* SRAM size = 512 kByte */ *(vu_long *)MPC5XXX_CS2_STOP = STOP_REG(CFG_CS2_START, 0x80000); __asm__ volatile ("sync"); puts ("SRAM: 512 kB\n"); } else puts ("!! possible error in SRAM detection\n"); } else { puts ("SRAM: 1 MB\n"); } /* restore original SRAM content */ if (restore) { *(volatile u16 *)CFG_CS2_START = save; __asm__ volatile ("sync"); } /* * Check for Graphic Controller */ /* save original FB content */ save = *(volatile u16 *)CFG_CS1_START; restore = 1; /* write test pattern to FB memory */ *(volatile u16 *)CFG_CS1_START = 0xA5A5; __asm__ volatile ("sync"); /* * Put a different pattern on the data lines: otherwise they may float * long enough to read back what we wrote. */ tmp = *(volatile u16 *)CFG_FLASH_BASE; if (tmp == 0xA5A5) puts ("!! possible error in graphic controller detection\n"); if (*(volatile u16 *)CFG_CS1_START != 0xA5A5) { /* no graphic controller at all, disable cs */ *(vu_long *)MPC5XXX_ADDECR &= ~(1 << 17); *(vu_long *)MPC5XXX_CS1_START = 0x0000FFFF; *(vu_long *)MPC5XXX_CS1_STOP = 0x0000FFFF; restore = 0; __asm__ volatile ("sync"); } else { puts ("VGA: SMI501 (Voyager) with 8 MB\n"); } /* restore original FB content */ if (restore) { *(volatile u16 *)CFG_CS1_START = save; __asm__ volatile ("sync"); } return 0; } #ifdef CONFIG_VIDEO_SM501 #define DISPLAY_WIDTH 640 #define DISPLAY_HEIGHT 480 #ifdef CONFIG_VIDEO_SM501_8BPP #error CONFIG_VIDEO_SM501_8BPP not supported. #endif /* CONFIG_VIDEO_SM501_8BPP */ #ifdef CONFIG_VIDEO_SM501_16BPP #error CONFIG_VIDEO_SM501_16BPP not supported. #endif /* CONFIG_VIDEO_SM501_16BPP */ #ifdef CONFIG_VIDEO_SM501_32BPP static const SMI_REGS init_regs [] = { #if 0 /* CRT only */ {0x00004, 0x0}, {0x00048, 0x00021807}, {0x0004C, 0x10090a01}, {0x00054, 0x1}, {0x00040, 0x00021807}, {0x00044, 0x10090a01}, {0x00054, 0x0}, {0x80200, 0x00010000}, {0x80204, 0x0}, {0x80208, 0x0A000A00}, {0x8020C, 0x02fa027f}, {0x80210, 0x004a028b}, {0x80214, 0x020c01df}, {0x80218, 0x000201e9}, {0x80200, 0x00013306}, #else /* panel + CRT */ {0x00004, 0x0}, {0x00048, 0x00021807}, {0x0004C, 0x091a0a01}, {0x00054, 0x1}, {0x00040, 0x00021807}, {0x00044, 0x091a0a01}, {0x00054, 0x0}, {0x80000, 0x0f013106}, {0x80004, 0xc428bb17}, {0x8000C, 0x00000000}, {0x80010, 0x0a000a00}, {0x80014, 0x02800000}, {0x80018, 0x01e00000}, {0x8001C, 0x00000000}, {0x80020, 0x01e00280}, {0x80024, 0x02fa027f}, {0x80028, 0x004a028b}, {0x8002C, 0x020c01df}, {0x80030, 0x000201e9}, {0x80200, 0x00010000}, #endif {0, 0} }; #endif /* CONFIG_VIDEO_SM501_32BPP */ #ifdef CONFIG_CONSOLE_EXTRA_INFO /* * Return text to be printed beside the logo. 
*/ void video_get_info_str (int line_number, char *info) { if (line_number == 1) { strcpy (info, " Board: TQM5200 (TQ-Components GmbH)"); #if defined (CONFIG_STK52XX) || defined (CONFIG_TB5200) } else if (line_number == 2) { #if defined (CONFIG_STK52XX) strcpy (info, " on a STK52xx carrier board"); #endif #if defined (CONFIG_TB5200) strcpy (info, " on a TB5200 carrier board"); #endif #endif } else { info [0] = '\0'; } } #endif /* * Returns SM501 register base address. First thing called in the * driver. Checks if SM501 is physically present. */ unsigned int board_video_init (void) { u16 save, tmp; int restore, ret; /* * Check for Graphic Controller */ /* save original FB content */ save = *(volatile u16 *)CFG_CS1_START; restore = 1; /* write test pattern to FB memory */ *(volatile u16 *)CFG_CS1_START = 0xA5A5; __asm__ volatile ("sync"); /* * Put a different pattern on the data lines: otherwise they may float * long enough to read back what we wrote. */ tmp = *(volatile u16 *)CFG_FLASH_BASE; if (tmp == 0xA5A5) puts ("!! possible error in graphic controller detection\n"); if (*(volatile u16 *)CFG_CS1_START != 0xA5A5) { /* no graphic controller found */ restore = 0; ret = 0; } else { ret = SM501_MMIO_BASE; } if (restore) { *(volatile u16 *)CFG_CS1_START = save; __asm__ volatile ("sync"); } return ret; } /* * Returns SM501 framebuffer address */ unsigned int board_video_get_fb (void) { return SM501_FB_BASE; } /* * Called after initializing the SM501 and before clearing the screen. */ void board_validate_screen (unsigned int base) { } /* * Return a pointer to the initialization sequence. */ const SMI_REGS *board_get_regs (void) { return init_regs; } int board_get_width (void) { return DISPLAY_WIDTH; } int board_get_height (void) { return DISPLAY_HEIGHT; } #endif /* CONFIG_VIDEO_SM501 */
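An illustrative aside, not part of tqm5200.c above: initdram() encodes a power-of-two SDRAM size into the MPC5XXX chip-select config as 0x13 + ffs(size_in_MB) - 1 and decodes it back as (1 << (cfg - 0x13)) << 20; the 0x0000001c it programs initially is exactly the encoding of a 512 MB window. A standalone sketch of that round-trip, assuming power-of-two sizes:

/* Hypothetical round-trip of the CS size encoding used above. */
#include <stdio.h>

static unsigned int cs_cfg_from_size(unsigned long dramsize)
{
	if (dramsize == 0)
		return 0;	/* chip select disabled */
	/* __builtin_ffs(): 1-based index of the lowest set bit */
	return 0x13 + __builtin_ffs((int)(dramsize >> 20)) - 1;
}

static unsigned long size_from_cs_cfg(unsigned int cfg)
{
	if (cfg < 0x13)
		return 0;
	return (1UL << (cfg - 0x13)) << 20;
}

int main(void)
{
	unsigned long sizes[] = { 1UL << 20, 32UL << 20, 64UL << 20, 512UL << 20 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned int cfg = cs_cfg_from_size(sizes[i]);
		printf("%4lu MB -> cfg 0x%02x -> %4lu MB\n",
		       sizes[i] >> 20, cfg, size_from_cs_cfg(cfg) >> 20);
	}
	return 0;
}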
eldarerathis/FIREFIREFIRE-Multiboot-PoC
board/tqm5200/tqm5200.c
C
gpl-2.0
16,892
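The SRAM sizing in tqm5200.c above relies on address aliasing: a part that only decodes 512 KiB mirrors its contents at base + 512 KiB, so writing a second pattern at the base and watching whether the alias follows it distinguishes 512 KiB from 1 MiB. A minimal sketch of that probe follows; probe_sram_size and PROBE_OFFSET are hypothetical names, and the chip-select setup, syncs, and flash-read trick of the original are omitted.

#include <stdint.h>
#include <stddef.h>

#define PROBE_OFFSET (1u << 19) /* 512 KiB: byte offset of the first alias */

/* Returns the detected size in bytes: 0 (no SRAM), 512 KiB or 1 MiB. */
static size_t probe_sram_size(volatile uint16_t *sram_base)
{
    uint16_t save = sram_base[0];
    size_t size;

    /* Write a test pattern; if it does not read back, nothing is there. */
    sram_base[0] = 0xA5A5;
    if (sram_base[0] != 0xA5A5)
        return 0;

    if (sram_base[PROBE_OFFSET / 2] == 0xA5A5) {
        /* Candidate mirror: rewrite the base and see if the alias follows.
         * (The original prints a detection warning when it does not.) */
        sram_base[0] = 0x1111;
        size = (sram_base[PROBE_OFFSET / 2] == 0x1111) ? 512 * 1024
                                                       : 1024 * 1024;
    } else {
        size = 1024 * 1024;     /* no alias at +512 KiB: the full 1 MiB */
    }

    sram_base[0] = save;        /* restore the original content */
    return size;
}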
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ /* * test_ocspchecker.c * * Test OcspChecker function * */ #include "testutil.h" #include "testutil_nss.h" static void *plContext = NULL; static void printUsage(void) { (void)printf("\nUSAGE:\nOcspChecker -d <certStoreDirectory> TestName " "[ENE|EE] <certLocationDirectory> <trustedCert> " "<targetCert>\n\n"); (void)printf("Validates a chain of certificates between " "<trustedCert> and <targetCert>\n" "using the certs and CRLs in <certLocationDirectory> and " "pkcs11 db from <certStoreDirectory>. " "If ENE is specified,\n" "then an Error is Not Expected. " "If EE is specified, an Error is Expected.\n"); } static char * createFullPathName( char *dirName, char *certFile, void *plContext) { PKIX_UInt32 certFileLen; PKIX_UInt32 dirNameLen; char *certPathName = NULL; PKIX_TEST_STD_VARS(); certFileLen = PL_strlen(certFile); dirNameLen = PL_strlen(dirName); PKIX_TEST_EXPECT_NO_ERROR(PKIX_PL_Malloc(dirNameLen + certFileLen + 2, (void **)&certPathName, plContext)); PL_strcpy(certPathName, dirName); PL_strcat(certPathName, "/"); PL_strcat(certPathName, certFile); printf("certPathName = %s\n", certPathName); cleanup: PKIX_TEST_RETURN(); return (certPathName); } static PKIX_Error * testDefaultCertStore(PKIX_ValidateParams *valParams, char *crlDir) { PKIX_PL_String *dirString = NULL; PKIX_CertStore *certStore = NULL; PKIX_ProcessingParams *procParams = NULL; PKIX_PL_Date *validity = NULL; PKIX_List *revCheckers = NULL; PKIX_RevocationChecker *revChecker = NULL; PKIX_PL_Object *revCheckerContext = NULL; PKIX_OcspChecker *ocspChecker = NULL; PKIX_TEST_STD_VARS(); subTest("PKIX_PL_CollectionCertStoreContext_Create"); /* Create CollectionCertStore */ PKIX_TEST_EXPECT_NO_ERROR(PKIX_PL_String_Create(PKIX_ESCASCII, crlDir, 0, &dirString, plContext)); PKIX_TEST_EXPECT_NO_ERROR(PKIX_PL_CollectionCertStore_Create(dirString, &certStore, plContext)); /* Create CertStore */ PKIX_TEST_EXPECT_NO_ERROR(PKIX_ValidateParams_GetProcessingParams(valParams, &procParams, plContext)); subTest("PKIX_ProcessingParams_AddCertStore"); PKIX_TEST_EXPECT_NO_ERROR(PKIX_ProcessingParams_AddCertStore(procParams, certStore, plContext)); subTest("PKIX_ProcessingParams_SetRevocationEnabled"); PKIX_TEST_EXPECT_NO_ERROR(PKIX_ProcessingParams_SetRevocationEnabled(procParams, PKIX_FALSE, plContext)); /* create current Date */ PKIX_TEST_EXPECT_NO_ERROR(pkix_pl_Date_CreateFromPRTime(PR_Now(), &validity, plContext)); PKIX_TEST_EXPECT_NO_ERROR(PKIX_List_Create(&revCheckers, plContext)); /* create revChecker */ PKIX_TEST_EXPECT_NO_ERROR(PKIX_OcspChecker_Initialize(validity, NULL, /* pwArg */ NULL, /* Use default responder */ &revChecker, plContext)); PKIX_TEST_EXPECT_NO_ERROR(PKIX_RevocationChecker_GetRevCheckerContext(revChecker, &revCheckerContext, plContext)); /* Check that this object is a ocsp checker */ PKIX_TEST_EXPECT_NO_ERROR(pkix_CheckType(revCheckerContext, PKIX_OCSPCHECKER_TYPE, plContext)); ocspChecker = (PKIX_OcspChecker *)revCheckerContext; PKIX_TEST_EXPECT_NO_ERROR(PKIX_OcspChecker_SetVerifyFcn(ocspChecker, PKIX_PL_OcspResponse_UseBuildChain, plContext)); PKIX_TEST_EXPECT_NO_ERROR(PKIX_List_AppendItem(revCheckers, (PKIX_PL_Object *)revChecker, plContext)); PKIX_TEST_EXPECT_NO_ERROR(PKIX_ProcessingParams_SetRevocationCheckers(procParams, revCheckers, plContext)); cleanup: PKIX_TEST_DECREF_AC(dirString); 
PKIX_TEST_DECREF_AC(procParams); PKIX_TEST_DECREF_AC(certStore); PKIX_TEST_DECREF_AC(revCheckers); PKIX_TEST_DECREF_AC(revChecker); PKIX_TEST_DECREF_AC(ocspChecker); PKIX_TEST_DECREF_AC(validity); PKIX_TEST_RETURN(); return (0); } int test_ocsp(int argc, char *argv[]) { PKIX_ValidateParams *valParams = NULL; PKIX_ProcessingParams *procParams = NULL; PKIX_ComCertSelParams *certSelParams = NULL; PKIX_CertSelector *certSelector = NULL; PKIX_ValidateResult *valResult = NULL; PKIX_UInt32 actualMinorVersion; PKIX_UInt32 j = 0; PKIX_UInt32 k = 0; PKIX_UInt32 chainLength = 0; PKIX_Boolean testValid = PKIX_TRUE; PKIX_List *chainCerts = NULL; PKIX_VerifyNode *verifyTree = NULL; PKIX_PL_String *verifyString = NULL; PKIX_PL_Cert *dirCert = NULL; PKIX_PL_Cert *trustedCert = NULL; PKIX_PL_Cert *targetCert = NULL; PKIX_TrustAnchor *anchor = NULL; PKIX_List *anchors = NULL; char *dirCertName = NULL; char *anchorCertName = NULL; char *dirName = NULL; char *databaseDir = NULL; PKIX_TEST_STD_VARS(); if (argc < 5) { printUsage(); return (0); } startTests("OcspChecker"); PKIX_TEST_EXPECT_NO_ERROR( PKIX_PL_NssContext_Create(0, PKIX_FALSE, NULL, &plContext)); /* ENE = expect no error; EE = expect error */ if (PORT_Strcmp(argv[2 + j], "ENE") == 0) { testValid = PKIX_TRUE; } else if (PORT_Strcmp(argv[2 + j], "EE") == 0) { testValid = PKIX_FALSE; } else { printUsage(); return (0); } subTest(argv[1 + j]); dirName = argv[3 + j]; chainLength = argc - j - 5; PKIX_TEST_EXPECT_NO_ERROR(PKIX_List_Create(&chainCerts, plContext)); for (k = 0; k < chainLength; k++) { dirCert = createCert(dirName, argv[5 + k + j], plContext); if (k == 0) { PKIX_TEST_EXPECT_NO_ERROR(PKIX_PL_Object_IncRef((PKIX_PL_Object *)dirCert, plContext)); targetCert = dirCert; } PKIX_TEST_EXPECT_NO_ERROR(PKIX_List_AppendItem(chainCerts, (PKIX_PL_Object *)dirCert, plContext)); PKIX_TEST_DECREF_BC(dirCert); } /* create processing params with list of trust anchors */ anchorCertName = argv[4 + j]; trustedCert = createCert(dirName, anchorCertName, plContext); PKIX_TEST_EXPECT_NO_ERROR(PKIX_TrustAnchor_CreateWithCert(trustedCert, &anchor, plContext)); PKIX_TEST_EXPECT_NO_ERROR(PKIX_List_Create(&anchors, plContext)); PKIX_TEST_EXPECT_NO_ERROR(PKIX_List_AppendItem(anchors, (PKIX_PL_Object *)anchor, plContext)); PKIX_TEST_EXPECT_NO_ERROR(PKIX_ProcessingParams_Create(anchors, &procParams, plContext)); /* create CertSelector with target certificate in params */ PKIX_TEST_EXPECT_NO_ERROR(PKIX_ComCertSelParams_Create(&certSelParams, plContext)); PKIX_TEST_EXPECT_NO_ERROR(PKIX_ComCertSelParams_SetCertificate(certSelParams, targetCert, plContext)); PKIX_TEST_EXPECT_NO_ERROR(PKIX_CertSelector_Create(NULL, NULL, &certSelector, plContext)); PKIX_TEST_EXPECT_NO_ERROR(PKIX_CertSelector_SetCommonCertSelectorParams(certSelector, certSelParams, plContext)); PKIX_TEST_EXPECT_NO_ERROR(PKIX_ProcessingParams_SetTargetCertConstraints(procParams, certSelector, plContext)); PKIX_TEST_EXPECT_NO_ERROR(PKIX_ValidateParams_Create(procParams, chainCerts, &valParams, plContext)); testDefaultCertStore(valParams, dirName); pkixTestErrorResult = PKIX_ValidateChain(valParams, &valResult, &verifyTree, plContext); if (pkixTestErrorResult) { if (testValid == PKIX_FALSE) { /* EE */ (void)printf("EXPECTED ERROR RECEIVED!\n"); } else { /* ENE */ testError("UNEXPECTED ERROR RECEIVED"); } PKIX_TEST_DECREF_BC(pkixTestErrorResult); } else { if (testValid == PKIX_TRUE) { /* ENE */ (void)printf("EXPECTED SUCCESSFUL VALIDATION!\n"); } else { /* EE */ (void)printf("UNEXPECTED SUCCESSFUL VALIDATION!\n"); } } 
subTest("Displaying VerifyTree"); if (verifyTree == NULL) { (void)printf("VerifyTree is NULL\n"); } else { PKIX_TEST_EXPECT_NO_ERROR(PKIX_PL_Object_ToString((PKIX_PL_Object *)verifyTree, &verifyString, plContext)); (void)printf("verifyTree is\n%s\n", verifyString->escAsciiString); PKIX_TEST_DECREF_BC(verifyString); PKIX_TEST_DECREF_BC(verifyTree); } cleanup: PKIX_TEST_DECREF_AC(valParams); PKIX_TEST_DECREF_AC(procParams); PKIX_TEST_DECREF_AC(certSelParams); PKIX_TEST_DECREF_AC(certSelector); PKIX_TEST_DECREF_AC(chainCerts); PKIX_TEST_DECREF_AC(anchors); PKIX_TEST_DECREF_AC(anchor); PKIX_TEST_DECREF_AC(trustedCert); PKIX_TEST_DECREF_AC(targetCert); PKIX_TEST_DECREF_AC(valResult); PKIX_Shutdown(plContext); PKIX_TEST_RETURN(); endTests("OcspChecker"); return (0); }
Yukarumya/Yukarum-Redfoxes
security/nss/cmd/libpkix/pkix/top/test_ocsp.c
C
mpl-2.0
9,426
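createFullPathName() in the test above joins a directory and a file name with PKIX_PL_Malloc plus PL_strcpy/PL_strcat, where the "+ 2" quietly covers the '/' separator and the terminating NUL. A plain-C sketch of the same step, assuming nothing from NSS (join_path is a hypothetical stand-in, not an NSS or NSPR API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Joins "<dir>/<file>" into a freshly malloc'd string; caller frees. */
static char *join_path(const char *dir, const char *file)
{
    size_t len = strlen(dir) + strlen(file) + 2; /* '/' plus the NUL */
    char *path = malloc(len);

    if (path != NULL)
        snprintf(path, len, "%s/%s", dir, file);
    return path;
}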
/* Implementation of the SET_EXPONENT intrinsic Copyright 2003 Free Software Foundation, Inc. Contributed by Richard Henderson <rth@redhat.com>. This file is part of the GNU Fortran 95 runtime library (libgfortran). Libgfortran is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. In addition to the permissions in the GNU General Public License, the Free Software Foundation gives you unlimited permission to link the compiled version of this file into combinations with other programs, and to distribute those combinations without any restriction coming from the use of this file. (The General Public License restrictions do apply in other respects; for example, they cover modification of the file, and distribution when not linked into a combine executable.) Libgfortran is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with libgfortran; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "config.h" #include <math.h> #include "libgfortran.h" #if defined (HAVE_GFC_REAL_16) && defined (HAVE_SCALBNL) && defined (HAVE_FREXPL) extern GFC_REAL_16 set_exponent_r16 (GFC_REAL_16 s, GFC_INTEGER_4 i); export_proto(set_exponent_r16); GFC_REAL_16 set_exponent_r16 (GFC_REAL_16 s, GFC_INTEGER_4 i) { int dummy_exp; return scalbnl (frexpl (s, &dummy_exp), i); } #endif
shaotuanchen/sunflower_exp
tools/source/gcc-4.2.4/libgfortran/generated/set_exponent_r16.c
C
bsd-3-clause
1,802
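set_exponent_r16() above implements Fortran's SET_EXPONENT(X, I) by discarding the original binary exponent with frexpl() and applying the requested one with scalbnl(). The same idiom for plain C doubles, as a self-contained sketch (set_exponent_r8 is a hypothetical name, not part of libgfortran):

#include <math.h>
#include <stdio.h>

/* frexp() reduces s to a fraction in [0.5, 1); scalbn() scales it by 2^i. */
static double set_exponent_r8(double s, int i)
{
    int dummy_exp;
    return scalbn(frexp(s, &dummy_exp), i);
}

int main(void)
{
    /* 48.0 = 0.75 * 2^6, so setting the exponent to 1 yields 1.5. */
    printf("%g\n", set_exponent_r8(48.0, 1));
    return 0;
}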
/* * Builtin "git tag" * * Copyright (c) 2007 Kristian Høgsberg <krh@redhat.com>, * Carlos Rica <jasampler@gmail.com> * Based on git-tag.sh and mktag.c by Linus Torvalds. */ #include "cache.h" #include "builtin.h" #include "refs.h" #include "tag.h" #include "run-command.h" #include "parse-options.h" #include "diff.h" #include "revision.h" #include "gpg-interface.h" #include "sha1-array.h" #include "column.h" #include "ref-filter.h" static const char * const git_tag_usage[] = { N_("git tag [-a | -s | -u <key-id>] [-f] [-m <msg> | -F <file>] <tagname> [<head>]"), N_("git tag -d <tagname>..."), N_("git tag -l [-n[<num>]] [--contains <commit>] [--points-at <object>]" "\n\t\t[--format=<format>] [--[no-]merged [<commit>]] [<pattern>...]"), N_("git tag -v <tagname>..."), NULL }; static unsigned int colopts; static int list_tags(struct ref_filter *filter, struct ref_sorting *sorting, const char *format) { struct ref_array array; char *to_free = NULL; int i; memset(&array, 0, sizeof(array)); if (filter->lines == -1) filter->lines = 0; if (!format) { if (filter->lines) { to_free = xstrfmt("%s %%(contents:lines=%d)", "%(align:15)%(refname:strip=2)%(end)", filter->lines); format = to_free; } else format = "%(refname:strip=2)"; } verify_ref_format(format); filter->with_commit_tag_algo = 1; filter_refs(&array, filter, FILTER_REFS_TAGS); ref_array_sort(sorting, &array); for (i = 0; i < array.nr; i++) show_ref_array_item(array.items[i], format, 0); ref_array_clear(&array); free(to_free); return 0; } typedef int (*each_tag_name_fn)(const char *name, const char *ref, const unsigned char *sha1); static int for_each_tag_name(const char **argv, each_tag_name_fn fn) { const char **p; char ref[PATH_MAX]; int had_error = 0; unsigned char sha1[20]; for (p = argv; *p; p++) { if (snprintf(ref, sizeof(ref), "refs/tags/%s", *p) >= sizeof(ref)) { error(_("tag name too long: %.*s..."), 50, *p); had_error = 1; continue; } if (read_ref(ref, sha1)) { error(_("tag '%s' not found."), *p); had_error = 1; continue; } if (fn(*p, ref, sha1)) had_error = 1; } return had_error; } static int delete_tag(const char *name, const char *ref, const unsigned char *sha1) { if (delete_ref(ref, sha1, 0)) return 1; printf(_("Deleted tag '%s' (was %s)\n"), name, find_unique_abbrev(sha1, DEFAULT_ABBREV)); return 0; } static int verify_tag(const char *name, const char *ref, const unsigned char *sha1) { const char *argv_verify_tag[] = {"verify-tag", "-v", "SHA1_HEX", NULL}; argv_verify_tag[2] = sha1_to_hex(sha1); if (run_command_v_opt(argv_verify_tag, RUN_GIT_CMD)) return error(_("could not verify the tag '%s'"), name); return 0; } static int do_sign(struct strbuf *buffer) { return sign_buffer(buffer, buffer, get_signing_key()); } static const char tag_template[] = N_("\nWrite a message for tag:\n %s\n" "Lines starting with '%c' will be ignored.\n"); static const char tag_template_nocleanup[] = N_("\nWrite a message for tag:\n %s\n" "Lines starting with '%c' will be kept; you may remove them" " yourself if you want to.\n"); /* Parse arg given and add it the ref_sorting array */ static int parse_sorting_string(const char *arg, struct ref_sorting **sorting_tail) { struct ref_sorting *s; int len; s = xcalloc(1, sizeof(*s)); s->next = *sorting_tail; *sorting_tail = s; if (*arg == '-') { s->reverse = 1; arg++; } if (skip_prefix(arg, "version:", &arg) || skip_prefix(arg, "v:", &arg)) s->version = 1; len = strlen(arg); s->atom = parse_ref_filter_atom(arg, arg+len); return 0; } static int git_tag_config(const char *var, const char *value, void *cb) { int 
status; struct ref_sorting **sorting_tail = (struct ref_sorting **)cb; if (!strcmp(var, "tag.sort")) { if (!value) return config_error_nonbool(var); parse_sorting_string(value, sorting_tail); return 0; } status = git_gpg_config(var, value, cb); if (status) return status; if (starts_with(var, "column.")) return git_column_config(var, value, "tag", &colopts); return git_default_config(var, value, cb); } static void write_tag_body(int fd, const unsigned char *sha1) { unsigned long size; enum object_type type; char *buf, *sp; buf = read_sha1_file(sha1, &type, &size); if (!buf) return; /* skip header */ sp = strstr(buf, "\n\n"); if (!sp || !size || type != OBJ_TAG) { free(buf); return; } sp += 2; /* skip the 2 LFs */ write_or_die(fd, sp, parse_signature(sp, buf + size - sp)); free(buf); } static int build_tag_object(struct strbuf *buf, int sign, unsigned char *result) { if (sign && do_sign(buf) < 0) return error(_("unable to sign the tag")); if (write_sha1_file(buf->buf, buf->len, tag_type, result) < 0) return error(_("unable to write tag file")); return 0; } struct create_tag_options { unsigned int message_given:1; unsigned int sign; enum { CLEANUP_NONE, CLEANUP_SPACE, CLEANUP_ALL } cleanup_mode; }; static void create_tag(const unsigned char *object, const char *tag, struct strbuf *buf, struct create_tag_options *opt, unsigned char *prev, unsigned char *result) { enum object_type type; char header_buf[1024]; int header_len; char *path = NULL; type = sha1_object_info(object, NULL); if (type <= OBJ_NONE) die(_("bad object type.")); header_len = snprintf(header_buf, sizeof(header_buf), "object %s\n" "type %s\n" "tag %s\n" "tagger %s\n\n", sha1_to_hex(object), typename(type), tag, git_committer_info(IDENT_STRICT)); if (header_len > sizeof(header_buf) - 1) die(_("tag header too big.")); if (!opt->message_given) { int fd; /* write the template message before editing: */ path = git_pathdup("TAG_EDITMSG"); fd = open(path, O_CREAT | O_TRUNC | O_WRONLY, 0600); if (fd < 0) die_errno(_("could not create file '%s'"), path); if (!is_null_sha1(prev)) { write_tag_body(fd, prev); } else { struct strbuf buf = STRBUF_INIT; strbuf_addch(&buf, '\n'); if (opt->cleanup_mode == CLEANUP_ALL) strbuf_commented_addf(&buf, _(tag_template), tag, comment_line_char); else strbuf_commented_addf(&buf, _(tag_template_nocleanup), tag, comment_line_char); write_or_die(fd, buf.buf, buf.len); strbuf_release(&buf); } close(fd); if (launch_editor(path, buf, NULL)) { fprintf(stderr, _("Please supply the message using either -m or -F option.\n")); exit(1); } } if (opt->cleanup_mode != CLEANUP_NONE) strbuf_stripspace(buf, opt->cleanup_mode == CLEANUP_ALL); if (!opt->message_given && !buf->len) die(_("no tag message?")); strbuf_insert(buf, 0, header_buf, header_len); if (build_tag_object(buf, opt->sign, result) < 0) { if (path) fprintf(stderr, _("The tag message has been left in %s\n"), path); exit(128); } if (path) { unlink_or_warn(path); free(path); } } struct msg_arg { int given; struct strbuf buf; }; static int parse_msg_arg(const struct option *opt, const char *arg, int unset) { struct msg_arg *msg = opt->value; if (!arg) return -1; if (msg->buf.len) strbuf_addstr(&(msg->buf), "\n\n"); strbuf_addstr(&(msg->buf), arg); msg->given = 1; return 0; } static int strbuf_check_tag_ref(struct strbuf *sb, const char *name) { if (name[0] == '-') return -1; strbuf_reset(sb); strbuf_addf(sb, "refs/tags/%s", name); return check_refname_format(sb->buf, 0); } int cmd_tag(int argc, const char **argv, const char *prefix) { struct strbuf buf = 
STRBUF_INIT; struct strbuf ref = STRBUF_INIT; unsigned char object[20], prev[20]; const char *object_ref, *tag; struct create_tag_options opt; char *cleanup_arg = NULL; int create_reflog = 0; int annotate = 0, force = 0; int cmdmode = 0; const char *msgfile = NULL, *keyid = NULL; struct msg_arg msg = { 0, STRBUF_INIT }; struct ref_transaction *transaction; struct strbuf err = STRBUF_INIT; struct ref_filter filter; static struct ref_sorting *sorting = NULL, **sorting_tail = &sorting; const char *format = NULL; struct option options[] = { OPT_CMDMODE('l', "list", &cmdmode, N_("list tag names"), 'l'), { OPTION_INTEGER, 'n', NULL, &filter.lines, N_("n"), N_("print <n> lines of each tag message"), PARSE_OPT_OPTARG, NULL, 1 }, OPT_CMDMODE('d', "delete", &cmdmode, N_("delete tags"), 'd'), OPT_CMDMODE('v', "verify", &cmdmode, N_("verify tags"), 'v'), OPT_GROUP(N_("Tag creation options")), OPT_BOOL('a', "annotate", &annotate, N_("annotated tag, needs a message")), OPT_CALLBACK('m', "message", &msg, N_("message"), N_("tag message"), parse_msg_arg), OPT_FILENAME('F', "file", &msgfile, N_("read message from file")), OPT_BOOL('s', "sign", &opt.sign, N_("annotated and GPG-signed tag")), OPT_STRING(0, "cleanup", &cleanup_arg, N_("mode"), N_("how to strip spaces and #comments from message")), OPT_STRING('u', "local-user", &keyid, N_("key-id"), N_("use another key to sign the tag")), OPT__FORCE(&force, N_("replace the tag if exists")), OPT_BOOL(0, "create-reflog", &create_reflog, N_("create a reflog")), OPT_GROUP(N_("Tag listing options")), OPT_COLUMN(0, "column", &colopts, N_("show tag list in columns")), OPT_CONTAINS(&filter.with_commit, N_("print only tags that contain the commit")), OPT_WITH(&filter.with_commit, N_("print only tags that contain the commit")), OPT_MERGED(&filter, N_("print only tags that are merged")), OPT_NO_MERGED(&filter, N_("print only tags that are not merged")), OPT_CALLBACK(0 , "sort", sorting_tail, N_("key"), N_("field name to sort on"), &parse_opt_ref_sorting), { OPTION_CALLBACK, 0, "points-at", &filter.points_at, N_("object"), N_("print only tags of the object"), 0, parse_opt_object_name }, OPT_STRING( 0 , "format", &format, N_("format"), N_("format to use for the output")), OPT_END() }; git_config(git_tag_config, sorting_tail); memset(&opt, 0, sizeof(opt)); memset(&filter, 0, sizeof(filter)); filter.lines = -1; argc = parse_options(argc, argv, prefix, options, git_tag_usage, 0); if (keyid) { opt.sign = 1; set_signing_key(keyid); } if (opt.sign) annotate = 1; if (argc == 0 && !cmdmode) cmdmode = 'l'; if ((annotate || msg.given || msgfile || force) && (cmdmode != 0)) usage_with_options(git_tag_usage, options); finalize_colopts(&colopts, -1); if (cmdmode == 'l' && filter.lines != -1) { if (explicitly_enable_column(colopts)) die(_("--column and -n are incompatible")); colopts = 0; } if (!sorting) sorting = ref_default_sorting(); if (cmdmode == 'l') { int ret; if (column_active(colopts)) { struct column_options copts; memset(&copts, 0, sizeof(copts)); copts.padding = 2; run_column_filter(colopts, &copts); } filter.name_patterns = argv; ret = list_tags(&filter, sorting, format); if (column_active(colopts)) stop_column_filter(); return ret; } if (filter.lines != -1) die(_("-n option is only allowed with -l.")); if (filter.with_commit) die(_("--contains option is only allowed with -l.")); if (filter.points_at.nr) die(_("--points-at option is only allowed with -l.")); if (filter.merge_commit) die(_("--merged and --no-merged option are only allowed with -l")); if (cmdmode == 'd') 
return for_each_tag_name(argv, delete_tag); if (cmdmode == 'v') return for_each_tag_name(argv, verify_tag); if (msg.given || msgfile) { if (msg.given && msgfile) die(_("only one -F or -m option is allowed.")); annotate = 1; if (msg.given) strbuf_addbuf(&buf, &(msg.buf)); else { if (!strcmp(msgfile, "-")) { if (strbuf_read(&buf, 0, 1024) < 0) die_errno(_("cannot read '%s'"), msgfile); } else { if (strbuf_read_file(&buf, msgfile, 1024) < 0) die_errno(_("could not open or read '%s'"), msgfile); } } } tag = argv[0]; object_ref = argc == 2 ? argv[1] : "HEAD"; if (argc > 2) die(_("too many params")); if (get_sha1(object_ref, object)) die(_("Failed to resolve '%s' as a valid ref."), object_ref); if (strbuf_check_tag_ref(&ref, tag)) die(_("'%s' is not a valid tag name."), tag); if (read_ref(ref.buf, prev)) hashclr(prev); else if (!force) die(_("tag '%s' already exists"), tag); opt.message_given = msg.given || msgfile; if (!cleanup_arg || !strcmp(cleanup_arg, "strip")) opt.cleanup_mode = CLEANUP_ALL; else if (!strcmp(cleanup_arg, "verbatim")) opt.cleanup_mode = CLEANUP_NONE; else if (!strcmp(cleanup_arg, "whitespace")) opt.cleanup_mode = CLEANUP_SPACE; else die(_("Invalid cleanup mode %s"), cleanup_arg); if (annotate) create_tag(object, tag, &buf, &opt, prev, object); transaction = ref_transaction_begin(&err); if (!transaction || ref_transaction_update(transaction, ref.buf, object, prev, create_reflog ? REF_FORCE_CREATE_REFLOG : 0, NULL, &err) || ref_transaction_commit(transaction, &err)) die("%s", err.buf); ref_transaction_free(transaction); if (force && !is_null_sha1(prev) && hashcmp(prev, object)) printf(_("Updated tag '%s' (was %s)\n"), tag, find_unique_abbrev(prev, DEFAULT_ABBREV)); strbuf_release(&err); strbuf_release(&buf); strbuf_release(&ref); return 0; }
TigerKid001/git
builtin/tag.c
C
gpl-2.0
13,238
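parse_sorting_string() in builtin/tag.c accepts keys such as "-v:refname": a leading '-' marks the sort as reversed, a "version:" or "v:" prefix selects version-aware comparison, and the remainder is parsed as the atom. A standalone sketch of just that prefix handling, using local stand-ins for git's types (struct sort_key and this skip_prefix are hypothetical, though git ships a skip_prefix of its own):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct sort_key {
    bool reverse;
    bool version;
    const char *atom;
};

/* If str starts with prefix, advance *out past it and return true. */
static bool skip_prefix(const char *str, const char *prefix, const char **out)
{
    size_t len = strlen(prefix);
    if (strncmp(str, prefix, len) == 0) {
        *out = str + len;
        return true;
    }
    return false;
}

static void parse_sort_key(const char *arg, struct sort_key *key)
{
    key->reverse = (*arg == '-');
    if (key->reverse)
        arg++;
    key->version = skip_prefix(arg, "version:", &arg) ||
                   skip_prefix(arg, "v:", &arg);
    key->atom = arg;
}

int main(void)
{
    struct sort_key key;
    parse_sort_key("-v:refname", &key);
    /* Prints: reverse=1 version=1 atom=refname */
    printf("reverse=%d version=%d atom=%s\n", key.reverse, key.version, key.atom);
    return 0;
}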
// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux Socket Filter - Kernel level socket filtering * * Based on the design of the Berkeley Packet Filter. The new * internal format has been designed by PLUMgrid: * * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com * * Authors: * * Jay Schulist <jschlst@samba.org> * Alexei Starovoitov <ast@plumgrid.com> * Daniel Borkmann <dborkman@redhat.com> * * Andi Kleen - Fix a few bad bugs and races. * Kris Katterjohn - Added many additional checks in bpf_check_classic() */ #include <linux/module.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/fcntl.h> #include <linux/socket.h> #include <linux/sock_diag.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/if_packet.h> #include <linux/if_arp.h> #include <linux/gfp.h> #include <net/inet_common.h> #include <net/ip.h> #include <net/protocol.h> #include <net/netlink.h> #include <linux/skbuff.h> #include <linux/skmsg.h> #include <net/sock.h> #include <net/flow_dissector.h> #include <linux/errno.h> #include <linux/timer.h> #include <linux/uaccess.h> #include <asm/unaligned.h> #include <asm/cmpxchg.h> #include <linux/filter.h> #include <linux/ratelimit.h> #include <linux/seccomp.h> #include <linux/if_vlan.h> #include <linux/bpf.h> #include <net/sch_generic.h> #include <net/cls_cgroup.h> #include <net/dst_metadata.h> #include <net/dst.h> #include <net/sock_reuseport.h> #include <net/busy_poll.h> #include <net/tcp.h> #include <net/xfrm.h> #include <net/udp.h> #include <linux/bpf_trace.h> #include <net/xdp_sock.h> #include <linux/inetdevice.h> #include <net/inet_hashtables.h> #include <net/inet6_hashtables.h> #include <net/ip_fib.h> #include <net/flow.h> #include <net/arp.h> #include <net/ipv6.h> #include <net/net_namespace.h> #include <linux/seg6_local.h> #include <net/seg6.h> #include <net/seg6_local.h> #include <net/lwtunnel.h> #include <net/ipv6_stubs.h> #include <net/bpf_sk_storage.h> /** * sk_filter_trim_cap - run a packet through a socket filter * @sk: sock associated with &sk_buff * @skb: buffer to filter * @cap: limit on how short the eBPF program may trim the packet * * Run the eBPF program and then cut skb->data to correct size returned by * the program. If pkt_len is 0 we toss packet. If skb->len is smaller * than pkt_len we keep whole skb->data. This is the socket level * wrapper to BPF_PROG_RUN. It returns 0 if the packet should * be accepted or -EPERM if the packet should be tossed. * */ int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap) { int err; struct sk_filter *filter; /* * If the skb was allocated from pfmemalloc reserves, only * allow SOCK_MEMALLOC sockets to use it as this socket is * helping free memory */ if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) { NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP); return -ENOMEM; } err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb); if (err) return err; err = security_sock_rcv_skb(sk, skb); if (err) return err; rcu_read_lock(); filter = rcu_dereference(sk->sk_filter); if (filter) { struct sock *save_sk = skb->sk; unsigned int pkt_len; skb->sk = sk; pkt_len = bpf_prog_run_save_cb(filter->prog, skb); skb->sk = save_sk; err = pkt_len ? 
pskb_trim(skb, max(cap, pkt_len)) : -EPERM; } rcu_read_unlock(); return err; } EXPORT_SYMBOL(sk_filter_trim_cap); BPF_CALL_1(bpf_skb_get_pay_offset, struct sk_buff *, skb) { return skb_get_poff(skb); } BPF_CALL_3(bpf_skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x) { struct nlattr *nla; if (skb_is_nonlinear(skb)) return 0; if (skb->len < sizeof(struct nlattr)) return 0; if (a > skb->len - sizeof(struct nlattr)) return 0; nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x); if (nla) return (void *) nla - (void *) skb->data; return 0; } BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x) { struct nlattr *nla; if (skb_is_nonlinear(skb)) return 0; if (skb->len < sizeof(struct nlattr)) return 0; if (a > skb->len - sizeof(struct nlattr)) return 0; nla = (struct nlattr *) &skb->data[a]; if (nla->nla_len > skb->len - a) return 0; nla = nla_find_nested(nla, x); if (nla) return (void *) nla - (void *) skb->data; return 0; } BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *, data, int, headlen, int, offset) { u8 tmp, *ptr; const int len = sizeof(tmp); if (offset >= 0) { if (headlen - offset >= len) return *(u8 *)(data + offset); if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) return tmp; } else { ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len); if (likely(ptr)) return *(u8 *)ptr; } return -EFAULT; } BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb, int, offset) { return ____bpf_skb_load_helper_8(skb, skb->data, skb->len - skb->data_len, offset); } BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *, data, int, headlen, int, offset) { u16 tmp, *ptr; const int len = sizeof(tmp); if (offset >= 0) { if (headlen - offset >= len) return get_unaligned_be16(data + offset); if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) return be16_to_cpu(tmp); } else { ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len); if (likely(ptr)) return get_unaligned_be16(ptr); } return -EFAULT; } BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb, int, offset) { return ____bpf_skb_load_helper_16(skb, skb->data, skb->len - skb->data_len, offset); } BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *, data, int, headlen, int, offset) { u32 tmp, *ptr; const int len = sizeof(tmp); if (likely(offset >= 0)) { if (headlen - offset >= len) return get_unaligned_be32(data + offset); if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) return be32_to_cpu(tmp); } else { ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len); if (likely(ptr)) return get_unaligned_be32(ptr); } return -EFAULT; } BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb, int, offset) { return ____bpf_skb_load_helper_32(skb, skb->data, skb->len - skb->data_len, offset); } BPF_CALL_0(bpf_get_raw_cpu_id) { return raw_smp_processor_id(); } static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = { .func = bpf_get_raw_cpu_id, .gpl_only = false, .ret_type = RET_INTEGER, }; static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg, struct bpf_insn *insn_buf) { struct bpf_insn *insn = insn_buf; switch (skb_field) { case SKF_AD_MARK: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, offsetof(struct sk_buff, mark)); break; case SKF_AD_PKTTYPE: *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET()); *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX); #ifdef 
__BIG_ENDIAN_BITFIELD *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5); #endif break; case SKF_AD_QUEUE: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2); *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, offsetof(struct sk_buff, queue_mapping)); break; case SKF_AD_VLAN_TAG: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); /* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */ *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, offsetof(struct sk_buff, vlan_tci)); break; case SKF_AD_VLAN_TAG_PRESENT: *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET()); if (PKT_VLAN_PRESENT_BIT) *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT); if (PKT_VLAN_PRESENT_BIT < 7) *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1); break; } return insn - insn_buf; } static bool convert_bpf_extensions(struct sock_filter *fp, struct bpf_insn **insnp) { struct bpf_insn *insn = *insnp; u32 cnt; switch (fp->k) { case SKF_AD_OFF + SKF_AD_PROTOCOL: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); /* A = *(u16 *) (CTX + offsetof(protocol)) */ *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, offsetof(struct sk_buff, protocol)); /* A = ntohs(A) [emitting a nop or swap16] */ *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16); break; case SKF_AD_OFF + SKF_AD_PKTTYPE: cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn); insn += cnt - 1; break; case SKF_AD_OFF + SKF_AD_IFINDEX: case SKF_AD_OFF + SKF_AD_HATYPE: BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev), BPF_REG_TMP, BPF_REG_CTX, offsetof(struct sk_buff, dev)); /* if (tmp != 0) goto pc + 1 */ *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1); *insn++ = BPF_EXIT_INSN(); if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX) *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP, offsetof(struct net_device, ifindex)); else *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP, offsetof(struct net_device, type)); break; case SKF_AD_OFF + SKF_AD_MARK: cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn); insn += cnt - 1; break; case SKF_AD_OFF + SKF_AD_RXHASH: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, offsetof(struct sk_buff, hash)); break; case SKF_AD_OFF + SKF_AD_QUEUE: cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn); insn += cnt - 1; break; case SKF_AD_OFF + SKF_AD_VLAN_TAG: cnt = convert_skb_access(SKF_AD_VLAN_TAG, BPF_REG_A, BPF_REG_CTX, insn); insn += cnt - 1; break; case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT: cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT, BPF_REG_A, BPF_REG_CTX, insn); insn += cnt - 1; break; case SKF_AD_OFF + SKF_AD_VLAN_TPID: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2); /* A = *(u16 *) (CTX + offsetof(vlan_proto)) */ *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, offsetof(struct sk_buff, vlan_proto)); /* A = ntohs(A) [emitting a nop or swap16] */ *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16); break; case SKF_AD_OFF + SKF_AD_PAY_OFFSET: case SKF_AD_OFF + SKF_AD_NLATTR: case SKF_AD_OFF + SKF_AD_NLATTR_NEST: case SKF_AD_OFF + SKF_AD_CPU: case SKF_AD_OFF + SKF_AD_RANDOM: /* arg1 = CTX */ *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX); /* arg2 = A */ *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A); /* arg3 = X */ *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X); /* Emit call(arg1=CTX, arg2=A, arg3=X) */ switch (fp->k) { case SKF_AD_OFF + 
SKF_AD_PAY_OFFSET: *insn = BPF_EMIT_CALL(bpf_skb_get_pay_offset); break; case SKF_AD_OFF + SKF_AD_NLATTR: *insn = BPF_EMIT_CALL(bpf_skb_get_nlattr); break; case SKF_AD_OFF + SKF_AD_NLATTR_NEST: *insn = BPF_EMIT_CALL(bpf_skb_get_nlattr_nest); break; case SKF_AD_OFF + SKF_AD_CPU: *insn = BPF_EMIT_CALL(bpf_get_raw_cpu_id); break; case SKF_AD_OFF + SKF_AD_RANDOM: *insn = BPF_EMIT_CALL(bpf_user_rnd_u32); bpf_user_rnd_init_once(); break; } break; case SKF_AD_OFF + SKF_AD_ALU_XOR_X: /* A ^= X */ *insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X); break; default: /* This is just a dummy call to avoid letting the compiler * evict __bpf_call_base() as an optimization. Placed here * where no-one bothers. */ BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0); return false; } *insnp = insn; return true; } static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp) { const bool unaligned_ok = IS_BUILTIN(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS); int size = bpf_size_to_bytes(BPF_SIZE(fp->code)); bool endian = BPF_SIZE(fp->code) == BPF_H || BPF_SIZE(fp->code) == BPF_W; bool indirect = BPF_MODE(fp->code) == BPF_IND; const int ip_align = NET_IP_ALIGN; struct bpf_insn *insn = *insnp; int offset = fp->k; if (!indirect && ((unaligned_ok && offset >= 0) || (!unaligned_ok && offset >= 0 && offset + ip_align >= 0 && offset + ip_align % size == 0))) { bool ldx_off_ok = offset <= S16_MAX; *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H); if (offset) *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset); *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP, size, 2 + endian + (!ldx_off_ok * 2)); if (ldx_off_ok) { *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, BPF_REG_D, offset); } else { *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D); *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset); *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, BPF_REG_TMP, 0); } if (endian) *insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8); *insn++ = BPF_JMP_A(8); } *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX); *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_D); *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_H); if (!indirect) { *insn++ = BPF_MOV64_IMM(BPF_REG_ARG4, offset); } else { *insn++ = BPF_MOV64_REG(BPF_REG_ARG4, BPF_REG_X); if (fp->k) *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG4, offset); } switch (BPF_SIZE(fp->code)) { case BPF_B: *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8); break; case BPF_H: *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16); break; case BPF_W: *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32); break; default: return false; } *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_A, 0, 2); *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A); *insn = BPF_EXIT_INSN(); *insnp = insn; return true; } /** * bpf_convert_filter - convert filter program * @prog: the user passed filter program * @len: the length of the user passed filter program * @new_prog: allocated 'struct bpf_prog' or NULL * @new_len: pointer to store length of converted program * @seen_ld_abs: bool whether we've seen ld_abs/ind * * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn' * style extended BPF (eBPF). 
* Conversion workflow: * * 1) First pass for calculating the new program length: * bpf_convert_filter(old_prog, old_len, NULL, &new_len, &seen_ld_abs) * * 2) 2nd pass to remap in two passes: 1st pass finds new * jump offsets, 2nd pass remapping: * bpf_convert_filter(old_prog, old_len, new_prog, &new_len, &seen_ld_abs) */ static int bpf_convert_filter(struct sock_filter *prog, int len, struct bpf_prog *new_prog, int *new_len, bool *seen_ld_abs) { int new_flen = 0, pass = 0, target, i, stack_off; struct bpf_insn *new_insn, *first_insn = NULL; struct sock_filter *fp; int *addrs = NULL; u8 bpf_src; BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK); BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); if (len <= 0 || len > BPF_MAXINSNS) return -EINVAL; if (new_prog) { first_insn = new_prog->insnsi; addrs = kcalloc(len, sizeof(*addrs), GFP_KERNEL | __GFP_NOWARN); if (!addrs) return -ENOMEM; } do_pass: new_insn = first_insn; fp = prog; /* Classic BPF related prologue emission. */ if (new_prog) { /* Classic BPF expects A and X to be reset first. These need * to be guaranteed to be the first two instructions. */ *new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A); *new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X); /* All programs must keep CTX in callee saved BPF_REG_CTX. * In eBPF case it's done by the compiler, here we need to * do this ourself. Initial CTX is present in BPF_REG_ARG1. */ *new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1); if (*seen_ld_abs) { /* For packet access in classic BPF, cache skb->data * in callee-saved BPF R8 and skb->len - skb->data_len * (headlen) in BPF R9. Since classic BPF is read-only * on CTX, we only need to cache it once. */ *new_insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data), BPF_REG_D, BPF_REG_CTX, offsetof(struct sk_buff, data)); *new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_H, BPF_REG_CTX, offsetof(struct sk_buff, len)); *new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_CTX, offsetof(struct sk_buff, data_len)); *new_insn++ = BPF_ALU32_REG(BPF_SUB, BPF_REG_H, BPF_REG_TMP); } } else { new_insn += 3; } for (i = 0; i < len; fp++, i++) { struct bpf_insn tmp_insns[32] = { }; struct bpf_insn *insn = tmp_insns; if (addrs) addrs[i] = new_insn - first_insn; switch (fp->code) { /* All arithmetic insns and skb loads map as-is. */ case BPF_ALU | BPF_ADD | BPF_X: case BPF_ALU | BPF_ADD | BPF_K: case BPF_ALU | BPF_SUB | BPF_X: case BPF_ALU | BPF_SUB | BPF_K: case BPF_ALU | BPF_AND | BPF_X: case BPF_ALU | BPF_AND | BPF_K: case BPF_ALU | BPF_OR | BPF_X: case BPF_ALU | BPF_OR | BPF_K: case BPF_ALU | BPF_LSH | BPF_X: case BPF_ALU | BPF_LSH | BPF_K: case BPF_ALU | BPF_RSH | BPF_X: case BPF_ALU | BPF_RSH | BPF_K: case BPF_ALU | BPF_XOR | BPF_X: case BPF_ALU | BPF_XOR | BPF_K: case BPF_ALU | BPF_MUL | BPF_X: case BPF_ALU | BPF_MUL | BPF_K: case BPF_ALU | BPF_DIV | BPF_X: case BPF_ALU | BPF_DIV | BPF_K: case BPF_ALU | BPF_MOD | BPF_X: case BPF_ALU | BPF_MOD | BPF_K: case BPF_ALU | BPF_NEG: case BPF_LD | BPF_ABS | BPF_W: case BPF_LD | BPF_ABS | BPF_H: case BPF_LD | BPF_ABS | BPF_B: case BPF_LD | BPF_IND | BPF_W: case BPF_LD | BPF_IND | BPF_H: case BPF_LD | BPF_IND | BPF_B: /* Check for overloaded BPF extension and * directly convert it if found, otherwise * just move on with mapping. 
*/ if (BPF_CLASS(fp->code) == BPF_LD && BPF_MODE(fp->code) == BPF_ABS && convert_bpf_extensions(fp, &insn)) break; if (BPF_CLASS(fp->code) == BPF_LD && convert_bpf_ld_abs(fp, &insn)) { *seen_ld_abs = true; break; } if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) || fp->code == (BPF_ALU | BPF_MOD | BPF_X)) { *insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X); /* Error with exception code on div/mod by 0. * For cBPF programs, this was always return 0. */ *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2); *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A); *insn++ = BPF_EXIT_INSN(); } *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k); break; /* Jump transformation cannot use BPF block macros * everywhere as offset calculation and target updates * require a bit more work than the rest, i.e. jump * opcodes map as-is, but offsets need adjustment. */ #define BPF_EMIT_JMP \ do { \ const s32 off_min = S16_MIN, off_max = S16_MAX; \ s32 off; \ \ if (target >= len || target < 0) \ goto err; \ off = addrs ? addrs[target] - addrs[i] - 1 : 0; \ /* Adjust pc relative offset for 2nd or 3rd insn. */ \ off -= insn - tmp_insns; \ /* Reject anything not fitting into insn->off. */ \ if (off < off_min || off > off_max) \ goto err; \ insn->off = off; \ } while (0) case BPF_JMP | BPF_JA: target = i + fp->k + 1; insn->code = fp->code; BPF_EMIT_JMP; break; case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JEQ | BPF_X: case BPF_JMP | BPF_JSET | BPF_K: case BPF_JMP | BPF_JSET | BPF_X: case BPF_JMP | BPF_JGT | BPF_K: case BPF_JMP | BPF_JGT | BPF_X: case BPF_JMP | BPF_JGE | BPF_K: case BPF_JMP | BPF_JGE | BPF_X: if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) { /* BPF immediates are signed, zero extend * immediate into tmp register and use it * in compare insn. */ *insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k); insn->dst_reg = BPF_REG_A; insn->src_reg = BPF_REG_TMP; bpf_src = BPF_X; } else { insn->dst_reg = BPF_REG_A; insn->imm = fp->k; bpf_src = BPF_SRC(fp->code); insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0; } /* Common case where 'jump_false' is next insn. */ if (fp->jf == 0) { insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src; target = i + fp->jt + 1; BPF_EMIT_JMP; break; } /* Convert some jumps when 'jump_true' is next insn. */ if (fp->jt == 0) { switch (BPF_OP(fp->code)) { case BPF_JEQ: insn->code = BPF_JMP | BPF_JNE | bpf_src; break; case BPF_JGT: insn->code = BPF_JMP | BPF_JLE | bpf_src; break; case BPF_JGE: insn->code = BPF_JMP | BPF_JLT | bpf_src; break; default: goto jmp_rest; } target = i + fp->jf + 1; BPF_EMIT_JMP; break; } jmp_rest: /* Other jumps are mapped into two insns: Jxx and JA. */ target = i + fp->jt + 1; insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src; BPF_EMIT_JMP; insn++; insn->code = BPF_JMP | BPF_JA; target = i + fp->jf + 1; BPF_EMIT_JMP; break; /* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */ case BPF_LDX | BPF_MSH | BPF_B: { struct sock_filter tmp = { .code = BPF_LD | BPF_ABS | BPF_B, .k = fp->k, }; *seen_ld_abs = true; /* X = A */ *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A); /* A = BPF_R0 = *(u8 *) (skb->data + K) */ convert_bpf_ld_abs(&tmp, &insn); insn++; /* A &= 0xf */ *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf); /* A <<= 2 */ *insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2); /* tmp = X */ *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_X); /* X = A */ *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A); /* A = tmp */ *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP); break; } /* RET_K is remaped into 2 insns. 
RET_A case doesn't need an * extra mov as BPF_REG_0 is already mapped into BPF_REG_A. */ case BPF_RET | BPF_A: case BPF_RET | BPF_K: if (BPF_RVAL(fp->code) == BPF_K) *insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0, 0, fp->k); *insn = BPF_EXIT_INSN(); break; /* Store to stack. */ case BPF_ST: case BPF_STX: stack_off = fp->k * 4 + 4; *insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) == BPF_ST ? BPF_REG_A : BPF_REG_X, -stack_off); /* check_load_and_stores() verifies that classic BPF can * load from stack only after write, so tracking * stack_depth for ST|STX insns is enough */ if (new_prog && new_prog->aux->stack_depth < stack_off) new_prog->aux->stack_depth = stack_off; break; /* Load from stack. */ case BPF_LD | BPF_MEM: case BPF_LDX | BPF_MEM: stack_off = fp->k * 4 + 4; *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ? BPF_REG_A : BPF_REG_X, BPF_REG_FP, -stack_off); break; /* A = K or X = K */ case BPF_LD | BPF_IMM: case BPF_LDX | BPF_IMM: *insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ? BPF_REG_A : BPF_REG_X, fp->k); break; /* X = A */ case BPF_MISC | BPF_TAX: *insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A); break; /* A = X */ case BPF_MISC | BPF_TXA: *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X); break; /* A = skb->len or X = skb->len */ case BPF_LD | BPF_W | BPF_LEN: case BPF_LDX | BPF_W | BPF_LEN: *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ? BPF_REG_A : BPF_REG_X, BPF_REG_CTX, offsetof(struct sk_buff, len)); break; /* Access seccomp_data fields. */ case BPF_LDX | BPF_ABS | BPF_W: /* A = *(u32 *) (ctx + K) */ *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k); break; /* Unknown instruction. */ default: goto err; } insn++; if (new_prog) memcpy(new_insn, tmp_insns, sizeof(*insn) * (insn - tmp_insns)); new_insn += insn - tmp_insns; } if (!new_prog) { /* Only calculating new length. */ *new_len = new_insn - first_insn; if (*seen_ld_abs) *new_len += 4; /* Prologue bits. */ return 0; } pass++; if (new_flen != new_insn - first_insn) { new_flen = new_insn - first_insn; if (pass > 2) goto err; goto do_pass; } kfree(addrs); BUG_ON(*new_len != new_flen); return 0; err: kfree(addrs); return -EINVAL; } /* Security: * * As we dont want to clear mem[] array for each packet going through * __bpf_prog_run(), we check that filter loaded by user never try to read * a cell if not previously written, and we check all branches to be sure * a malicious user doesn't try to abuse us. 
*/ static int check_load_and_stores(const struct sock_filter *filter, int flen) { u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */ int pc, ret = 0; BUILD_BUG_ON(BPF_MEMWORDS > 16); masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL); if (!masks) return -ENOMEM; memset(masks, 0xff, flen * sizeof(*masks)); for (pc = 0; pc < flen; pc++) { memvalid &= masks[pc]; switch (filter[pc].code) { case BPF_ST: case BPF_STX: memvalid |= (1 << filter[pc].k); break; case BPF_LD | BPF_MEM: case BPF_LDX | BPF_MEM: if (!(memvalid & (1 << filter[pc].k))) { ret = -EINVAL; goto error; } break; case BPF_JMP | BPF_JA: /* A jump must set masks on target */ masks[pc + 1 + filter[pc].k] &= memvalid; memvalid = ~0; break; case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JEQ | BPF_X: case BPF_JMP | BPF_JGE | BPF_K: case BPF_JMP | BPF_JGE | BPF_X: case BPF_JMP | BPF_JGT | BPF_K: case BPF_JMP | BPF_JGT | BPF_X: case BPF_JMP | BPF_JSET | BPF_K: case BPF_JMP | BPF_JSET | BPF_X: /* A jump must set masks on targets */ masks[pc + 1 + filter[pc].jt] &= memvalid; masks[pc + 1 + filter[pc].jf] &= memvalid; memvalid = ~0; break; } } error: kfree(masks); return ret; } static bool chk_code_allowed(u16 code_to_probe) { static const bool codes[] = { /* 32 bit ALU operations */ [BPF_ALU | BPF_ADD | BPF_K] = true, [BPF_ALU | BPF_ADD | BPF_X] = true, [BPF_ALU | BPF_SUB | BPF_K] = true, [BPF_ALU | BPF_SUB | BPF_X] = true, [BPF_ALU | BPF_MUL | BPF_K] = true, [BPF_ALU | BPF_MUL | BPF_X] = true, [BPF_ALU | BPF_DIV | BPF_K] = true, [BPF_ALU | BPF_DIV | BPF_X] = true, [BPF_ALU | BPF_MOD | BPF_K] = true, [BPF_ALU | BPF_MOD | BPF_X] = true, [BPF_ALU | BPF_AND | BPF_K] = true, [BPF_ALU | BPF_AND | BPF_X] = true, [BPF_ALU | BPF_OR | BPF_K] = true, [BPF_ALU | BPF_OR | BPF_X] = true, [BPF_ALU | BPF_XOR | BPF_K] = true, [BPF_ALU | BPF_XOR | BPF_X] = true, [BPF_ALU | BPF_LSH | BPF_K] = true, [BPF_ALU | BPF_LSH | BPF_X] = true, [BPF_ALU | BPF_RSH | BPF_K] = true, [BPF_ALU | BPF_RSH | BPF_X] = true, [BPF_ALU | BPF_NEG] = true, /* Load instructions */ [BPF_LD | BPF_W | BPF_ABS] = true, [BPF_LD | BPF_H | BPF_ABS] = true, [BPF_LD | BPF_B | BPF_ABS] = true, [BPF_LD | BPF_W | BPF_LEN] = true, [BPF_LD | BPF_W | BPF_IND] = true, [BPF_LD | BPF_H | BPF_IND] = true, [BPF_LD | BPF_B | BPF_IND] = true, [BPF_LD | BPF_IMM] = true, [BPF_LD | BPF_MEM] = true, [BPF_LDX | BPF_W | BPF_LEN] = true, [BPF_LDX | BPF_B | BPF_MSH] = true, [BPF_LDX | BPF_IMM] = true, [BPF_LDX | BPF_MEM] = true, /* Store instructions */ [BPF_ST] = true, [BPF_STX] = true, /* Misc instructions */ [BPF_MISC | BPF_TAX] = true, [BPF_MISC | BPF_TXA] = true, /* Return instructions */ [BPF_RET | BPF_K] = true, [BPF_RET | BPF_A] = true, /* Jump instructions */ [BPF_JMP | BPF_JA] = true, [BPF_JMP | BPF_JEQ | BPF_K] = true, [BPF_JMP | BPF_JEQ | BPF_X] = true, [BPF_JMP | BPF_JGE | BPF_K] = true, [BPF_JMP | BPF_JGE | BPF_X] = true, [BPF_JMP | BPF_JGT | BPF_K] = true, [BPF_JMP | BPF_JGT | BPF_X] = true, [BPF_JMP | BPF_JSET | BPF_K] = true, [BPF_JMP | BPF_JSET | BPF_X] = true, }; if (code_to_probe >= ARRAY_SIZE(codes)) return false; return codes[code_to_probe]; } static bool bpf_check_basics_ok(const struct sock_filter *filter, unsigned int flen) { if (filter == NULL) return false; if (flen == 0 || flen > BPF_MAXINSNS) return false; return true; } /** * bpf_check_classic - verify socket filter code * @filter: filter to verify * @flen: length of filter * * Check the user's filter code. If we let some ugly * filter code slip through kaboom! 
The filter must contain * no references or jumps that are out of range, no illegal * instructions, and must end with a RET instruction. * * All jumps are forward as they are not signed. * * Returns 0 if the rule set is legal or -EINVAL if not. */ static int bpf_check_classic(const struct sock_filter *filter, unsigned int flen) { bool anc_found; int pc; /* Check the filter code now */ for (pc = 0; pc < flen; pc++) { const struct sock_filter *ftest = &filter[pc]; /* May we actually operate on this code? */ if (!chk_code_allowed(ftest->code)) return -EINVAL; /* Some instructions need special checks */ switch (ftest->code) { case BPF_ALU | BPF_DIV | BPF_K: case BPF_ALU | BPF_MOD | BPF_K: /* Check for division by zero */ if (ftest->k == 0) return -EINVAL; break; case BPF_ALU | BPF_LSH | BPF_K: case BPF_ALU | BPF_RSH | BPF_K: if (ftest->k >= 32) return -EINVAL; break; case BPF_LD | BPF_MEM: case BPF_LDX | BPF_MEM: case BPF_ST: case BPF_STX: /* Check for invalid memory addresses */ if (ftest->k >= BPF_MEMWORDS) return -EINVAL; break; case BPF_JMP | BPF_JA: /* Note, the large ftest->k might cause loops. * Compare this with conditional jumps below, * where offsets are limited. --ANK (981016) */ if (ftest->k >= (unsigned int)(flen - pc - 1)) return -EINVAL; break; case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JEQ | BPF_X: case BPF_JMP | BPF_JGE | BPF_K: case BPF_JMP | BPF_JGE | BPF_X: case BPF_JMP | BPF_JGT | BPF_K: case BPF_JMP | BPF_JGT | BPF_X: case BPF_JMP | BPF_JSET | BPF_K: case BPF_JMP | BPF_JSET | BPF_X: /* Both conditionals must be safe */ if (pc + ftest->jt + 1 >= flen || pc + ftest->jf + 1 >= flen) return -EINVAL; break; case BPF_LD | BPF_W | BPF_ABS: case BPF_LD | BPF_H | BPF_ABS: case BPF_LD | BPF_B | BPF_ABS: anc_found = false; if (bpf_anc_helper(ftest) & BPF_ANC) anc_found = true; /* Ancillary operation unknown or unsupported */ if (anc_found == false && ftest->k >= SKF_AD_OFF) return -EINVAL; } } /* Last instruction must be a RET code */ switch (filter[flen - 1].code) { case BPF_RET | BPF_K: case BPF_RET | BPF_A: return check_load_and_stores(filter, flen); } return -EINVAL; } static int bpf_prog_store_orig_filter(struct bpf_prog *fp, const struct sock_fprog *fprog) { unsigned int fsize = bpf_classic_proglen(fprog); struct sock_fprog_kern *fkprog; fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL); if (!fp->orig_prog) return -ENOMEM; fkprog = fp->orig_prog; fkprog->len = fprog->len; fkprog->filter = kmemdup(fp->insns, fsize, GFP_KERNEL | __GFP_NOWARN); if (!fkprog->filter) { kfree(fp->orig_prog); return -ENOMEM; } return 0; } static void bpf_release_orig_filter(struct bpf_prog *fp) { struct sock_fprog_kern *fprog = fp->orig_prog; if (fprog) { kfree(fprog->filter); kfree(fprog); } } static void __bpf_prog_release(struct bpf_prog *prog) { if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) { bpf_prog_put(prog); } else { bpf_release_orig_filter(prog); bpf_prog_free(prog); } } static void __sk_filter_release(struct sk_filter *fp) { __bpf_prog_release(fp->prog); kfree(fp); } /** * sk_filter_release_rcu - Release a socket filter by rcu_head * @rcu: rcu_head that contains the sk_filter to free */ static void sk_filter_release_rcu(struct rcu_head *rcu) { struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu); __sk_filter_release(fp); } /** * sk_filter_release - release a socket filter * @fp: filter to remove * * Remove a filter from a socket and release its resources. 
*/ static void sk_filter_release(struct sk_filter *fp) { if (refcount_dec_and_test(&fp->refcnt)) call_rcu(&fp->rcu, sk_filter_release_rcu); } void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp) { u32 filter_size = bpf_prog_size(fp->prog->len); atomic_sub(filter_size, &sk->sk_omem_alloc); sk_filter_release(fp); } /* try to charge the socket memory if there is space available * return true on success */ static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp) { u32 filter_size = bpf_prog_size(fp->prog->len); /* same check as in sock_kmalloc() */ if (filter_size <= sysctl_optmem_max && atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) { atomic_add(filter_size, &sk->sk_omem_alloc); return true; } return false; } bool sk_filter_charge(struct sock *sk, struct sk_filter *fp) { if (!refcount_inc_not_zero(&fp->refcnt)) return false; if (!__sk_filter_charge(sk, fp)) { sk_filter_release(fp); return false; } return true; } static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp) { struct sock_filter *old_prog; struct bpf_prog *old_fp; int err, new_len, old_len = fp->len; bool seen_ld_abs = false; /* We are free to overwrite insns et al right here as it * won't be used at this point in time anymore internally * after the migration to the internal BPF instruction * representation. */ BUILD_BUG_ON(sizeof(struct sock_filter) != sizeof(struct bpf_insn)); /* Conversion cannot happen on overlapping memory areas, * so we need to keep the user BPF around until the 2nd * pass. At this time, the user BPF is stored in fp->insns. */ old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter), GFP_KERNEL | __GFP_NOWARN); if (!old_prog) { err = -ENOMEM; goto out_err; } /* 1st pass: calculate the new program length. */ err = bpf_convert_filter(old_prog, old_len, NULL, &new_len, &seen_ld_abs); if (err) goto out_err_free; /* Expand fp for appending the new filter representation. */ old_fp = fp; fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0); if (!fp) { /* The old_fp is still around in case we couldn't * allocate new memory, so uncharge on that one. */ fp = old_fp; err = -ENOMEM; goto out_err_free; } fp->len = new_len; /* 2nd pass: remap sock_filter insns into bpf_insn insns. */ err = bpf_convert_filter(old_prog, old_len, fp, &new_len, &seen_ld_abs); if (err) /* 2nd bpf_convert_filter() can fail only if it fails * to allocate memory, remapping must succeed. Note, * that at this time old_fp has already been released * by krealloc(). */ goto out_err_free; fp = bpf_prog_select_runtime(fp, &err); if (err) goto out_err_free; kfree(old_prog); return fp; out_err_free: kfree(old_prog); out_err: __bpf_prog_release(fp); return ERR_PTR(err); } static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp, bpf_aux_classic_check_t trans) { int err; fp->bpf_func = NULL; fp->jited = 0; err = bpf_check_classic(fp->insns, fp->len); if (err) { __bpf_prog_release(fp); return ERR_PTR(err); } /* There might be additional checks and transformations * needed on classic filters, f.e. in case of seccomp. */ if (trans) { err = trans(fp->insns, fp->len); if (err) { __bpf_prog_release(fp); return ERR_PTR(err); } } /* Probe if we can JIT compile the filter and if so, do * the compilation of the filter. */ bpf_jit_compile(fp); /* JIT compiler couldn't process this filter, so do the * internal BPF translation for the optimized interpreter. 
*/ if (!fp->jited) fp = bpf_migrate_filter(fp); return fp; } /** * bpf_prog_create - create an unattached filter * @pfp: the unattached filter that is created * @fprog: the filter program * * Create a filter independent of any socket. We first run some * sanity checks on it to make sure it does not explode on us later. * If an error occurs or there is insufficient memory for the filter * a negative errno code is returned. On success the return is zero. */ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog) { unsigned int fsize = bpf_classic_proglen(fprog); struct bpf_prog *fp; /* Make sure new filter is there and in the right amounts. */ if (!bpf_check_basics_ok(fprog->filter, fprog->len)) return -EINVAL; fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); if (!fp) return -ENOMEM; memcpy(fp->insns, fprog->filter, fsize); fp->len = fprog->len; /* Since unattached filters are not copied back to user * space through sk_get_filter(), we do not need to hold * a copy here, and can spare us the work. */ fp->orig_prog = NULL; /* bpf_prepare_filter() already takes care of freeing * memory in case something goes wrong. */ fp = bpf_prepare_filter(fp, NULL); if (IS_ERR(fp)) return PTR_ERR(fp); *pfp = fp; return 0; } EXPORT_SYMBOL_GPL(bpf_prog_create); /** * bpf_prog_create_from_user - create an unattached filter from user buffer * @pfp: the unattached filter that is created * @fprog: the filter program * @trans: post-classic verifier transformation handler * @save_orig: save classic BPF program * * This function effectively does the same as bpf_prog_create(), only * that it builds up its insns buffer from user space provided buffer. * It also allows for passing a bpf_aux_classic_check_t handler. */ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog, bpf_aux_classic_check_t trans, bool save_orig) { unsigned int fsize = bpf_classic_proglen(fprog); struct bpf_prog *fp; int err; /* Make sure new filter is there and in the right amounts. */ if (!bpf_check_basics_ok(fprog->filter, fprog->len)) return -EINVAL; fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); if (!fp) return -ENOMEM; if (copy_from_user(fp->insns, fprog->filter, fsize)) { __bpf_prog_free(fp); return -EFAULT; } fp->len = fprog->len; fp->orig_prog = NULL; if (save_orig) { err = bpf_prog_store_orig_filter(fp, fprog); if (err) { __bpf_prog_free(fp); return -ENOMEM; } } /* bpf_prepare_filter() already takes care of freeing * memory in case something goes wrong. */ fp = bpf_prepare_filter(fp, trans); if (IS_ERR(fp)) return PTR_ERR(fp); *pfp = fp; return 0; } EXPORT_SYMBOL_GPL(bpf_prog_create_from_user); void bpf_prog_destroy(struct bpf_prog *fp) { __bpf_prog_release(fp); } EXPORT_SYMBOL_GPL(bpf_prog_destroy); static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk) { struct sk_filter *fp, *old_fp; fp = kmalloc(sizeof(*fp), GFP_KERNEL); if (!fp) return -ENOMEM; fp->prog = prog; if (!__sk_filter_charge(sk, fp)) { kfree(fp); return -ENOMEM; } refcount_set(&fp->refcnt, 1); old_fp = rcu_dereference_protected(sk->sk_filter, lockdep_sock_is_held(sk)); rcu_assign_pointer(sk->sk_filter, fp); if (old_fp) sk_filter_uncharge(sk, old_fp); return 0; } static struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk) { unsigned int fsize = bpf_classic_proglen(fprog); struct bpf_prog *prog; int err; if (sock_flag(sk, SOCK_FILTER_LOCKED)) return ERR_PTR(-EPERM); /* Make sure new filter is there and in the right amounts. 
*/ if (!bpf_check_basics_ok(fprog->filter, fprog->len)) return ERR_PTR(-EINVAL); prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); if (!prog) return ERR_PTR(-ENOMEM); if (copy_from_user(prog->insns, fprog->filter, fsize)) { __bpf_prog_free(prog); return ERR_PTR(-EFAULT); } prog->len = fprog->len; err = bpf_prog_store_orig_filter(prog, fprog); if (err) { __bpf_prog_free(prog); return ERR_PTR(-ENOMEM); } /* bpf_prepare_filter() already takes care of freeing * memory in case something goes wrong. */ return bpf_prepare_filter(prog, NULL); } /** * sk_attach_filter - attach a socket filter * @fprog: the filter program * @sk: the socket to use * * Attach the user's filter code. We first run some sanity checks on * it to make sure it does not explode on us later. If an error * occurs or there is insufficient memory for the filter a negative * errno code is returned. On success the return is zero. */ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) { struct bpf_prog *prog = __get_filter(fprog, sk); int err; if (IS_ERR(prog)) return PTR_ERR(prog); err = __sk_attach_prog(prog, sk); if (err < 0) { __bpf_prog_release(prog); return err; } return 0; } EXPORT_SYMBOL_GPL(sk_attach_filter); int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk) { struct bpf_prog *prog = __get_filter(fprog, sk); int err; if (IS_ERR(prog)) return PTR_ERR(prog); if (bpf_prog_size(prog->len) > sysctl_optmem_max) err = -ENOMEM; else err = reuseport_attach_prog(sk, prog); if (err) __bpf_prog_release(prog); return err; } static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk) { if (sock_flag(sk, SOCK_FILTER_LOCKED)) return ERR_PTR(-EPERM); return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER); } int sk_attach_bpf(u32 ufd, struct sock *sk) { struct bpf_prog *prog = __get_bpf(ufd, sk); int err; if (IS_ERR(prog)) return PTR_ERR(prog); err = __sk_attach_prog(prog, sk); if (err < 0) { bpf_prog_put(prog); return err; } return 0; } int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk) { struct bpf_prog *prog; int err; if (sock_flag(sk, SOCK_FILTER_LOCKED)) return -EPERM; prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER); if (IS_ERR(prog) && PTR_ERR(prog) == -EINVAL) prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SK_REUSEPORT); if (IS_ERR(prog)) return PTR_ERR(prog); if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) { /* Like other non BPF_PROG_TYPE_SOCKET_FILTER * bpf prog (e.g. sockmap). It depends on the * limitation imposed by bpf_prog_load(). * Hence, sysctl_optmem_max is not checked. 
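 *
 * The checks below therefore only constrain the socket itself: a
 * BPF_PROG_TYPE_SK_REUSEPORT program can only be attached to TCP or
 * UDP sockets in the AF_INET or AF_INET6 families.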
*/ if ((sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_DGRAM) || (sk->sk_protocol != IPPROTO_UDP && sk->sk_protocol != IPPROTO_TCP) || (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) { err = -ENOTSUPP; goto err_prog_put; } } else { /* BPF_PROG_TYPE_SOCKET_FILTER */ if (bpf_prog_size(prog->len) > sysctl_optmem_max) { err = -ENOMEM; goto err_prog_put; } } err = reuseport_attach_prog(sk, prog); err_prog_put: if (err) bpf_prog_put(prog); return err; } void sk_reuseport_prog_free(struct bpf_prog *prog) { if (!prog) return; if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) bpf_prog_put(prog); else bpf_prog_destroy(prog); } struct bpf_scratchpad { union { __be32 diff[MAX_BPF_STACK / sizeof(__be32)]; u8 buff[MAX_BPF_STACK]; }; }; static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp); static inline int __bpf_try_make_writable(struct sk_buff *skb, unsigned int write_len) { return skb_ensure_writable(skb, write_len); } static inline int bpf_try_make_writable(struct sk_buff *skb, unsigned int write_len) { int err = __bpf_try_make_writable(skb, write_len); bpf_compute_data_pointers(skb); return err; } static int bpf_try_make_head_writable(struct sk_buff *skb) { return bpf_try_make_writable(skb, skb_headlen(skb)); } static inline void bpf_push_mac_rcsum(struct sk_buff *skb) { if (skb_at_tc_ingress(skb)) skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len); } static inline void bpf_pull_mac_rcsum(struct sk_buff *skb) { if (skb_at_tc_ingress(skb)) skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len); } BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset, const void *, from, u32, len, u64, flags) { void *ptr; if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH))) return -EINVAL; if (unlikely(offset > 0xffff)) return -EFAULT; if (unlikely(bpf_try_make_writable(skb, offset + len))) return -EFAULT; ptr = skb->data + offset; if (flags & BPF_F_RECOMPUTE_CSUM) __skb_postpull_rcsum(skb, ptr, len, offset); memcpy(ptr, from, len); if (flags & BPF_F_RECOMPUTE_CSUM) __skb_postpush_rcsum(skb, ptr, len, offset); if (flags & BPF_F_INVALIDATE_HASH) skb_clear_hash(skb); return 0; } static const struct bpf_func_proto bpf_skb_store_bytes_proto = { .func = bpf_skb_store_bytes, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_PTR_TO_MEM, .arg4_type = ARG_CONST_SIZE, .arg5_type = ARG_ANYTHING, }; BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset, void *, to, u32, len) { void *ptr; if (unlikely(offset > 0xffff)) goto err_clear; ptr = skb_header_pointer(skb, offset, len, to); if (unlikely(!ptr)) goto err_clear; if (ptr != to) memcpy(to, ptr, len); return 0; err_clear: memset(to, 0, len); return -EFAULT; } static const struct bpf_func_proto bpf_skb_load_bytes_proto = { .func = bpf_skb_load_bytes, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_PTR_TO_UNINIT_MEM, .arg4_type = ARG_CONST_SIZE, }; BPF_CALL_4(bpf_flow_dissector_load_bytes, const struct bpf_flow_dissector *, ctx, u32, offset, void *, to, u32, len) { void *ptr; if (unlikely(offset > 0xffff)) goto err_clear; if (unlikely(!ctx->skb)) goto err_clear; ptr = skb_header_pointer(ctx->skb, offset, len, to); if (unlikely(!ptr)) goto err_clear; if (ptr != to) memcpy(to, ptr, len); return 0; err_clear: memset(to, 0, len); return -EFAULT; } static const struct bpf_func_proto bpf_flow_dissector_load_bytes_proto = { .func = bpf_flow_dissector_load_bytes, .gpl_only = false, 
.ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_PTR_TO_UNINIT_MEM, .arg4_type = ARG_CONST_SIZE, }; BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb, u32, offset, void *, to, u32, len, u32, start_header) { u8 *end = skb_tail_pointer(skb); u8 *net = skb_network_header(skb); u8 *mac = skb_mac_header(skb); u8 *ptr; if (unlikely(offset > 0xffff || len > (end - mac))) goto err_clear; switch (start_header) { case BPF_HDR_START_MAC: ptr = mac + offset; break; case BPF_HDR_START_NET: ptr = net + offset; break; default: goto err_clear; } if (likely(ptr >= mac && ptr + len <= end)) { memcpy(to, ptr, len); return 0; } err_clear: memset(to, 0, len); return -EFAULT; } static const struct bpf_func_proto bpf_skb_load_bytes_relative_proto = { .func = bpf_skb_load_bytes_relative, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_PTR_TO_UNINIT_MEM, .arg4_type = ARG_CONST_SIZE, .arg5_type = ARG_ANYTHING, }; BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len) { /* Idea is the following: should the needed direct read/write * test fail during runtime, we can pull in more data and redo * again, since implicitly, we invalidate previous checks here. * * Or, since we know how much we need to make read/writeable, * this can be done once at the program beginning for direct * access case. By this we overcome limitations of only current * headroom being accessible. */ return bpf_try_make_writable(skb, len ? : skb_headlen(skb)); } static const struct bpf_func_proto bpf_skb_pull_data_proto = { .func = bpf_skb_pull_data, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, }; BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk) { return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL; } static const struct bpf_func_proto bpf_sk_fullsock_proto = { .func = bpf_sk_fullsock, .gpl_only = false, .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_SOCK_COMMON, }; static inline int sk_skb_try_make_writable(struct sk_buff *skb, unsigned int write_len) { int err = __bpf_try_make_writable(skb, write_len); bpf_compute_data_end_sk_skb(skb); return err; } BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len) { /* Idea is the following: should the needed direct read/write * test fail during runtime, we can pull in more data and redo * again, since implicitly, we invalidate previous checks here. * * Or, since we know how much we need to make read/writeable, * this can be done once at the program beginning for direct * access case. By this we overcome limitations of only current * headroom being accessible. */ return sk_skb_try_make_writable(skb, len ? 
: skb_headlen(skb)); } static const struct bpf_func_proto sk_skb_pull_data_proto = { .func = sk_skb_pull_data, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, }; BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset, u64, from, u64, to, u64, flags) { __sum16 *ptr; if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK))) return -EINVAL; if (unlikely(offset > 0xffff || offset & 1)) return -EFAULT; if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr)))) return -EFAULT; ptr = (__sum16 *)(skb->data + offset); switch (flags & BPF_F_HDR_FIELD_MASK) { case 0: if (unlikely(from != 0)) return -EINVAL; csum_replace_by_diff(ptr, to); break; case 2: csum_replace2(ptr, from, to); break; case 4: csum_replace4(ptr, from, to); break; default: return -EINVAL; } return 0; } static const struct bpf_func_proto bpf_l3_csum_replace_proto = { .func = bpf_l3_csum_replace, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset, u64, from, u64, to, u64, flags) { bool is_pseudo = flags & BPF_F_PSEUDO_HDR; bool is_mmzero = flags & BPF_F_MARK_MANGLED_0; bool do_mforce = flags & BPF_F_MARK_ENFORCE; __sum16 *ptr; if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE | BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK))) return -EINVAL; if (unlikely(offset > 0xffff || offset & 1)) return -EFAULT; if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr)))) return -EFAULT; ptr = (__sum16 *)(skb->data + offset); if (is_mmzero && !do_mforce && !*ptr) return 0; switch (flags & BPF_F_HDR_FIELD_MASK) { case 0: if (unlikely(from != 0)) return -EINVAL; inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo); break; case 2: inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo); break; case 4: inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo); break; default: return -EINVAL; } if (is_mmzero && !*ptr) *ptr = CSUM_MANGLED_0; return 0; } static const struct bpf_func_proto bpf_l4_csum_replace_proto = { .func = bpf_l4_csum_replace, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size, __be32 *, to, u32, to_size, __wsum, seed) { struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp); u32 diff_size = from_size + to_size; int i, j = 0; /* This is quite flexible, some examples: * * from_size == 0, to_size > 0, seed := csum --> pushing data * from_size > 0, to_size == 0, seed := csum --> pulling data * from_size > 0, to_size > 0, seed := 0 --> diffing data * * Even for diffing, from_size and to_size don't need to be equal. 
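 *
 * A minimal sketch of the diffing case (not from this file; field
 * names are illustrative), as a BPF program might use it to fix up
 * an L4 checksum after rewriting a 4 byte field:
 *
 *	__be32 old_val = ..., new_val = ...;
 *	__wsum diff = bpf_csum_diff(&old_val, 4, &new_val, 4, 0);
 *	bpf_l4_csum_replace(skb, csum_off, 0, diff, BPF_F_PSEUDO_HDR);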
*/ if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) || diff_size > sizeof(sp->diff))) return -EINVAL; for (i = 0; i < from_size / sizeof(__be32); i++, j++) sp->diff[j] = ~from[i]; for (i = 0; i < to_size / sizeof(__be32); i++, j++) sp->diff[j] = to[i]; return csum_partial(sp->diff, diff_size, seed); } static const struct bpf_func_proto bpf_csum_diff_proto = { .func = bpf_csum_diff, .gpl_only = false, .pkt_access = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_MEM_OR_NULL, .arg2_type = ARG_CONST_SIZE_OR_ZERO, .arg3_type = ARG_PTR_TO_MEM_OR_NULL, .arg4_type = ARG_CONST_SIZE_OR_ZERO, .arg5_type = ARG_ANYTHING, }; BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum) { /* The interface is to be used in combination with bpf_csum_diff() * for direct packet writes. csum rotation for alignment as well * as emulating csum_sub() can be done from the eBPF program. */ if (skb->ip_summed == CHECKSUM_COMPLETE) return (skb->csum = csum_add(skb->csum, csum)); return -ENOTSUPP; } static const struct bpf_func_proto bpf_csum_update_proto = { .func = bpf_csum_update, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, }; static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb) { return dev_forward_skb(dev, skb); } static inline int __bpf_rx_skb_no_mac(struct net_device *dev, struct sk_buff *skb) { int ret = ____dev_forward_skb(dev, skb); if (likely(!ret)) { skb->dev = dev; ret = netif_rx(skb); } return ret; } static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) { int ret; if (dev_xmit_recursion()) { net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n"); kfree_skb(skb); return -ENETDOWN; } skb->dev = dev; dev_xmit_recursion_inc(); ret = dev_queue_xmit(skb); dev_xmit_recursion_dec(); return ret; } static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, u32 flags) { unsigned int mlen = skb_network_offset(skb); if (mlen) { __skb_pull(skb, mlen); /* At ingress, the mac header has already been pulled once. * At egress, skb_postpull_rcsum has to be done in case that * the skb originates from ingress (i.e. is a forwarded skb) * to ensure that rcsum starts at the net header. */ if (!skb_at_tc_ingress(skb)) skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); } skb_pop_mac_header(skb); skb_reset_mac_len(skb); return flags & BPF_F_INGRESS ? __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb); } static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev, u32 flags) { /* Verify that a link layer header is carried */ if (unlikely(skb->mac_header >= skb->network_header)) { kfree_skb(skb); return -ERANGE; } bpf_push_mac_rcsum(skb); return flags & BPF_F_INGRESS ? __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); } static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev, u32 flags) { if (dev_is_mac_header_xmit(dev)) return __bpf_redirect_common(skb, dev, flags); else return __bpf_redirect_no_mac(skb, dev, flags); } BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags) { struct net_device *dev; struct sk_buff *clone; int ret; if (unlikely(flags & ~(BPF_F_INGRESS))) return -EINVAL; dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex); if (unlikely(!dev)) return -EINVAL; clone = skb_clone(skb, GFP_ATOMIC); if (unlikely(!clone)) return -ENOMEM; /* For direct write, we need to keep the invariant that the skbs * we're dealing with need to be uncloned.
Should uncloning fail * here, we need to free the just generated clone to unclone once * again. */ ret = bpf_try_make_head_writable(skb); if (unlikely(ret)) { kfree_skb(clone); return -ENOMEM; } return __bpf_redirect(clone, dev, flags); } static const struct bpf_func_proto bpf_clone_redirect_proto = { .func = bpf_clone_redirect, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, }; DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info); EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info); BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags) { struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); if (unlikely(flags & ~(BPF_F_INGRESS))) return TC_ACT_SHOT; ri->ifindex = ifindex; ri->flags = flags; return TC_ACT_REDIRECT; } int skb_do_redirect(struct sk_buff *skb) { struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); struct net_device *dev; dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex); ri->ifindex = 0; if (unlikely(!dev)) { kfree_skb(skb); return -EINVAL; } return __bpf_redirect(skb, dev, ri->flags); } static const struct bpf_func_proto bpf_redirect_proto = { .func = bpf_redirect, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_ANYTHING, .arg2_type = ARG_ANYTHING, }; BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg *, msg, u32, bytes) { msg->apply_bytes = bytes; return 0; } static const struct bpf_func_proto bpf_msg_apply_bytes_proto = { .func = bpf_msg_apply_bytes, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, }; BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes) { msg->cork_bytes = bytes; return 0; } static const struct bpf_func_proto bpf_msg_cork_bytes_proto = { .func = bpf_msg_cork_bytes, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, }; BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start, u32, end, u64, flags) { u32 len = 0, offset = 0, copy = 0, poffset = 0, bytes = end - start; u32 first_sge, last_sge, i, shift, bytes_sg_total; struct scatterlist *sge; u8 *raw, *to, *from; struct page *page; if (unlikely(flags || end <= start)) return -EINVAL; /* First find the starting scatterlist element */ i = msg->sg.start; do { len = sk_msg_elem(msg, i)->length; if (start < offset + len) break; offset += len; sk_msg_iter_var_next(i); } while (i != msg->sg.end); if (unlikely(start >= offset + len)) return -EINVAL; first_sge = i; /* The start may point into the sg element so we need to also * account for the headroom. */ bytes_sg_total = start - offset + bytes; if (!msg->sg.copy[i] && bytes_sg_total <= len) goto out; /* At this point we need to linearize multiple scatterlist * elements or a single shared page. Either way we need to * copy into a linear buffer exclusively owned by BPF. Then * place the buffer in the scatterlist and fixup the original * entries by removing the entries now in the linear buffer * and shifting the remaining entries. For now we do not try * to copy partial entries to avoid complexity of running out * of sg_entry slots. The downside is reading a single byte * will copy the entire sg entry. 
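 *
 * E.g. with two sg entries of 100 and 200 bytes, pulling bytes
 * 90..110 spans both entries, so all 300 bytes are copied into the
 * new linear page.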
*/ do { copy += sk_msg_elem(msg, i)->length; sk_msg_iter_var_next(i); if (bytes_sg_total <= copy) break; } while (i != msg->sg.end); last_sge = i; if (unlikely(bytes_sg_total > copy)) return -EINVAL; page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP, get_order(copy)); if (unlikely(!page)) return -ENOMEM; raw = page_address(page); i = first_sge; do { sge = sk_msg_elem(msg, i); from = sg_virt(sge); len = sge->length; to = raw + poffset; memcpy(to, from, len); poffset += len; sge->length = 0; put_page(sg_page(sge)); sk_msg_iter_var_next(i); } while (i != last_sge); sg_set_page(&msg->sg.data[first_sge], page, copy, 0); /* To repair sg ring we need to shift entries. If we only * had a single entry though we can just replace it and * be done. Otherwise walk the ring and shift the entries. */ WARN_ON_ONCE(last_sge == first_sge); shift = last_sge > first_sge ? last_sge - first_sge - 1 : MAX_SKB_FRAGS - first_sge + last_sge - 1; if (!shift) goto out; i = first_sge; sk_msg_iter_var_next(i); do { u32 move_from; if (i + shift >= MAX_MSG_FRAGS) move_from = i + shift - MAX_MSG_FRAGS; else move_from = i + shift; if (move_from == msg->sg.end) break; msg->sg.data[i] = msg->sg.data[move_from]; msg->sg.data[move_from].length = 0; msg->sg.data[move_from].page_link = 0; msg->sg.data[move_from].offset = 0; sk_msg_iter_var_next(i); } while (1); msg->sg.end = msg->sg.end - shift > msg->sg.end ? msg->sg.end - shift + MAX_MSG_FRAGS : msg->sg.end - shift; out: msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset; msg->data_end = msg->data + bytes; return 0; } static const struct bpf_func_proto bpf_msg_pull_data_proto = { .func = bpf_msg_pull_data, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_ANYTHING, }; BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start, u32, len, u64, flags) { struct scatterlist sge, nsge, nnsge, rsge = {0}, *psge; u32 new, i = 0, l, space, copy = 0, offset = 0; u8 *raw, *to, *from; struct page *page; if (unlikely(flags)) return -EINVAL; /* First find the starting scatterlist element */ i = msg->sg.start; do { l = sk_msg_elem(msg, i)->length; if (start < offset + l) break; offset += l; sk_msg_iter_var_next(i); } while (i != msg->sg.end); if (start >= offset + l) return -EINVAL; space = MAX_MSG_FRAGS - sk_msg_elem_used(msg); /* If no space is available we will fall back to copy; we need at * least one scatterlist elem available to push data into * when start aligns to the beginning of an element or two * when it falls inside an element. We handle the start equals * offset case because it's the common case for inserting a * header.
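 *
 * E.g. pushing a 16 byte header at start == offset needs one free
 * slot for the new element; pushing into the middle of an element
 * needs two, since the tail of that element is split off.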
*/ if (!space || (space == 1 && start != offset)) copy = msg->sg.data[i].length; page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP, get_order(copy + len)); if (unlikely(!page)) return -ENOMEM; if (copy) { int front, back; raw = page_address(page); psge = sk_msg_elem(msg, i); front = start - offset; back = psge->length - front; from = sg_virt(psge); if (front) memcpy(raw, from, front); if (back) { from += front; to = raw + front + len; memcpy(to, from, back); } put_page(sg_page(psge)); } else if (start - offset) { psge = sk_msg_elem(msg, i); rsge = sk_msg_elem_cpy(msg, i); psge->length = start - offset; rsge.length -= psge->length; rsge.offset += start; sk_msg_iter_var_next(i); sg_unmark_end(psge); sk_msg_iter_next(msg, end); } /* Slot(s) to place newly allocated data */ new = i; /* Shift one or two slots as needed */ if (!copy) { sge = sk_msg_elem_cpy(msg, i); sk_msg_iter_var_next(i); sg_unmark_end(&sge); sk_msg_iter_next(msg, end); nsge = sk_msg_elem_cpy(msg, i); if (rsge.length) { sk_msg_iter_var_next(i); nnsge = sk_msg_elem_cpy(msg, i); } while (i != msg->sg.end) { msg->sg.data[i] = sge; sge = nsge; sk_msg_iter_var_next(i); if (rsge.length) { nsge = nnsge; nnsge = sk_msg_elem_cpy(msg, i); } else { nsge = sk_msg_elem_cpy(msg, i); } } } /* Place newly allocated data buffer */ sk_mem_charge(msg->sk, len); msg->sg.size += len; msg->sg.copy[new] = false; sg_set_page(&msg->sg.data[new], page, len + copy, 0); if (rsge.length) { get_page(sg_page(&rsge)); sk_msg_iter_var_next(new); msg->sg.data[new] = rsge; } sk_msg_compute_data_pointers(msg); return 0; } static const struct bpf_func_proto bpf_msg_push_data_proto = { .func = bpf_msg_push_data, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_ANYTHING, }; static void sk_msg_shift_left(struct sk_msg *msg, int i) { int prev; do { prev = i; sk_msg_iter_var_next(i); msg->sg.data[prev] = msg->sg.data[i]; } while (i != msg->sg.end); sk_msg_iter_prev(msg, end); } static void sk_msg_shift_right(struct sk_msg *msg, int i) { struct scatterlist tmp, sge; sk_msg_iter_next(msg, end); sge = sk_msg_elem_cpy(msg, i); sk_msg_iter_var_next(i); tmp = sk_msg_elem_cpy(msg, i); while (i != msg->sg.end) { msg->sg.data[i] = sge; sk_msg_iter_var_next(i); sge = tmp; tmp = sk_msg_elem_cpy(msg, i); } } BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start, u32, len, u64, flags) { u32 i = 0, l, space, offset = 0; u64 last = start + len; int pop; if (unlikely(flags)) return -EINVAL; /* First find the starting scatterlist element */ i = msg->sg.start; do { l = sk_msg_elem(msg, i)->length; if (start < offset + l) break; offset += l; sk_msg_iter_var_next(i); } while (i != msg->sg.end); /* Bounds checks: start and pop must be inside message */ if (start >= offset + l || last >= msg->sg.size) return -EINVAL; space = MAX_MSG_FRAGS - sk_msg_elem_used(msg); pop = len; /* --------------| offset * -| start |-------- len -------| * * |----- a ----|-------- pop -------|----- b ----| * |______________________________________________| length * * * a: region at front of scatter element to save * b: region at back of scatter element to save when length > A + pop * pop: region to pop from element, same as input 'pop' here will be * decremented below per iteration. * * Two top-level cases to handle when start != offset, first B is non * zero and second B is zero corresponding to when a pop includes more * than one element. 
* * Then if B is non-zero AND there is no space, allocate space and * compact A, B regions into a page. If there is space, shift the ring to * the right, freeing the next element in the ring to place B, leaving * A untouched except to reduce length. */ if (start != offset) { struct scatterlist *nsge, *sge = sk_msg_elem(msg, i); int a = start; int b = sge->length - pop - a; sk_msg_iter_var_next(i); if (pop < sge->length - a) { if (space) { sge->length = a; sk_msg_shift_right(msg, i); nsge = sk_msg_elem(msg, i); get_page(sg_page(sge)); sg_set_page(nsge, sg_page(sge), b, sge->offset + pop + a); } else { struct page *page, *orig; u8 *to, *from; page = alloc_pages(__GFP_NOWARN | __GFP_COMP | GFP_ATOMIC, get_order(a + b)); if (unlikely(!page)) return -ENOMEM; sge->length = a; orig = sg_page(sge); from = sg_virt(sge); to = page_address(page); memcpy(to, from, a); memcpy(to + a, from + a + pop, b); sg_set_page(sge, page, a + b, 0); put_page(orig); } pop = 0; } else if (pop >= sge->length - a) { /* consume the popped tail of this elem before truncating it, * otherwise we would subtract zero from pop */ pop -= (sge->length - a); sge->length = a; } } /* From above the current layout _must_ be as follows, * * -| offset * -| start * * |---- pop ---|---------------- b ------------| * |____________________________________________| length * * Offset and start of the current msg elem are equal because in the * previous case we handled offset != start and either consumed the * entire element and advanced to the next element OR pop == 0. * * Two cases to handle here are first pop is less than the length * leaving some remainder b above. Simply adjust the element's layout * in this case. Or pop >= length of the element so that b = 0. In this * case advance to next element decrementing pop. */ while (pop) { struct scatterlist *sge = sk_msg_elem(msg, i); if (pop < sge->length) { sge->length -= pop; sge->offset += pop; pop = 0; } else { pop -= sge->length; sk_msg_shift_left(msg, i); } sk_msg_iter_var_next(i); } sk_mem_uncharge(msg->sk, len - pop); msg->sg.size -= (len - pop); sk_msg_compute_data_pointers(msg); return 0; } static const struct bpf_func_proto bpf_msg_pop_data_proto = { .func = bpf_msg_pop_data, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_ANYTHING, }; BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb) { return task_get_classid(skb); } static const struct bpf_func_proto bpf_get_cgroup_classid_proto = { .func = bpf_get_cgroup_classid, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, }; BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb) { return dst_tclassid(skb); } static const struct bpf_func_proto bpf_get_route_realm_proto = { .func = bpf_get_route_realm, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, }; BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb) { /* If skb_clear_hash() was called due to mangling, we can * trigger SW recalculation here. Later access to hash * can then use the inline skb->hash via context directly * instead of calling this helper again. */ return skb_get_hash(skb); } static const struct bpf_func_proto bpf_get_hash_recalc_proto = { .func = bpf_get_hash_recalc, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, }; BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb) { /* After all direct packet writes, this can be used once for * triggering a lazy recalc on the next skb_get_hash() invocation.
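 *
 * A sketch of the intended pattern (not from this file), as seen
 * from a tc BPF program:
 *
 *	bpf_skb_store_bytes(skb, off, &val, sizeof(val), 0);
 *	bpf_set_hash_invalid(skb);
 *	// ... later, only if the hash is actually needed:
 *	hash = bpf_get_hash_recalc(skb);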
*/ skb_clear_hash(skb); return 0; } static const struct bpf_func_proto bpf_set_hash_invalid_proto = { .func = bpf_set_hash_invalid, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, }; BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash) { /* Set user specified hash as L4(+), so that it gets returned * on skb_get_hash() call unless BPF prog later on triggers a * skb_clear_hash(). */ __skb_set_sw_hash(skb, hash, true); return 0; } static const struct bpf_func_proto bpf_set_hash_proto = { .func = bpf_set_hash, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, }; BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto, u16, vlan_tci) { int ret; if (unlikely(vlan_proto != htons(ETH_P_8021Q) && vlan_proto != htons(ETH_P_8021AD))) vlan_proto = htons(ETH_P_8021Q); bpf_push_mac_rcsum(skb); ret = skb_vlan_push(skb, vlan_proto, vlan_tci); bpf_pull_mac_rcsum(skb); bpf_compute_data_pointers(skb); return ret; } static const struct bpf_func_proto bpf_skb_vlan_push_proto = { .func = bpf_skb_vlan_push, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, }; BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb) { int ret; bpf_push_mac_rcsum(skb); ret = skb_vlan_pop(skb); bpf_pull_mac_rcsum(skb); bpf_compute_data_pointers(skb); return ret; } static const struct bpf_func_proto bpf_skb_vlan_pop_proto = { .func = bpf_skb_vlan_pop, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, }; static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len) { /* Caller already did skb_cow() with len as headroom, * so no need to do it here. */ skb_push(skb, len); memmove(skb->data, skb->data + len, off); memset(skb->data + off, 0, len); /* No skb_postpush_rcsum(skb, skb->data + off, len) * needed here as it does not change the skb->csum * result for checksum complete when summing over * zeroed blocks. */ return 0; } static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len) { /* skb_ensure_writable() is not needed here, as we're * already working on an uncloned skb. */ if (unlikely(!pskb_may_pull(skb, off + len))) return -ENOMEM; skb_postpull_rcsum(skb, skb->data + off, len); memmove(skb->data + len, skb->data, off); __skb_pull(skb, len); return 0; } static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len) { bool trans_same = skb->transport_header == skb->network_header; int ret; /* There's no need for __skb_push()/__skb_pull() pair to * get to the start of the mac header as we're guaranteed * to always start from here under eBPF. */ ret = bpf_skb_generic_push(skb, off, len); if (likely(!ret)) { skb->mac_header -= len; skb->network_header -= len; if (trans_same) skb->transport_header = skb->network_header; } return ret; } static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len) { bool trans_same = skb->transport_header == skb->network_header; int ret; /* Same here, __skb_push()/__skb_pull() pair not needed. 
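 *
 * For the IPv4/IPv6 translation below, off is the mac header length
 * and len is the 20 byte difference between sizeof(struct ipv6hdr)
 * (40) and sizeof(struct iphdr) (20).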
*/ ret = bpf_skb_generic_pop(skb, off, len); if (likely(!ret)) { skb->mac_header += len; skb->network_header += len; if (trans_same) skb->transport_header = skb->network_header; } return ret; } static int bpf_skb_proto_4_to_6(struct sk_buff *skb) { const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr); u32 off = skb_mac_header_len(skb); int ret; if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) return -ENOTSUPP; ret = skb_cow(skb, len_diff); if (unlikely(ret < 0)) return ret; ret = bpf_skb_net_hdr_push(skb, off, len_diff); if (unlikely(ret < 0)) return ret; if (skb_is_gso(skb)) { struct skb_shared_info *shinfo = skb_shinfo(skb); /* SKB_GSO_TCPV4 needs to be changed into * SKB_GSO_TCPV6. */ if (shinfo->gso_type & SKB_GSO_TCPV4) { shinfo->gso_type &= ~SKB_GSO_TCPV4; shinfo->gso_type |= SKB_GSO_TCPV6; } /* Due to IPv6 header, MSS needs to be downgraded. */ skb_decrease_gso_size(shinfo, len_diff); /* Header must be checked, and gso_segs recomputed. */ shinfo->gso_type |= SKB_GSO_DODGY; shinfo->gso_segs = 0; } skb->protocol = htons(ETH_P_IPV6); skb_clear_hash(skb); return 0; } static int bpf_skb_proto_6_to_4(struct sk_buff *skb) { const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr); u32 off = skb_mac_header_len(skb); int ret; if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) return -ENOTSUPP; ret = skb_unclone(skb, GFP_ATOMIC); if (unlikely(ret < 0)) return ret; ret = bpf_skb_net_hdr_pop(skb, off, len_diff); if (unlikely(ret < 0)) return ret; if (skb_is_gso(skb)) { struct skb_shared_info *shinfo = skb_shinfo(skb); /* SKB_GSO_TCPV6 needs to be changed into * SKB_GSO_TCPV4. */ if (shinfo->gso_type & SKB_GSO_TCPV6) { shinfo->gso_type &= ~SKB_GSO_TCPV6; shinfo->gso_type |= SKB_GSO_TCPV4; } /* Due to IPv4 header, MSS can be upgraded. */ skb_increase_gso_size(shinfo, len_diff); /* Header must be checked, and gso_segs recomputed. */ shinfo->gso_type |= SKB_GSO_DODGY; shinfo->gso_segs = 0; } skb->protocol = htons(ETH_P_IP); skb_clear_hash(skb); return 0; } static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto) { __be16 from_proto = skb->protocol; if (from_proto == htons(ETH_P_IP) && to_proto == htons(ETH_P_IPV6)) return bpf_skb_proto_4_to_6(skb); if (from_proto == htons(ETH_P_IPV6) && to_proto == htons(ETH_P_IP)) return bpf_skb_proto_6_to_4(skb); return -ENOTSUPP; } BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto, u64, flags) { int ret; if (unlikely(flags)) return -EINVAL; /* General idea is that this helper does the basic groundwork * needed for changing the protocol, and eBPF program fills the * rest through bpf_skb_store_bytes(), bpf_lX_csum_replace() * and other helpers, rather than passing a raw buffer here. * * The rationale is to keep this minimal and without a need to * deal with raw packet data. F.e. even if we would pass buffers * here, the program still needs to call the bpf_lX_csum_replace() * helpers anyway. Plus, this way we keep also separation of * concerns, since f.e. bpf_skb_store_bytes() should only take * care of stores. * * Currently, additional options and extension header space are * not supported, but flags register is reserved so we can adapt * that. For offloads, we mark packet as dodgy, so that headers * need to be verified first. 
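 *
 * A hedged sketch of a NAT64-style caller (not from this file):
 *
 *	if (bpf_skb_change_proto(skb, htons(ETH_P_IPV6), 0))
 *		return TC_ACT_SHOT;
 *	// then rewrite the fresh IPv6 header via bpf_skb_store_bytes()
 *	// and fix the L4 checksum with bpf_l4_csum_replace()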
*/ ret = bpf_skb_proto_xlat(skb, proto); bpf_compute_data_pointers(skb); return ret; } static const struct bpf_func_proto bpf_skb_change_proto_proto = { .func = bpf_skb_change_proto, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, }; BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type) { /* We only allow a restricted subset to be changed for now. */ if (unlikely(!skb_pkt_type_ok(skb->pkt_type) || !skb_pkt_type_ok(pkt_type))) return -EINVAL; skb->pkt_type = pkt_type; return 0; } static const struct bpf_func_proto bpf_skb_change_type_proto = { .func = bpf_skb_change_type, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, }; static u32 bpf_skb_net_base_len(const struct sk_buff *skb) { switch (skb->protocol) { case htons(ETH_P_IP): return sizeof(struct iphdr); case htons(ETH_P_IPV6): return sizeof(struct ipv6hdr); default: return ~0U; } } #define BPF_F_ADJ_ROOM_ENCAP_L3_MASK (BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 | \ BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) #define BPF_F_ADJ_ROOM_MASK (BPF_F_ADJ_ROOM_FIXED_GSO | \ BPF_F_ADJ_ROOM_ENCAP_L3_MASK | \ BPF_F_ADJ_ROOM_ENCAP_L4_GRE | \ BPF_F_ADJ_ROOM_ENCAP_L4_UDP | \ BPF_F_ADJ_ROOM_ENCAP_L2( \ BPF_ADJ_ROOM_ENCAP_L2_MASK)) static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff, u64 flags) { u8 inner_mac_len = flags >> BPF_ADJ_ROOM_ENCAP_L2_SHIFT; bool encap = flags & BPF_F_ADJ_ROOM_ENCAP_L3_MASK; u16 mac_len = 0, inner_net = 0, inner_trans = 0; unsigned int gso_type = SKB_GSO_DODGY; int ret; if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) { /* udp gso_size delineates datagrams, only allow if fixed */ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) || !(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) return -ENOTSUPP; } ret = skb_cow_head(skb, len_diff); if (unlikely(ret < 0)) return ret; if (encap) { if (skb->protocol != htons(ETH_P_IP) && skb->protocol != htons(ETH_P_IPV6)) return -ENOTSUPP; if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 && flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) return -EINVAL; if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE && flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) return -EINVAL; if (skb->encapsulation) return -EALREADY; mac_len = skb->network_header - skb->mac_header; inner_net = skb->network_header; if (inner_mac_len > len_diff) return -EINVAL; inner_trans = skb->transport_header; } ret = bpf_skb_net_hdr_push(skb, off, len_diff); if (unlikely(ret < 0)) return ret; if (encap) { skb->inner_mac_header = inner_net - inner_mac_len; skb->inner_network_header = inner_net; skb->inner_transport_header = inner_trans; skb_set_inner_protocol(skb, skb->protocol); skb->encapsulation = 1; skb_set_network_header(skb, mac_len); if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) gso_type |= SKB_GSO_UDP_TUNNEL; else if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE) gso_type |= SKB_GSO_GRE; else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) gso_type |= SKB_GSO_IPXIP6; else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4) gso_type |= SKB_GSO_IPXIP4; if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE || flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) { int nh_len = flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 ? 
sizeof(struct ipv6hdr) : sizeof(struct iphdr); skb_set_transport_header(skb, mac_len + nh_len); } /* Match skb->protocol to new outer l3 protocol */ if (skb->protocol == htons(ETH_P_IP) && flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) skb->protocol = htons(ETH_P_IPV6); else if (skb->protocol == htons(ETH_P_IPV6) && flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4) skb->protocol = htons(ETH_P_IP); } if (skb_is_gso(skb)) { struct skb_shared_info *shinfo = skb_shinfo(skb); /* Due to header grow, MSS needs to be downgraded. */ if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) skb_decrease_gso_size(shinfo, len_diff); /* Header must be checked, and gso_segs recomputed. */ shinfo->gso_type |= gso_type; shinfo->gso_segs = 0; } return 0; } static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff, u64 flags) { int ret; if (flags & ~BPF_F_ADJ_ROOM_FIXED_GSO) return -EINVAL; if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) { /* udp gso_size delineates datagrams, only allow if fixed */ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) || !(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) return -ENOTSUPP; } ret = skb_unclone(skb, GFP_ATOMIC); if (unlikely(ret < 0)) return ret; ret = bpf_skb_net_hdr_pop(skb, off, len_diff); if (unlikely(ret < 0)) return ret; if (skb_is_gso(skb)) { struct skb_shared_info *shinfo = skb_shinfo(skb); /* Due to header shrink, MSS can be upgraded. */ if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) skb_increase_gso_size(shinfo, len_diff); /* Header must be checked, and gso_segs recomputed. */ shinfo->gso_type |= SKB_GSO_DODGY; shinfo->gso_segs = 0; } return 0; } static u32 __bpf_skb_max_len(const struct sk_buff *skb) { return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len : SKB_MAX_ALLOC; } BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff, u32, mode, u64, flags) { u32 len_cur, len_diff_abs = abs(len_diff); u32 len_min = bpf_skb_net_base_len(skb); u32 len_max = __bpf_skb_max_len(skb); __be16 proto = skb->protocol; bool shrink = len_diff < 0; u32 off; int ret; if (unlikely(flags & ~BPF_F_ADJ_ROOM_MASK)) return -EINVAL; if (unlikely(len_diff_abs > 0xfffU)) return -EFAULT; if (unlikely(proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))) return -ENOTSUPP; off = skb_mac_header_len(skb); switch (mode) { case BPF_ADJ_ROOM_NET: off += bpf_skb_net_base_len(skb); break; case BPF_ADJ_ROOM_MAC: break; default: return -ENOTSUPP; } len_cur = skb->len - skb_network_offset(skb); if ((shrink && (len_diff_abs >= len_cur || len_cur - len_diff_abs < len_min)) || (!shrink && (skb->len + len_diff_abs > len_max && !skb_is_gso(skb)))) return -ENOTSUPP; ret = shrink ? 
bpf_skb_net_shrink(skb, off, len_diff_abs, flags) : bpf_skb_net_grow(skb, off, len_diff_abs, flags); bpf_compute_data_pointers(skb); return ret; } static const struct bpf_func_proto bpf_skb_adjust_room_proto = { .func = bpf_skb_adjust_room, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_ANYTHING, }; static u32 __bpf_skb_min_len(const struct sk_buff *skb) { u32 min_len = skb_network_offset(skb); if (skb_transport_header_was_set(skb)) min_len = skb_transport_offset(skb); if (skb->ip_summed == CHECKSUM_PARTIAL) min_len = skb_checksum_start_offset(skb) + skb->csum_offset + sizeof(__sum16); return min_len; } static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len) { unsigned int old_len = skb->len; int ret; ret = __skb_grow_rcsum(skb, new_len); if (!ret) memset(skb->data + old_len, 0, new_len - old_len); return ret; } static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len) { return __skb_trim_rcsum(skb, new_len); } static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len, u64 flags) { u32 max_len = __bpf_skb_max_len(skb); u32 min_len = __bpf_skb_min_len(skb); int ret; if (unlikely(flags || new_len > max_len || new_len < min_len)) return -EINVAL; if (skb->encapsulation) return -ENOTSUPP; /* The basic idea of this helper is that it's performing the * needed work to either grow or trim an skb, and the eBPF program * rewrites the rest via helpers like bpf_skb_store_bytes(), * bpf_lX_csum_replace() and others rather than passing a raw * buffer here. This one is a slow path helper and intended * for replies with control messages. * * Like in bpf_skb_change_proto(), we want to keep this rather * minimal and without protocol specifics so that we are able * to separate concerns, as bpf_skb_store_bytes() should only * be the one responsible for writing buffers. * * It's really expected to be a slow path operation here for * control message replies, so we're implicitly linearizing, * uncloning and dropping offloads from the skb by this.
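 *
 * Below, growing zero-fills the newly added tail (bpf_skb_grow_rcsum)
 * and trimming keeps the checksum consistent (bpf_skb_trim_rcsum);
 * any GSO state is reset once the resize succeeded.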
*/ ret = __bpf_try_make_writable(skb, skb->len); if (!ret) { if (new_len > skb->len) ret = bpf_skb_grow_rcsum(skb, new_len); else if (new_len < skb->len) ret = bpf_skb_trim_rcsum(skb, new_len); if (!ret && skb_is_gso(skb)) skb_gso_reset(skb); } return ret; } BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len, u64, flags) { int ret = __bpf_skb_change_tail(skb, new_len, flags); bpf_compute_data_pointers(skb); return ret; } static const struct bpf_func_proto bpf_skb_change_tail_proto = { .func = bpf_skb_change_tail, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, }; BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len, u64, flags) { int ret = __bpf_skb_change_tail(skb, new_len, flags); bpf_compute_data_end_sk_skb(skb); return ret; } static const struct bpf_func_proto sk_skb_change_tail_proto = { .func = sk_skb_change_tail, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, }; static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room, u64 flags) { u32 max_len = __bpf_skb_max_len(skb); u32 new_len = skb->len + head_room; int ret; if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) || new_len < skb->len)) return -EINVAL; ret = skb_cow(skb, head_room); if (likely(!ret)) { /* Idea for this helper is that we currently only * allow to expand on mac header. This means that * skb->protocol network header, etc, stay as is. * Compared to bpf_skb_change_tail(), we're more * flexible due to not needing to linearize or * reset GSO. Intention for this helper is to be * used by an L3 skb that needs to push mac header * for redirection into L2 device. */ __skb_push(skb, head_room); memset(skb->data, 0, head_room); skb_reset_mac_header(skb); } return ret; } BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room, u64, flags) { int ret = __bpf_skb_change_head(skb, head_room, flags); bpf_compute_data_pointers(skb); return ret; } static const struct bpf_func_proto bpf_skb_change_head_proto = { .func = bpf_skb_change_head, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, }; BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room, u64, flags) { int ret = __bpf_skb_change_head(skb, head_room, flags); bpf_compute_data_end_sk_skb(skb); return ret; } static const struct bpf_func_proto sk_skb_change_head_proto = { .func = sk_skb_change_head, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, }; static unsigned long xdp_get_metalen(const struct xdp_buff *xdp) { return xdp_data_meta_unsupported(xdp) ? 
0 : xdp->data - xdp->data_meta; } BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset) { void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame); unsigned long metalen = xdp_get_metalen(xdp); void *data_start = xdp_frame_end + metalen; void *data = xdp->data + offset; if (unlikely(data < data_start || data > xdp->data_end - ETH_HLEN)) return -EINVAL; if (metalen) memmove(xdp->data_meta + offset, xdp->data_meta, metalen); xdp->data_meta += offset; xdp->data = data; return 0; } static const struct bpf_func_proto bpf_xdp_adjust_head_proto = { .func = bpf_xdp_adjust_head, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, }; BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset) { void *data_end = xdp->data_end + offset; /* only shrinking is allowed for now. */ if (unlikely(offset >= 0)) return -EINVAL; if (unlikely(data_end < xdp->data + ETH_HLEN)) return -EINVAL; xdp->data_end = data_end; return 0; } static const struct bpf_func_proto bpf_xdp_adjust_tail_proto = { .func = bpf_xdp_adjust_tail, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, }; BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset) { void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame); void *meta = xdp->data_meta + offset; unsigned long metalen = xdp->data - meta; if (xdp_data_meta_unsupported(xdp)) return -ENOTSUPP; if (unlikely(meta < xdp_frame_end || meta > xdp->data)) return -EINVAL; if (unlikely((metalen & (sizeof(__u32) - 1)) || (metalen > 32))) return -EACCES; xdp->data_meta = meta; return 0; } static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = { .func = bpf_xdp_adjust_meta, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, }; static int __bpf_tx_xdp(struct net_device *dev, struct bpf_map *map, struct xdp_buff *xdp, u32 index) { struct xdp_frame *xdpf; int err, sent; if (!dev->netdev_ops->ndo_xdp_xmit) { return -EOPNOTSUPP; } err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data); if (unlikely(err)) return err; xdpf = convert_to_xdp_frame(xdp); if (unlikely(!xdpf)) return -EOVERFLOW; sent = dev->netdev_ops->ndo_xdp_xmit(dev, 1, &xdpf, XDP_XMIT_FLUSH); if (sent <= 0) return sent; return 0; } static noinline int xdp_do_redirect_slow(struct net_device *dev, struct xdp_buff *xdp, struct bpf_prog *xdp_prog, struct bpf_redirect_info *ri) { struct net_device *fwd; u32 index = ri->ifindex; int err; fwd = dev_get_by_index_rcu(dev_net(dev), index); ri->ifindex = 0; if (unlikely(!fwd)) { err = -EINVAL; goto err; } err = __bpf_tx_xdp(fwd, NULL, xdp, 0); if (unlikely(err)) goto err; _trace_xdp_redirect(dev, xdp_prog, index); return 0; err: _trace_xdp_redirect_err(dev, xdp_prog, index, err); return err; } static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd, struct bpf_map *map, struct xdp_buff *xdp, u32 index) { int err; switch (map->map_type) { case BPF_MAP_TYPE_DEVMAP: { struct bpf_dtab_netdev *dst = fwd; err = dev_map_enqueue(dst, xdp, dev_rx); if (unlikely(err)) return err; __dev_map_insert_ctx(map, index); break; } case BPF_MAP_TYPE_CPUMAP: { struct bpf_cpu_map_entry *rcpu = fwd; err = cpu_map_enqueue(rcpu, xdp, dev_rx); if (unlikely(err)) return err; __cpu_map_insert_ctx(map, index); break; } case BPF_MAP_TYPE_XSKMAP: { struct xdp_sock *xs = fwd; err = __xsk_map_redirect(map, xdp, xs); return err; } default: break; } return 0; } void xdp_do_flush_map(void) { struct bpf_redirect_info 
*ri = this_cpu_ptr(&bpf_redirect_info); struct bpf_map *map = ri->map_to_flush; ri->map_to_flush = NULL; if (map) { switch (map->map_type) { case BPF_MAP_TYPE_DEVMAP: __dev_map_flush(map); break; case BPF_MAP_TYPE_CPUMAP: __cpu_map_flush(map); break; case BPF_MAP_TYPE_XSKMAP: __xsk_map_flush(map); break; default: break; } } } EXPORT_SYMBOL_GPL(xdp_do_flush_map); static inline void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index) { switch (map->map_type) { case BPF_MAP_TYPE_DEVMAP: return __dev_map_lookup_elem(map, index); case BPF_MAP_TYPE_CPUMAP: return __cpu_map_lookup_elem(map, index); case BPF_MAP_TYPE_XSKMAP: return __xsk_map_lookup_elem(map, index); default: return NULL; } } void bpf_clear_redirect_map(struct bpf_map *map) { struct bpf_redirect_info *ri; int cpu; for_each_possible_cpu(cpu) { ri = per_cpu_ptr(&bpf_redirect_info, cpu); /* Avoid polluting remote cacheline due to writes if * not needed. Once we pass this test, we need the * cmpxchg() to make sure it hasn't been changed in * the meantime by remote CPU. */ if (unlikely(READ_ONCE(ri->map) == map)) cmpxchg(&ri->map, map, NULL); } } static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp, struct bpf_prog *xdp_prog, struct bpf_map *map, struct bpf_redirect_info *ri) { u32 index = ri->ifindex; void *fwd = NULL; int err; ri->ifindex = 0; WRITE_ONCE(ri->map, NULL); fwd = __xdp_map_lookup_elem(map, index); if (unlikely(!fwd)) { err = -EINVAL; goto err; } if (ri->map_to_flush && unlikely(ri->map_to_flush != map)) xdp_do_flush_map(); err = __bpf_tx_xdp_map(dev, fwd, map, xdp, index); if (unlikely(err)) goto err; ri->map_to_flush = map; _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index); return 0; err: _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err); return err; } int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp, struct bpf_prog *xdp_prog) { struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); struct bpf_map *map = READ_ONCE(ri->map); if (likely(map)) return xdp_do_redirect_map(dev, xdp, xdp_prog, map, ri); return xdp_do_redirect_slow(dev, xdp, xdp_prog, ri); } EXPORT_SYMBOL_GPL(xdp_do_redirect); static int xdp_do_generic_redirect_map(struct net_device *dev, struct sk_buff *skb, struct xdp_buff *xdp, struct bpf_prog *xdp_prog, struct bpf_map *map) { struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); u32 index = ri->ifindex; void *fwd = NULL; int err = 0; ri->ifindex = 0; WRITE_ONCE(ri->map, NULL); fwd = __xdp_map_lookup_elem(map, index); if (unlikely(!fwd)) { err = -EINVAL; goto err; } if (map->map_type == BPF_MAP_TYPE_DEVMAP) { struct bpf_dtab_netdev *dst = fwd; err = dev_map_generic_redirect(dst, skb, xdp_prog); if (unlikely(err)) goto err; } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) { struct xdp_sock *xs = fwd; err = xsk_generic_rcv(xs, xdp); if (err) goto err; consume_skb(skb); } else { /* TODO: Handle BPF_MAP_TYPE_CPUMAP */ err = -EBADRQC; goto err; } _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index); return 0; err: _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err); return err; } int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb, struct xdp_buff *xdp, struct bpf_prog *xdp_prog) { struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); struct bpf_map *map = READ_ONCE(ri->map); u32 index = ri->ifindex; struct net_device *fwd; int err = 0; if (map) return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, map); ri->ifindex = 0; fwd = dev_get_by_index_rcu(dev_net(dev), index); if 
(unlikely(!fwd)) { err = -EINVAL; goto err; } err = xdp_ok_fwd_dev(fwd, skb->len); if (unlikely(err)) goto err; skb->dev = fwd; _trace_xdp_redirect(dev, xdp_prog, index); generic_xdp_tx(skb, xdp_prog); return 0; err: _trace_xdp_redirect_err(dev, xdp_prog, index, err); return err; } EXPORT_SYMBOL_GPL(xdp_do_generic_redirect); BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags) { struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); if (unlikely(flags)) return XDP_ABORTED; ri->ifindex = ifindex; ri->flags = flags; WRITE_ONCE(ri->map, NULL); return XDP_REDIRECT; } static const struct bpf_func_proto bpf_xdp_redirect_proto = { .func = bpf_xdp_redirect, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_ANYTHING, .arg2_type = ARG_ANYTHING, }; BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags) { struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); if (unlikely(flags)) return XDP_ABORTED; ri->ifindex = ifindex; ri->flags = flags; WRITE_ONCE(ri->map, map); return XDP_REDIRECT; } static const struct bpf_func_proto bpf_xdp_redirect_map_proto = { .func = bpf_xdp_redirect_map, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_CONST_MAP_PTR, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, }; static unsigned long bpf_skb_copy(void *dst_buff, const void *skb, unsigned long off, unsigned long len) { void *ptr = skb_header_pointer(skb, off, len, dst_buff); if (unlikely(!ptr)) return len; if (ptr != dst_buff) memcpy(dst_buff, ptr, len); return 0; } BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map, u64, flags, void *, meta, u64, meta_size) { u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32; if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK))) return -EINVAL; if (unlikely(skb_size > skb->len)) return -EFAULT; return bpf_event_output(map, flags, meta, meta_size, skb, skb_size, bpf_skb_copy); } static const struct bpf_func_proto bpf_skb_event_output_proto = { .func = bpf_skb_event_output, .gpl_only = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_PTR_TO_MEM, .arg5_type = ARG_CONST_SIZE_OR_ZERO, }; static unsigned short bpf_tunnel_key_af(u64 flags) { return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET; } BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to, u32, size, u64, flags) { const struct ip_tunnel_info *info = skb_tunnel_info(skb); u8 compat[sizeof(struct bpf_tunnel_key)]; void *to_orig = to; int err; if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) { err = -EINVAL; goto err_clear; } if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) { err = -EPROTO; goto err_clear; } if (unlikely(size != sizeof(struct bpf_tunnel_key))) { err = -EINVAL; switch (size) { case offsetof(struct bpf_tunnel_key, tunnel_label): case offsetof(struct bpf_tunnel_key, tunnel_ext): goto set_compat; case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): /* Fixup deprecated structure layouts here, so we have * a common path later on. 
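 *
 * The deprecated sizes accepted here end at tunnel_label, tunnel_ext
 * or remote_ipv6[1]; any other short size is rejected with -EINVAL.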
*/ if (ip_tunnel_info_af(info) != AF_INET) goto err_clear; set_compat: to = (struct bpf_tunnel_key *)compat; break; default: goto err_clear; } } to->tunnel_id = be64_to_cpu(info->key.tun_id); to->tunnel_tos = info->key.tos; to->tunnel_ttl = info->key.ttl; to->tunnel_ext = 0; if (flags & BPF_F_TUNINFO_IPV6) { memcpy(to->remote_ipv6, &info->key.u.ipv6.src, sizeof(to->remote_ipv6)); to->tunnel_label = be32_to_cpu(info->key.label); } else { to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src); memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3); to->tunnel_label = 0; } if (unlikely(size != sizeof(struct bpf_tunnel_key))) memcpy(to_orig, to, size); return 0; err_clear: memset(to_orig, 0, size); return err; } static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = { .func = bpf_skb_get_tunnel_key, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_UNINIT_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, }; BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size) { const struct ip_tunnel_info *info = skb_tunnel_info(skb); int err; if (unlikely(!info || !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) { err = -ENOENT; goto err_clear; } if (unlikely(size < info->options_len)) { err = -ENOMEM; goto err_clear; } ip_tunnel_info_opts_get(to, info); if (size > info->options_len) memset(to + info->options_len, 0, size - info->options_len); return info->options_len; err_clear: memset(to, 0, size); return err; } static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = { .func = bpf_skb_get_tunnel_opt, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_UNINIT_MEM, .arg3_type = ARG_CONST_SIZE, }; static struct metadata_dst __percpu *md_dst; BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb, const struct bpf_tunnel_key *, from, u32, size, u64, flags) { struct metadata_dst *md = this_cpu_ptr(md_dst); u8 compat[sizeof(struct bpf_tunnel_key)]; struct ip_tunnel_info *info; if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX | BPF_F_DONT_FRAGMENT | BPF_F_SEQ_NUMBER))) return -EINVAL; if (unlikely(size != sizeof(struct bpf_tunnel_key))) { switch (size) { case offsetof(struct bpf_tunnel_key, tunnel_label): case offsetof(struct bpf_tunnel_key, tunnel_ext): case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): /* Fixup deprecated structure layouts here, so we have * a common path later on. 
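 *
 * Unlike the get side, the set side copies the short key into a
 * zeroed full-size compat buffer, so every field beyond the caller's
 * size reads as zero below.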
*/ memcpy(compat, from, size); memset(compat + size, 0, sizeof(compat) - size); from = (const struct bpf_tunnel_key *) compat; break; default: return -EINVAL; } } if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) || from->tunnel_ext)) return -EINVAL; skb_dst_drop(skb); dst_hold((struct dst_entry *) md); skb_dst_set(skb, (struct dst_entry *) md); info = &md->u.tun_info; memset(info, 0, sizeof(*info)); info->mode = IP_TUNNEL_INFO_TX; info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE; if (flags & BPF_F_DONT_FRAGMENT) info->key.tun_flags |= TUNNEL_DONT_FRAGMENT; if (flags & BPF_F_ZERO_CSUM_TX) info->key.tun_flags &= ~TUNNEL_CSUM; if (flags & BPF_F_SEQ_NUMBER) info->key.tun_flags |= TUNNEL_SEQ; info->key.tun_id = cpu_to_be64(from->tunnel_id); info->key.tos = from->tunnel_tos; info->key.ttl = from->tunnel_ttl; if (flags & BPF_F_TUNINFO_IPV6) { info->mode |= IP_TUNNEL_INFO_IPV6; memcpy(&info->key.u.ipv6.dst, from->remote_ipv6, sizeof(from->remote_ipv6)); info->key.label = cpu_to_be32(from->tunnel_label) & IPV6_FLOWLABEL_MASK; } else { info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4); } return 0; } static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = { .func = bpf_skb_set_tunnel_key, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, }; BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb, const u8 *, from, u32, size) { struct ip_tunnel_info *info = skb_tunnel_info(skb); const struct metadata_dst *md = this_cpu_ptr(md_dst); if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1)))) return -EINVAL; if (unlikely(size > IP_TUNNEL_OPTS_MAX)) return -ENOMEM; ip_tunnel_info_opts_set(info, from, size, TUNNEL_OPTIONS_PRESENT); return 0; } static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = { .func = bpf_skb_set_tunnel_opt, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, }; static const struct bpf_func_proto * bpf_get_skb_set_tunnel_proto(enum bpf_func_id which) { if (!md_dst) { struct metadata_dst __percpu *tmp; tmp = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX, METADATA_IP_TUNNEL, GFP_KERNEL); if (!tmp) return NULL; if (cmpxchg(&md_dst, NULL, tmp)) metadata_dst_free_percpu(tmp); } switch (which) { case BPF_FUNC_skb_set_tunnel_key: return &bpf_skb_set_tunnel_key_proto; case BPF_FUNC_skb_set_tunnel_opt: return &bpf_skb_set_tunnel_opt_proto; default: return NULL; } } BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map, u32, idx) { struct bpf_array *array = container_of(map, struct bpf_array, map); struct cgroup *cgrp; struct sock *sk; sk = skb_to_full_sk(skb); if (!sk || !sk_fullsock(sk)) return -ENOENT; if (unlikely(idx >= array->map.max_entries)) return -E2BIG; cgrp = READ_ONCE(array->ptrs[idx]); if (unlikely(!cgrp)) return -EAGAIN; return sk_under_cgroup_hierarchy(sk, cgrp); } static const struct bpf_func_proto bpf_skb_under_cgroup_proto = { .func = bpf_skb_under_cgroup, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, }; #ifdef CONFIG_SOCK_CGROUP_DATA BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb) { struct sock *sk = skb_to_full_sk(skb); struct cgroup *cgrp; if (!sk || !sk_fullsock(sk)) return 0; cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); return cgrp->kn->id.id; } static const struct bpf_func_proto bpf_skb_cgroup_id_proto 
= { .func = bpf_skb_cgroup_id, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, }; BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int, ancestor_level) { struct sock *sk = skb_to_full_sk(skb); struct cgroup *ancestor; struct cgroup *cgrp; if (!sk || !sk_fullsock(sk)) return 0; cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); ancestor = cgroup_ancestor(cgrp, ancestor_level); if (!ancestor) return 0; return ancestor->kn->id.id; } static const struct bpf_func_proto bpf_skb_ancestor_cgroup_id_proto = { .func = bpf_skb_ancestor_cgroup_id, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, }; #endif static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff, unsigned long off, unsigned long len) { memcpy(dst_buff, src_buff + off, len); return 0; } BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map, u64, flags, void *, meta, u64, meta_size) { u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32; if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK))) return -EINVAL; if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data))) return -EFAULT; return bpf_event_output(map, flags, meta, meta_size, xdp->data, xdp_size, bpf_xdp_copy); } static const struct bpf_func_proto bpf_xdp_event_output_proto = { .func = bpf_xdp_event_output, .gpl_only = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_PTR_TO_MEM, .arg5_type = ARG_CONST_SIZE_OR_ZERO, }; BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb) { return skb->sk ? sock_gen_cookie(skb->sk) : 0; } static const struct bpf_func_proto bpf_get_socket_cookie_proto = { .func = bpf_get_socket_cookie, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, }; BPF_CALL_1(bpf_get_socket_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx) { return sock_gen_cookie(ctx->sk); } static const struct bpf_func_proto bpf_get_socket_cookie_sock_addr_proto = { .func = bpf_get_socket_cookie_sock_addr, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, }; BPF_CALL_1(bpf_get_socket_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx) { return sock_gen_cookie(ctx->sk); } static const struct bpf_func_proto bpf_get_socket_cookie_sock_ops_proto = { .func = bpf_get_socket_cookie_sock_ops, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, }; BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb) { struct sock *sk = sk_to_full_sk(skb->sk); kuid_t kuid; if (!sk || !sk_fullsock(sk)) return overflowuid; kuid = sock_net_uid(sock_net(sk), sk); return from_kuid_munged(sock_net(sk)->user_ns, kuid); } static const struct bpf_func_proto bpf_get_socket_uid_proto = { .func = bpf_get_socket_uid, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, }; BPF_CALL_5(bpf_sockopt_event_output, struct bpf_sock_ops_kern *, bpf_sock, struct bpf_map *, map, u64, flags, void *, data, u64, size) { if (unlikely(flags & ~(BPF_F_INDEX_MASK))) return -EINVAL; return bpf_event_output(map, flags, data, size, NULL, 0, NULL); } static const struct bpf_func_proto bpf_sockopt_event_output_proto = { .func = bpf_sockopt_event_output, .gpl_only = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_PTR_TO_MEM, .arg5_type = ARG_CONST_SIZE_OR_ZERO, }; BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, int, level, int, optname, 
char *, optval, int, optlen) { struct sock *sk = bpf_sock->sk; int ret = 0; int val; if (!sk_fullsock(sk)) return -EINVAL; if (level == SOL_SOCKET) { if (optlen != sizeof(int)) return -EINVAL; val = *((int *)optval); /* Only some socketops are supported */ switch (optname) { case SO_RCVBUF: val = min_t(u32, val, sysctl_rmem_max); sk->sk_userlocks |= SOCK_RCVBUF_LOCK; sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF); break; case SO_SNDBUF: val = min_t(u32, val, sysctl_wmem_max); sk->sk_userlocks |= SOCK_SNDBUF_LOCK; sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); break; case SO_MAX_PACING_RATE: /* 32bit version */ if (val != ~0U) cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED); sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val; sk->sk_pacing_rate = min(sk->sk_pacing_rate, sk->sk_max_pacing_rate); break; case SO_PRIORITY: sk->sk_priority = val; break; case SO_RCVLOWAT: if (val < 0) val = INT_MAX; sk->sk_rcvlowat = val ? : 1; break; case SO_MARK: if (sk->sk_mark != val) { sk->sk_mark = val; sk_dst_reset(sk); } break; default: ret = -EINVAL; } #ifdef CONFIG_INET } else if (level == SOL_IP) { if (optlen != sizeof(int) || sk->sk_family != AF_INET) return -EINVAL; val = *((int *)optval); /* Only some options are supported */ switch (optname) { case IP_TOS: if (val < -1 || val > 0xff) { ret = -EINVAL; } else { struct inet_sock *inet = inet_sk(sk); if (val == -1) val = 0; inet->tos = val; } break; default: ret = -EINVAL; } #if IS_ENABLED(CONFIG_IPV6) } else if (level == SOL_IPV6) { if (optlen != sizeof(int) || sk->sk_family != AF_INET6) return -EINVAL; val = *((int *)optval); /* Only some options are supported */ switch (optname) { case IPV6_TCLASS: if (val < -1 || val > 0xff) { ret = -EINVAL; } else { struct ipv6_pinfo *np = inet6_sk(sk); if (val == -1) val = 0; np->tclass = val; } break; default: ret = -EINVAL; } #endif } else if (level == SOL_TCP && sk->sk_prot->setsockopt == tcp_setsockopt) { if (optname == TCP_CONGESTION) { char name[TCP_CA_NAME_MAX]; bool reinit = bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN; strncpy(name, optval, min_t(long, optlen, TCP_CA_NAME_MAX-1)); name[TCP_CA_NAME_MAX-1] = 0; ret = tcp_set_congestion_control(sk, name, false, reinit); } else { struct tcp_sock *tp = tcp_sk(sk); if (optlen != sizeof(int)) return -EINVAL; val = *((int *)optval); /* Only some options are supported */ switch (optname) { case TCP_BPF_IW: if (val <= 0 || tp->data_segs_out > tp->syn_data) ret = -EINVAL; else tp->snd_cwnd = val; break; case TCP_BPF_SNDCWND_CLAMP: if (val <= 0) { ret = -EINVAL; } else { tp->snd_cwnd_clamp = val; tp->snd_ssthresh = val; } break; case TCP_SAVE_SYN: if (val < 0 || val > 1) ret = -EINVAL; else tp->save_syn = val; break; default: ret = -EINVAL; } } #endif } else { ret = -EINVAL; } return ret; } static const struct bpf_func_proto bpf_setsockopt_proto = { .func = bpf_setsockopt, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_PTR_TO_MEM, .arg5_type = ARG_CONST_SIZE, }; BPF_CALL_5(bpf_getsockopt, struct bpf_sock_ops_kern *, bpf_sock, int, level, int, optname, char *, optval, int, optlen) { struct sock *sk = bpf_sock->sk; if (!sk_fullsock(sk)) goto err_clear; #ifdef CONFIG_INET if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) { struct inet_connection_sock *icsk; struct tcp_sock *tp; switch (optname) { case TCP_CONGESTION: icsk = inet_csk(sk); if (!icsk->icsk_ca_ops || optlen <= 1) goto err_clear; strncpy(optval, icsk->icsk_ca_ops->name, 
optlen); optval[optlen - 1] = 0; break; case TCP_SAVED_SYN: tp = tcp_sk(sk); if (optlen <= 0 || !tp->saved_syn || optlen > tp->saved_syn[0]) goto err_clear; memcpy(optval, tp->saved_syn + 1, optlen); break; default: goto err_clear; } } else if (level == SOL_IP) { struct inet_sock *inet = inet_sk(sk); if (optlen != sizeof(int) || sk->sk_family != AF_INET) goto err_clear; /* Only some options are supported */ switch (optname) { case IP_TOS: *((int *)optval) = (int)inet->tos; break; default: goto err_clear; } #if IS_ENABLED(CONFIG_IPV6) } else if (level == SOL_IPV6) { struct ipv6_pinfo *np = inet6_sk(sk); if (optlen != sizeof(int) || sk->sk_family != AF_INET6) goto err_clear; /* Only some options are supported */ switch (optname) { case IPV6_TCLASS: *((int *)optval) = (int)np->tclass; break; default: goto err_clear; } #endif } else { goto err_clear; } return 0; #endif err_clear: memset(optval, 0, optlen); return -EINVAL; } static const struct bpf_func_proto bpf_getsockopt_proto = { .func = bpf_getsockopt, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_PTR_TO_UNINIT_MEM, .arg5_type = ARG_CONST_SIZE, }; BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock, int, argval) { struct sock *sk = bpf_sock->sk; int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS; if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk)) return -EINVAL; tcp_sk(sk)->bpf_sock_ops_cb_flags = val; return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS); } static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = { .func = bpf_sock_ops_cb_flags_set, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, }; const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly; EXPORT_SYMBOL_GPL(ipv6_bpf_stub); BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr, int, addr_len) { #ifdef CONFIG_INET struct sock *sk = ctx->sk; int err; /* Binding to port can be expensive so it's prohibited in the helper. * Only binding to IP is supported. 
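	 * That is, sin_port/sin6_port in the sockaddr passed by the BPF
	 * program must be zero; a non-zero port is rejected with -EINVAL
	 * below.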
*/ err = -EINVAL; if (addr_len < offsetofend(struct sockaddr, sa_family)) return err; if (addr->sa_family == AF_INET) { if (addr_len < sizeof(struct sockaddr_in)) return err; if (((struct sockaddr_in *)addr)->sin_port != htons(0)) return err; return __inet_bind(sk, addr, addr_len, true, false); #if IS_ENABLED(CONFIG_IPV6) } else if (addr->sa_family == AF_INET6) { if (addr_len < SIN6_LEN_RFC2133) return err; if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0)) return err; /* ipv6_bpf_stub cannot be NULL, since it's called from * bpf_cgroup_inet6_connect hook and ipv6 is already loaded */ return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, true, false); #endif /* CONFIG_IPV6 */ } #endif /* CONFIG_INET */ return -EAFNOSUPPORT; } static const struct bpf_func_proto bpf_bind_proto = { .func = bpf_bind, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, }; #ifdef CONFIG_XFRM BPF_CALL_5(bpf_skb_get_xfrm_state, struct sk_buff *, skb, u32, index, struct bpf_xfrm_state *, to, u32, size, u64, flags) { const struct sec_path *sp = skb_sec_path(skb); const struct xfrm_state *x; if (!sp || unlikely(index >= sp->len || flags)) goto err_clear; x = sp->xvec[index]; if (unlikely(size != sizeof(struct bpf_xfrm_state))) goto err_clear; to->reqid = x->props.reqid; to->spi = x->id.spi; to->family = x->props.family; to->ext = 0; if (to->family == AF_INET6) { memcpy(to->remote_ipv6, x->props.saddr.a6, sizeof(to->remote_ipv6)); } else { to->remote_ipv4 = x->props.saddr.a4; memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3); } return 0; err_clear: memset(to, 0, size); return -EINVAL; } static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = { .func = bpf_skb_get_xfrm_state, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_PTR_TO_UNINIT_MEM, .arg4_type = ARG_CONST_SIZE, .arg5_type = ARG_ANYTHING, }; #endif #if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6) static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params, const struct neighbour *neigh, const struct net_device *dev) { memcpy(params->dmac, neigh->ha, ETH_ALEN); memcpy(params->smac, dev->dev_addr, ETH_ALEN); params->h_vlan_TCI = 0; params->h_vlan_proto = 0; params->ifindex = dev->ifindex; return 0; } #endif #if IS_ENABLED(CONFIG_INET) static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, u32 flags, bool check_mtu) { struct fib_nh_common *nhc; struct in_device *in_dev; struct neighbour *neigh; struct net_device *dev; struct fib_result res; struct flowi4 fl4; int err; u32 mtu; dev = dev_get_by_index_rcu(net, params->ifindex); if (unlikely(!dev)) return -ENODEV; /* verify forwarding is enabled on this interface */ in_dev = __in_dev_get_rcu(dev); if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev))) return BPF_FIB_LKUP_RET_FWD_DISABLED; if (flags & BPF_FIB_LOOKUP_OUTPUT) { fl4.flowi4_iif = 1; fl4.flowi4_oif = params->ifindex; } else { fl4.flowi4_iif = params->ifindex; fl4.flowi4_oif = 0; } fl4.flowi4_tos = params->tos & IPTOS_RT_MASK; fl4.flowi4_scope = RT_SCOPE_UNIVERSE; fl4.flowi4_flags = 0; fl4.flowi4_proto = params->l4_protocol; fl4.daddr = params->ipv4_dst; fl4.saddr = params->ipv4_src; fl4.fl4_sport = params->sport; fl4.fl4_dport = params->dport; if (flags & BPF_FIB_LOOKUP_DIRECT) { u32 tbid = l3mdev_fib_table_rcu(dev) ? 
: RT_TABLE_MAIN; struct fib_table *tb; tb = fib_get_table(net, tbid); if (unlikely(!tb)) return BPF_FIB_LKUP_RET_NOT_FWDED; err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF); } else { fl4.flowi4_mark = 0; fl4.flowi4_secid = 0; fl4.flowi4_tun_key.tun_id = 0; fl4.flowi4_uid = sock_net_uid(net, NULL); err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF); } if (err) { /* map fib lookup errors to RTN_ type */ if (err == -EINVAL) return BPF_FIB_LKUP_RET_BLACKHOLE; if (err == -EHOSTUNREACH) return BPF_FIB_LKUP_RET_UNREACHABLE; if (err == -EACCES) return BPF_FIB_LKUP_RET_PROHIBIT; return BPF_FIB_LKUP_RET_NOT_FWDED; } if (res.type != RTN_UNICAST) return BPF_FIB_LKUP_RET_NOT_FWDED; if (res.fi->fib_nhs > 1) fib_select_path(net, &res, &fl4, NULL); if (check_mtu) { mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst); if (params->tot_len > mtu) return BPF_FIB_LKUP_RET_FRAG_NEEDED; } nhc = res.nhc; /* do not handle lwt encaps right now */ if (nhc->nhc_lwtstate) return BPF_FIB_LKUP_RET_UNSUPP_LWT; dev = nhc->nhc_dev; params->rt_metric = res.fi->fib_priority; /* xdp and cls_bpf programs are run in RCU-bh so * rcu_read_lock_bh is not needed here */ if (likely(nhc->nhc_gw_family != AF_INET6)) { if (nhc->nhc_gw_family) params->ipv4_dst = nhc->nhc_gw.ipv4; neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)params->ipv4_dst); } else { struct in6_addr *dst = (struct in6_addr *)params->ipv6_dst; params->family = AF_INET6; *dst = nhc->nhc_gw.ipv6; neigh = __ipv6_neigh_lookup_noref_stub(dev, dst); } if (!neigh) return BPF_FIB_LKUP_RET_NO_NEIGH; return bpf_fib_set_fwd_params(params, neigh, dev); } #endif #if IS_ENABLED(CONFIG_IPV6) static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, u32 flags, bool check_mtu) { struct in6_addr *src = (struct in6_addr *) params->ipv6_src; struct in6_addr *dst = (struct in6_addr *) params->ipv6_dst; struct fib6_result res = {}; struct neighbour *neigh; struct net_device *dev; struct inet6_dev *idev; struct flowi6 fl6; int strict = 0; int oif, err; u32 mtu; /* link local addresses are never forwarded */ if (rt6_need_strict(dst) || rt6_need_strict(src)) return BPF_FIB_LKUP_RET_NOT_FWDED; dev = dev_get_by_index_rcu(net, params->ifindex); if (unlikely(!dev)) return -ENODEV; idev = __in6_dev_get_safely(dev); if (unlikely(!idev || !net->ipv6.devconf_all->forwarding)) return BPF_FIB_LKUP_RET_FWD_DISABLED; if (flags & BPF_FIB_LOOKUP_OUTPUT) { fl6.flowi6_iif = 1; oif = fl6.flowi6_oif = params->ifindex; } else { oif = fl6.flowi6_iif = params->ifindex; fl6.flowi6_oif = 0; strict = RT6_LOOKUP_F_HAS_SADDR; } fl6.flowlabel = params->flowinfo; fl6.flowi6_scope = 0; fl6.flowi6_flags = 0; fl6.mp_hash = 0; fl6.flowi6_proto = params->l4_protocol; fl6.daddr = *dst; fl6.saddr = *src; fl6.fl6_sport = params->sport; fl6.fl6_dport = params->dport; if (flags & BPF_FIB_LOOKUP_DIRECT) { u32 tbid = l3mdev_fib_table_rcu(dev) ? 
: RT_TABLE_MAIN; struct fib6_table *tb; tb = ipv6_stub->fib6_get_table(net, tbid); if (unlikely(!tb)) return BPF_FIB_LKUP_RET_NOT_FWDED; err = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, &res, strict); } else { fl6.flowi6_mark = 0; fl6.flowi6_secid = 0; fl6.flowi6_tun_key.tun_id = 0; fl6.flowi6_uid = sock_net_uid(net, NULL); err = ipv6_stub->fib6_lookup(net, oif, &fl6, &res, strict); } if (unlikely(err || IS_ERR_OR_NULL(res.f6i) || res.f6i == net->ipv6.fib6_null_entry)) return BPF_FIB_LKUP_RET_NOT_FWDED; switch (res.fib6_type) { /* only unicast is forwarded */ case RTN_UNICAST: break; case RTN_BLACKHOLE: return BPF_FIB_LKUP_RET_BLACKHOLE; case RTN_UNREACHABLE: return BPF_FIB_LKUP_RET_UNREACHABLE; case RTN_PROHIBIT: return BPF_FIB_LKUP_RET_PROHIBIT; default: return BPF_FIB_LKUP_RET_NOT_FWDED; } ipv6_stub->fib6_select_path(net, &res, &fl6, fl6.flowi6_oif, fl6.flowi6_oif != 0, NULL, strict); if (check_mtu) { mtu = ipv6_stub->ip6_mtu_from_fib6(&res, dst, src); if (params->tot_len > mtu) return BPF_FIB_LKUP_RET_FRAG_NEEDED; } if (res.nh->fib_nh_lws) return BPF_FIB_LKUP_RET_UNSUPP_LWT; if (res.nh->fib_nh_gw_family) *dst = res.nh->fib_nh_gw6; dev = res.nh->fib_nh_dev; params->rt_metric = res.f6i->fib6_metric; /* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is * not needed here. */ neigh = __ipv6_neigh_lookup_noref_stub(dev, dst); if (!neigh) return BPF_FIB_LKUP_RET_NO_NEIGH; return bpf_fib_set_fwd_params(params, neigh, dev); } #endif BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx, struct bpf_fib_lookup *, params, int, plen, u32, flags) { if (plen < sizeof(*params)) return -EINVAL; if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT)) return -EINVAL; switch (params->family) { #if IS_ENABLED(CONFIG_INET) case AF_INET: return bpf_ipv4_fib_lookup(dev_net(ctx->rxq->dev), params, flags, true); #endif #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: return bpf_ipv6_fib_lookup(dev_net(ctx->rxq->dev), params, flags, true); #endif } return -EAFNOSUPPORT; } static const struct bpf_func_proto bpf_xdp_fib_lookup_proto = { .func = bpf_xdp_fib_lookup, .gpl_only = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, }; BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb, struct bpf_fib_lookup *, params, int, plen, u32, flags) { struct net *net = dev_net(skb->dev); int rc = -EAFNOSUPPORT; if (plen < sizeof(*params)) return -EINVAL; if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT)) return -EINVAL; switch (params->family) { #if IS_ENABLED(CONFIG_INET) case AF_INET: rc = bpf_ipv4_fib_lookup(net, params, flags, false); break; #endif #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: rc = bpf_ipv6_fib_lookup(net, params, flags, false); break; #endif } if (!rc) { struct net_device *dev; dev = dev_get_by_index_rcu(net, params->ifindex); if (!is_skb_forwardable(dev, skb)) rc = BPF_FIB_LKUP_RET_FRAG_NEEDED; } return rc; } static const struct bpf_func_proto bpf_skb_fib_lookup_proto = { .func = bpf_skb_fib_lookup, .gpl_only = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, }; #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) { int err; struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)hdr; if (!seg6_validate_srh(srh, len)) return -EINVAL; switch (type) { case BPF_LWT_ENCAP_SEG6_INLINE: if (skb->protocol != htons(ETH_P_IPV6)) return 
-EBADMSG; err = seg6_do_srh_inline(skb, srh); break; case BPF_LWT_ENCAP_SEG6: skb_reset_inner_headers(skb); skb->encapsulation = 1; err = seg6_do_srh_encap(skb, srh, IPPROTO_IPV6); break; default: return -EINVAL; } bpf_compute_data_pointers(skb); if (err) return err; ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); skb_set_transport_header(skb, sizeof(struct ipv6hdr)); return seg6_lookup_nexthop(skb, NULL, 0); } #endif /* CONFIG_IPV6_SEG6_BPF */ #if IS_ENABLED(CONFIG_LWTUNNEL_BPF) static int bpf_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len, bool ingress) { return bpf_lwt_push_ip_encap(skb, hdr, len, ingress); } #endif BPF_CALL_4(bpf_lwt_in_push_encap, struct sk_buff *, skb, u32, type, void *, hdr, u32, len) { switch (type) { #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) case BPF_LWT_ENCAP_SEG6: case BPF_LWT_ENCAP_SEG6_INLINE: return bpf_push_seg6_encap(skb, type, hdr, len); #endif #if IS_ENABLED(CONFIG_LWTUNNEL_BPF) case BPF_LWT_ENCAP_IP: return bpf_push_ip_encap(skb, hdr, len, true /* ingress */); #endif default: return -EINVAL; } } BPF_CALL_4(bpf_lwt_xmit_push_encap, struct sk_buff *, skb, u32, type, void *, hdr, u32, len) { switch (type) { #if IS_ENABLED(CONFIG_LWTUNNEL_BPF) case BPF_LWT_ENCAP_IP: return bpf_push_ip_encap(skb, hdr, len, false /* egress */); #endif default: return -EINVAL; } } static const struct bpf_func_proto bpf_lwt_in_push_encap_proto = { .func = bpf_lwt_in_push_encap, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_PTR_TO_MEM, .arg4_type = ARG_CONST_SIZE }; static const struct bpf_func_proto bpf_lwt_xmit_push_encap_proto = { .func = bpf_lwt_xmit_push_encap, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_PTR_TO_MEM, .arg4_type = ARG_CONST_SIZE }; #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset, const void *, from, u32, len) { struct seg6_bpf_srh_state *srh_state = this_cpu_ptr(&seg6_bpf_srh_states); struct ipv6_sr_hdr *srh = srh_state->srh; void *srh_tlvs, *srh_end, *ptr; int srhoff = 0; if (srh == NULL) return -EINVAL; srh_tlvs = (void *)((char *)srh + ((srh->first_segment + 1) << 4)); srh_end = (void *)((char *)srh + sizeof(*srh) + srh_state->hdrlen); ptr = skb->data + offset; if (ptr >= srh_tlvs && ptr + len <= srh_end) srh_state->valid = false; else if (ptr < (void *)&srh->flags || ptr + len > (void *)&srh->segments) return -EFAULT; if (unlikely(bpf_try_make_writable(skb, offset + len))) return -EFAULT; if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) return -EINVAL; srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff); memcpy(skb->data + offset, from, len); return 0; } static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = { .func = bpf_lwt_seg6_store_bytes, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_PTR_TO_MEM, .arg4_type = ARG_CONST_SIZE }; static void bpf_update_srh_state(struct sk_buff *skb) { struct seg6_bpf_srh_state *srh_state = this_cpu_ptr(&seg6_bpf_srh_states); int srhoff = 0; if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) { srh_state->srh = NULL; } else { srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff); srh_state->hdrlen = srh_state->srh->hdrlen << 3; srh_state->valid = true; } } BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb, u32, action, void *, param, u32, param_len) { struct 
seg6_bpf_srh_state *srh_state = this_cpu_ptr(&seg6_bpf_srh_states);
	int hdroff = 0;
	int err;

	switch (action) {
	case SEG6_LOCAL_ACTION_END_X:
		if (!seg6_bpf_has_valid_srh(skb))
			return -EBADMSG;
		if (param_len != sizeof(struct in6_addr))
			return -EINVAL;
		return seg6_lookup_nexthop(skb, (struct in6_addr *)param, 0);
	case SEG6_LOCAL_ACTION_END_T:
		if (!seg6_bpf_has_valid_srh(skb))
			return -EBADMSG;
		if (param_len != sizeof(int))
			return -EINVAL;
		return seg6_lookup_nexthop(skb, NULL, *(int *)param);
	case SEG6_LOCAL_ACTION_END_DT6:
		if (!seg6_bpf_has_valid_srh(skb))
			return -EBADMSG;
		if (param_len != sizeof(int))
			return -EINVAL;

		if (ipv6_find_hdr(skb, &hdroff, IPPROTO_IPV6, NULL, NULL) < 0)
			return -EBADMSG;
		if (!pskb_pull(skb, hdroff))
			return -EBADMSG;
		skb_postpull_rcsum(skb, skb_network_header(skb), hdroff);
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		skb->encapsulation = 0;

		bpf_compute_data_pointers(skb);
		bpf_update_srh_state(skb);
		return seg6_lookup_nexthop(skb, NULL, *(int *)param);
	case SEG6_LOCAL_ACTION_END_B6:
		if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
			return -EBADMSG;
		err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6_INLINE,
					  param, param_len);
		if (!err)
			bpf_update_srh_state(skb);
		return err;
	case SEG6_LOCAL_ACTION_END_B6_ENCAP:
		if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
			return -EBADMSG;
		err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6,
					  param, param_len);
		if (!err)
			bpf_update_srh_state(skb);
		return err;
	default:
		return -EINVAL;
	}
}

static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
	.func		= bpf_lwt_seg6_action,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_MEM,
	.arg4_type	= ARG_CONST_SIZE
};

BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
	   s32, len)
{
	struct seg6_bpf_srh_state *srh_state =
		this_cpu_ptr(&seg6_bpf_srh_states);
	struct ipv6_sr_hdr *srh = srh_state->srh;
	void *srh_end, *srh_tlvs, *ptr;
	struct ipv6hdr *hdr;
	int srhoff = 0;
	int ret;

	if (unlikely(srh == NULL))
		return -EINVAL;

	srh_tlvs = (void *)((unsigned char *)srh + sizeof(*srh) +
			((srh->first_segment + 1) << 4));
	srh_end = (void *)((unsigned char *)srh + sizeof(*srh) +
			srh_state->hdrlen);
	ptr = skb->data + offset;

	if (unlikely(ptr < srh_tlvs || ptr > srh_end))
		return -EFAULT;
	if (unlikely(len < 0 && (void *)((char *)ptr - len) > srh_end))
		return -EFAULT;

	if (len > 0) {
		ret = skb_cow_head(skb, len);
		if (unlikely(ret < 0))
			return ret;

		ret = bpf_skb_net_hdr_push(skb, offset, len);
	} else {
		ret = bpf_skb_net_hdr_pop(skb, offset, -1 * len);
	}

	bpf_compute_data_pointers(skb);
	if (unlikely(ret < 0))
		return ret;

	hdr = (struct ipv6hdr *)skb->data;
	hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));

	if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
		return -EINVAL;
	srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
	srh_state->hdrlen += len;
	srh_state->valid = false;
	return 0;
}

static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
	.func		= bpf_lwt_seg6_adjust_srh,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};
#endif /* CONFIG_IPV6_SEG6_BPF */

#define CONVERT_COMMON_TCP_SOCK_FIELDS(md_type, CONVERT)		\
do {									\
	switch (si->off) {						\
	case offsetof(md_type, snd_cwnd):				\
		CONVERT(snd_cwnd); break;				\
	case offsetof(md_type, srtt_us):				\
		CONVERT(srtt_us); break;				\
	case offsetof(md_type, snd_ssthresh):				\
		CONVERT(snd_ssthresh); break;				\
	case offsetof(md_type, rcv_nxt):				\
		CONVERT(rcv_nxt); break;				\
	case offsetof(md_type, snd_nxt):				\
		CONVERT(snd_nxt); break;				\
	case offsetof(md_type, snd_una):				\
		CONVERT(snd_una); break;				\
	case offsetof(md_type, mss_cache):				\
		CONVERT(mss_cache); break;				\
	case offsetof(md_type, ecn_flags):				\
		CONVERT(ecn_flags); break;				\
	case offsetof(md_type, rate_delivered):				\
		CONVERT(rate_delivered); break;				\
	case offsetof(md_type, rate_interval_us):			\
		CONVERT(rate_interval_us); break;			\
	case offsetof(md_type, packets_out):				\
		CONVERT(packets_out); break;				\
	case offsetof(md_type, retrans_out):				\
		CONVERT(retrans_out); break;				\
	case offsetof(md_type, total_retrans):				\
		CONVERT(total_retrans); break;				\
	case offsetof(md_type, segs_in):				\
		CONVERT(segs_in); break;				\
	case offsetof(md_type, data_segs_in):				\
		CONVERT(data_segs_in); break;				\
	case offsetof(md_type, segs_out):				\
		CONVERT(segs_out); break;				\
	case offsetof(md_type, data_segs_out):				\
		CONVERT(data_segs_out); break;				\
	case offsetof(md_type, lost_out):				\
		CONVERT(lost_out); break;				\
	case offsetof(md_type, sacked_out):				\
		CONVERT(sacked_out); break;				\
	case offsetof(md_type, bytes_received):				\
		CONVERT(bytes_received); break;				\
	case offsetof(md_type, bytes_acked):				\
		CONVERT(bytes_acked); break;				\
	}								\
} while (0)

#ifdef CONFIG_INET
static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
			      int dif, int sdif, u8 family, u8 proto)
{
	bool refcounted = false;
	struct sock *sk = NULL;

	if (family == AF_INET) {
		__be32 src4 = tuple->ipv4.saddr;
		__be32 dst4 = tuple->ipv4.daddr;

		if (proto == IPPROTO_TCP)
			sk = __inet_lookup(net, &tcp_hashinfo, NULL, 0,
					   src4, tuple->ipv4.sport,
					   dst4, tuple->ipv4.dport,
					   dif, sdif, &refcounted);
		else
			sk = __udp4_lib_lookup(net, src4, tuple->ipv4.sport,
					       dst4, tuple->ipv4.dport,
					       dif, sdif, &udp_table, NULL);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr;
		struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr;

		if (proto == IPPROTO_TCP)
			sk = __inet6_lookup(net, &tcp_hashinfo, NULL, 0,
					    src6, tuple->ipv6.sport,
					    dst6, ntohs(tuple->ipv6.dport),
					    dif, sdif, &refcounted);
		else if (likely(ipv6_bpf_stub))
			sk = ipv6_bpf_stub->udp6_lib_lookup(net,
							    src6, tuple->ipv6.sport,
							    dst6, tuple->ipv6.dport,
							    dif, sdif,
							    &udp_table, NULL);
#endif
	}

	if (unlikely(sk && !refcounted && !sock_flag(sk, SOCK_RCU_FREE))) {
		WARN_ONCE(1, "Found non-RCU, unreferenced socket!");
		sk = NULL;
	}
	return sk;
}

/* bpf_skc_lookup performs the core lookup for different types of sockets,
 * taking a reference on the socket if it doesn't have the flag SOCK_RCU_FREE.
 * Returns the socket as an 'unsigned long' to simplify the casting in the
 * callers to satisfy BPF_CALL declarations.
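 * Note: __bpf_skc_lookup() below derives the address family from the
 * tuple length (sizeof(tuple->ipv4) vs sizeof(tuple->ipv6)) and treats
 * a negative netns_id as 'use the caller's netns'.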
*/ static struct sock * __bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id, u64 flags) { struct sock *sk = NULL; u8 family = AF_UNSPEC; struct net *net; int sdif; if (len == sizeof(tuple->ipv4)) family = AF_INET; else if (len == sizeof(tuple->ipv6)) family = AF_INET6; else return NULL; if (unlikely(family == AF_UNSPEC || flags || !((s32)netns_id < 0 || netns_id <= S32_MAX))) goto out; if (family == AF_INET) sdif = inet_sdif(skb); else sdif = inet6_sdif(skb); if ((s32)netns_id < 0) { net = caller_net; sk = sk_lookup(net, tuple, ifindex, sdif, family, proto); } else { net = get_net_ns_by_id(caller_net, netns_id); if (unlikely(!net)) goto out; sk = sk_lookup(net, tuple, ifindex, sdif, family, proto); put_net(net); } out: return sk; } static struct sock * __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id, u64 flags) { struct sock *sk = __bpf_skc_lookup(skb, tuple, len, caller_net, ifindex, proto, netns_id, flags); if (sk) { sk = sk_to_full_sk(sk); if (!sk_fullsock(sk)) { if (!sock_flag(sk, SOCK_RCU_FREE)) sock_gen_put(sk); return NULL; } } return sk; } static struct sock * bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, u8 proto, u64 netns_id, u64 flags) { struct net *caller_net; int ifindex; if (skb->dev) { caller_net = dev_net(skb->dev); ifindex = skb->dev->ifindex; } else { caller_net = sock_net(skb->sk); ifindex = 0; } return __bpf_skc_lookup(skb, tuple, len, caller_net, ifindex, proto, netns_id, flags); } static struct sock * bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, u8 proto, u64 netns_id, u64 flags) { struct sock *sk = bpf_skc_lookup(skb, tuple, len, proto, netns_id, flags); if (sk) { sk = sk_to_full_sk(sk); if (!sk_fullsock(sk)) { if (!sock_flag(sk, SOCK_RCU_FREE)) sock_gen_put(sk); return NULL; } } return sk; } BPF_CALL_5(bpf_skc_lookup_tcp, struct sk_buff *, skb, struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) { return (unsigned long)bpf_skc_lookup(skb, tuple, len, IPPROTO_TCP, netns_id, flags); } static const struct bpf_func_proto bpf_skc_lookup_tcp_proto = { .func = bpf_skc_lookup_tcp, .gpl_only = false, .pkt_access = true, .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb, struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) { return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP, netns_id, flags); } static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = { .func = bpf_sk_lookup_tcp, .gpl_only = false, .pkt_access = true, .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; BPF_CALL_5(bpf_sk_lookup_udp, struct sk_buff *, skb, struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) { return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP, netns_id, flags); } static const struct bpf_func_proto bpf_sk_lookup_udp_proto = { .func = bpf_sk_lookup_udp, .gpl_only = false, .pkt_access = true, .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; 
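/* Sockets returned by the sk/skc lookup helpers above hold a reference
 * unless they are SOCK_RCU_FREE; the verifier requires the program to
 * drop that reference again via bpf_sk_release(). A minimal sketch of
 * the expected usage from a BPF program (tuple setup elided, -1 being
 * BPF_F_CURRENT_NETNS):
 *
 *	struct bpf_sock *sk;
 *
 *	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4), -1, 0);
 *	if (sk)
 *		bpf_sk_release(sk);
 */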
BPF_CALL_1(bpf_sk_release, struct sock *, sk) { if (!sock_flag(sk, SOCK_RCU_FREE)) sock_gen_put(sk); return 0; } static const struct bpf_func_proto bpf_sk_release_proto = { .func = bpf_sk_release, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_SOCK_COMMON, }; BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx, struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags) { struct net *caller_net = dev_net(ctx->rxq->dev); int ifindex = ctx->rxq->dev->ifindex; return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net, ifindex, IPPROTO_UDP, netns_id, flags); } static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = { .func = bpf_xdp_sk_lookup_udp, .gpl_only = false, .pkt_access = true, .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; BPF_CALL_5(bpf_xdp_skc_lookup_tcp, struct xdp_buff *, ctx, struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags) { struct net *caller_net = dev_net(ctx->rxq->dev); int ifindex = ctx->rxq->dev->ifindex; return (unsigned long)__bpf_skc_lookup(NULL, tuple, len, caller_net, ifindex, IPPROTO_TCP, netns_id, flags); } static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = { .func = bpf_xdp_skc_lookup_tcp, .gpl_only = false, .pkt_access = true, .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; BPF_CALL_5(bpf_xdp_sk_lookup_tcp, struct xdp_buff *, ctx, struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags) { struct net *caller_net = dev_net(ctx->rxq->dev); int ifindex = ctx->rxq->dev->ifindex; return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net, ifindex, IPPROTO_TCP, netns_id, flags); } static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = { .func = bpf_xdp_sk_lookup_tcp, .gpl_only = false, .pkt_access = true, .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; BPF_CALL_5(bpf_sock_addr_skc_lookup_tcp, struct bpf_sock_addr_kern *, ctx, struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) { return (unsigned long)__bpf_skc_lookup(NULL, tuple, len, sock_net(ctx->sk), 0, IPPROTO_TCP, netns_id, flags); } static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = { .func = bpf_sock_addr_skc_lookup_tcp, .gpl_only = false, .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; BPF_CALL_5(bpf_sock_addr_sk_lookup_tcp, struct bpf_sock_addr_kern *, ctx, struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) { return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, sock_net(ctx->sk), 0, IPPROTO_TCP, netns_id, flags); } static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = { .func = bpf_sock_addr_sk_lookup_tcp, .gpl_only = false, .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; BPF_CALL_5(bpf_sock_addr_sk_lookup_udp, struct bpf_sock_addr_kern *, ctx, struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) { return (unsigned 
long)__bpf_sk_lookup(NULL, tuple, len, sock_net(ctx->sk), 0,
			     IPPROTO_UDP, netns_id, flags);
}

static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = {
	.func		= bpf_sock_addr_sk_lookup_udp,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_SOCKET_OR_NULL,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= offsetofend(struct bpf_tcp_sock, bytes_acked))
		return false;

	if (off % size != 0)
		return false;

	switch (off) {
	case offsetof(struct bpf_tcp_sock, bytes_received):
	case offsetof(struct bpf_tcp_sock, bytes_acked):
		return size == sizeof(__u64);
	default:
		return size == sizeof(__u32);
	}
}

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

#define BPF_TCP_SOCK_GET_COMMON(FIELD)					\
	do {								\
		BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, FIELD) >	\
			     FIELD_SIZEOF(struct bpf_tcp_sock, FIELD));	\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_sock, FIELD),\
				      si->dst_reg, si->src_reg,		\
				      offsetof(struct tcp_sock, FIELD)); \
	} while (0)

	CONVERT_COMMON_TCP_SOCK_FIELDS(struct bpf_tcp_sock,
				       BPF_TCP_SOCK_GET_COMMON);

	if (insn > insn_buf)
		return insn - insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_tcp_sock, rtt_min):
		BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) !=
			     sizeof(struct minmax));
		BUILD_BUG_ON(sizeof(struct minmax) <
			     sizeof(struct minmax_sample));

		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      offsetof(struct tcp_sock, rtt_min) +
				      offsetof(struct minmax_sample, v));
		break;
	}

	return insn - insn_buf;
}

BPF_CALL_1(bpf_tcp_sock, struct sock *, sk)
{
	if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP)
		return (unsigned long)sk;

	return (unsigned long)NULL;
}

static const struct bpf_func_proto bpf_tcp_sock_proto = {
	.func		= bpf_tcp_sock,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_TCP_SOCK_OR_NULL,
	.arg1_type	= ARG_PTR_TO_SOCK_COMMON,
};

BPF_CALL_1(bpf_get_listener_sock, struct sock *, sk)
{
	sk = sk_to_full_sk(sk);

	if (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_RCU_FREE))
		return (unsigned long)sk;

	return (unsigned long)NULL;
}

static const struct bpf_func_proto bpf_get_listener_sock_proto = {
	.func		= bpf_get_listener_sock,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_SOCKET_OR_NULL,
	.arg1_type	= ARG_PTR_TO_SOCK_COMMON,
};

BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb)
{
	unsigned int iphdr_len;

	if (skb->protocol == cpu_to_be16(ETH_P_IP))
		iphdr_len = sizeof(struct iphdr);
	else if (skb->protocol == cpu_to_be16(ETH_P_IPV6))
		iphdr_len = sizeof(struct ipv6hdr);
	else
		return 0;

	if (skb_headlen(skb) < iphdr_len)
		return 0;

	if (skb_cloned(skb) && !skb_clone_writable(skb, iphdr_len))
		return 0;

	return INET_ECN_set_ce(skb);
}

static const struct bpf_func_proto bpf_skb_ecn_set_ce_proto = {
	.func		= bpf_skb_ecn_set_ce,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph,
	   u32, iph_len, struct tcphdr *, th, u32, th_len)
{
#ifdef CONFIG_SYN_COOKIES
	u32 cookie;
	int ret;

	if (unlikely(th_len < sizeof(*th)))
		return -EINVAL;

	/* sk_listener() allows TCP_NEW_SYN_RECV, which makes no sense here.
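	 * so the state is checked against TCP_LISTEN explicitly below.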
*/ if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN) return -EINVAL; if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies) return -EINVAL; if (!th->ack || th->rst || th->syn) return -ENOENT; if (tcp_synq_no_recent_overflow(sk)) return -ENOENT; cookie = ntohl(th->ack_seq) - 1; switch (sk->sk_family) { case AF_INET: if (unlikely(iph_len < sizeof(struct iphdr))) return -EINVAL; ret = __cookie_v4_check((struct iphdr *)iph, th, cookie); break; #if IS_BUILTIN(CONFIG_IPV6) case AF_INET6: if (unlikely(iph_len < sizeof(struct ipv6hdr))) return -EINVAL; ret = __cookie_v6_check((struct ipv6hdr *)iph, th, cookie); break; #endif /* CONFIG_IPV6 */ default: return -EPROTONOSUPPORT; } if (ret > 0) return 0; return -ENOENT; #else return -ENOTSUPP; #endif } static const struct bpf_func_proto bpf_tcp_check_syncookie_proto = { .func = bpf_tcp_check_syncookie, .gpl_only = true, .pkt_access = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_SOCK_COMMON, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_PTR_TO_MEM, .arg5_type = ARG_CONST_SIZE, }; #endif /* CONFIG_INET */ bool bpf_helper_changes_pkt_data(void *func) { if (func == bpf_skb_vlan_push || func == bpf_skb_vlan_pop || func == bpf_skb_store_bytes || func == bpf_skb_change_proto || func == bpf_skb_change_head || func == sk_skb_change_head || func == bpf_skb_change_tail || func == sk_skb_change_tail || func == bpf_skb_adjust_room || func == bpf_skb_pull_data || func == sk_skb_pull_data || func == bpf_clone_redirect || func == bpf_l3_csum_replace || func == bpf_l4_csum_replace || func == bpf_xdp_adjust_head || func == bpf_xdp_adjust_meta || func == bpf_msg_pull_data || func == bpf_msg_push_data || func == bpf_msg_pop_data || func == bpf_xdp_adjust_tail || #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) func == bpf_lwt_seg6_store_bytes || func == bpf_lwt_seg6_adjust_srh || func == bpf_lwt_seg6_action || #endif func == bpf_lwt_in_push_encap || func == bpf_lwt_xmit_push_encap) return true; return false; } static const struct bpf_func_proto * bpf_base_func_proto(enum bpf_func_id func_id) { switch (func_id) { case BPF_FUNC_map_lookup_elem: return &bpf_map_lookup_elem_proto; case BPF_FUNC_map_update_elem: return &bpf_map_update_elem_proto; case BPF_FUNC_map_delete_elem: return &bpf_map_delete_elem_proto; case BPF_FUNC_map_push_elem: return &bpf_map_push_elem_proto; case BPF_FUNC_map_pop_elem: return &bpf_map_pop_elem_proto; case BPF_FUNC_map_peek_elem: return &bpf_map_peek_elem_proto; case BPF_FUNC_get_prandom_u32: return &bpf_get_prandom_u32_proto; case BPF_FUNC_get_smp_processor_id: return &bpf_get_raw_smp_processor_id_proto; case BPF_FUNC_get_numa_node_id: return &bpf_get_numa_node_id_proto; case BPF_FUNC_tail_call: return &bpf_tail_call_proto; case BPF_FUNC_ktime_get_ns: return &bpf_ktime_get_ns_proto; default: break; } if (!capable(CAP_SYS_ADMIN)) return NULL; switch (func_id) { case BPF_FUNC_spin_lock: return &bpf_spin_lock_proto; case BPF_FUNC_spin_unlock: return &bpf_spin_unlock_proto; case BPF_FUNC_trace_printk: return bpf_get_trace_printk_proto(); default: return NULL; } } static const struct bpf_func_proto * sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { /* inet and inet6 sockets are created in a process * context so there is always a valid uid/gid */ case BPF_FUNC_get_current_uid_gid: return &bpf_get_current_uid_gid_proto; case BPF_FUNC_get_local_storage: return &bpf_get_local_storage_proto; default: return bpf_base_func_proto(func_id); } } static const struct 
bpf_func_proto * sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { /* inet and inet6 sockets are created in a process * context so there is always a valid uid/gid */ case BPF_FUNC_get_current_uid_gid: return &bpf_get_current_uid_gid_proto; case BPF_FUNC_bind: switch (prog->expected_attach_type) { case BPF_CGROUP_INET4_CONNECT: case BPF_CGROUP_INET6_CONNECT: return &bpf_bind_proto; default: return NULL; } case BPF_FUNC_get_socket_cookie: return &bpf_get_socket_cookie_sock_addr_proto; case BPF_FUNC_get_local_storage: return &bpf_get_local_storage_proto; #ifdef CONFIG_INET case BPF_FUNC_sk_lookup_tcp: return &bpf_sock_addr_sk_lookup_tcp_proto; case BPF_FUNC_sk_lookup_udp: return &bpf_sock_addr_sk_lookup_udp_proto; case BPF_FUNC_sk_release: return &bpf_sk_release_proto; case BPF_FUNC_skc_lookup_tcp: return &bpf_sock_addr_skc_lookup_tcp_proto; #endif /* CONFIG_INET */ default: return bpf_base_func_proto(func_id); } } static const struct bpf_func_proto * sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case BPF_FUNC_skb_load_bytes: return &bpf_skb_load_bytes_proto; case BPF_FUNC_skb_load_bytes_relative: return &bpf_skb_load_bytes_relative_proto; case BPF_FUNC_get_socket_cookie: return &bpf_get_socket_cookie_proto; case BPF_FUNC_get_socket_uid: return &bpf_get_socket_uid_proto; default: return bpf_base_func_proto(func_id); } } const struct bpf_func_proto bpf_sk_storage_get_proto __weak; const struct bpf_func_proto bpf_sk_storage_delete_proto __weak; static const struct bpf_func_proto * cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case BPF_FUNC_get_local_storage: return &bpf_get_local_storage_proto; case BPF_FUNC_sk_fullsock: return &bpf_sk_fullsock_proto; case BPF_FUNC_sk_storage_get: return &bpf_sk_storage_get_proto; case BPF_FUNC_sk_storage_delete: return &bpf_sk_storage_delete_proto; #ifdef CONFIG_INET case BPF_FUNC_tcp_sock: return &bpf_tcp_sock_proto; case BPF_FUNC_get_listener_sock: return &bpf_get_listener_sock_proto; case BPF_FUNC_skb_ecn_set_ce: return &bpf_skb_ecn_set_ce_proto; #endif default: return sk_filter_func_proto(func_id, prog); } } static const struct bpf_func_proto * tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case BPF_FUNC_skb_store_bytes: return &bpf_skb_store_bytes_proto; case BPF_FUNC_skb_load_bytes: return &bpf_skb_load_bytes_proto; case BPF_FUNC_skb_load_bytes_relative: return &bpf_skb_load_bytes_relative_proto; case BPF_FUNC_skb_pull_data: return &bpf_skb_pull_data_proto; case BPF_FUNC_csum_diff: return &bpf_csum_diff_proto; case BPF_FUNC_csum_update: return &bpf_csum_update_proto; case BPF_FUNC_l3_csum_replace: return &bpf_l3_csum_replace_proto; case BPF_FUNC_l4_csum_replace: return &bpf_l4_csum_replace_proto; case BPF_FUNC_clone_redirect: return &bpf_clone_redirect_proto; case BPF_FUNC_get_cgroup_classid: return &bpf_get_cgroup_classid_proto; case BPF_FUNC_skb_vlan_push: return &bpf_skb_vlan_push_proto; case BPF_FUNC_skb_vlan_pop: return &bpf_skb_vlan_pop_proto; case BPF_FUNC_skb_change_proto: return &bpf_skb_change_proto_proto; case BPF_FUNC_skb_change_type: return &bpf_skb_change_type_proto; case BPF_FUNC_skb_adjust_room: return &bpf_skb_adjust_room_proto; case BPF_FUNC_skb_change_tail: return &bpf_skb_change_tail_proto; case BPF_FUNC_skb_get_tunnel_key: return &bpf_skb_get_tunnel_key_proto; case BPF_FUNC_skb_set_tunnel_key: return bpf_get_skb_set_tunnel_proto(func_id); 
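	/* Both set_tunnel helpers are resolved through
	 * bpf_get_skb_set_tunnel_proto(), which lazily allocates the
	 * per-CPU metadata dst they write to on first use.
	 */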
case BPF_FUNC_skb_get_tunnel_opt: return &bpf_skb_get_tunnel_opt_proto; case BPF_FUNC_skb_set_tunnel_opt: return bpf_get_skb_set_tunnel_proto(func_id); case BPF_FUNC_redirect: return &bpf_redirect_proto; case BPF_FUNC_get_route_realm: return &bpf_get_route_realm_proto; case BPF_FUNC_get_hash_recalc: return &bpf_get_hash_recalc_proto; case BPF_FUNC_set_hash_invalid: return &bpf_set_hash_invalid_proto; case BPF_FUNC_set_hash: return &bpf_set_hash_proto; case BPF_FUNC_perf_event_output: return &bpf_skb_event_output_proto; case BPF_FUNC_get_smp_processor_id: return &bpf_get_smp_processor_id_proto; case BPF_FUNC_skb_under_cgroup: return &bpf_skb_under_cgroup_proto; case BPF_FUNC_get_socket_cookie: return &bpf_get_socket_cookie_proto; case BPF_FUNC_get_socket_uid: return &bpf_get_socket_uid_proto; case BPF_FUNC_fib_lookup: return &bpf_skb_fib_lookup_proto; case BPF_FUNC_sk_fullsock: return &bpf_sk_fullsock_proto; case BPF_FUNC_sk_storage_get: return &bpf_sk_storage_get_proto; case BPF_FUNC_sk_storage_delete: return &bpf_sk_storage_delete_proto; #ifdef CONFIG_XFRM case BPF_FUNC_skb_get_xfrm_state: return &bpf_skb_get_xfrm_state_proto; #endif #ifdef CONFIG_SOCK_CGROUP_DATA case BPF_FUNC_skb_cgroup_id: return &bpf_skb_cgroup_id_proto; case BPF_FUNC_skb_ancestor_cgroup_id: return &bpf_skb_ancestor_cgroup_id_proto; #endif #ifdef CONFIG_INET case BPF_FUNC_sk_lookup_tcp: return &bpf_sk_lookup_tcp_proto; case BPF_FUNC_sk_lookup_udp: return &bpf_sk_lookup_udp_proto; case BPF_FUNC_sk_release: return &bpf_sk_release_proto; case BPF_FUNC_tcp_sock: return &bpf_tcp_sock_proto; case BPF_FUNC_get_listener_sock: return &bpf_get_listener_sock_proto; case BPF_FUNC_skc_lookup_tcp: return &bpf_skc_lookup_tcp_proto; case BPF_FUNC_tcp_check_syncookie: return &bpf_tcp_check_syncookie_proto; case BPF_FUNC_skb_ecn_set_ce: return &bpf_skb_ecn_set_ce_proto; #endif default: return bpf_base_func_proto(func_id); } } static const struct bpf_func_proto * xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case BPF_FUNC_perf_event_output: return &bpf_xdp_event_output_proto; case BPF_FUNC_get_smp_processor_id: return &bpf_get_smp_processor_id_proto; case BPF_FUNC_csum_diff: return &bpf_csum_diff_proto; case BPF_FUNC_xdp_adjust_head: return &bpf_xdp_adjust_head_proto; case BPF_FUNC_xdp_adjust_meta: return &bpf_xdp_adjust_meta_proto; case BPF_FUNC_redirect: return &bpf_xdp_redirect_proto; case BPF_FUNC_redirect_map: return &bpf_xdp_redirect_map_proto; case BPF_FUNC_xdp_adjust_tail: return &bpf_xdp_adjust_tail_proto; case BPF_FUNC_fib_lookup: return &bpf_xdp_fib_lookup_proto; #ifdef CONFIG_INET case BPF_FUNC_sk_lookup_udp: return &bpf_xdp_sk_lookup_udp_proto; case BPF_FUNC_sk_lookup_tcp: return &bpf_xdp_sk_lookup_tcp_proto; case BPF_FUNC_sk_release: return &bpf_sk_release_proto; case BPF_FUNC_skc_lookup_tcp: return &bpf_xdp_skc_lookup_tcp_proto; case BPF_FUNC_tcp_check_syncookie: return &bpf_tcp_check_syncookie_proto; #endif default: return bpf_base_func_proto(func_id); } } const struct bpf_func_proto bpf_sock_map_update_proto __weak; const struct bpf_func_proto bpf_sock_hash_update_proto __weak; static const struct bpf_func_proto * sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case BPF_FUNC_setsockopt: return &bpf_setsockopt_proto; case BPF_FUNC_getsockopt: return &bpf_getsockopt_proto; case BPF_FUNC_sock_ops_cb_flags_set: return &bpf_sock_ops_cb_flags_set_proto; case BPF_FUNC_sock_map_update: return &bpf_sock_map_update_proto; case 
BPF_FUNC_sock_hash_update: return &bpf_sock_hash_update_proto; case BPF_FUNC_get_socket_cookie: return &bpf_get_socket_cookie_sock_ops_proto; case BPF_FUNC_get_local_storage: return &bpf_get_local_storage_proto; case BPF_FUNC_perf_event_output: return &bpf_sockopt_event_output_proto; default: return bpf_base_func_proto(func_id); } } const struct bpf_func_proto bpf_msg_redirect_map_proto __weak; const struct bpf_func_proto bpf_msg_redirect_hash_proto __weak; static const struct bpf_func_proto * sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case BPF_FUNC_msg_redirect_map: return &bpf_msg_redirect_map_proto; case BPF_FUNC_msg_redirect_hash: return &bpf_msg_redirect_hash_proto; case BPF_FUNC_msg_apply_bytes: return &bpf_msg_apply_bytes_proto; case BPF_FUNC_msg_cork_bytes: return &bpf_msg_cork_bytes_proto; case BPF_FUNC_msg_pull_data: return &bpf_msg_pull_data_proto; case BPF_FUNC_msg_push_data: return &bpf_msg_push_data_proto; case BPF_FUNC_msg_pop_data: return &bpf_msg_pop_data_proto; default: return bpf_base_func_proto(func_id); } } const struct bpf_func_proto bpf_sk_redirect_map_proto __weak; const struct bpf_func_proto bpf_sk_redirect_hash_proto __weak; static const struct bpf_func_proto * sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case BPF_FUNC_skb_store_bytes: return &bpf_skb_store_bytes_proto; case BPF_FUNC_skb_load_bytes: return &bpf_skb_load_bytes_proto; case BPF_FUNC_skb_pull_data: return &sk_skb_pull_data_proto; case BPF_FUNC_skb_change_tail: return &sk_skb_change_tail_proto; case BPF_FUNC_skb_change_head: return &sk_skb_change_head_proto; case BPF_FUNC_get_socket_cookie: return &bpf_get_socket_cookie_proto; case BPF_FUNC_get_socket_uid: return &bpf_get_socket_uid_proto; case BPF_FUNC_sk_redirect_map: return &bpf_sk_redirect_map_proto; case BPF_FUNC_sk_redirect_hash: return &bpf_sk_redirect_hash_proto; #ifdef CONFIG_INET case BPF_FUNC_sk_lookup_tcp: return &bpf_sk_lookup_tcp_proto; case BPF_FUNC_sk_lookup_udp: return &bpf_sk_lookup_udp_proto; case BPF_FUNC_sk_release: return &bpf_sk_release_proto; case BPF_FUNC_skc_lookup_tcp: return &bpf_skc_lookup_tcp_proto; #endif default: return bpf_base_func_proto(func_id); } } static const struct bpf_func_proto * flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case BPF_FUNC_skb_load_bytes: return &bpf_flow_dissector_load_bytes_proto; default: return bpf_base_func_proto(func_id); } } static const struct bpf_func_proto * lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case BPF_FUNC_skb_load_bytes: return &bpf_skb_load_bytes_proto; case BPF_FUNC_skb_pull_data: return &bpf_skb_pull_data_proto; case BPF_FUNC_csum_diff: return &bpf_csum_diff_proto; case BPF_FUNC_get_cgroup_classid: return &bpf_get_cgroup_classid_proto; case BPF_FUNC_get_route_realm: return &bpf_get_route_realm_proto; case BPF_FUNC_get_hash_recalc: return &bpf_get_hash_recalc_proto; case BPF_FUNC_perf_event_output: return &bpf_skb_event_output_proto; case BPF_FUNC_get_smp_processor_id: return &bpf_get_smp_processor_id_proto; case BPF_FUNC_skb_under_cgroup: return &bpf_skb_under_cgroup_proto; default: return bpf_base_func_proto(func_id); } } static const struct bpf_func_proto * lwt_in_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case BPF_FUNC_lwt_push_encap: return &bpf_lwt_in_push_encap_proto; default: return lwt_out_func_proto(func_id, prog); } 
} static const struct bpf_func_proto * lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case BPF_FUNC_skb_get_tunnel_key: return &bpf_skb_get_tunnel_key_proto; case BPF_FUNC_skb_set_tunnel_key: return bpf_get_skb_set_tunnel_proto(func_id); case BPF_FUNC_skb_get_tunnel_opt: return &bpf_skb_get_tunnel_opt_proto; case BPF_FUNC_skb_set_tunnel_opt: return bpf_get_skb_set_tunnel_proto(func_id); case BPF_FUNC_redirect: return &bpf_redirect_proto; case BPF_FUNC_clone_redirect: return &bpf_clone_redirect_proto; case BPF_FUNC_skb_change_tail: return &bpf_skb_change_tail_proto; case BPF_FUNC_skb_change_head: return &bpf_skb_change_head_proto; case BPF_FUNC_skb_store_bytes: return &bpf_skb_store_bytes_proto; case BPF_FUNC_csum_update: return &bpf_csum_update_proto; case BPF_FUNC_l3_csum_replace: return &bpf_l3_csum_replace_proto; case BPF_FUNC_l4_csum_replace: return &bpf_l4_csum_replace_proto; case BPF_FUNC_set_hash_invalid: return &bpf_set_hash_invalid_proto; case BPF_FUNC_lwt_push_encap: return &bpf_lwt_xmit_push_encap_proto; default: return lwt_out_func_proto(func_id, prog); } } static const struct bpf_func_proto * lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) case BPF_FUNC_lwt_seg6_store_bytes: return &bpf_lwt_seg6_store_bytes_proto; case BPF_FUNC_lwt_seg6_action: return &bpf_lwt_seg6_action_proto; case BPF_FUNC_lwt_seg6_adjust_srh: return &bpf_lwt_seg6_adjust_srh_proto; #endif default: return lwt_out_func_proto(func_id, prog); } } static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { const int size_default = sizeof(__u32); if (off < 0 || off >= sizeof(struct __sk_buff)) return false; /* The verifier guarantees that size > 0. */ if (off % size != 0) return false; switch (off) { case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): if (off + size > offsetofend(struct __sk_buff, cb[4])) return false; break; case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]): case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]): case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4): case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4): case bpf_ctx_range(struct __sk_buff, data): case bpf_ctx_range(struct __sk_buff, data_meta): case bpf_ctx_range(struct __sk_buff, data_end): if (size != size_default) return false; break; case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): return false; case bpf_ctx_range(struct __sk_buff, tstamp): if (size != sizeof(__u64)) return false; break; case offsetof(struct __sk_buff, sk): if (type == BPF_WRITE || size != sizeof(__u64)) return false; info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL; break; default: /* Only narrow read access allowed for now. 
*/ if (type == BPF_WRITE) { if (size != size_default) return false; } else { bpf_ctx_record_field_size(info, size_default); if (!bpf_ctx_narrow_access_ok(off, size, size_default)) return false; } } return true; } static bool sk_filter_is_valid_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { switch (off) { case bpf_ctx_range(struct __sk_buff, tc_classid): case bpf_ctx_range(struct __sk_buff, data): case bpf_ctx_range(struct __sk_buff, data_meta): case bpf_ctx_range(struct __sk_buff, data_end): case bpf_ctx_range_till(struct __sk_buff, family, local_port): case bpf_ctx_range(struct __sk_buff, tstamp): case bpf_ctx_range(struct __sk_buff, wire_len): return false; } if (type == BPF_WRITE) { switch (off) { case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): break; default: return false; } } return bpf_skb_is_valid_access(off, size, type, prog, info); } static bool cg_skb_is_valid_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { switch (off) { case bpf_ctx_range(struct __sk_buff, tc_classid): case bpf_ctx_range(struct __sk_buff, data_meta): case bpf_ctx_range(struct __sk_buff, wire_len): return false; case bpf_ctx_range(struct __sk_buff, data): case bpf_ctx_range(struct __sk_buff, data_end): if (!capable(CAP_SYS_ADMIN)) return false; break; } if (type == BPF_WRITE) { switch (off) { case bpf_ctx_range(struct __sk_buff, mark): case bpf_ctx_range(struct __sk_buff, priority): case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): break; case bpf_ctx_range(struct __sk_buff, tstamp): if (!capable(CAP_SYS_ADMIN)) return false; break; default: return false; } } switch (off) { case bpf_ctx_range(struct __sk_buff, data): info->reg_type = PTR_TO_PACKET; break; case bpf_ctx_range(struct __sk_buff, data_end): info->reg_type = PTR_TO_PACKET_END; break; } return bpf_skb_is_valid_access(off, size, type, prog, info); } static bool lwt_is_valid_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { switch (off) { case bpf_ctx_range(struct __sk_buff, tc_classid): case bpf_ctx_range_till(struct __sk_buff, family, local_port): case bpf_ctx_range(struct __sk_buff, data_meta): case bpf_ctx_range(struct __sk_buff, tstamp): case bpf_ctx_range(struct __sk_buff, wire_len): return false; } if (type == BPF_WRITE) { switch (off) { case bpf_ctx_range(struct __sk_buff, mark): case bpf_ctx_range(struct __sk_buff, priority): case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): break; default: return false; } } switch (off) { case bpf_ctx_range(struct __sk_buff, data): info->reg_type = PTR_TO_PACKET; break; case bpf_ctx_range(struct __sk_buff, data_end): info->reg_type = PTR_TO_PACKET_END; break; } return bpf_skb_is_valid_access(off, size, type, prog, info); } /* Attach type specific accesses */ static bool __sock_filter_check_attach_type(int off, enum bpf_access_type access_type, enum bpf_attach_type attach_type) { switch (off) { case offsetof(struct bpf_sock, bound_dev_if): case offsetof(struct bpf_sock, mark): case offsetof(struct bpf_sock, priority): switch (attach_type) { case BPF_CGROUP_INET_SOCK_CREATE: goto full_access; default: return false; } case bpf_ctx_range(struct bpf_sock, src_ip4): switch (attach_type) { case BPF_CGROUP_INET4_POST_BIND: goto read_only; default: return false; } case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]): switch (attach_type) { case BPF_CGROUP_INET6_POST_BIND: goto 
read_only; default: return false; } case bpf_ctx_range(struct bpf_sock, src_port): switch (attach_type) { case BPF_CGROUP_INET4_POST_BIND: case BPF_CGROUP_INET6_POST_BIND: goto read_only; default: return false; } } read_only: return access_type == BPF_READ; full_access: return true; } bool bpf_sock_common_is_valid_access(int off, int size, enum bpf_access_type type, struct bpf_insn_access_aux *info) { switch (off) { case bpf_ctx_range_till(struct bpf_sock, type, priority): return false; default: return bpf_sock_is_valid_access(off, size, type, info); } } bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, struct bpf_insn_access_aux *info) { const int size_default = sizeof(__u32); if (off < 0 || off >= sizeof(struct bpf_sock)) return false; if (off % size != 0) return false; switch (off) { case offsetof(struct bpf_sock, state): case offsetof(struct bpf_sock, family): case offsetof(struct bpf_sock, type): case offsetof(struct bpf_sock, protocol): case offsetof(struct bpf_sock, dst_port): case offsetof(struct bpf_sock, src_port): case bpf_ctx_range(struct bpf_sock, src_ip4): case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]): case bpf_ctx_range(struct bpf_sock, dst_ip4): case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]): bpf_ctx_record_field_size(info, size_default); return bpf_ctx_narrow_access_ok(off, size, size_default); } return size == size_default; } static bool sock_filter_is_valid_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { if (!bpf_sock_is_valid_access(off, size, type, info)) return false; return __sock_filter_check_attach_type(off, type, prog->expected_attach_type); } static int bpf_noop_prologue(struct bpf_insn *insn_buf, bool direct_write, const struct bpf_prog *prog) { /* Neither direct read nor direct write requires any preliminary * action. */ return 0; } static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write, const struct bpf_prog *prog, int drop_verdict) { struct bpf_insn *insn = insn_buf; if (!direct_write) return 0; /* if (!skb->cloned) * goto start; * * (Fast-path, otherwise approximation that we might be * a clone, do the rest in helper.) */ *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET()); *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK); *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7); /* ret = bpf_skb_pull_data(skb, 0); */ *insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1); *insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2); *insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_pull_data); /* if (!ret) * goto restore; * return TC_ACT_SHOT; */ *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2); *insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, drop_verdict); *insn++ = BPF_EXIT_INSN(); /* restore: */ *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6); /* start: */ *insn++ = prog->insnsi[0]; return insn - insn_buf; } static int bpf_gen_ld_abs(const struct bpf_insn *orig, struct bpf_insn *insn_buf) { bool indirect = BPF_MODE(orig->code) == BPF_IND; struct bpf_insn *insn = insn_buf; /* We're guaranteed here that CTX is in R6. 
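	 * This expands a classic BPF_LD | BPF_ABS/BPF_IND load into a call
	 * to the matching bpf_skb_load_helper_{8,16,32}_no_cache() helper;
	 * a negative helper return zeroes R0 and exits the program.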
*/
	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);
	if (!indirect) {
		*insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm);
	} else {
		*insn++ = BPF_MOV64_REG(BPF_REG_2, orig->src_reg);
		if (orig->imm)
			*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm);
	}

	switch (BPF_SIZE(orig->code)) {
	case BPF_B:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8_no_cache);
		break;
	case BPF_H:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16_no_cache);
		break;
	case BPF_W:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32_no_cache);
		break;
	}

	*insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 2);
	*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_0);
	*insn++ = BPF_EXIT_INSN();

	return insn - insn_buf;
}

static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
			       const struct bpf_prog *prog)
{
	return bpf_unclone_prologue(insn_buf, direct_write, prog, TC_ACT_SHOT);
}

static bool tc_cls_act_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	if (type == BPF_WRITE) {
		switch (off) {
		case bpf_ctx_range(struct __sk_buff, mark):
		case bpf_ctx_range(struct __sk_buff, tc_index):
		case bpf_ctx_range(struct __sk_buff, priority):
		case bpf_ctx_range(struct __sk_buff, tc_classid):
		case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
		case bpf_ctx_range(struct __sk_buff, tstamp):
		case bpf_ctx_range(struct __sk_buff, queue_mapping):
			break;
		default:
			return false;
		}
	}

	switch (off) {
	case bpf_ctx_range(struct __sk_buff, data):
		info->reg_type = PTR_TO_PACKET;
		break;
	case bpf_ctx_range(struct __sk_buff, data_meta):
		info->reg_type = PTR_TO_PACKET_META;
		break;
	case bpf_ctx_range(struct __sk_buff, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		break;
	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
		return false;
	}

	return bpf_skb_is_valid_access(off, size, type, prog, info);
}

static bool __is_valid_xdp_access(int off, int size)
{
	if (off < 0 || off >= sizeof(struct xdp_md))
		return false;
	if (off % size != 0)
		return false;
	if (size != sizeof(__u32))
		return false;

	return true;
}

static bool xdp_is_valid_access(int off, int size,
				enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info)
{
	if (type == BPF_WRITE) {
		if (bpf_prog_is_dev_bound(prog->aux)) {
			switch (off) {
			case offsetof(struct xdp_md, rx_queue_index):
				return __is_valid_xdp_access(off, size);
			}
		}
		return false;
	}

	switch (off) {
	case offsetof(struct xdp_md, data):
		info->reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct xdp_md, data_meta):
		info->reg_type = PTR_TO_PACKET_META;
		break;
	case offsetof(struct xdp_md, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		break;
	}

	return __is_valid_xdp_access(off, size);
}

void bpf_warn_invalid_xdp_action(u32 act)
{
	const u32 act_max = XDP_REDIRECT;

	WARN_ONCE(1, "%s XDP return value %u, expect packet loss!\n",
		  act > act_max ? "Illegal" : "Driver unsupported",
		  act);
}
EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);

static bool sock_addr_is_valid_access(int off, int size,
				      enum bpf_access_type type,
				      const struct bpf_prog *prog,
				      struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sock_addr))
		return false;
	if (off % size != 0)
		return false;

	/* Disallow access to IPv6 fields from IPv4 context and vice
	 * versa.
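	 * e.g. a BPF_CGROUP_INET4_BIND/CONNECT/UDP4_SENDMSG program may
	 * touch user_ip4 but not user_ip6[], enforced per
	 * expected_attach_type in the switch below.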
*/ switch (off) { case bpf_ctx_range(struct bpf_sock_addr, user_ip4): switch (prog->expected_attach_type) { case BPF_CGROUP_INET4_BIND: case BPF_CGROUP_INET4_CONNECT: case BPF_CGROUP_UDP4_SENDMSG: case BPF_CGROUP_UDP4_RECVMSG: break; default: return false; } break; case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]): switch (prog->expected_attach_type) { case BPF_CGROUP_INET6_BIND: case BPF_CGROUP_INET6_CONNECT: case BPF_CGROUP_UDP6_SENDMSG: case BPF_CGROUP_UDP6_RECVMSG: break; default: return false; } break; case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4): switch (prog->expected_attach_type) { case BPF_CGROUP_UDP4_SENDMSG: break; default: return false; } break; case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0], msg_src_ip6[3]): switch (prog->expected_attach_type) { case BPF_CGROUP_UDP6_SENDMSG: break; default: return false; } break; } switch (off) { case bpf_ctx_range(struct bpf_sock_addr, user_ip4): case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]): case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4): case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0], msg_src_ip6[3]): /* Only narrow read access allowed for now. */ if (type == BPF_READ) { bpf_ctx_record_field_size(info, size_default); if (!bpf_ctx_narrow_access_ok(off, size, size_default)) return false; } else { if (size != size_default) return false; } break; case bpf_ctx_range(struct bpf_sock_addr, user_port): if (size != size_default) return false; break; default: if (type == BPF_READ) { if (size != size_default) return false; } else { return false; } } return true; } static bool sock_ops_is_valid_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { const int size_default = sizeof(__u32); if (off < 0 || off >= sizeof(struct bpf_sock_ops)) return false; /* The verifier guarantees that size > 0. 
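	 * For sock_ops, writes are restricted to the reply and sk_txhash
	 * fields; 64-bit reads are allowed only for the
	 * bytes_received..bytes_acked range, all other reads must be
	 * exactly 32 bits wide.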
*/ if (off % size != 0) return false; if (type == BPF_WRITE) { switch (off) { case offsetof(struct bpf_sock_ops, reply): case offsetof(struct bpf_sock_ops, sk_txhash): if (size != size_default) return false; break; default: return false; } } else { switch (off) { case bpf_ctx_range_till(struct bpf_sock_ops, bytes_received, bytes_acked): if (size != sizeof(__u64)) return false; break; default: if (size != size_default) return false; break; } } return true; } static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write, const struct bpf_prog *prog) { return bpf_unclone_prologue(insn_buf, direct_write, prog, SK_DROP); } static bool sk_skb_is_valid_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { switch (off) { case bpf_ctx_range(struct __sk_buff, tc_classid): case bpf_ctx_range(struct __sk_buff, data_meta): case bpf_ctx_range(struct __sk_buff, tstamp): case bpf_ctx_range(struct __sk_buff, wire_len): return false; } if (type == BPF_WRITE) { switch (off) { case bpf_ctx_range(struct __sk_buff, tc_index): case bpf_ctx_range(struct __sk_buff, priority): break; default: return false; } } switch (off) { case bpf_ctx_range(struct __sk_buff, mark): return false; case bpf_ctx_range(struct __sk_buff, data): info->reg_type = PTR_TO_PACKET; break; case bpf_ctx_range(struct __sk_buff, data_end): info->reg_type = PTR_TO_PACKET_END; break; } return bpf_skb_is_valid_access(off, size, type, prog, info); } static bool sk_msg_is_valid_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { if (type == BPF_WRITE) return false; if (off % size != 0) return false; switch (off) { case offsetof(struct sk_msg_md, data): info->reg_type = PTR_TO_PACKET; if (size != sizeof(__u64)) return false; break; case offsetof(struct sk_msg_md, data_end): info->reg_type = PTR_TO_PACKET_END; if (size != sizeof(__u64)) return false; break; case bpf_ctx_range(struct sk_msg_md, family): case bpf_ctx_range(struct sk_msg_md, remote_ip4): case bpf_ctx_range(struct sk_msg_md, local_ip4): case bpf_ctx_range_till(struct sk_msg_md, remote_ip6[0], remote_ip6[3]): case bpf_ctx_range_till(struct sk_msg_md, local_ip6[0], local_ip6[3]): case bpf_ctx_range(struct sk_msg_md, remote_port): case bpf_ctx_range(struct sk_msg_md, local_port): case bpf_ctx_range(struct sk_msg_md, size): if (size != sizeof(__u32)) return false; break; default: return false; } return true; } static bool flow_dissector_is_valid_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { const int size_default = sizeof(__u32); if (off < 0 || off >= sizeof(struct __sk_buff)) return false; if (type == BPF_WRITE) return false; switch (off) { case bpf_ctx_range(struct __sk_buff, data): if (size != size_default) return false; info->reg_type = PTR_TO_PACKET; return true; case bpf_ctx_range(struct __sk_buff, data_end): if (size != size_default) return false; info->reg_type = PTR_TO_PACKET_END; return true; case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): if (size != sizeof(__u64)) return false; info->reg_type = PTR_TO_FLOW_KEYS; return true; default: return false; } } static u32 flow_dissector_convert_ctx_access(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size) { struct bpf_insn *insn = insn_buf; switch (si->off) { case offsetof(struct __sk_buff, data): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct 
bpf_flow_dissector, data), si->dst_reg, si->src_reg, offsetof(struct bpf_flow_dissector, data)); break; case offsetof(struct __sk_buff, data_end): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data_end), si->dst_reg, si->src_reg, offsetof(struct bpf_flow_dissector, data_end)); break; case offsetof(struct __sk_buff, flow_keys): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, flow_keys), si->dst_reg, si->src_reg, offsetof(struct bpf_flow_dissector, flow_keys)); break; } return insn - insn_buf; } static u32 bpf_convert_ctx_access(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size) { struct bpf_insn *insn = insn_buf; int off; switch (si->off) { case offsetof(struct __sk_buff, len): *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, len, 4, target_size)); break; case offsetof(struct __sk_buff, protocol): *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, protocol, 2, target_size)); break; case offsetof(struct __sk_buff, vlan_proto): *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, vlan_proto, 2, target_size)); break; case offsetof(struct __sk_buff, priority): if (type == BPF_WRITE) *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, priority, 4, target_size)); else *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, priority, 4, target_size)); break; case offsetof(struct __sk_buff, ingress_ifindex): *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, skb_iif, 4, target_size)); break; case offsetof(struct __sk_buff, ifindex): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev), si->dst_reg, si->src_reg, offsetof(struct sk_buff, dev)); *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, bpf_target_off(struct net_device, ifindex, 4, target_size)); break; case offsetof(struct __sk_buff, hash): *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, hash, 4, target_size)); break; case offsetof(struct __sk_buff, mark): if (type == BPF_WRITE) *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, mark, 4, target_size)); else *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, mark, 4, target_size)); break; case offsetof(struct __sk_buff, pkt_type): *target_size = 1; *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg, PKT_TYPE_OFFSET()); *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX); #ifdef __BIG_ENDIAN_BITFIELD *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5); #endif break; case offsetof(struct __sk_buff, queue_mapping): if (type == BPF_WRITE) { *insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, NO_QUEUE_MAPPING, 1); *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, queue_mapping, 2, target_size)); } else { *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, queue_mapping, 2, target_size)); } break; case offsetof(struct __sk_buff, vlan_present): *target_size = 1; *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg, PKT_VLAN_PRESENT_OFFSET()); if (PKT_VLAN_PRESENT_BIT) *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, PKT_VLAN_PRESENT_BIT); if (PKT_VLAN_PRESENT_BIT < 7) *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1); break; case offsetof(struct __sk_buff, 
vlan_tci): *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, vlan_tci, 2, target_size)); break; case offsetof(struct __sk_buff, cb[0]) ... offsetofend(struct __sk_buff, cb[4]) - 1: BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20); BUILD_BUG_ON((offsetof(struct sk_buff, cb) + offsetof(struct qdisc_skb_cb, data)) % sizeof(__u64)); prog->cb_access = 1; off = si->off; off -= offsetof(struct __sk_buff, cb[0]); off += offsetof(struct sk_buff, cb); off += offsetof(struct qdisc_skb_cb, data); if (type == BPF_WRITE) *insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg, si->src_reg, off); else *insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg, si->src_reg, off); break; case offsetof(struct __sk_buff, tc_classid): BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, tc_classid) != 2); off = si->off; off -= offsetof(struct __sk_buff, tc_classid); off += offsetof(struct sk_buff, cb); off += offsetof(struct qdisc_skb_cb, tc_classid); *target_size = 2; if (type == BPF_WRITE) *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg, off); else *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, off); break; case offsetof(struct __sk_buff, data): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data), si->dst_reg, si->src_reg, offsetof(struct sk_buff, data)); break; case offsetof(struct __sk_buff, data_meta): off = si->off; off -= offsetof(struct __sk_buff, data_meta); off += offsetof(struct sk_buff, cb); off += offsetof(struct bpf_skb_data_end, data_meta); *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg, off); break; case offsetof(struct __sk_buff, data_end): off = si->off; off -= offsetof(struct __sk_buff, data_end); off += offsetof(struct sk_buff, cb); off += offsetof(struct bpf_skb_data_end, data_end); *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg, off); break; case offsetof(struct __sk_buff, tc_index): #ifdef CONFIG_NET_SCHED if (type == BPF_WRITE) *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, tc_index, 2, target_size)); else *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, tc_index, 2, target_size)); #else *target_size = 2; if (type == BPF_WRITE) *insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg); else *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); #endif break; case offsetof(struct __sk_buff, napi_id): #if defined(CONFIG_NET_RX_BUSY_POLL) *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, napi_id, 4, target_size)); *insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1); *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); #else *target_size = 4; *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); #endif break; case offsetof(struct __sk_buff, family): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), si->dst_reg, si->src_reg, offsetof(struct sk_buff, sk)); *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, bpf_target_off(struct sock_common, skc_family, 2, target_size)); break; case offsetof(struct __sk_buff, remote_ip4): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), si->dst_reg, si->src_reg, offsetof(struct sk_buff, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, bpf_target_off(struct sock_common, skc_daddr, 4, target_size)); break; case offsetof(struct __sk_buff, local_ip4): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_rcv_saddr) != 4); *insn++ = 
BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), si->dst_reg, si->src_reg, offsetof(struct sk_buff, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, bpf_target_off(struct sock_common, skc_rcv_saddr, 4, target_size)); break; case offsetof(struct __sk_buff, remote_ip6[0]) ... offsetof(struct __sk_buff, remote_ip6[3]): #if IS_ENABLED(CONFIG_IPV6) BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_v6_daddr.s6_addr32[0]) != 4); off = si->off; off -= offsetof(struct __sk_buff, remote_ip6[0]); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), si->dst_reg, si->src_reg, offsetof(struct sk_buff, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_v6_daddr.s6_addr32[0]) + off); #else *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); #endif break; case offsetof(struct __sk_buff, local_ip6[0]) ... offsetof(struct __sk_buff, local_ip6[3]): #if IS_ENABLED(CONFIG_IPV6) BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_v6_rcv_saddr.s6_addr32[0]) != 4); off = si->off; off -= offsetof(struct __sk_buff, local_ip6[0]); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), si->dst_reg, si->src_reg, offsetof(struct sk_buff, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_v6_rcv_saddr.s6_addr32[0]) + off); #else *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); #endif break; case offsetof(struct __sk_buff, remote_port): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), si->dst_reg, si->src_reg, offsetof(struct sk_buff, sk)); *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, bpf_target_off(struct sock_common, skc_dport, 2, target_size)); #ifndef __BIG_ENDIAN_BITFIELD *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16); #endif break; case offsetof(struct __sk_buff, local_port): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), si->dst_reg, si->src_reg, offsetof(struct sk_buff, sk)); *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, bpf_target_off(struct sock_common, skc_num, 2, target_size)); break; case offsetof(struct __sk_buff, tstamp): BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tstamp) != 8); if (type == BPF_WRITE) *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, tstamp, 8, target_size)); else *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, tstamp, 8, target_size)); break; case offsetof(struct __sk_buff, gso_segs): /* si->dst_reg = skb_shinfo(SKB); */ #ifdef NET_SKBUFF_DATA_USES_OFFSET *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head), si->dst_reg, si->src_reg, offsetof(struct sk_buff, head)); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end), BPF_REG_AX, si->src_reg, offsetof(struct sk_buff, end)); *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX); #else *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end), si->dst_reg, si->src_reg, offsetof(struct sk_buff, end)); #endif *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_segs), si->dst_reg, si->dst_reg, bpf_target_off(struct skb_shared_info, gso_segs, 2, target_size)); break; case offsetof(struct __sk_buff, wire_len): BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, pkt_len) != 4); off = si->off; off -= offsetof(struct __sk_buff, wire_len); off += offsetof(struct sk_buff, cb); off += offsetof(struct qdisc_skb_cb, pkt_len); *target_size = 4; *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, 
si->src_reg, off); break; case offsetof(struct __sk_buff, sk): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), si->dst_reg, si->src_reg, offsetof(struct sk_buff, sk)); break; } return insn - insn_buf; } u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size) { struct bpf_insn *insn = insn_buf; int off; switch (si->off) { case offsetof(struct bpf_sock, bound_dev_if): BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_bound_dev_if) != 4); if (type == BPF_WRITE) *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sock, sk_bound_dev_if)); else *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sock, sk_bound_dev_if)); break; case offsetof(struct bpf_sock, mark): BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_mark) != 4); if (type == BPF_WRITE) *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sock, sk_mark)); else *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sock, sk_mark)); break; case offsetof(struct bpf_sock, priority): BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_priority) != 4); if (type == BPF_WRITE) *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sock, sk_priority)); else *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sock, sk_priority)); break; case offsetof(struct bpf_sock, family): *insn++ = BPF_LDX_MEM( BPF_FIELD_SIZEOF(struct sock_common, skc_family), si->dst_reg, si->src_reg, bpf_target_off(struct sock_common, skc_family, FIELD_SIZEOF(struct sock_common, skc_family), target_size)); break; case offsetof(struct bpf_sock, type): BUILD_BUG_ON(HWEIGHT32(SK_FL_TYPE_MASK) != BITS_PER_BYTE * 2); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sock, __sk_flags_offset)); *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK); *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT); *target_size = 2; break; case offsetof(struct bpf_sock, protocol): BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sock, __sk_flags_offset)); *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK); *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT); *target_size = 1; break; case offsetof(struct bpf_sock, src_ip4): *insn++ = BPF_LDX_MEM( BPF_SIZE(si->code), si->dst_reg, si->src_reg, bpf_target_off(struct sock_common, skc_rcv_saddr, FIELD_SIZEOF(struct sock_common, skc_rcv_saddr), target_size)); break; case offsetof(struct bpf_sock, dst_ip4): *insn++ = BPF_LDX_MEM( BPF_SIZE(si->code), si->dst_reg, si->src_reg, bpf_target_off(struct sock_common, skc_daddr, FIELD_SIZEOF(struct sock_common, skc_daddr), target_size)); break; case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]): #if IS_ENABLED(CONFIG_IPV6) off = si->off; off -= offsetof(struct bpf_sock, src_ip6[0]); *insn++ = BPF_LDX_MEM( BPF_SIZE(si->code), si->dst_reg, si->src_reg, bpf_target_off( struct sock_common, skc_v6_rcv_saddr.s6_addr32[0], FIELD_SIZEOF(struct sock_common, skc_v6_rcv_saddr.s6_addr32[0]), target_size) + off); #else (void)off; *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); #endif break; case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]): #if IS_ENABLED(CONFIG_IPV6) off = si->off; off -= offsetof(struct bpf_sock, dst_ip6[0]); *insn++ = BPF_LDX_MEM( BPF_SIZE(si->code), si->dst_reg, si->src_reg, bpf_target_off(struct sock_common, skc_v6_daddr.s6_addr32[0], 
FIELD_SIZEOF(struct sock_common, skc_v6_daddr.s6_addr32[0]), target_size) + off); #else *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); *target_size = 4; #endif break; case offsetof(struct bpf_sock, src_port): *insn++ = BPF_LDX_MEM( BPF_FIELD_SIZEOF(struct sock_common, skc_num), si->dst_reg, si->src_reg, bpf_target_off(struct sock_common, skc_num, FIELD_SIZEOF(struct sock_common, skc_num), target_size)); break; case offsetof(struct bpf_sock, dst_port): *insn++ = BPF_LDX_MEM( BPF_FIELD_SIZEOF(struct sock_common, skc_dport), si->dst_reg, si->src_reg, bpf_target_off(struct sock_common, skc_dport, FIELD_SIZEOF(struct sock_common, skc_dport), target_size)); break; case offsetof(struct bpf_sock, state): *insn++ = BPF_LDX_MEM( BPF_FIELD_SIZEOF(struct sock_common, skc_state), si->dst_reg, si->src_reg, bpf_target_off(struct sock_common, skc_state, FIELD_SIZEOF(struct sock_common, skc_state), target_size)); break; } return insn - insn_buf; } static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size) { struct bpf_insn *insn = insn_buf; switch (si->off) { case offsetof(struct __sk_buff, ifindex): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev), si->dst_reg, si->src_reg, offsetof(struct sk_buff, dev)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, bpf_target_off(struct net_device, ifindex, 4, target_size)); break; default: return bpf_convert_ctx_access(type, si, insn_buf, prog, target_size); } return insn - insn_buf; } static u32 xdp_convert_ctx_access(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size) { struct bpf_insn *insn = insn_buf; switch (si->off) { case offsetof(struct xdp_md, data): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data), si->dst_reg, si->src_reg, offsetof(struct xdp_buff, data)); break; case offsetof(struct xdp_md, data_meta): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_meta), si->dst_reg, si->src_reg, offsetof(struct xdp_buff, data_meta)); break; case offsetof(struct xdp_md, data_end): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end), si->dst_reg, si->src_reg, offsetof(struct xdp_buff, data_end)); break; case offsetof(struct xdp_md, ingress_ifindex): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq), si->dst_reg, si->src_reg, offsetof(struct xdp_buff, rxq)); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev), si->dst_reg, si->dst_reg, offsetof(struct xdp_rxq_info, dev)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct net_device, ifindex)); break; case offsetof(struct xdp_md, rx_queue_index): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq), si->dst_reg, si->src_reg, offsetof(struct xdp_buff, rxq)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct xdp_rxq_info, queue_index)); break; } return insn - insn_buf; } /* SOCK_ADDR_LOAD_NESTED_FIELD() loads Nested Field S.F.NF where S is type of * context Structure, F is Field in context structure that contains a pointer * to Nested Structure of type NS that has the field NF. * * SIZE encodes the load size (BPF_B, BPF_H, etc). It's up to caller to make * sure that SIZE is not greater than actual size of S.F.NF. * * If offset OFF is provided, the load happens from that offset relative to * offset of NF. 
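	 * E.g. SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(struct bpf_sock_addr_kern,
	 * struct sockaddr_in6, uaddr, sin6_addr.s6_addr32[0], BPF_W, off)
	 * emits two loads: first the uaddr pointer, then a 32-bit word at
	 * the offset of sin6_addr.s6_addr32[0] plus off within the sockaddr.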
*/
#define SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF)	\
	do {								\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), si->dst_reg, \
				      si->src_reg, offsetof(S, F));	\
		*insn++ = BPF_LDX_MEM(					\
			SIZE, si->dst_reg, si->dst_reg,			\
			bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF),	\
				       target_size)			\
				+ OFF);					\
	} while (0)

#define SOCK_ADDR_LOAD_NESTED_FIELD(S, NS, F, NF)			\
	SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF,		\
					     BPF_FIELD_SIZEOF(NS, NF), 0)

/* SOCK_ADDR_STORE_NESTED_FIELD_OFF() has semantics similar to
 * SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF() but for store operations.
 *
 * It doesn't support the SIZE argument though, since narrow stores are
 * not supported for now.
 *
 * In addition it uses Temporary Field TF (member of struct S) as the 3rd
 * "register" since the two registers available in convert_ctx_access are
 * not enough: we can't overwrite either SRC, since it contains the value
 * to store, or DST, since it contains the pointer to the context that may
 * be used by later instructions. But we need a temporary place to save
 * the pointer to the nested structure whose field we want to store to.
 */
#define SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, OFF, TF)		\
	do {								\
		int tmp_reg = BPF_REG_9;				\
		if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg)	\
			--tmp_reg;					\
		if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg)	\
			--tmp_reg;					\
		*insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, tmp_reg,	\
				      offsetof(S, TF));			\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg,	\
				      si->dst_reg, offsetof(S, F));	\
		*insn++ = BPF_STX_MEM(					\
			BPF_FIELD_SIZEOF(NS, NF), tmp_reg, si->src_reg,	\
			bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF),	\
				       target_size)			\
				+ OFF);					\
		*insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg,	\
				      offsetof(S, TF));			\
	} while (0)

#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF, \
						      TF)		\
	do {								\
		if (type == BPF_WRITE) {				\
			SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, OFF, \
							 TF);		\
		} else {						\
			SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(		\
				S, NS, F, NF, SIZE, OFF);		\
		}							\
	} while (0)

#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(S, NS, F, NF, TF)		\
	SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(			\
		S, NS, F, NF, BPF_FIELD_SIZEOF(NS, NF), 0, TF)

static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
					const struct bpf_insn *si,
					struct bpf_insn *insn_buf,
					struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	int off;

	switch (si->off) {
	case offsetof(struct bpf_sock_addr, user_family):
		SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
					    struct sockaddr, uaddr,
					    sa_family);
		break;

	case offsetof(struct bpf_sock_addr, user_ip4):
		SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
			struct bpf_sock_addr_kern, struct sockaddr_in, uaddr,
			sin_addr, BPF_SIZE(si->code), 0, tmp_reg);
		break;

	case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
		off = si->off;
		off -= offsetof(struct bpf_sock_addr, user_ip6[0]);
		SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
			struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr,
			sin6_addr.s6_addr32[0], BPF_SIZE(si->code), off,
			tmp_reg);
		break;

	case offsetof(struct bpf_sock_addr, user_port):
		/* To get the port we need to know sa_family first and then
		 * treat sockaddr as either sockaddr_in or sockaddr_in6.
		 * Though we can simplify, since the port field has the same
		 * offset and size in both structures.
		 * Here we check this invariant and use just one of the
		 * structures if it's true.
*/ BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_port) != offsetof(struct sockaddr_in6, sin6_port)); BUILD_BUG_ON(FIELD_SIZEOF(struct sockaddr_in, sin_port) != FIELD_SIZEOF(struct sockaddr_in6, sin6_port)); SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr, sin6_port, tmp_reg); break; case offsetof(struct bpf_sock_addr, family): SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern, struct sock, sk, sk_family); break; case offsetof(struct bpf_sock_addr, type): SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF( struct bpf_sock_addr_kern, struct sock, sk, __sk_flags_offset, BPF_W, 0); *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK); *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT); break; case offsetof(struct bpf_sock_addr, protocol): SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF( struct bpf_sock_addr_kern, struct sock, sk, __sk_flags_offset, BPF_W, 0); *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK); *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT); break; case offsetof(struct bpf_sock_addr, msg_src_ip4): /* Treat t_ctx as struct in_addr for msg_src_ip4. */ SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( struct bpf_sock_addr_kern, struct in_addr, t_ctx, s_addr, BPF_SIZE(si->code), 0, tmp_reg); break; case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0], msg_src_ip6[3]): off = si->off; off -= offsetof(struct bpf_sock_addr, msg_src_ip6[0]); /* Treat t_ctx as struct in6_addr for msg_src_ip6. */ SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( struct bpf_sock_addr_kern, struct in6_addr, t_ctx, s6_addr32[0], BPF_SIZE(si->code), off, tmp_reg); break; } return insn - insn_buf; } static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size) { struct bpf_insn *insn = insn_buf; int off; /* Helper macro for adding read access to tcp_sock or sock fields. */ #define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \ do { \ BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \ FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ struct bpf_sock_ops_kern, \ is_fullsock), \ si->dst_reg, si->src_reg, \ offsetof(struct bpf_sock_ops_kern, \ is_fullsock)); \ *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2); \ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ struct bpf_sock_ops_kern, sk),\ si->dst_reg, si->src_reg, \ offsetof(struct bpf_sock_ops_kern, sk));\ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ, \ OBJ_FIELD), \ si->dst_reg, si->dst_reg, \ offsetof(OBJ, OBJ_FIELD)); \ } while (0) #define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \ SOCK_OPS_GET_FIELD(FIELD, FIELD, struct tcp_sock) /* Helper macro for adding write access to tcp_sock or sock fields. * The macro is called with two registers, dst_reg which contains a pointer * to ctx (context) and src_reg which contains the value that should be * stored. However, we need an additional register since we cannot overwrite * dst_reg because it may be used later in the program. * Instead we "borrow" one of the other register. We first save its value * into a new (temp) field in bpf_sock_ops_kern, use it, and then restore * it at the end of the macro. 
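	 * The borrowed register is first spilled to the
	 * bpf_sock_ops_kern::temp scratch slot and reloaded from there once
	 * the store is done, so the program's own register contents are
	 * preserved.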
*/ #define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \ do { \ int reg = BPF_REG_9; \ BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \ FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \ if (si->dst_reg == reg || si->src_reg == reg) \ reg--; \ if (si->dst_reg == reg || si->src_reg == reg) \ reg--; \ *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, reg, \ offsetof(struct bpf_sock_ops_kern, \ temp)); \ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ struct bpf_sock_ops_kern, \ is_fullsock), \ reg, si->dst_reg, \ offsetof(struct bpf_sock_ops_kern, \ is_fullsock)); \ *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2); \ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ struct bpf_sock_ops_kern, sk),\ reg, si->dst_reg, \ offsetof(struct bpf_sock_ops_kern, sk));\ *insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD), \ reg, si->src_reg, \ offsetof(OBJ, OBJ_FIELD)); \ *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg, \ offsetof(struct bpf_sock_ops_kern, \ temp)); \ } while (0) #define SOCK_OPS_GET_OR_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ, TYPE) \ do { \ if (TYPE == BPF_WRITE) \ SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \ else \ SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \ } while (0) CONVERT_COMMON_TCP_SOCK_FIELDS(struct bpf_sock_ops, SOCK_OPS_GET_TCP_SOCK_FIELD); if (insn > insn_buf) return insn - insn_buf; switch (si->off) { case offsetof(struct bpf_sock_ops, op) ... offsetof(struct bpf_sock_ops, replylong[3]): BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, op) != FIELD_SIZEOF(struct bpf_sock_ops_kern, op)); BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, reply) != FIELD_SIZEOF(struct bpf_sock_ops_kern, reply)); BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, replylong) != FIELD_SIZEOF(struct bpf_sock_ops_kern, replylong)); off = si->off; off -= offsetof(struct bpf_sock_ops, op); off += offsetof(struct bpf_sock_ops_kern, op); if (type == BPF_WRITE) *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, off); else *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, off); break; case offsetof(struct bpf_sock_ops, family): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct bpf_sock_ops_kern, sk), si->dst_reg, si->src_reg, offsetof(struct bpf_sock_ops_kern, sk)); *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_family)); break; case offsetof(struct bpf_sock_ops, remote_ip4): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct bpf_sock_ops_kern, sk), si->dst_reg, si->src_reg, offsetof(struct bpf_sock_ops_kern, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_daddr)); break; case offsetof(struct bpf_sock_ops, local_ip4): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_rcv_saddr) != 4); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct bpf_sock_ops_kern, sk), si->dst_reg, si->src_reg, offsetof(struct bpf_sock_ops_kern, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_rcv_saddr)); break; case offsetof(struct bpf_sock_ops, remote_ip6[0]) ... 
offsetof(struct bpf_sock_ops, remote_ip6[3]): #if IS_ENABLED(CONFIG_IPV6) BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_v6_daddr.s6_addr32[0]) != 4); off = si->off; off -= offsetof(struct bpf_sock_ops, remote_ip6[0]); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct bpf_sock_ops_kern, sk), si->dst_reg, si->src_reg, offsetof(struct bpf_sock_ops_kern, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_v6_daddr.s6_addr32[0]) + off); #else *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); #endif break; case offsetof(struct bpf_sock_ops, local_ip6[0]) ... offsetof(struct bpf_sock_ops, local_ip6[3]): #if IS_ENABLED(CONFIG_IPV6) BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_v6_rcv_saddr.s6_addr32[0]) != 4); off = si->off; off -= offsetof(struct bpf_sock_ops, local_ip6[0]); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct bpf_sock_ops_kern, sk), si->dst_reg, si->src_reg, offsetof(struct bpf_sock_ops_kern, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_v6_rcv_saddr.s6_addr32[0]) + off); #else *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); #endif break; case offsetof(struct bpf_sock_ops, remote_port): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct bpf_sock_ops_kern, sk), si->dst_reg, si->src_reg, offsetof(struct bpf_sock_ops_kern, sk)); *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_dport)); #ifndef __BIG_ENDIAN_BITFIELD *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16); #endif break; case offsetof(struct bpf_sock_ops, local_port): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct bpf_sock_ops_kern, sk), si->dst_reg, si->src_reg, offsetof(struct bpf_sock_ops_kern, sk)); *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_num)); break; case offsetof(struct bpf_sock_ops, is_fullsock): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct bpf_sock_ops_kern, is_fullsock), si->dst_reg, si->src_reg, offsetof(struct bpf_sock_ops_kern, is_fullsock)); break; case offsetof(struct bpf_sock_ops, state): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_state) != 1); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct bpf_sock_ops_kern, sk), si->dst_reg, si->src_reg, offsetof(struct bpf_sock_ops_kern, sk)); *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_state)); break; case offsetof(struct bpf_sock_ops, rtt_min): BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) != sizeof(struct minmax)); BUILD_BUG_ON(sizeof(struct minmax) < sizeof(struct minmax_sample)); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct bpf_sock_ops_kern, sk), si->dst_reg, si->src_reg, offsetof(struct bpf_sock_ops_kern, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct tcp_sock, rtt_min) + FIELD_SIZEOF(struct minmax_sample, t)); break; case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags): SOCK_OPS_GET_FIELD(bpf_sock_ops_cb_flags, bpf_sock_ops_cb_flags, struct tcp_sock); break; case offsetof(struct bpf_sock_ops, sk_txhash): SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash, struct sock, type); break; } return insn - insn_buf; } static u32 sk_skb_convert_ctx_access(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size) { struct bpf_insn *insn = insn_buf; int off; switch (si->off) { case offsetof(struct __sk_buff, data_end): off = si->off; off -= 
offsetof(struct __sk_buff, data_end); off += offsetof(struct sk_buff, cb); off += offsetof(struct tcp_skb_cb, bpf.data_end); *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg, off); break; default: return bpf_convert_ctx_access(type, si, insn_buf, prog, target_size); } return insn - insn_buf; } static u32 sk_msg_convert_ctx_access(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size) { struct bpf_insn *insn = insn_buf; #if IS_ENABLED(CONFIG_IPV6) int off; #endif /* convert ctx uses the fact sg element is first in struct */ BUILD_BUG_ON(offsetof(struct sk_msg, sg) != 0); switch (si->off) { case offsetof(struct sk_msg_md, data): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data), si->dst_reg, si->src_reg, offsetof(struct sk_msg, data)); break; case offsetof(struct sk_msg_md, data_end): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data_end), si->dst_reg, si->src_reg, offsetof(struct sk_msg, data_end)); break; case offsetof(struct sk_msg_md, family): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct sk_msg, sk), si->dst_reg, si->src_reg, offsetof(struct sk_msg, sk)); *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_family)); break; case offsetof(struct sk_msg_md, remote_ip4): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct sk_msg, sk), si->dst_reg, si->src_reg, offsetof(struct sk_msg, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_daddr)); break; case offsetof(struct sk_msg_md, local_ip4): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_rcv_saddr) != 4); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct sk_msg, sk), si->dst_reg, si->src_reg, offsetof(struct sk_msg, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_rcv_saddr)); break; case offsetof(struct sk_msg_md, remote_ip6[0]) ... offsetof(struct sk_msg_md, remote_ip6[3]): #if IS_ENABLED(CONFIG_IPV6) BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_v6_daddr.s6_addr32[0]) != 4); off = si->off; off -= offsetof(struct sk_msg_md, remote_ip6[0]); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct sk_msg, sk), si->dst_reg, si->src_reg, offsetof(struct sk_msg, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_v6_daddr.s6_addr32[0]) + off); #else *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); #endif break; case offsetof(struct sk_msg_md, local_ip6[0]) ... 
offsetof(struct sk_msg_md, local_ip6[3]): #if IS_ENABLED(CONFIG_IPV6) BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_v6_rcv_saddr.s6_addr32[0]) != 4); off = si->off; off -= offsetof(struct sk_msg_md, local_ip6[0]); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct sk_msg, sk), si->dst_reg, si->src_reg, offsetof(struct sk_msg, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_v6_rcv_saddr.s6_addr32[0]) + off); #else *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); #endif break; case offsetof(struct sk_msg_md, remote_port): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct sk_msg, sk), si->dst_reg, si->src_reg, offsetof(struct sk_msg, sk)); *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_dport)); #ifndef __BIG_ENDIAN_BITFIELD *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16); #endif break; case offsetof(struct sk_msg_md, local_port): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct sk_msg, sk), si->dst_reg, si->src_reg, offsetof(struct sk_msg, sk)); *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_num)); break; case offsetof(struct sk_msg_md, size): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_sg, size), si->dst_reg, si->src_reg, offsetof(struct sk_msg_sg, size)); break; } return insn - insn_buf; } const struct bpf_verifier_ops sk_filter_verifier_ops = { .get_func_proto = sk_filter_func_proto, .is_valid_access = sk_filter_is_valid_access, .convert_ctx_access = bpf_convert_ctx_access, .gen_ld_abs = bpf_gen_ld_abs, }; const struct bpf_prog_ops sk_filter_prog_ops = { .test_run = bpf_prog_test_run_skb, }; const struct bpf_verifier_ops tc_cls_act_verifier_ops = { .get_func_proto = tc_cls_act_func_proto, .is_valid_access = tc_cls_act_is_valid_access, .convert_ctx_access = tc_cls_act_convert_ctx_access, .gen_prologue = tc_cls_act_prologue, .gen_ld_abs = bpf_gen_ld_abs, }; const struct bpf_prog_ops tc_cls_act_prog_ops = { .test_run = bpf_prog_test_run_skb, }; const struct bpf_verifier_ops xdp_verifier_ops = { .get_func_proto = xdp_func_proto, .is_valid_access = xdp_is_valid_access, .convert_ctx_access = xdp_convert_ctx_access, .gen_prologue = bpf_noop_prologue, }; const struct bpf_prog_ops xdp_prog_ops = { .test_run = bpf_prog_test_run_xdp, }; const struct bpf_verifier_ops cg_skb_verifier_ops = { .get_func_proto = cg_skb_func_proto, .is_valid_access = cg_skb_is_valid_access, .convert_ctx_access = bpf_convert_ctx_access, }; const struct bpf_prog_ops cg_skb_prog_ops = { .test_run = bpf_prog_test_run_skb, }; const struct bpf_verifier_ops lwt_in_verifier_ops = { .get_func_proto = lwt_in_func_proto, .is_valid_access = lwt_is_valid_access, .convert_ctx_access = bpf_convert_ctx_access, }; const struct bpf_prog_ops lwt_in_prog_ops = { .test_run = bpf_prog_test_run_skb, }; const struct bpf_verifier_ops lwt_out_verifier_ops = { .get_func_proto = lwt_out_func_proto, .is_valid_access = lwt_is_valid_access, .convert_ctx_access = bpf_convert_ctx_access, }; const struct bpf_prog_ops lwt_out_prog_ops = { .test_run = bpf_prog_test_run_skb, }; const struct bpf_verifier_ops lwt_xmit_verifier_ops = { .get_func_proto = lwt_xmit_func_proto, .is_valid_access = lwt_is_valid_access, .convert_ctx_access = bpf_convert_ctx_access, .gen_prologue = tc_cls_act_prologue, }; const struct bpf_prog_ops lwt_xmit_prog_ops = { .test_run = bpf_prog_test_run_skb, }; const struct bpf_verifier_ops 
lwt_seg6local_verifier_ops = { .get_func_proto = lwt_seg6local_func_proto, .is_valid_access = lwt_is_valid_access, .convert_ctx_access = bpf_convert_ctx_access, }; const struct bpf_prog_ops lwt_seg6local_prog_ops = { .test_run = bpf_prog_test_run_skb, }; const struct bpf_verifier_ops cg_sock_verifier_ops = { .get_func_proto = sock_filter_func_proto, .is_valid_access = sock_filter_is_valid_access, .convert_ctx_access = bpf_sock_convert_ctx_access, }; const struct bpf_prog_ops cg_sock_prog_ops = { }; const struct bpf_verifier_ops cg_sock_addr_verifier_ops = { .get_func_proto = sock_addr_func_proto, .is_valid_access = sock_addr_is_valid_access, .convert_ctx_access = sock_addr_convert_ctx_access, }; const struct bpf_prog_ops cg_sock_addr_prog_ops = { }; const struct bpf_verifier_ops sock_ops_verifier_ops = { .get_func_proto = sock_ops_func_proto, .is_valid_access = sock_ops_is_valid_access, .convert_ctx_access = sock_ops_convert_ctx_access, }; const struct bpf_prog_ops sock_ops_prog_ops = { }; const struct bpf_verifier_ops sk_skb_verifier_ops = { .get_func_proto = sk_skb_func_proto, .is_valid_access = sk_skb_is_valid_access, .convert_ctx_access = sk_skb_convert_ctx_access, .gen_prologue = sk_skb_prologue, }; const struct bpf_prog_ops sk_skb_prog_ops = { }; const struct bpf_verifier_ops sk_msg_verifier_ops = { .get_func_proto = sk_msg_func_proto, .is_valid_access = sk_msg_is_valid_access, .convert_ctx_access = sk_msg_convert_ctx_access, .gen_prologue = bpf_noop_prologue, }; const struct bpf_prog_ops sk_msg_prog_ops = { }; const struct bpf_verifier_ops flow_dissector_verifier_ops = { .get_func_proto = flow_dissector_func_proto, .is_valid_access = flow_dissector_is_valid_access, .convert_ctx_access = flow_dissector_convert_ctx_access, }; const struct bpf_prog_ops flow_dissector_prog_ops = { .test_run = bpf_prog_test_run_flow_dissector, }; int sk_detach_filter(struct sock *sk) { int ret = -ENOENT; struct sk_filter *filter; if (sock_flag(sk, SOCK_FILTER_LOCKED)) return -EPERM; filter = rcu_dereference_protected(sk->sk_filter, lockdep_sock_is_held(sk)); if (filter) { RCU_INIT_POINTER(sk->sk_filter, NULL); sk_filter_uncharge(sk, filter); ret = 0; } return ret; } EXPORT_SYMBOL_GPL(sk_detach_filter); int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len) { struct sock_fprog_kern *fprog; struct sk_filter *filter; int ret = 0; lock_sock(sk); filter = rcu_dereference_protected(sk->sk_filter, lockdep_sock_is_held(sk)); if (!filter) goto out; /* We're copying the filter that has been originally attached, * so no conversion/decode needed anymore. eBPF programs that * have no original program cannot be dumped through this. */ ret = -EACCES; fprog = filter->prog->orig_prog; if (!fprog) goto out; ret = fprog->len; if (!len) /* User space only enquires number of filter blocks. */ goto out; ret = -EINVAL; if (len < fprog->len) goto out; ret = -EFAULT; if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog))) goto out; /* Instead of bytes, the API requests to return the number * of filter blocks. 
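	 * i.e. sk_get_filter() returns fprog->len, the count of sock_filter
	 * entries, while bpf_classic_proglen() determines the number of
	 * bytes copied out to user space.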
*/ ret = fprog->len; out: release_sock(sk); return ret; } #ifdef CONFIG_INET struct sk_reuseport_kern { struct sk_buff *skb; struct sock *sk; struct sock *selected_sk; void *data_end; u32 hash; u32 reuseport_id; bool bind_inany; }; static void bpf_init_reuseport_kern(struct sk_reuseport_kern *reuse_kern, struct sock_reuseport *reuse, struct sock *sk, struct sk_buff *skb, u32 hash) { reuse_kern->skb = skb; reuse_kern->sk = sk; reuse_kern->selected_sk = NULL; reuse_kern->data_end = skb->data + skb_headlen(skb); reuse_kern->hash = hash; reuse_kern->reuseport_id = reuse->reuseport_id; reuse_kern->bind_inany = reuse->bind_inany; } struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, struct bpf_prog *prog, struct sk_buff *skb, u32 hash) { struct sk_reuseport_kern reuse_kern; enum sk_action action; bpf_init_reuseport_kern(&reuse_kern, reuse, sk, skb, hash); action = BPF_PROG_RUN(prog, &reuse_kern); if (action == SK_PASS) return reuse_kern.selected_sk; else return ERR_PTR(-ECONNREFUSED); } BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern, struct bpf_map *, map, void *, key, u32, flags) { struct sock_reuseport *reuse; struct sock *selected_sk; selected_sk = map->ops->map_lookup_elem(map, key); if (!selected_sk) return -ENOENT; reuse = rcu_dereference(selected_sk->sk_reuseport_cb); if (!reuse) /* selected_sk is unhashed (e.g. by close()) after the * above map_lookup_elem(). Treat selected_sk as if it had already * been removed from the map. */ return -ENOENT; if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) { struct sock *sk; if (unlikely(!reuse_kern->reuseport_id)) /* There is a small race between adding the * sk to the map and setting the * reuse_kern->reuseport_id. * Treat it as if the sk has not been added to * the bpf map yet. */ return -ENOENT; sk = reuse_kern->sk; if (sk->sk_protocol != selected_sk->sk_protocol) return -EPROTOTYPE; else if (sk->sk_family != selected_sk->sk_family) return -EAFNOSUPPORT; /* Catch all. Likely bound to a different sockaddr. 
*/ return -EBADFD; } reuse_kern->selected_sk = selected_sk; return 0; } static const struct bpf_func_proto sk_select_reuseport_proto = { .func = sk_select_reuseport, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_PTR_TO_MAP_KEY, .arg4_type = ARG_ANYTHING, }; BPF_CALL_4(sk_reuseport_load_bytes, const struct sk_reuseport_kern *, reuse_kern, u32, offset, void *, to, u32, len) { return ____bpf_skb_load_bytes(reuse_kern->skb, offset, to, len); } static const struct bpf_func_proto sk_reuseport_load_bytes_proto = { .func = sk_reuseport_load_bytes, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_PTR_TO_UNINIT_MEM, .arg4_type = ARG_CONST_SIZE, }; BPF_CALL_5(sk_reuseport_load_bytes_relative, const struct sk_reuseport_kern *, reuse_kern, u32, offset, void *, to, u32, len, u32, start_header) { return ____bpf_skb_load_bytes_relative(reuse_kern->skb, offset, to, len, start_header); } static const struct bpf_func_proto sk_reuseport_load_bytes_relative_proto = { .func = sk_reuseport_load_bytes_relative, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_PTR_TO_UNINIT_MEM, .arg4_type = ARG_CONST_SIZE, .arg5_type = ARG_ANYTHING, }; static const struct bpf_func_proto * sk_reuseport_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case BPF_FUNC_sk_select_reuseport: return &sk_select_reuseport_proto; case BPF_FUNC_skb_load_bytes: return &sk_reuseport_load_bytes_proto; case BPF_FUNC_skb_load_bytes_relative: return &sk_reuseport_load_bytes_relative_proto; default: return bpf_base_func_proto(func_id); } } static bool sk_reuseport_is_valid_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { const u32 size_default = sizeof(__u32); if (off < 0 || off >= sizeof(struct sk_reuseport_md) || off % size || type != BPF_READ) return false; switch (off) { case offsetof(struct sk_reuseport_md, data): info->reg_type = PTR_TO_PACKET; return size == sizeof(__u64); case offsetof(struct sk_reuseport_md, data_end): info->reg_type = PTR_TO_PACKET_END; return size == sizeof(__u64); case offsetof(struct sk_reuseport_md, hash): return size == size_default; /* Fields that allow narrowing */ case offsetof(struct sk_reuseport_md, eth_protocol): if (size < FIELD_SIZEOF(struct sk_buff, protocol)) return false; /* fall through */ case offsetof(struct sk_reuseport_md, ip_protocol): case offsetof(struct sk_reuseport_md, bind_inany): case offsetof(struct sk_reuseport_md, len): bpf_ctx_record_field_size(info, size_default); return bpf_ctx_narrow_access_ok(off, size, size_default); default: return false; } } #define SK_REUSEPORT_LOAD_FIELD(F) ({ \ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_reuseport_kern, F), \ si->dst_reg, si->src_reg, \ bpf_target_off(struct sk_reuseport_kern, F, \ FIELD_SIZEOF(struct sk_reuseport_kern, F), \ target_size)); \ }) #define SK_REUSEPORT_LOAD_SKB_FIELD(SKB_FIELD) \ SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern, \ struct sk_buff, \ skb, \ SKB_FIELD) #define SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(SK_FIELD, BPF_SIZE, EXTRA_OFF) \ SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(struct sk_reuseport_kern, \ struct sock, \ sk, \ SK_FIELD, BPF_SIZE, EXTRA_OFF) static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 
*target_size) { struct bpf_insn *insn = insn_buf; switch (si->off) { case offsetof(struct sk_reuseport_md, data): SK_REUSEPORT_LOAD_SKB_FIELD(data); break; case offsetof(struct sk_reuseport_md, len): SK_REUSEPORT_LOAD_SKB_FIELD(len); break; case offsetof(struct sk_reuseport_md, eth_protocol): SK_REUSEPORT_LOAD_SKB_FIELD(protocol); break; case offsetof(struct sk_reuseport_md, ip_protocol): BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE); SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset, BPF_W, 0); *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK); *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT); /* SK_FL_PROTO_MASK and SK_FL_PROTO_SHIFT are endian * aware. No further narrowing or masking is needed. */ *target_size = 1; break; case offsetof(struct sk_reuseport_md, data_end): SK_REUSEPORT_LOAD_FIELD(data_end); break; case offsetof(struct sk_reuseport_md, hash): SK_REUSEPORT_LOAD_FIELD(hash); break; case offsetof(struct sk_reuseport_md, bind_inany): SK_REUSEPORT_LOAD_FIELD(bind_inany); break; } return insn - insn_buf; } const struct bpf_verifier_ops sk_reuseport_verifier_ops = { .get_func_proto = sk_reuseport_func_proto, .is_valid_access = sk_reuseport_is_valid_access, .convert_ctx_access = sk_reuseport_convert_ctx_access, }; const struct bpf_prog_ops sk_reuseport_prog_ops = { }; #endif /* CONFIG_INET */
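/*
 * Editor's illustrative sketch (not part of filter.c): how user space
 * consumes the sk_get_filter() interface above. The comments in
 * sk_get_filter() note that only the originally attached classic BPF
 * program can be dumped, and that lengths are counted in filter blocks,
 * not bytes. This assumes the SO_GET_FILTER getsockopt() path; the
 * helper name below is hypothetical. A first call with optlen == 0
 * merely queries the number of blocks.
 */
#include <linux/filter.h>
#include <stdlib.h>
#include <sys/socket.h>

static struct sock_filter *dump_classic_filter(int fd, socklen_t *nblocks)
{
	struct sock_filter *insns;
	socklen_t len = 0;

	/* First call with len == 0: kernel only reports the block count. */
	if (getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &len) < 0)
		return NULL;

	insns = calloc(len, sizeof(*insns));
	if (!insns)
		return NULL;

	/* Second call: len is interpreted as a block count, not bytes. */
	if (getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, insns, &len) < 0) {
		free(insns);
		return NULL;
	}

	*nblocks = len;
	return insns;
}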
koct9i/linux
net/core/filter.c
C
gpl-2.0
234,767
/* savage_bci.c -- BCI support for Savage * * Copyright 2004 Felix Kuehling * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sub license, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "drmP.h" #include "savage_drm.h" #include "savage_drv.h" /* Need a long timeout, for shadow status updates can take a while * and so can waiting for events when the queue is full. */ #define SAVAGE_DEFAULT_USEC_TIMEOUT 1000000 /* 1s */ #define SAVAGE_EVENT_USEC_TIMEOUT 5000000 /* 5s */ #define SAVAGE_FREELIST_DEBUG 0 static int savage_do_cleanup_bci(struct drm_device *dev); static int savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n) { uint32_t mask = dev_priv->status_used_mask; uint32_t threshold = dev_priv->bci_threshold_hi; uint32_t status; int i; #if SAVAGE_BCI_DEBUG if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold) DRM_ERROR("Trying to emit %d words " "(more than guaranteed space in COB)\n", n); #endif for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) { DRM_MEMORYBARRIER(); status = dev_priv->status_ptr[0]; if ((status & mask) < threshold) return 0; DRM_UDELAY(1); } #if SAVAGE_BCI_DEBUG DRM_ERROR("failed!\n"); DRM_INFO(" status=0x%08x, threshold=0x%08x\n", status, threshold); #endif return -EBUSY; } static int savage_bci_wait_fifo_s3d(drm_savage_private_t * dev_priv, unsigned int n) { uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n; uint32_t status; int i; for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) { status = SAVAGE_READ(SAVAGE_STATUS_WORD0); if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed) return 0; DRM_UDELAY(1); } #if SAVAGE_BCI_DEBUG DRM_ERROR("failed!\n"); DRM_INFO(" status=0x%08x\n", status); #endif return -EBUSY; } static int savage_bci_wait_fifo_s4(drm_savage_private_t * dev_priv, unsigned int n) { uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n; uint32_t status; int i; for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) { status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0); if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed) return 0; DRM_UDELAY(1); } #if SAVAGE_BCI_DEBUG DRM_ERROR("failed!\n"); DRM_INFO(" status=0x%08x\n", status); #endif return -EBUSY; } /* * Waiting for events. * * The BIOS resets the event tag to 0 on mode changes. Therefore we * never emit 0 to the event tag. If we find a 0 event tag we know the * BIOS stomped on it and return success assuming that the BIOS waited * for engine idle. * * Note: if the Xserver uses the event tag it has to follow the same * rule. Otherwise there may be glitches every 2^16 events. 
*/ static int savage_bci_wait_event_shadow(drm_savage_private_t * dev_priv, uint16_t e) { uint32_t status; int i; for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) { DRM_MEMORYBARRIER(); status = dev_priv->status_ptr[1]; if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff || (status & 0xffff) == 0) return 0; DRM_UDELAY(1); } #if SAVAGE_BCI_DEBUG DRM_ERROR("failed!\n"); DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); #endif return -EBUSY; } static int savage_bci_wait_event_reg(drm_savage_private_t * dev_priv, uint16_t e) { uint32_t status; int i; for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) { status = SAVAGE_READ(SAVAGE_STATUS_WORD1); if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff || (status & 0xffff) == 0) return 0; DRM_UDELAY(1); } #if SAVAGE_BCI_DEBUG DRM_ERROR("failed!\n"); DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); #endif return -EBUSY; } uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv, unsigned int flags) { uint16_t count; BCI_LOCALS; if (dev_priv->status_ptr) { /* coordinate with Xserver */ count = dev_priv->status_ptr[1023]; if (count < dev_priv->event_counter) dev_priv->event_wrap++; } else { count = dev_priv->event_counter; } count = (count + 1) & 0xffff; if (count == 0) { count++; /* See the comment above savage_wait_event_*. */ dev_priv->event_wrap++; } dev_priv->event_counter = count; if (dev_priv->status_ptr) dev_priv->status_ptr[1023] = (uint32_t) count; if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) { unsigned int wait_cmd = BCI_CMD_WAIT; if ((flags & SAVAGE_WAIT_2D)) wait_cmd |= BCI_CMD_WAIT_2D; if ((flags & SAVAGE_WAIT_3D)) wait_cmd |= BCI_CMD_WAIT_3D; BEGIN_BCI(2); BCI_WRITE(wait_cmd); } else { BEGIN_BCI(1); } BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t) count); return count; } /* * Freelist management */ static int savage_freelist_init(struct drm_device * dev) { drm_savage_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; drm_savage_buf_priv_t *entry; int i; DRM_DEBUG("count=%d\n", dma->buf_count); dev_priv->head.next = &dev_priv->tail; dev_priv->head.prev = NULL; dev_priv->head.buf = NULL; dev_priv->tail.next = NULL; dev_priv->tail.prev = &dev_priv->head; dev_priv->tail.buf = NULL; for (i = 0; i < dma->buf_count; i++) { buf = dma->buflist[i]; entry = buf->dev_private; SET_AGE(&entry->age, 0, 0); entry->buf = buf; entry->next = dev_priv->head.next; entry->prev = &dev_priv->head; dev_priv->head.next->prev = entry; dev_priv->head.next = entry; } return 0; } static struct drm_buf *savage_freelist_get(struct drm_device * dev) { drm_savage_private_t *dev_priv = dev->dev_private; drm_savage_buf_priv_t *tail = dev_priv->tail.prev; uint16_t event; unsigned int wrap; DRM_DEBUG("\n"); UPDATE_EVENT_COUNTER(); if (dev_priv->status_ptr) event = dev_priv->status_ptr[1] & 0xffff; else event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff; wrap = dev_priv->event_wrap; if (event > dev_priv->event_counter) wrap--; /* hardware hasn't passed the last wrap yet */ DRM_DEBUG(" tail=0x%04x %d\n", tail->age.event, tail->age.wrap); DRM_DEBUG(" head=0x%04x %d\n", event, wrap); if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) { drm_savage_buf_priv_t *next = tail->next; drm_savage_buf_priv_t *prev = tail->prev; prev->next = next; next->prev = prev; tail->next = tail->prev = NULL; return tail->buf; } DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf); return NULL; } void savage_freelist_put(struct drm_device * dev, struct drm_buf * buf) { drm_savage_private_t *dev_priv = dev->dev_private; 
drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next; DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap); if (entry->next != NULL || entry->prev != NULL) { DRM_ERROR("entry already on freelist.\n"); return; } prev = &dev_priv->head; next = prev->next; prev->next = entry; next->prev = entry; entry->prev = prev; entry->next = next; } /* * Command DMA */ static int savage_dma_init(drm_savage_private_t * dev_priv) { unsigned int i; dev_priv->nr_dma_pages = dev_priv->cmd_dma->size / (SAVAGE_DMA_PAGE_SIZE * 4); dev_priv->dma_pages = kmalloc(sizeof(drm_savage_dma_page_t) * dev_priv->nr_dma_pages, GFP_KERNEL); if (dev_priv->dma_pages == NULL) return -ENOMEM; for (i = 0; i < dev_priv->nr_dma_pages; ++i) { SET_AGE(&dev_priv->dma_pages[i].age, 0, 0); dev_priv->dma_pages[i].used = 0; dev_priv->dma_pages[i].flushed = 0; } SET_AGE(&dev_priv->last_dma_age, 0, 0); dev_priv->first_dma_page = 0; dev_priv->current_dma_page = 0; return 0; } void savage_dma_reset(drm_savage_private_t * dev_priv) { uint16_t event; unsigned int wrap, i; event = savage_bci_emit_event(dev_priv, 0); wrap = dev_priv->event_wrap; for (i = 0; i < dev_priv->nr_dma_pages; ++i) { SET_AGE(&dev_priv->dma_pages[i].age, event, wrap); dev_priv->dma_pages[i].used = 0; dev_priv->dma_pages[i].flushed = 0; } SET_AGE(&dev_priv->last_dma_age, event, wrap); dev_priv->first_dma_page = dev_priv->current_dma_page = 0; } void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page) { uint16_t event; unsigned int wrap; /* Faked DMA buffer pages don't age. */ if (dev_priv->cmd_dma == &dev_priv->fake_dma) return; UPDATE_EVENT_COUNTER(); if (dev_priv->status_ptr) event = dev_priv->status_ptr[1] & 0xffff; else event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff; wrap = dev_priv->event_wrap; if (event > dev_priv->event_counter) wrap--; /* hardware hasn't passed the last wrap yet */ if (dev_priv->dma_pages[page].age.wrap > wrap || (dev_priv->dma_pages[page].age.wrap == wrap && dev_priv->dma_pages[page].age.event > event)) { if (dev_priv->wait_evnt(dev_priv, dev_priv->dma_pages[page].age.event) < 0) DRM_ERROR("wait_evnt failed!\n"); } } uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv, unsigned int n) { unsigned int cur = dev_priv->current_dma_page; unsigned int rest = SAVAGE_DMA_PAGE_SIZE - dev_priv->dma_pages[cur].used; unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE; uint32_t *dma_ptr; unsigned int i; DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n", cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages); if (cur + nr_pages < dev_priv->nr_dma_pages) { dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle + cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used; if (n < rest) rest = n; dev_priv->dma_pages[cur].used += rest; n -= rest; cur++; } else { dev_priv->dma_flush(dev_priv); nr_pages = (n + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE; for (i = cur; i < dev_priv->nr_dma_pages; ++i) { dev_priv->dma_pages[i].age = dev_priv->last_dma_age; dev_priv->dma_pages[i].used = 0; dev_priv->dma_pages[i].flushed = 0; } dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle; dev_priv->first_dma_page = cur = 0; } for (i = cur; nr_pages > 0; ++i, --nr_pages) { #if SAVAGE_DMA_DEBUG if (dev_priv->dma_pages[i].used) { DRM_ERROR("unflushed page %u: used=%u\n", i, dev_priv->dma_pages[i].used); } #endif if (n > SAVAGE_DMA_PAGE_SIZE) dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE; else dev_priv->dma_pages[i].used = n; n -= SAVAGE_DMA_PAGE_SIZE; } dev_priv->current_dma_page = --i; 
DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n", i, dev_priv->dma_pages[i].used, n); savage_dma_wait(dev_priv, dev_priv->current_dma_page); return dma_ptr; } static void savage_dma_flush(drm_savage_private_t * dev_priv) { unsigned int first = dev_priv->first_dma_page; unsigned int cur = dev_priv->current_dma_page; uint16_t event; unsigned int wrap, pad, align, len, i; unsigned long phys_addr; BCI_LOCALS; if (first == cur && dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed) return; /* pad length to multiples of 2 entries * align start of next DMA block to multiles of 8 entries */ pad = -dev_priv->dma_pages[cur].used & 1; align = -(dev_priv->dma_pages[cur].used + pad) & 7; DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, " "pad=%u, align=%u\n", first, cur, dev_priv->dma_pages[first].flushed, dev_priv->dma_pages[cur].used, pad, align); /* pad with noops */ if (pad) { uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle + cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used; dev_priv->dma_pages[cur].used += pad; while (pad != 0) { *dma_ptr++ = BCI_CMD_WAIT; pad--; } } DRM_MEMORYBARRIER(); /* do flush ... */ phys_addr = dev_priv->cmd_dma->offset + (first * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[first].flushed) * 4; len = (cur - first) * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed; DRM_DEBUG("phys_addr=%lx, len=%u\n", phys_addr | dev_priv->dma_type, len); BEGIN_BCI(3); BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1); BCI_WRITE(phys_addr | dev_priv->dma_type); BCI_DMA(len); /* fix alignment of the start of the next block */ dev_priv->dma_pages[cur].used += align; /* age DMA pages */ event = savage_bci_emit_event(dev_priv, 0); wrap = dev_priv->event_wrap; for (i = first; i < cur; ++i) { SET_AGE(&dev_priv->dma_pages[i].age, event, wrap); dev_priv->dma_pages[i].used = 0; dev_priv->dma_pages[i].flushed = 0; } /* age the current page only when it's full */ if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) { SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap); dev_priv->dma_pages[cur].used = 0; dev_priv->dma_pages[cur].flushed = 0; /* advance to next page */ cur++; if (cur == dev_priv->nr_dma_pages) cur = 0; dev_priv->first_dma_page = dev_priv->current_dma_page = cur; } else { dev_priv->first_dma_page = cur; dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used; } SET_AGE(&dev_priv->last_dma_age, event, wrap); DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur, dev_priv->dma_pages[cur].used, dev_priv->dma_pages[cur].flushed); } static void savage_fake_dma_flush(drm_savage_private_t * dev_priv) { unsigned int i, j; BCI_LOCALS; if (dev_priv->first_dma_page == dev_priv->current_dma_page && dev_priv->dma_pages[dev_priv->current_dma_page].used == 0) return; DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n", dev_priv->first_dma_page, dev_priv->current_dma_page, dev_priv->dma_pages[dev_priv->current_dma_page].used); for (i = dev_priv->first_dma_page; i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used; ++i) { uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle + i * SAVAGE_DMA_PAGE_SIZE; #if SAVAGE_DMA_DEBUG /* Sanity check: all pages except the last one must be full. 
*/ if (i < dev_priv->current_dma_page && dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) { DRM_ERROR("partial DMA page %u: used=%u", i, dev_priv->dma_pages[i].used); } #endif BEGIN_BCI(dev_priv->dma_pages[i].used); for (j = 0; j < dev_priv->dma_pages[i].used; ++j) { BCI_WRITE(dma_ptr[j]); } dev_priv->dma_pages[i].used = 0; } /* reset to first page */ dev_priv->first_dma_page = dev_priv->current_dma_page = 0; } int savage_driver_load(struct drm_device *dev, unsigned long chipset) { drm_savage_private_t *dev_priv; dev_priv = kzalloc(sizeof(drm_savage_private_t), GFP_KERNEL); if (dev_priv == NULL) return -ENOMEM; dev->dev_private = (void *)dev_priv; dev_priv->chipset = (enum savage_family)chipset; pci_set_master(dev->pdev); return 0; } /* * Initialize mappings. On Savage4 and SavageIX the alignment * and size of the aperture is not suitable for automatic MTRR setup * in drm_addmap. Therefore we add them manually before the maps are * initialized, and tear them down on last close. */ int savage_driver_firstopen(struct drm_device *dev) { drm_savage_private_t *dev_priv = dev->dev_private; unsigned long mmio_base, fb_base, fb_size, aperture_base; /* fb_rsrc and aper_rsrc aren't really used currently, but still exist * in case we decide we need information on the BAR for BSD in the * future. */ unsigned int fb_rsrc, aper_rsrc; int ret = 0; dev_priv->mtrr[0].handle = -1; dev_priv->mtrr[1].handle = -1; dev_priv->mtrr[2].handle = -1; if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { fb_rsrc = 0; fb_base = pci_resource_start(dev->pdev, 0); fb_size = SAVAGE_FB_SIZE_S3; mmio_base = fb_base + SAVAGE_FB_SIZE_S3; aper_rsrc = 0; aperture_base = fb_base + SAVAGE_APERTURE_OFFSET; /* this should always be true */ if (pci_resource_len(dev->pdev, 0) == 0x08000000) { /* Don't make MMIO write-cobining! We need 3 * MTRRs. */ dev_priv->mtrr[0].base = fb_base; dev_priv->mtrr[0].size = 0x01000000; dev_priv->mtrr[0].handle = drm_mtrr_add(dev_priv->mtrr[0].base, dev_priv->mtrr[0].size, DRM_MTRR_WC); dev_priv->mtrr[1].base = fb_base + 0x02000000; dev_priv->mtrr[1].size = 0x02000000; dev_priv->mtrr[1].handle = drm_mtrr_add(dev_priv->mtrr[1].base, dev_priv->mtrr[1].size, DRM_MTRR_WC); dev_priv->mtrr[2].base = fb_base + 0x04000000; dev_priv->mtrr[2].size = 0x04000000; dev_priv->mtrr[2].handle = drm_mtrr_add(dev_priv->mtrr[2].base, dev_priv->mtrr[2].size, DRM_MTRR_WC); } else { DRM_ERROR("strange pci_resource_len %08llx\n", (unsigned long long) pci_resource_len(dev->pdev, 0)); } } else if (dev_priv->chipset != S3_SUPERSAVAGE && dev_priv->chipset != S3_SAVAGE2000) { mmio_base = pci_resource_start(dev->pdev, 0); fb_rsrc = 1; fb_base = pci_resource_start(dev->pdev, 1); fb_size = SAVAGE_FB_SIZE_S4; aper_rsrc = 1; aperture_base = fb_base + SAVAGE_APERTURE_OFFSET; /* this should always be true */ if (pci_resource_len(dev->pdev, 1) == 0x08000000) { /* Can use one MTRR to cover both fb and * aperture. */ dev_priv->mtrr[0].base = fb_base; dev_priv->mtrr[0].size = 0x08000000; dev_priv->mtrr[0].handle = drm_mtrr_add(dev_priv->mtrr[0].base, dev_priv->mtrr[0].size, DRM_MTRR_WC); } else { DRM_ERROR("strange pci_resource_len %08llx\n", (unsigned long long) pci_resource_len(dev->pdev, 1)); } } else { mmio_base = pci_resource_start(dev->pdev, 0); fb_rsrc = 1; fb_base = pci_resource_start(dev->pdev, 1); fb_size = pci_resource_len(dev->pdev, 1); aper_rsrc = 2; aperture_base = pci_resource_start(dev->pdev, 2); /* Automatic MTRR setup will do the right thing. 
*/ } ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio); if (ret) return ret; ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING, &dev_priv->fb); if (ret) return ret; ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE, _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING, &dev_priv->aperture); return ret; } /* * Delete MTRRs and free device-private data. */ void savage_driver_lastclose(struct drm_device *dev) { drm_savage_private_t *dev_priv = dev->dev_private; int i; for (i = 0; i < 3; ++i) if (dev_priv->mtrr[i].handle >= 0) drm_mtrr_del(dev_priv->mtrr[i].handle, dev_priv->mtrr[i].base, dev_priv->mtrr[i].size, DRM_MTRR_WC); } int savage_driver_unload(struct drm_device *dev) { drm_savage_private_t *dev_priv = dev->dev_private; kfree(dev_priv); return 0; } static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init) { drm_savage_private_t *dev_priv = dev->dev_private; if (init->fb_bpp != 16 && init->fb_bpp != 32) { DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp); return -EINVAL; } if (init->depth_bpp != 16 && init->depth_bpp != 32) { DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp); return -EINVAL; } if (init->dma_type != SAVAGE_DMA_AGP && init->dma_type != SAVAGE_DMA_PCI) { DRM_ERROR("invalid dma memory type %d!\n", init->dma_type); return -EINVAL; } dev_priv->cob_size = init->cob_size; dev_priv->bci_threshold_lo = init->bci_threshold_lo; dev_priv->bci_threshold_hi = init->bci_threshold_hi; dev_priv->dma_type = init->dma_type; dev_priv->fb_bpp = init->fb_bpp; dev_priv->front_offset = init->front_offset; dev_priv->front_pitch = init->front_pitch; dev_priv->back_offset = init->back_offset; dev_priv->back_pitch = init->back_pitch; dev_priv->depth_bpp = init->depth_bpp; dev_priv->depth_offset = init->depth_offset; dev_priv->depth_pitch = init->depth_pitch; dev_priv->texture_offset = init->texture_offset; dev_priv->texture_size = init->texture_size; dev_priv->sarea = drm_getsarea(dev); if (!dev_priv->sarea) { DRM_ERROR("could not find sarea!\n"); savage_do_cleanup_bci(dev); return -EINVAL; } if (init->status_offset != 0) { dev_priv->status = drm_core_findmap(dev, init->status_offset); if (!dev_priv->status) { DRM_ERROR("could not find shadow status region!\n"); savage_do_cleanup_bci(dev); return -EINVAL; } } else { dev_priv->status = NULL; } if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) { dev->agp_buffer_token = init->buffers_offset; dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); if (!dev->agp_buffer_map) { DRM_ERROR("could not find DMA buffer region!\n"); savage_do_cleanup_bci(dev); return -EINVAL; } drm_core_ioremap(dev->agp_buffer_map, dev); if (!dev->agp_buffer_map->handle) { DRM_ERROR("failed to ioremap DMA buffer region!\n"); savage_do_cleanup_bci(dev); return -ENOMEM; } } if (init->agp_textures_offset) { dev_priv->agp_textures = drm_core_findmap(dev, init->agp_textures_offset); if (!dev_priv->agp_textures) { DRM_ERROR("could not find agp texture region!\n"); savage_do_cleanup_bci(dev); return -EINVAL; } } else { dev_priv->agp_textures = NULL; } if (init->cmd_dma_offset) { if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { DRM_ERROR("command DMA not supported on " "Savage3D/MX/IX.\n"); savage_do_cleanup_bci(dev); return -EINVAL; } if (dev->dma && dev->dma->buflist) { DRM_ERROR("command and vertex DMA not supported " "at the same time.\n"); savage_do_cleanup_bci(dev); return -EINVAL; } dev_priv->cmd_dma = drm_core_findmap(dev, 
init->cmd_dma_offset); if (!dev_priv->cmd_dma) { DRM_ERROR("could not find command DMA region!\n"); savage_do_cleanup_bci(dev); return -EINVAL; } if (dev_priv->dma_type == SAVAGE_DMA_AGP) { if (dev_priv->cmd_dma->type != _DRM_AGP) { DRM_ERROR("AGP command DMA region is not a " "_DRM_AGP map!\n"); savage_do_cleanup_bci(dev); return -EINVAL; } drm_core_ioremap(dev_priv->cmd_dma, dev); if (!dev_priv->cmd_dma->handle) { DRM_ERROR("failed to ioremap command " "DMA region!\n"); savage_do_cleanup_bci(dev); return -ENOMEM; } } else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) { DRM_ERROR("PCI command DMA region is not a " "_DRM_CONSISTENT map!\n"); savage_do_cleanup_bci(dev); return -EINVAL; } } else { dev_priv->cmd_dma = NULL; } dev_priv->dma_flush = savage_dma_flush; if (!dev_priv->cmd_dma) { DRM_DEBUG("falling back to faked command DMA.\n"); dev_priv->fake_dma.offset = 0; dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE; dev_priv->fake_dma.type = _DRM_SHM; dev_priv->fake_dma.handle = kmalloc(SAVAGE_FAKE_DMA_SIZE, GFP_KERNEL); if (!dev_priv->fake_dma.handle) { DRM_ERROR("could not allocate faked DMA buffer!\n"); savage_do_cleanup_bci(dev); return -ENOMEM; } dev_priv->cmd_dma = &dev_priv->fake_dma; dev_priv->dma_flush = savage_fake_dma_flush; } dev_priv->sarea_priv = (drm_savage_sarea_t *) ((uint8_t *) dev_priv->sarea->handle + init->sarea_priv_offset); /* setup bitmap descriptors */ { unsigned int color_tile_format; unsigned int depth_tile_format; unsigned int front_stride, back_stride, depth_stride; if (dev_priv->chipset <= S3_SAVAGE4) { color_tile_format = dev_priv->fb_bpp == 16 ? SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP; depth_tile_format = dev_priv->depth_bpp == 16 ? SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP; } else { color_tile_format = SAVAGE_BD_TILE_DEST; depth_tile_format = SAVAGE_BD_TILE_DEST; } front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8); back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8); depth_stride = dev_priv->depth_pitch / (dev_priv->depth_bpp / 8); dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE | (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) | (color_tile_format << SAVAGE_BD_TILE_SHIFT); dev_priv->back_bd = back_stride | SAVAGE_BD_BW_DISABLE | (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) | (color_tile_format << SAVAGE_BD_TILE_SHIFT); dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE | (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) | (depth_tile_format << SAVAGE_BD_TILE_SHIFT); } /* setup status and bci ptr */ dev_priv->event_counter = 0; dev_priv->event_wrap = 0; dev_priv->bci_ptr = (volatile uint32_t *) ((uint8_t *) dev_priv->mmio->handle + SAVAGE_BCI_OFFSET); if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D; } else { dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4; } if (dev_priv->status != NULL) { dev_priv->status_ptr = (volatile uint32_t *)dev_priv->status->handle; dev_priv->wait_fifo = savage_bci_wait_fifo_shadow; dev_priv->wait_evnt = savage_bci_wait_event_shadow; dev_priv->status_ptr[1023] = dev_priv->event_counter; } else { dev_priv->status_ptr = NULL; if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { dev_priv->wait_fifo = savage_bci_wait_fifo_s3d; } else { dev_priv->wait_fifo = savage_bci_wait_fifo_s4; } dev_priv->wait_evnt = savage_bci_wait_event_reg; } /* cliprect functions */ if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d; else dev_priv->emit_clip_rect = savage_emit_clip_rect_s4; if (savage_freelist_init(dev) < 0) { 
DRM_ERROR("could not initialize freelist\n"); savage_do_cleanup_bci(dev); return -ENOMEM; } if (savage_dma_init(dev_priv) < 0) { DRM_ERROR("could not initialize command DMA\n"); savage_do_cleanup_bci(dev); return -ENOMEM; } return 0; } static int savage_do_cleanup_bci(struct drm_device * dev) { drm_savage_private_t *dev_priv = dev->dev_private; if (dev_priv->cmd_dma == &dev_priv->fake_dma) { kfree(dev_priv->fake_dma.handle); } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle && dev_priv->cmd_dma->type == _DRM_AGP && dev_priv->dma_type == SAVAGE_DMA_AGP) drm_core_ioremapfree(dev_priv->cmd_dma, dev); if (dev_priv->dma_type == SAVAGE_DMA_AGP && dev->agp_buffer_map && dev->agp_buffer_map->handle) { drm_core_ioremapfree(dev->agp_buffer_map, dev); /* make sure the next instance (which may be running * in PCI mode) doesn't try to use an old * agp_buffer_map. */ dev->agp_buffer_map = NULL; } kfree(dev_priv->dma_pages); return 0; } static int savage_bci_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_savage_init_t *init = data; LOCK_TEST_WITH_RETURN(dev, file_priv); switch (init->func) { case SAVAGE_INIT_BCI: return savage_do_init_bci(dev, init); case SAVAGE_CLEANUP_BCI: return savage_do_cleanup_bci(dev); } return -EINVAL; } static int savage_bci_event_emit(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_savage_private_t *dev_priv = dev->dev_private; drm_savage_event_emit_t *event = data; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); event->count = savage_bci_emit_event(dev_priv, event->flags); event->count |= dev_priv->event_wrap << 16; return 0; } static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_savage_private_t *dev_priv = dev->dev_private; drm_savage_event_wait_t *event = data; unsigned int event_e, hw_e; unsigned int event_w, hw_w; DRM_DEBUG("\n"); UPDATE_EVENT_COUNTER(); if (dev_priv->status_ptr) hw_e = dev_priv->status_ptr[1] & 0xffff; else hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff; hw_w = dev_priv->event_wrap; if (hw_e > dev_priv->event_counter) hw_w--; /* hardware hasn't passed the last wrap yet */ event_e = event->count & 0xffff; event_w = event->count >> 16; /* Don't need to wait if * - event counter wrapped since the event was emitted or * - the hardware has advanced up to or over the event to wait for. */ if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e)) return 0; else return dev_priv->wait_evnt(dev_priv, event_e); } /* * DMA buffer management */ static int savage_bci_get_buffers(struct drm_device *dev, struct drm_file *file_priv, struct drm_dma *d) { struct drm_buf *buf; int i; for (i = d->granted_count; i < d->request_count; i++) { buf = savage_freelist_get(dev); if (!buf) return -EAGAIN; buf->file_priv = file_priv; if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, sizeof(buf->idx))) return -EFAULT; if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, sizeof(buf->total))) return -EFAULT; d->granted_count++; } return 0; } int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; struct drm_dma *d = data; int ret = 0; LOCK_TEST_WITH_RETURN(dev, file_priv); /* Please don't send us buffers. */ if (d->send_count != 0) { DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", DRM_CURRENTPID, d->send_count); return -EINVAL; } /* We'll send you buffers. 
*/ if (d->request_count < 0 || d->request_count > dma->buf_count) { DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", DRM_CURRENTPID, d->request_count, dma->buf_count); return -EINVAL; } d->granted_count = 0; if (d->request_count) { ret = savage_bci_get_buffers(dev, file_priv, d); } return ret; } void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; drm_savage_private_t *dev_priv = dev->dev_private; int i; if (!dma) return; if (!dev_priv) return; if (!dma->buflist) return; /*i830_flush_queue(dev); */ for (i = 0; i < dma->buf_count; i++) { struct drm_buf *buf = dma->buflist[i]; drm_savage_buf_priv_t *buf_priv = buf->dev_private; if (buf->file_priv == file_priv && buf_priv && buf_priv->next == NULL && buf_priv->prev == NULL) { uint16_t event; DRM_DEBUG("reclaimed from client\n"); event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D); SET_AGE(&buf_priv->age, event, dev_priv->event_wrap); savage_freelist_put(dev, buf); } } drm_core_reclaim_buffers(dev, file_priv); } struct drm_ioctl_desc savage_ioctls[] = { DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH), DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH), DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH), }; int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
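/*
 * Editor's illustrative sketch (not part of savage_bci.c): the wrap-safe
 * 16-bit event-tag comparison used by savage_bci_wait_event_shadow() and
 * savage_bci_wait_event_reg() above. An event e counts as completed when
 * the hardware tag is at most 0x7fff steps ahead of it modulo 2^16, and a
 * tag of 0 is treated as completed because the BIOS resets the tag on mode
 * changes. A minimal standalone demonstration under those assumptions.
 */
#include <assert.h>
#include <stdint.h>

static int savage_event_passed(uint16_t hw_tag, uint16_t e)
{
	/* Same arithmetic as the driver: difference taken modulo 2^16. */
	return (uint16_t)(hw_tag - e) <= 0x7fff || hw_tag == 0;
}

int main(void)
{
	assert(savage_event_passed(0x0005, 0x0003));  /* tag already past e */
	assert(!savage_event_passed(0x0003, 0x0005)); /* e still pending */
	assert(savage_event_passed(0x0001, 0xfffe));  /* passed across the wrap */
	assert(savage_event_passed(0x0000, 0x1234));  /* BIOS reset the tag */
	return 0;
}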
kv193/buildroot
linux/linux-kernel/drivers/gpu/drm/savage/savage_bci.c
C
gpl-2.0
31,167
/* * Block driver for media (i.e., flash cards) * * Copyright 2002 Hewlett-Packard Company * Copyright 2005-2008 Pierre Ossman * * Use consistent with the GNU GPL is permitted, * provided that this copyright notice is * preserved in its entirety in all copies and derived works. * * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED, * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS * FITNESS FOR ANY PARTICULAR PURPOSE. * * Many thanks to Alessandro Rubini and Jonathan Corbet! * * Author: Andrew Christian * 28 May 2002 */ #include <linux/moduleparam.h> #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/hdreg.h> #include <linux/kdev_t.h> #include <linux/blkdev.h> #include <linux/mutex.h> #include <linux/scatterlist.h> #include <linux/string_helpers.h> #include <linux/delay.h> #include <linux/capability.h> #include <linux/compat.h> #include <linux/pm_runtime.h> #include <linux/mmc/ioctl.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> #include <linux/mmc/mmc.h> #include <linux/mmc/sd.h> #include <asm/uaccess.h> #include "queue.h" MODULE_ALIAS("mmc:block"); #ifdef MODULE_PARAM_PREFIX #undef MODULE_PARAM_PREFIX #endif #define MODULE_PARAM_PREFIX "mmcblk." #define INAND_CMD38_ARG_EXT_CSD 113 #define INAND_CMD38_ARG_ERASE 0x00 #define INAND_CMD38_ARG_TRIM 0x01 #define INAND_CMD38_ARG_SECERASE 0x80 #define INAND_CMD38_ARG_SECTRIM1 0x81 #define INAND_CMD38_ARG_SECTRIM2 0x88 #define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ #define MMC_SANITIZE_REQ_TIMEOUT 240000 #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) #define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \ (req->cmd_flags & REQ_META)) && \ (rq_data_dir(req) == WRITE)) #define PACKED_CMD_VER 0x01 #define PACKED_CMD_WR 0x02 static DEFINE_MUTEX(block_mutex); /* * The defaults come from config options but can be overridden by module * or bootarg options. */ static int perdev_minors = CONFIG_MMC_BLOCK_MINORS; /* * We've only got one major, so number of mmcblk devices is * limited to (1 << 20) / number of minors per device. It is also * currently limited by the size of the static bitmaps below. */ static int max_devices; #define MAX_DEVICES 256 /* TODO: Replace these with struct ida */ static DECLARE_BITMAP(dev_use, MAX_DEVICES); static DECLARE_BITMAP(name_use, MAX_DEVICES); /* * There is one mmc_blk_data per slot. */ struct mmc_blk_data { spinlock_t lock; struct gendisk *disk; struct mmc_queue queue; struct list_head part; unsigned int flags; #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */ #define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */ #define MMC_BLK_PACKED_CMD (1 << 2) /* MMC packed command support */ unsigned int usage; unsigned int read_only; unsigned int part_type; unsigned int name_idx; unsigned int reset_done; #define MMC_BLK_READ BIT(0) #define MMC_BLK_WRITE BIT(1) #define MMC_BLK_DISCARD BIT(2) #define MMC_BLK_SECDISCARD BIT(3) /* * Only set in main mmc_blk_data associated * with mmc_card with dev_set_drvdata, and keeps * track of the current selected device partition. 
*/ unsigned int part_curr; struct device_attribute force_ro; struct device_attribute power_ro_lock; int area_type; }; static DEFINE_MUTEX(open_lock); enum { MMC_PACKED_NR_IDX = -1, MMC_PACKED_NR_ZERO, MMC_PACKED_NR_SINGLE, }; module_param(perdev_minors, int, 0444); MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device"); static inline int mmc_blk_part_switch(struct mmc_card *card, struct mmc_blk_data *md); static int get_card_status(struct mmc_card *card, u32 *status, int retries); static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq) { struct mmc_packed *packed = mqrq->packed; BUG_ON(!packed); mqrq->cmd_type = MMC_PACKED_NONE; packed->nr_entries = MMC_PACKED_NR_ZERO; packed->idx_failure = MMC_PACKED_NR_IDX; packed->retries = 0; packed->blocks = 0; } static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) { struct mmc_blk_data *md; mutex_lock(&open_lock); md = disk->private_data; if (md && md->usage == 0) md = NULL; if (md) md->usage++; mutex_unlock(&open_lock); return md; } static inline int mmc_get_devidx(struct gendisk *disk) { int devmaj = MAJOR(disk_devt(disk)); int devidx = MINOR(disk_devt(disk)) / perdev_minors; if (!devmaj) devidx = disk->first_minor / perdev_minors; return devidx; } static void mmc_blk_put(struct mmc_blk_data *md) { mutex_lock(&open_lock); md->usage--; if (md->usage == 0) { int devidx = mmc_get_devidx(md->disk); blk_cleanup_queue(md->queue.queue); __clear_bit(devidx, dev_use); put_disk(md->disk); kfree(md); } mutex_unlock(&open_lock); } static ssize_t power_ro_lock_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret; struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); struct mmc_card *card = md->queue.card; int locked = 0; if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN) locked = 2; else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN) locked = 1; ret = snprintf(buf, PAGE_SIZE, "%d\n", locked); mmc_blk_put(md); return ret; } static ssize_t power_ro_lock_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret; struct mmc_blk_data *md, *part_md; struct mmc_card *card; unsigned long set; if (kstrtoul(buf, 0, &set)) return -EINVAL; if (set != 1) return count; md = mmc_blk_get(dev_to_disk(dev)); card = md->queue.card; mmc_get_card(card); ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, card->ext_csd.boot_ro_lock | EXT_CSD_BOOT_WP_B_PWR_WP_EN, card->ext_csd.part_time); if (ret) pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret); else card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN; mmc_put_card(card); if (!ret) { pr_info("%s: Locking boot partition ro until next power on\n", md->disk->disk_name); set_disk_ro(md->disk, 1); list_for_each_entry(part_md, &md->part, part) if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) { pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name); set_disk_ro(part_md->disk, 1); } } mmc_blk_put(md); return count; } static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret; struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); ret = snprintf(buf, PAGE_SIZE, "%d\n", get_disk_ro(dev_to_disk(dev)) ^ md->read_only); mmc_blk_put(md); return ret; } static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret; char *end; struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); unsigned long set = 
simple_strtoul(buf, &end, 0); if (end == buf) { ret = -EINVAL; goto out; } set_disk_ro(dev_to_disk(dev), set || md->read_only); ret = count; out: mmc_blk_put(md); return ret; } static int mmc_blk_open(struct block_device *bdev, fmode_t mode) { struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk); int ret = -ENXIO; mutex_lock(&block_mutex); if (md) { if (md->usage == 2) check_disk_change(bdev); ret = 0; if ((mode & FMODE_WRITE) && md->read_only) { mmc_blk_put(md); ret = -EROFS; } } mutex_unlock(&block_mutex); return ret; } static void mmc_blk_release(struct gendisk *disk, fmode_t mode) { struct mmc_blk_data *md = disk->private_data; mutex_lock(&block_mutex); mmc_blk_put(md); mutex_unlock(&block_mutex); } static int mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) { geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16); geo->heads = 4; geo->sectors = 16; return 0; } struct mmc_blk_ioc_data { struct mmc_ioc_cmd ic; unsigned char *buf; u64 buf_bytes; }; static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user( struct mmc_ioc_cmd __user *user) { struct mmc_blk_ioc_data *idata; int err; idata = kzalloc(sizeof(*idata), GFP_KERNEL); if (!idata) { err = -ENOMEM; goto out; } if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) { err = -EFAULT; goto idata_err; } idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks; if (idata->buf_bytes > MMC_IOC_MAX_BYTES) { err = -EOVERFLOW; goto idata_err; } if (!idata->buf_bytes) return idata; idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL); if (!idata->buf) { err = -ENOMEM; goto idata_err; } if (copy_from_user(idata->buf, (void __user *)(unsigned long) idata->ic.data_ptr, idata->buf_bytes)) { err = -EFAULT; goto copy_err; } return idata; copy_err: kfree(idata->buf); idata_err: kfree(idata); out: return ERR_PTR(err); } static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status, u32 retries_max) { int err; u32 retry_count = 0; if (!status || !retries_max) return -EINVAL; do { err = get_card_status(card, status, 5); if (err) break; if (!R1_STATUS(*status) && (R1_CURRENT_STATE(*status) != R1_STATE_PRG)) break; /* RPMB programming operation complete */ /* * Reschedule to give the MMC device a chance to continue * processing the previous command without being polled too * frequently. */ usleep_range(1000, 5000); } while (++retry_count < retries_max); if (retry_count == retries_max) err = -EPERM; return err; } static int ioctl_do_sanitize(struct mmc_card *card) { int err; if (!mmc_can_sanitize(card)) { pr_warn("%s: %s - SANITIZE is not supported\n", mmc_hostname(card->host), __func__); err = -EOPNOTSUPP; goto out; } pr_debug("%s: %s - SANITIZE IN PROGRESS...\n", mmc_hostname(card->host), __func__); err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START, 1, MMC_SANITIZE_REQ_TIMEOUT); if (err) pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n", mmc_hostname(card->host), __func__, err); pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host), __func__); out: return err; } static int mmc_blk_ioctl_cmd(struct block_device *bdev, struct mmc_ioc_cmd __user *ic_ptr) { struct mmc_blk_ioc_data *idata; struct mmc_blk_data *md; struct mmc_card *card; struct mmc_command cmd = {0}; struct mmc_data data = {0}; struct mmc_request mrq = {NULL}; struct scatterlist sg; int err; int is_rpmb = false; u32 status = 0; /* * The caller must have CAP_SYS_RAWIO, and must be calling this on the * whole block device, not on a partition. This prevents overspray * between sibling partitions. 
*/ if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains)) return -EPERM; idata = mmc_blk_ioctl_copy_from_user(ic_ptr); if (IS_ERR(idata)) return PTR_ERR(idata); md = mmc_blk_get(bdev->bd_disk); if (!md) { err = -EINVAL; goto cmd_err; } if (md->area_type & MMC_BLK_DATA_AREA_RPMB) is_rpmb = true; card = md->queue.card; if (IS_ERR(card)) { err = PTR_ERR(card); goto cmd_done; } cmd.opcode = idata->ic.opcode; cmd.arg = idata->ic.arg; cmd.flags = idata->ic.flags; if (idata->buf_bytes) { data.sg = &sg; data.sg_len = 1; data.blksz = idata->ic.blksz; data.blocks = idata->ic.blocks; sg_init_one(data.sg, idata->buf, idata->buf_bytes); if (idata->ic.write_flag) data.flags = MMC_DATA_WRITE; else data.flags = MMC_DATA_READ; /* data.flags must already be set before doing this. */ mmc_set_data_timeout(&data, card); /* Allow overriding the timeout_ns for empirical tuning. */ if (idata->ic.data_timeout_ns) data.timeout_ns = idata->ic.data_timeout_ns; if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { /* * Pretend this is a data transfer and rely on the * host driver to compute timeout. When all host * drivers support cmd.cmd_timeout for R1B, this * can be changed to: * * mrq.data = NULL; * cmd.cmd_timeout = idata->ic.cmd_timeout_ms; */ data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000; } mrq.data = &data; } mrq.cmd = &cmd; mmc_get_card(card); err = mmc_blk_part_switch(card, md); if (err) goto cmd_rel_host; if (idata->ic.is_acmd) { err = mmc_app_cmd(card->host, card); if (err) goto cmd_rel_host; } if (is_rpmb) { err = mmc_set_blockcount(card, data.blocks, idata->ic.write_flag & (1 << 31)); if (err) goto cmd_rel_host; } if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) && (cmd.opcode == MMC_SWITCH)) { err = ioctl_do_sanitize(card); if (err) pr_err("%s: ioctl_do_sanitize() failed. err = %d", __func__, err); goto cmd_rel_host; } mmc_wait_for_req(card->host, &mrq); if (cmd.error) { dev_err(mmc_dev(card->host), "%s: cmd error %d\n", __func__, cmd.error); err = cmd.error; goto cmd_rel_host; } if (data.error) { dev_err(mmc_dev(card->host), "%s: data error %d\n", __func__, data.error); err = data.error; goto cmd_rel_host; } /* * According to the SD specs, some commands require a delay after * issuing the command. */ if (idata->ic.postsleep_min_us) usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us); if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) { err = -EFAULT; goto cmd_rel_host; } if (!idata->ic.write_flag) { if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr, idata->buf, idata->buf_bytes)) { err = -EFAULT; goto cmd_rel_host; } } if (is_rpmb) { /* * Ensure RPMB command has completed by polling CMD13 * "Send Status". 
*/ err = ioctl_rpmb_card_status_poll(card, &status, 5); if (err) dev_err(mmc_dev(card->host), "%s: Card Status=0x%08X, error %d\n", __func__, status, err); } cmd_rel_host: mmc_put_card(card); cmd_done: mmc_blk_put(md); cmd_err: kfree(idata->buf); kfree(idata); return err; } static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { int ret = -EINVAL; if (cmd == MMC_IOC_CMD) ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg); return ret; } #ifdef CONFIG_COMPAT static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg)); } #endif static const struct block_device_operations mmc_bdops = { .open = mmc_blk_open, .release = mmc_blk_release, .getgeo = mmc_blk_getgeo, .owner = THIS_MODULE, .ioctl = mmc_blk_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = mmc_blk_compat_ioctl, #endif }; static inline int mmc_blk_part_switch(struct mmc_card *card, struct mmc_blk_data *md) { int ret; struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev); if (main_md->part_curr == md->part_type) return 0; if (mmc_card_mmc(card)) { u8 part_config = card->ext_csd.part_config; part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; part_config |= md->part_type; ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG, part_config, card->ext_csd.part_time); if (ret) return ret; card->ext_csd.part_config = part_config; } main_md->part_curr = md->part_type; return 0; } static u32 mmc_sd_num_wr_blocks(struct mmc_card *card) { int err; u32 result; __be32 *blocks; struct mmc_request mrq = {NULL}; struct mmc_command cmd = {0}; struct mmc_data data = {0}; struct scatterlist sg; cmd.opcode = MMC_APP_CMD; cmd.arg = card->rca << 16; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; err = mmc_wait_for_cmd(card->host, &cmd, 0); if (err) return (u32)-1; if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD)) return (u32)-1; memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = SD_APP_SEND_NUM_WR_BLKS; cmd.arg = 0; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; data.blksz = 4; data.blocks = 1; data.flags = MMC_DATA_READ; data.sg = &sg; data.sg_len = 1; mmc_set_data_timeout(&data, card); mrq.cmd = &cmd; mrq.data = &data; blocks = kmalloc(4, GFP_KERNEL); if (!blocks) return (u32)-1; sg_init_one(&sg, blocks, 4); mmc_wait_for_req(card->host, &mrq); result = ntohl(*blocks); kfree(blocks); if (cmd.error || data.error) result = (u32)-1; return result; } static int get_card_status(struct mmc_card *card, u32 *status, int retries) { struct mmc_command cmd = {0}; int err; cmd.opcode = MMC_SEND_STATUS; if (!mmc_host_is_spi(card->host)) cmd.arg = card->rca << 16; cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; err = mmc_wait_for_cmd(card->host, &cmd, retries); if (err == 0) *status = cmd.resp[0]; return err; } static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms, bool hw_busy_detect, struct request *req, int *gen_err) { unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); int err = 0; u32 status; do { err = get_card_status(card, &status, 5); if (err) { pr_err("%s: error %d requesting status\n", req->rq_disk->disk_name, err); return err; } if (status & R1_ERROR) { pr_err("%s: %s: error sending status cmd, status %#x\n", req->rq_disk->disk_name, __func__, status); *gen_err = 1; } /* We may rely on the host hw to handle busy detection.*/ if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && hw_busy_detect) 
break; /* * Timeout if the device never becomes ready for data and never * leaves the program state. */ if (time_after(jiffies, timeout)) { pr_err("%s: Card stuck in programming state! %s %s\n", mmc_hostname(card->host), req->rq_disk->disk_name, __func__); return -ETIMEDOUT; } /* * Some cards mishandle the status bits, * so make sure to check both the busy * indication and the card state. */ } while (!(status & R1_READY_FOR_DATA) || (R1_CURRENT_STATE(status) == R1_STATE_PRG)); return err; } static int send_stop(struct mmc_card *card, unsigned int timeout_ms, struct request *req, int *gen_err, u32 *stop_status) { struct mmc_host *host = card->host; struct mmc_command cmd = {0}; int err; bool use_r1b_resp = rq_data_dir(req) == WRITE; /* * Normally we use R1B responses for WRITE, but in cases where the host * has specified a max_busy_timeout we need to validate it. A failure * means we need to prevent the host from doing hw busy detection, which * is done by converting to a R1 response instead. */ if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) use_r1b_resp = false; cmd.opcode = MMC_STOP_TRANSMISSION; if (use_r1b_resp) { cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; cmd.busy_timeout = timeout_ms; } else { cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; } err = mmc_wait_for_cmd(host, &cmd, 5); if (err) return err; *stop_status = cmd.resp[0]; /* No need to check card status in case of READ. */ if (rq_data_dir(req) == READ) return 0; if (!mmc_host_is_spi(host) && (*stop_status & R1_ERROR)) { pr_err("%s: %s: general error sending stop command, resp %#x\n", req->rq_disk->disk_name, __func__, *stop_status); *gen_err = 1; } return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err); } #define ERR_NOMEDIUM 3 #define ERR_RETRY 2 #define ERR_ABORT 1 #define ERR_CONTINUE 0 static int mmc_blk_cmd_error(struct request *req, const char *name, int error, bool status_valid, u32 status) { switch (error) { case -EILSEQ: /* response crc error, retry the r/w cmd */ pr_err("%s: %s sending %s command, card status %#x\n", req->rq_disk->disk_name, "response CRC error", name, status); return ERR_RETRY; case -ETIMEDOUT: pr_err("%s: %s sending %s command, card status %#x\n", req->rq_disk->disk_name, "timed out", name, status); /* If the status cmd initially failed, retry the r/w cmd */ if (!status_valid) return ERR_RETRY; /* * If it was a r/w cmd crc error, or illegal command * (eg, issued in wrong state) then retry - we should * have corrected the state problem above. */ if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) return ERR_RETRY; /* Otherwise abort the command */ return ERR_ABORT; default: /* We don't understand the error code the driver gave us */ pr_err("%s: unknown error %d sending read/write command, card status %#x\n", req->rq_disk->disk_name, error, status); return ERR_ABORT; } } /* * Initial r/w and stop cmd error recovery. * We don't know whether the card received the r/w cmd or not, so try to * restore things back to a sane state. Essentially, we do this as follows: * - Obtain card status. If the first attempt to obtain card status fails, * the status word will reflect the failed status cmd, not the failed * r/w cmd. If we fail to obtain card status, it suggests we can no * longer communicate with the card. * - Check the card state. If the card received the cmd but there was a * transient problem with the response, it might still be in a data transfer * mode. Try to send it a stop command. If this fails, we can't recover. 
* - If the r/w cmd failed due to a response CRC error, it was probably * transient, so retry the cmd. * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry. * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or * illegal cmd, retry. * Otherwise we don't understand what happened, so abort. */ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, struct mmc_blk_request *brq, int *ecc_err, int *gen_err) { bool prev_cmd_status_valid = true; u32 status, stop_status = 0; int err, retry; if (mmc_card_removed(card)) return ERR_NOMEDIUM; /* * Try to get card status which indicates both the card state * and why there was no response. If the first attempt fails, * we can't be sure the returned status is for the r/w command. */ for (retry = 2; retry >= 0; retry--) { err = get_card_status(card, &status, 0); if (!err) break; /* Re-tune if needed */ mmc_retune_recheck(card->host); prev_cmd_status_valid = false; pr_err("%s: error %d sending status command, %sing\n", req->rq_disk->disk_name, err, retry ? "retry" : "abort"); } /* We couldn't get a response from the card. Give up. */ if (err) { /* Check if the card is removed */ if (mmc_detect_card_removed(card->host)) return ERR_NOMEDIUM; return ERR_ABORT; } /* Flag ECC errors */ if ((status & R1_CARD_ECC_FAILED) || (brq->stop.resp[0] & R1_CARD_ECC_FAILED) || (brq->cmd.resp[0] & R1_CARD_ECC_FAILED)) *ecc_err = 1; /* Flag General errors */ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) if ((status & R1_ERROR) || (brq->stop.resp[0] & R1_ERROR)) { pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n", req->rq_disk->disk_name, __func__, brq->stop.resp[0], status); *gen_err = 1; } /* * Check the current card state. If it is in some data transfer * mode, tell it to stop (and hopefully transition back to TRAN.) */ if (R1_CURRENT_STATE(status) == R1_STATE_DATA || R1_CURRENT_STATE(status) == R1_STATE_RCV) { err = send_stop(card, DIV_ROUND_UP(brq->data.timeout_ns, 1000000), req, gen_err, &stop_status); if (err) { pr_err("%s: error %d sending stop command\n", req->rq_disk->disk_name, err); /* * If the stop cmd also timed out, the card is probably * not present, so abort. Other errors are bad news too. */ return ERR_ABORT; } if (stop_status & R1_CARD_ECC_FAILED) *ecc_err = 1; } /* Check for set block count errors */ if (brq->sbc.error) return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error, prev_cmd_status_valid, status); /* Check for r/w command errors */ if (brq->cmd.error) return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error, prev_cmd_status_valid, status); /* Data errors */ if (!brq->stop.error) return ERR_CONTINUE; /* Now for stop errors. These aren't fatal to the transfer. */ pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n", req->rq_disk->disk_name, brq->stop.error, brq->cmd.resp[0], status); /* * Substitute in our own stop status as this will give the error * state which happened during the execution of the r/w command.
*/ if (stop_status) { brq->stop.resp[0] = stop_status; brq->stop.error = 0; } return ERR_CONTINUE; } static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host, int type) { int err; if (md->reset_done & type) return -EEXIST; md->reset_done |= type; err = mmc_hw_reset(host); /* Ensure we switch back to the correct partition */ if (err != -EOPNOTSUPP) { struct mmc_blk_data *main_md = dev_get_drvdata(&host->card->dev); int part_err; main_md->part_curr = main_md->part_type; part_err = mmc_blk_part_switch(host->card, md); if (part_err) { /* * We have failed to get back into the correct * partition, so we need to abort the whole request. */ return -ENODEV; } } return err; } static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type) { md->reset_done &= ~type; } int mmc_access_rpmb(struct mmc_queue *mq) { struct mmc_blk_data *md = mq->data; /* * If this is an RPMB partition access, return true */ if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) return true; return false; } static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; unsigned int from, nr, arg; int err = 0, type = MMC_BLK_DISCARD; if (!mmc_can_erase(card)) { err = -EOPNOTSUPP; goto out; } from = blk_rq_pos(req); nr = blk_rq_sectors(req); if (mmc_can_discard(card)) arg = MMC_DISCARD_ARG; else if (mmc_can_trim(card)) arg = MMC_TRIM_ARG; else arg = MMC_ERASE_ARG; retry: if (card->quirks & MMC_QUIRK_INAND_CMD38) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, INAND_CMD38_ARG_EXT_CSD, arg == MMC_TRIM_ARG ? INAND_CMD38_ARG_TRIM : INAND_CMD38_ARG_ERASE, 0); if (err) goto out; } err = mmc_erase(card, from, nr, arg); out: if (err == -EIO && !mmc_blk_reset(md, card->host, type)) goto retry; if (!err) mmc_blk_reset_success(md, type); blk_end_request(req, err, blk_rq_bytes(req)); return err ? 0 : 1; } static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; unsigned int from, nr, arg; int err = 0, type = MMC_BLK_SECDISCARD; if (!(mmc_can_secure_erase_trim(card))) { err = -EOPNOTSUPP; goto out; } from = blk_rq_pos(req); nr = blk_rq_sectors(req); if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr)) arg = MMC_SECURE_TRIM1_ARG; else arg = MMC_SECURE_ERASE_ARG; retry: if (card->quirks & MMC_QUIRK_INAND_CMD38) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, INAND_CMD38_ARG_EXT_CSD, arg == MMC_SECURE_TRIM1_ARG ? INAND_CMD38_ARG_SECTRIM1 : INAND_CMD38_ARG_SECERASE, 0); if (err) goto out_retry; } err = mmc_erase(card, from, nr, arg); if (err == -EIO) goto out_retry; if (err) goto out; if (arg == MMC_SECURE_TRIM1_ARG) { if (card->quirks & MMC_QUIRK_INAND_CMD38) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, INAND_CMD38_ARG_EXT_CSD, INAND_CMD38_ARG_SECTRIM2, 0); if (err) goto out_retry; } err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); if (err == -EIO) goto out_retry; if (err) goto out; } out_retry: if (err && !mmc_blk_reset(md, card->host, type)) goto retry; if (!err) mmc_blk_reset_success(md, type); out: blk_end_request(req, err, blk_rq_bytes(req)); return err ? 0 : 1; } static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; int ret = 0; ret = mmc_flush_cache(card); if (ret) ret = -EIO; blk_end_request_all(req, ret); return ret ?
0 : 1; } /* * Reformat current write as a reliable write, supporting * both legacy and the enhanced reliable write MMC cards. * In each transfer we'll handle only as much as a single * reliable write can handle, thus finish the request in * partial completions. */ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq, struct mmc_card *card, struct request *req) { if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) { /* Legacy mode imposes restrictions on transfers. */ if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors)) brq->data.blocks = 1; if (brq->data.blocks > card->ext_csd.rel_sectors) brq->data.blocks = card->ext_csd.rel_sectors; else if (brq->data.blocks < card->ext_csd.rel_sectors) brq->data.blocks = 1; } } #define CMD_ERRORS \ (R1_OUT_OF_RANGE | /* Command argument out of range */ \ R1_ADDRESS_ERROR | /* Misaligned address */ \ R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\ R1_WP_VIOLATION | /* Tried to write to protected block */ \ R1_CC_ERROR | /* Card controller error */ \ R1_ERROR) /* General/unknown error */ static int mmc_blk_err_check(struct mmc_card *card, struct mmc_async_req *areq) { struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req, mmc_active); struct mmc_blk_request *brq = &mq_mrq->brq; struct request *req = mq_mrq->req; int need_retune = card->host->need_retune; int ecc_err = 0, gen_err = 0; /* * sbc.error indicates a problem with the set block count * command. No data will have been transferred. * * cmd.error indicates a problem with the r/w command. No * data will have been transferred. * * stop.error indicates a problem with the stop command. Data * may have been transferred, or may still be transferring. */ if (brq->sbc.error || brq->cmd.error || brq->stop.error || brq->data.error) { switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) { case ERR_RETRY: return MMC_BLK_RETRY; case ERR_ABORT: return MMC_BLK_ABORT; case ERR_NOMEDIUM: return MMC_BLK_NOMEDIUM; case ERR_CONTINUE: break; } } /* * Check for errors relating to the execution of the * initial command - such as address errors. No data * has been transferred. */ if (brq->cmd.resp[0] & CMD_ERRORS) { pr_err("%s: r/w command failed, status = %#x\n", req->rq_disk->disk_name, brq->cmd.resp[0]); return MMC_BLK_ABORT; } /* * Everything else is either success, or a data error of some * kind. If it was a write, we may have transitioned to * program mode, which we have to wait for it to complete. */ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) { int err; /* Check stop command response */ if (brq->stop.resp[0] & R1_ERROR) { pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n", req->rq_disk->disk_name, __func__, brq->stop.resp[0]); gen_err = 1; } err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req, &gen_err); if (err) return MMC_BLK_CMD_ERR; } /* if general error occurs, retry the write operation. 
*/ if (gen_err) { pr_warn("%s: retrying write for general error\n", req->rq_disk->disk_name); return MMC_BLK_RETRY; } if (brq->data.error) { if (need_retune && !brq->retune_retry_done) { pr_info("%s: retrying because a re-tune was needed\n", req->rq_disk->disk_name); brq->retune_retry_done = 1; return MMC_BLK_RETRY; } pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n", req->rq_disk->disk_name, brq->data.error, (unsigned)blk_rq_pos(req), (unsigned)blk_rq_sectors(req), brq->cmd.resp[0], brq->stop.resp[0]); if (rq_data_dir(req) == READ) { if (ecc_err) return MMC_BLK_ECC_ERR; return MMC_BLK_DATA_ERR; } else { return MMC_BLK_CMD_ERR; } } if (!brq->data.bytes_xfered) return MMC_BLK_RETRY; if (mmc_packed_cmd(mq_mrq->cmd_type)) { if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered)) return MMC_BLK_PARTIAL; else return MMC_BLK_SUCCESS; } if (blk_rq_bytes(req) != brq->data.bytes_xfered) return MMC_BLK_PARTIAL; return MMC_BLK_SUCCESS; } static int mmc_blk_packed_err_check(struct mmc_card *card, struct mmc_async_req *areq) { struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); struct request *req = mq_rq->req; struct mmc_packed *packed = mq_rq->packed; int err, check, status; u8 *ext_csd; BUG_ON(!packed); packed->retries--; check = mmc_blk_err_check(card, areq); err = get_card_status(card, &status, 0); if (err) { pr_err("%s: error %d sending status command\n", req->rq_disk->disk_name, err); return MMC_BLK_ABORT; } if (status & R1_EXCEPTION_EVENT) { err = mmc_get_ext_csd(card, &ext_csd); if (err) { pr_err("%s: error %d sending ext_csd\n", req->rq_disk->disk_name, err); return MMC_BLK_ABORT; } if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] & EXT_CSD_PACKED_FAILURE) && (ext_csd[EXT_CSD_PACKED_CMD_STATUS] & EXT_CSD_PACKED_GENERIC_ERROR)) { if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] & EXT_CSD_PACKED_INDEXED_ERROR) { packed->idx_failure = ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1; check = MMC_BLK_PARTIAL; } pr_err("%s: packed cmd failed, nr %u, sectors %u, " "failure index: %d\n", req->rq_disk->disk_name, packed->nr_entries, packed->blocks, packed->idx_failure); } kfree(ext_csd); } return check; } static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, struct mmc_card *card, int disable_multi, struct mmc_queue *mq) { u32 readcmd, writecmd; struct mmc_blk_request *brq = &mqrq->brq; struct request *req = mqrq->req; struct mmc_blk_data *md = mq->data; bool do_data_tag; /* * Reliable writes are used to implement Forced Unit Access and * REQ_META accesses, and are supported only on MMCs. * * XXX: this really needs a good explanation of why REQ_META * is treated special. */ bool do_rel_wr = ((req->cmd_flags & REQ_FUA) || (req->cmd_flags & REQ_META)) && (rq_data_dir(req) == WRITE) && (md->flags & MMC_BLK_REL_WR); memset(brq, 0, sizeof(struct mmc_blk_request)); brq->mrq.cmd = &brq->cmd; brq->mrq.data = &brq->data; brq->cmd.arg = blk_rq_pos(req); if (!mmc_card_blockaddr(card)) brq->cmd.arg <<= 9; brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; brq->data.blksz = 512; brq->stop.opcode = MMC_STOP_TRANSMISSION; brq->stop.arg = 0; brq->data.blocks = blk_rq_sectors(req); /* * The block layer doesn't support all sector count * restrictions, so we need to be prepared for too big * requests. 
*/ if (brq->data.blocks > card->host->max_blk_count) brq->data.blocks = card->host->max_blk_count; if (brq->data.blocks > 1) { /* * After a read error, we redo the request one sector * at a time in order to accurately determine which * sectors can be read successfully. */ if (disable_multi) brq->data.blocks = 1; /* * Some controllers have HW issues while operating * in multiple I/O mode */ if (card->host->ops->multi_io_quirk) brq->data.blocks = card->host->ops->multi_io_quirk(card, (rq_data_dir(req) == READ) ? MMC_DATA_READ : MMC_DATA_WRITE, brq->data.blocks); } if (brq->data.blocks > 1 || do_rel_wr) { /* SPI multiblock writes terminate using a special * token, not a STOP_TRANSMISSION request. */ if (!mmc_host_is_spi(card->host) || rq_data_dir(req) == READ) brq->mrq.stop = &brq->stop; readcmd = MMC_READ_MULTIPLE_BLOCK; writecmd = MMC_WRITE_MULTIPLE_BLOCK; } else { brq->mrq.stop = NULL; readcmd = MMC_READ_SINGLE_BLOCK; writecmd = MMC_WRITE_BLOCK; } if (rq_data_dir(req) == READ) { brq->cmd.opcode = readcmd; brq->data.flags |= MMC_DATA_READ; if (brq->mrq.stop) brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; } else { brq->cmd.opcode = writecmd; brq->data.flags |= MMC_DATA_WRITE; if (brq->mrq.stop) brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; } if (do_rel_wr) mmc_apply_rel_rw(brq, card, req); /* * Data tag is used only during writing meta data to speed * up write and any subsequent read of this meta data */ do_data_tag = (card->ext_csd.data_tag_unit_size) && (req->cmd_flags & REQ_META) && (rq_data_dir(req) == WRITE) && ((brq->data.blocks * brq->data.blksz) >= card->ext_csd.data_tag_unit_size); /* * Pre-defined multi-block transfers are preferable to * open ended-ones (and necessary for reliable writes). * However, it is not sufficient to just send CMD23, * and avoid the final CMD12, as on an error condition * CMD12 (stop) needs to be sent anyway. This, coupled * with Auto-CMD23 enhancements provided by some * hosts, means that the complexity of dealing * with this is best left to the host. If CMD23 is * supported by card and host, we'll fill sbc in and let * the host deal with handling it correctly. This means * that for hosts that don't expose MMC_CAP_CMD23, no * change of behavior will be observed. * * N.B: Some MMC cards experience perf degradation. * We'll avoid using CMD23-bounded multiblock writes for * these, while retaining features like reliable writes. */ if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) && (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) || do_data_tag)) { brq->sbc.opcode = MMC_SET_BLOCK_COUNT; brq->sbc.arg = brq->data.blocks | (do_rel_wr ? (1 << 31) : 0) | (do_data_tag ? (1 << 29) : 0); brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; brq->mrq.sbc = &brq->sbc; } mmc_set_data_timeout(&brq->data, card); brq->data.sg = mqrq->sg; brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); /* * Adjust the sg list so it is the same size as the * request. */ if (brq->data.blocks != blk_rq_sectors(req)) { int i, data_size = brq->data.blocks << 9; struct scatterlist *sg; for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) { data_size -= sg->length; if (data_size <= 0) { sg->length += data_size; i++; break; } } brq->data.sg_len = i; } mqrq->mmc_active.mrq = &brq->mrq; mqrq->mmc_active.err_check = mmc_blk_err_check; mmc_queue_bounce_pre(mqrq); } static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q, struct mmc_card *card) { unsigned int hdr_sz = mmc_large_sector(card) ? 
4096 : 512; unsigned int max_seg_sz = queue_max_segment_size(q); unsigned int len, nr_segs = 0; do { len = min(hdr_sz, max_seg_sz); hdr_sz -= len; nr_segs++; } while (hdr_sz); return nr_segs; } static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req) { struct request_queue *q = mq->queue; struct mmc_card *card = mq->card; struct request *cur = req, *next = NULL; struct mmc_blk_data *md = mq->data; struct mmc_queue_req *mqrq = mq->mqrq_cur; bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN; unsigned int req_sectors = 0, phys_segments = 0; unsigned int max_blk_count, max_phys_segs; bool put_back = true; u8 max_packed_rw = 0; u8 reqs = 0; if (!(md->flags & MMC_BLK_PACKED_CMD)) goto no_packed; if ((rq_data_dir(cur) == WRITE) && mmc_host_packed_wr(card->host)) max_packed_rw = card->ext_csd.max_packed_writes; if (max_packed_rw == 0) goto no_packed; if (mmc_req_rel_wr(cur) && (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) goto no_packed; if (mmc_large_sector(card) && !IS_ALIGNED(blk_rq_sectors(cur), 8)) goto no_packed; mmc_blk_clear_packed(mqrq); max_blk_count = min(card->host->max_blk_count, card->host->max_req_size >> 9); if (unlikely(max_blk_count > 0xffff)) max_blk_count = 0xffff; max_phys_segs = queue_max_segments(q); req_sectors += blk_rq_sectors(cur); phys_segments += cur->nr_phys_segments; if (rq_data_dir(cur) == WRITE) { req_sectors += mmc_large_sector(card) ? 8 : 1; phys_segments += mmc_calc_packed_hdr_segs(q, card); } do { if (reqs >= max_packed_rw - 1) { put_back = false; break; } spin_lock_irq(q->queue_lock); next = blk_fetch_request(q); spin_unlock_irq(q->queue_lock); if (!next) { put_back = false; break; } if (mmc_large_sector(card) && !IS_ALIGNED(blk_rq_sectors(next), 8)) break; if (next->cmd_flags & REQ_DISCARD || next->cmd_flags & REQ_FLUSH) break; if (rq_data_dir(cur) != rq_data_dir(next)) break; if (mmc_req_rel_wr(next) && (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) break; req_sectors += blk_rq_sectors(next); if (req_sectors > max_blk_count) break; phys_segments += next->nr_phys_segments; if (phys_segments > max_phys_segs) break; list_add_tail(&next->queuelist, &mqrq->packed->list); cur = next; reqs++; } while (1); if (put_back) { spin_lock_irq(q->queue_lock); blk_requeue_request(q, next); spin_unlock_irq(q->queue_lock); } if (reqs > 0) { list_add(&req->queuelist, &mqrq->packed->list); mqrq->packed->nr_entries = ++reqs; mqrq->packed->retries = reqs; return reqs; } no_packed: mqrq->cmd_type = MMC_PACKED_NONE; return 0; } static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq, struct mmc_card *card, struct mmc_queue *mq) { struct mmc_blk_request *brq = &mqrq->brq; struct request *req = mqrq->req; struct request *prq; struct mmc_blk_data *md = mq->data; struct mmc_packed *packed = mqrq->packed; bool do_rel_wr, do_data_tag; u32 *packed_cmd_hdr; u8 hdr_blocks; u8 i = 1; BUG_ON(!packed); mqrq->cmd_type = MMC_PACKED_WRITE; packed->blocks = 0; packed->idx_failure = MMC_PACKED_NR_IDX; packed_cmd_hdr = packed->cmd_hdr; memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr)); packed_cmd_hdr[0] = (packed->nr_entries << 16) | (PACKED_CMD_WR << 8) | PACKED_CMD_VER; hdr_blocks = mmc_large_sector(card) ? 
8 : 1; /* * Argument for each entry of packed group */ list_for_each_entry(prq, &packed->list, queuelist) { do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR); do_data_tag = (card->ext_csd.data_tag_unit_size) && (prq->cmd_flags & REQ_META) && (rq_data_dir(prq) == WRITE) && ((brq->data.blocks * brq->data.blksz) >= card->ext_csd.data_tag_unit_size); /* Argument of CMD23 */ packed_cmd_hdr[(i * 2)] = (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) | (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) | blk_rq_sectors(prq); /* Argument of CMD18 or CMD25 */ packed_cmd_hdr[((i * 2)) + 1] = mmc_card_blockaddr(card) ? blk_rq_pos(prq) : blk_rq_pos(prq) << 9; packed->blocks += blk_rq_sectors(prq); i++; } memset(brq, 0, sizeof(struct mmc_blk_request)); brq->mrq.cmd = &brq->cmd; brq->mrq.data = &brq->data; brq->mrq.sbc = &brq->sbc; brq->mrq.stop = &brq->stop; brq->sbc.opcode = MMC_SET_BLOCK_COUNT; brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks); brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK; brq->cmd.arg = blk_rq_pos(req); if (!mmc_card_blockaddr(card)) brq->cmd.arg <<= 9; brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; brq->data.blksz = 512; brq->data.blocks = packed->blocks + hdr_blocks; brq->data.flags |= MMC_DATA_WRITE; brq->stop.opcode = MMC_STOP_TRANSMISSION; brq->stop.arg = 0; brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; mmc_set_data_timeout(&brq->data, card); brq->data.sg = mqrq->sg; brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); mqrq->mmc_active.mrq = &brq->mrq; mqrq->mmc_active.err_check = mmc_blk_packed_err_check; mmc_queue_bounce_pre(mqrq); } static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, struct mmc_blk_request *brq, struct request *req, int ret) { struct mmc_queue_req *mq_rq; mq_rq = container_of(brq, struct mmc_queue_req, brq); /* * If this is an SD card and we're writing, we can first * mark the known good sectors as ok. * * If the card is not SD, we can still ok written sectors * as reported by the controller (which might be less than * the real number of written sectors, but never more). 
*/ if (mmc_card_sd(card)) { u32 blocks; blocks = mmc_sd_num_wr_blocks(card); if (blocks != (u32)-1) { ret = blk_end_request(req, 0, blocks << 9); } } else { if (!mmc_packed_cmd(mq_rq->cmd_type)) ret = blk_end_request(req, 0, brq->data.bytes_xfered); } return ret; } static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq) { struct request *prq; struct mmc_packed *packed = mq_rq->packed; int idx = packed->idx_failure, i = 0; int ret = 0; BUG_ON(!packed); while (!list_empty(&packed->list)) { prq = list_entry_rq(packed->list.next); if (idx == i) { /* retry from error index */ packed->nr_entries -= idx; mq_rq->req = prq; ret = 1; if (packed->nr_entries == MMC_PACKED_NR_SINGLE) { list_del_init(&prq->queuelist); mmc_blk_clear_packed(mq_rq); } return ret; } list_del_init(&prq->queuelist); blk_end_request(prq, 0, blk_rq_bytes(prq)); i++; } mmc_blk_clear_packed(mq_rq); return ret; } static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq) { struct request *prq; struct mmc_packed *packed = mq_rq->packed; BUG_ON(!packed); while (!list_empty(&packed->list)) { prq = list_entry_rq(packed->list.next); list_del_init(&prq->queuelist); blk_end_request(prq, -EIO, blk_rq_bytes(prq)); } mmc_blk_clear_packed(mq_rq); } static void mmc_blk_revert_packed_req(struct mmc_queue *mq, struct mmc_queue_req *mq_rq) { struct request *prq; struct request_queue *q = mq->queue; struct mmc_packed *packed = mq_rq->packed; BUG_ON(!packed); while (!list_empty(&packed->list)) { prq = list_entry_rq(packed->list.prev); if (prq->queuelist.prev != &packed->list) { list_del_init(&prq->queuelist); spin_lock_irq(q->queue_lock); blk_requeue_request(mq->queue, prq); spin_unlock_irq(q->queue_lock); } else { list_del_init(&prq->queuelist); } } mmc_blk_clear_packed(mq_rq); } static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) { struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; struct mmc_blk_request *brq = &mq->mqrq_cur->brq; int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0; enum mmc_blk_status status; struct mmc_queue_req *mq_rq; struct request *req = rqc; struct mmc_async_req *areq; const u8 packed_nr = 2; u8 reqs = 0; if (!rqc && !mq->mqrq_prev->req) return 0; if (rqc) reqs = mmc_blk_prep_packed_list(mq, rqc); do { if (rqc) { /* * When 4KB native sector is enabled, only 8 blocks * multiple read or write is allowed */ if ((brq->data.blocks & 0x07) && (card->ext_csd.data_sector_size == 4096)) { pr_err("%s: Transfer size is not 4KB sector size aligned\n", req->rq_disk->disk_name); mq_rq = mq->mqrq_cur; goto cmd_abort; } if (reqs >= packed_nr) mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur, card, mq); else mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); areq = &mq->mqrq_cur->mmc_active; } else areq = NULL; areq = mmc_start_req(card->host, areq, (int *) &status); if (!areq) { if (status == MMC_BLK_NEW_REQUEST) mq->flags |= MMC_QUEUE_NEW_REQUEST; return 0; } mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); brq = &mq_rq->brq; req = mq_rq->req; type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; mmc_queue_bounce_post(mq_rq); switch (status) { case MMC_BLK_SUCCESS: case MMC_BLK_PARTIAL: /* * A block was successfully transferred. 
*/ mmc_blk_reset_success(md, type); if (mmc_packed_cmd(mq_rq->cmd_type)) { ret = mmc_blk_end_packed_req(mq_rq); break; } else { ret = blk_end_request(req, 0, brq->data.bytes_xfered); } /* * If the blk_end_request function returns non-zero even * though all data has been transferred and no errors * were returned by the host controller, it's a bug. */ if (status == MMC_BLK_SUCCESS && ret) { pr_err("%s BUG rq_tot %d d_xfer %d\n", __func__, blk_rq_bytes(req), brq->data.bytes_xfered); rqc = NULL; goto cmd_abort; } break; case MMC_BLK_CMD_ERR: ret = mmc_blk_cmd_err(md, card, brq, req, ret); if (mmc_blk_reset(md, card->host, type)) goto cmd_abort; if (!ret) goto start_new_req; break; case MMC_BLK_RETRY: retune_retry_done = brq->retune_retry_done; if (retry++ < 5) break; /* Fall through */ case MMC_BLK_ABORT: if (!mmc_blk_reset(md, card->host, type)) break; goto cmd_abort; case MMC_BLK_DATA_ERR: { int err; err = mmc_blk_reset(md, card->host, type); if (!err) break; if (err == -ENODEV || mmc_packed_cmd(mq_rq->cmd_type)) goto cmd_abort; /* Fall through */ } case MMC_BLK_ECC_ERR: if (brq->data.blocks > 1) { /* Redo read one sector at a time */ pr_warn("%s: retrying using single block read\n", req->rq_disk->disk_name); disable_multi = 1; break; } /* * After an error, we redo I/O one sector at a * time, so we only reach here after trying to * read a single sector. */ ret = blk_end_request(req, -EIO, brq->data.blksz); if (!ret) goto start_new_req; break; case MMC_BLK_NOMEDIUM: goto cmd_abort; default: pr_err("%s: Unhandled return value (%d)", req->rq_disk->disk_name, status); goto cmd_abort; } if (ret) { if (mmc_packed_cmd(mq_rq->cmd_type)) { if (!mq_rq->packed->retries) goto cmd_abort; mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq); mmc_start_req(card->host, &mq_rq->mmc_active, NULL); } else { /* * In case of a incomplete request * prepare it again and resend. */ mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq); mmc_start_req(card->host, &mq_rq->mmc_active, NULL); } mq_rq->brq.retune_retry_done = retune_retry_done; } } while (ret); return 1; cmd_abort: if (mmc_packed_cmd(mq_rq->cmd_type)) { mmc_blk_abort_packed_req(mq_rq); } else { if (mmc_card_removed(card)) req->cmd_flags |= REQ_QUIET; while (ret) ret = blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); } start_new_req: if (rqc) { if (mmc_card_removed(card)) { rqc->cmd_flags |= REQ_QUIET; blk_end_request_all(rqc, -EIO); } else { /* * If current request is packed, it needs to put back. */ if (mmc_packed_cmd(mq->mqrq_cur->cmd_type)) mmc_blk_revert_packed_req(mq, mq->mqrq_cur); mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL); } } return 0; } static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) { int ret; struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; struct mmc_host *host = card->host; unsigned long flags; unsigned int cmd_flags = req ? 
req->cmd_flags : 0; if (req && !mq->mqrq_prev->req) /* claim host only for the first request */ mmc_get_card(card); ret = mmc_blk_part_switch(card, md); if (ret) { if (req) { blk_end_request_all(req, -EIO); } ret = 0; goto out; } mq->flags &= ~MMC_QUEUE_NEW_REQUEST; if (cmd_flags & REQ_DISCARD) { /* complete ongoing async transfer before issuing discard */ if (card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); if (req->cmd_flags & REQ_SECURE) ret = mmc_blk_issue_secdiscard_rq(mq, req); else ret = mmc_blk_issue_discard_rq(mq, req); } else if (cmd_flags & REQ_FLUSH) { /* complete ongoing async transfer before issuing flush */ if (card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); ret = mmc_blk_issue_flush(mq, req); } else { if (!req && host->areq) { spin_lock_irqsave(&host->context_info.lock, flags); host->context_info.is_waiting_last_req = true; spin_unlock_irqrestore(&host->context_info.lock, flags); } ret = mmc_blk_issue_rw_rq(mq, req); } out: if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || (cmd_flags & MMC_REQ_SPECIAL_MASK)) /* * Release the host when there are no more requests * and after a special request (discard, flush) is done. * In the special-request case, there is no reentry to * 'mmc_blk_issue_rq' with 'mqrq_prev->req'. */ mmc_put_card(card); return ret; } static inline int mmc_blk_readonly(struct mmc_card *card) { return mmc_card_readonly(card) || !(card->csd.cmdclass & CCC_BLOCK_WRITE); } static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, struct device *parent, sector_t size, bool default_ro, const char *subname, int area_type) { struct mmc_blk_data *md; int devidx, ret; devidx = find_first_zero_bit(dev_use, max_devices); if (devidx >= max_devices) return ERR_PTR(-ENOSPC); __set_bit(devidx, dev_use); md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL); if (!md) { ret = -ENOMEM; goto out; } /* * !subname implies we are creating the main mmc_blk_data that will be * associated with the mmc_card via dev_set_drvdata. Due to device * partitions, devidx will not coincide with a per-physical card * index anymore so we keep track of a name index. */ if (!subname) { int idx; idx = mmc_get_reserved_index(card->host); if (idx >= 0 && !test_bit(idx, name_use)) md->name_idx = idx; else md->name_idx = find_next_zero_bit(name_use, max_devices, mmc_first_nonreserved_index()); __set_bit(md->name_idx, name_use); } else md->name_idx = ((struct mmc_blk_data *) dev_to_disk(parent)->private_data)->name_idx; md->area_type = area_type; /* * Set the read-only status based on the supported commands * and the write protect switch.
*/ md->read_only = mmc_blk_readonly(card); md->disk = alloc_disk(perdev_minors); if (md->disk == NULL) { ret = -ENOMEM; goto err_kfree; } spin_lock_init(&md->lock); INIT_LIST_HEAD(&md->part); md->usage = 1; ret = mmc_init_queue(&md->queue, card, &md->lock, subname); if (ret) goto err_putdisk; md->queue.issue_fn = mmc_blk_issue_rq; md->queue.data = md; md->disk->major = MMC_BLOCK_MAJOR; md->disk->first_minor = devidx * perdev_minors; md->disk->fops = &mmc_bdops; md->disk->private_data = md; md->disk->queue = md->queue.queue; md->disk->driverfs_dev = parent; set_disk_ro(md->disk, md->read_only || default_ro); if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT)) md->disk->flags |= GENHD_FL_NO_PART_SCAN; /* * As discussed on lkml, GENHD_FL_REMOVABLE should: * * - be set for removable media with permanent block devices * - be unset for removable block devices with permanent media * * Since MMC block devices clearly fall under the second * case, we do not set GENHD_FL_REMOVABLE. Userspace * should use the block device creation/destruction hotplug * messages to tell when the card is present. */ snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), "mmcblk%u%s", md->name_idx, subname ? subname : ""); if (mmc_card_mmc(card)) blk_queue_logical_block_size(md->queue.queue, card->ext_csd.data_sector_size); else blk_queue_logical_block_size(md->queue.queue, 512); set_capacity(md->disk, size); if (mmc_host_cmd23(card->host)) { if (mmc_card_mmc(card) || (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT)) md->flags |= MMC_BLK_CMD23; } if (mmc_card_mmc(card) && md->flags & MMC_BLK_CMD23 && ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) || card->ext_csd.rel_sectors)) { md->flags |= MMC_BLK_REL_WR; blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA); } if (mmc_card_mmc(card) && (area_type == MMC_BLK_DATA_AREA_MAIN) && (md->flags & MMC_BLK_CMD23) && card->ext_csd.packed_event_en) { if (!mmc_packed_init(&md->queue, card)) md->flags |= MMC_BLK_PACKED_CMD; } return md; err_putdisk: put_disk(md->disk); err_kfree: kfree(md); out: return ERR_PTR(ret); } static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) { sector_t size; if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { /* * The EXT_CSD sector count is in number of 512 byte * sectors. */ size = card->ext_csd.sectors; } else { /* * The CSD capacity field is in units of read_blkbits. * set_capacity takes units of 512 bytes. */ size = (typeof(sector_t))card->csd.capacity << (card->csd.read_blkbits - 9); } return mmc_blk_alloc_req(card, &card->dev, size, false, NULL, MMC_BLK_DATA_AREA_MAIN); } static int mmc_blk_alloc_part(struct mmc_card *card, struct mmc_blk_data *md, unsigned int part_type, sector_t size, bool default_ro, const char *subname, int area_type) { char cap_str[10]; struct mmc_blk_data *part_md; part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro, subname, area_type); if (IS_ERR(part_md)) return PTR_ERR(part_md); part_md->part_type = part_type; list_add(&part_md->part, &md->part); string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2, cap_str, sizeof(cap_str)); pr_info("%s: %s %s partition %u %s\n", part_md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), part_md->part_type, cap_str); return 0; } /* MMC physical partitions consist of two boot partitions and * up to four general purpose partitions. * For each partition enabled in EXT_CSD a block device will be allocated * to provide access to the partition.
*/ static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) { int idx, ret = 0; if (!mmc_card_mmc(card)) return 0; for (idx = 0; idx < card->nr_parts; idx++) { if (card->part[idx].size) { ret = mmc_blk_alloc_part(card, md, card->part[idx].part_cfg, card->part[idx].size >> 9, card->part[idx].force_ro, card->part[idx].name, card->part[idx].area_type); if (ret) return ret; } } return ret; } static void mmc_blk_remove_req(struct mmc_blk_data *md) { struct mmc_card *card; if (md) { /* * Flush remaining requests and free queues. It * is freeing the queue that stops new requests * from being accepted. */ card = md->queue.card; mmc_cleanup_queue(&md->queue); if (md->flags & MMC_BLK_PACKED_CMD) mmc_packed_clean(&md->queue); if (md->disk->flags & GENHD_FL_UP) { device_remove_file(disk_to_dev(md->disk), &md->force_ro); if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && card->ext_csd.boot_ro_lockable) device_remove_file(disk_to_dev(md->disk), &md->power_ro_lock); del_gendisk(md->disk); } mmc_blk_put(md); } } static void mmc_blk_remove_parts(struct mmc_card *card, struct mmc_blk_data *md) { struct list_head *pos, *q; struct mmc_blk_data *part_md; __clear_bit(md->name_idx, name_use); list_for_each_safe(pos, q, &md->part) { part_md = list_entry(pos, struct mmc_blk_data, part); list_del(pos); mmc_blk_remove_req(part_md); } } static int mmc_add_disk(struct mmc_blk_data *md) { int ret; struct mmc_card *card = md->queue.card; add_disk(md->disk); md->force_ro.show = force_ro_show; md->force_ro.store = force_ro_store; sysfs_attr_init(&md->force_ro.attr); md->force_ro.attr.name = "force_ro"; md->force_ro.attr.mode = S_IRUGO | S_IWUSR; ret = device_create_file(disk_to_dev(md->disk), &md->force_ro); if (ret) goto force_ro_fail; if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && card->ext_csd.boot_ro_lockable) { umode_t mode; if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS) mode = S_IRUGO; else mode = S_IRUGO | S_IWUSR; md->power_ro_lock.show = power_ro_lock_show; md->power_ro_lock.store = power_ro_lock_store; sysfs_attr_init(&md->power_ro_lock.attr); md->power_ro_lock.attr.mode = mode; md->power_ro_lock.attr.name = "ro_lock_until_next_power_on"; ret = device_create_file(disk_to_dev(md->disk), &md->power_ro_lock); if (ret) goto power_ro_lock_fail; } return ret; power_ro_lock_fail: device_remove_file(disk_to_dev(md->disk), &md->force_ro); force_ro_fail: del_gendisk(md->disk); return ret; } #define CID_MANFID_SANDISK 0x2 #define CID_MANFID_TOSHIBA 0x11 #define CID_MANFID_MICRON 0x13 #define CID_MANFID_SAMSUNG 0x15 static const struct mmc_fixup blk_fixups[] = { MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), /* * Some MMC cards experience performance degradation with CMD23 * instead of CMD12-bounded multiblock transfers. For now we'll * black list what's bad... * - Certain Toshiba cards. * * N.B. This doesn't affect SD cards. 
*/ MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_BLK_NO_CMD23), MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_BLK_NO_CMD23), MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_BLK_NO_CMD23), MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_BLK_NO_CMD23), MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_BLK_NO_CMD23), /* * Some Micron MMC cards needs longer data read timeout than * indicated in CSD. */ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, MMC_QUIRK_LONG_READ_TIME), /* * On these Samsung MoviNAND parts, performing secure erase or * secure trim can result in unrecoverable corruption due to a * firmware bug. */ MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), END_FIXUP }; static int mmc_blk_probe(struct mmc_card *card) { struct mmc_blk_data *md, *part_md; char cap_str[10]; /* * Check that the card supports the command class(es) we need. */ if (!(card->csd.cmdclass & CCC_BLOCK_READ)) return -ENODEV; mmc_fixup_device(card, blk_fixups); md = mmc_blk_alloc(card); if (IS_ERR(md)) return PTR_ERR(md); string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2, cap_str, sizeof(cap_str)); pr_info("%s: %s %s %s %s\n", md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), cap_str, md->read_only ? "(ro)" : ""); if (mmc_blk_alloc_parts(card, md)) goto out; dev_set_drvdata(&card->dev, md); if (mmc_add_disk(md)) goto out; list_for_each_entry(part_md, &md->part, part) { if (mmc_add_disk(part_md)) goto out; } pm_runtime_set_autosuspend_delay(&card->dev, 3000); pm_runtime_use_autosuspend(&card->dev); /* * Don't enable runtime PM for SD-combo cards here. Leave that * decision to be taken during the SDIO init sequence instead. 
*/ if (card->type != MMC_TYPE_SD_COMBO) { pm_runtime_set_active(&card->dev); pm_runtime_enable(&card->dev); } return 0; out: mmc_blk_remove_parts(card, md); mmc_blk_remove_req(md); return 0; } static void mmc_blk_remove(struct mmc_card *card) { struct mmc_blk_data *md = dev_get_drvdata(&card->dev); mmc_blk_remove_parts(card, md); pm_runtime_get_sync(&card->dev); mmc_claim_host(card->host); mmc_blk_part_switch(card, md); mmc_release_host(card->host); if (card->type != MMC_TYPE_SD_COMBO) pm_runtime_disable(&card->dev); pm_runtime_put_noidle(&card->dev); mmc_blk_remove_req(md); dev_set_drvdata(&card->dev, NULL); } static int _mmc_blk_suspend(struct mmc_card *card) { struct mmc_blk_data *part_md; struct mmc_blk_data *md = dev_get_drvdata(&card->dev); if (md) { mmc_queue_suspend(&md->queue); list_for_each_entry(part_md, &md->part, part) { mmc_queue_suspend(&part_md->queue); } } return 0; } static void mmc_blk_shutdown(struct mmc_card *card) { _mmc_blk_suspend(card); } #ifdef CONFIG_PM_SLEEP static int mmc_blk_suspend(struct device *dev) { struct mmc_card *card = mmc_dev_to_card(dev); return _mmc_blk_suspend(card); } static int mmc_blk_resume(struct device *dev) { struct mmc_blk_data *part_md; struct mmc_blk_data *md = dev_get_drvdata(dev); if (md) { /* * Resume involves the card going into idle state, * so current partition is always the main one. */ md->part_curr = md->part_type; mmc_queue_resume(&md->queue); list_for_each_entry(part_md, &md->part, part) { mmc_queue_resume(&part_md->queue); } } return 0; } #endif static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume); static struct mmc_driver mmc_driver = { .drv = { .name = "mmcblk", .pm = &mmc_blk_pm_ops, }, .probe = mmc_blk_probe, .remove = mmc_blk_remove, .shutdown = mmc_blk_shutdown, }; static int __init mmc_blk_init(void) { int res; if (perdev_minors != CONFIG_MMC_BLOCK_MINORS) pr_info("mmcblk: using %d minors per device\n", perdev_minors); max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors); res = register_blkdev(MMC_BLOCK_MAJOR, "mmc"); if (res) goto out; res = mmc_register_driver(&mmc_driver); if (res) goto out2; return 0; out2: unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); out: return res; } static void __exit mmc_blk_exit(void) { mmc_unregister_driver(&mmc_driver); unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); } module_init(mmc_blk_init); module_exit(mmc_blk_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
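/*
 * Editor's sketch (not part of the driver): a self-contained user-space
 * model of the retry/abort decision table that mmc_blk_cmd_error() above
 * implements. All demo_* names are hypothetical; the two R1 bits follow
 * the JEDEC card-status layout this file already relies on. Build with a
 * plain C compiler to trace how a failed r/w command would be classified.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define DEMO_R1_COM_CRC_ERROR (1u << 23)   /* response CRC failed */
#define DEMO_R1_ILLEGAL_COMMAND (1u << 22) /* cmd illegal in current state */

enum demo_action { DEMO_CONTINUE, DEMO_ABORT, DEMO_RETRY };

/* Mirrors the switch in mmc_blk_cmd_error(): a response CRC error always
 * retries; a timeout retries only if the status command itself failed or
 * the card status points at a transient command-level fault. */
static enum demo_action demo_classify(int error, bool status_valid, unsigned int status)
{
	switch (error) {
	case -EILSEQ:
		return DEMO_RETRY;
	case -ETIMEDOUT:
		if (!status_valid)
			return DEMO_RETRY;
		if (status & (DEMO_R1_COM_CRC_ERROR | DEMO_R1_ILLEGAL_COMMAND))
			return DEMO_RETRY;
		return DEMO_ABORT;
	default:
		return DEMO_ABORT;
	}
}

int main(void)
{
	/* Timeout, valid status showing a command CRC fault: retry (2). */
	printf("%d\n", demo_classify(-ETIMEDOUT, true, DEMO_R1_COM_CRC_ERROR));
	/* Timeout with a clean, valid status: abort (1). */
	printf("%d\n", demo_classify(-ETIMEDOUT, true, 0));
	return 0;
}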
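/*
 * Editor's sketch (hypothetical demo_* names, not the driver's API): the
 * packed-write header layout built by mmc_blk_packed_hdr_wrq_prep() above.
 * Word 0 packs the entry count, direction and version; each entry then
 * contributes a CMD23 argument (sector count plus the reliable-write /
 * data-tag flag bits, as in sbc.arg above) and a CMD25 argument (start
 * address). The version and direction constants are illustrative
 * placeholders, not the spec-defined values.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PACKED_CMD_VER 0x01u           /* placeholder version code */
#define DEMO_PACKED_CMD_WR 0x02u            /* placeholder "write" direction */
#define DEMO_CMD23_ARG_REL_WR (1u << 31)    /* reliable write, bit 31 */
#define DEMO_CMD23_ARG_TAG_REQ (1u << 29)   /* data tag, bit 29 */

struct demo_req {
	uint32_t sectors; /* blk_rq_sectors() */
	uint32_t start;   /* blk_rq_pos() on a block-addressed card */
	int rel_wr;       /* reliable write requested */
	int tag;          /* data tag requested */
};

/* Fill hdr[] the way the list_for_each_entry() loop above does: entry i
 * occupies words (i + 1) * 2 and (i + 1) * 2 + 1; word 1 stays unused. */
static void demo_build_hdr(uint32_t *hdr, const struct demo_req *reqs, int nr)
{
	int i;

	hdr[0] = ((uint32_t)nr << 16) | (DEMO_PACKED_CMD_WR << 8) | DEMO_PACKED_CMD_VER;
	for (i = 0; i < nr; i++) {
		hdr[(i + 1) * 2] = (reqs[i].rel_wr ? DEMO_CMD23_ARG_REL_WR : 0) |
				   (reqs[i].tag ? DEMO_CMD23_ARG_TAG_REQ : 0) |
				   reqs[i].sectors;
		hdr[(i + 1) * 2 + 1] = reqs[i].start;
	}
}

int main(void)
{
	struct demo_req reqs[2] = { { 8, 0x1000, 1, 0 }, { 16, 0x2000, 0, 1 } };
	uint32_t hdr[6] = { 0 };
	int i;

	demo_build_hdr(hdr, reqs, 2);
	for (i = 0; i < 6; i++)
		printf("hdr[%d] = 0x%08x\n", i, (unsigned int)hdr[i]);
	return 0;
}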
npf-ati/linux-2.6-imx
drivers/mmc/card/block.c
C
gpl-2.0
67,016
/* * Copyright (C) 2007 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /******************************************************************************* * * Filename: * --------- * AudDrv_Clk.c * * Project: * -------- * MT6583 Audio Driver clock control implementation * * Description: * ------------ * Audio clock register control * * Author: * ------- * Chipeng Chang (MTK02308) * *------------------------------------------------------------------------------ * $Revision: #1 $ * $Modtime:$ * $Log:$ * * *******************************************************************************/ /***************************************************************************** * C O M P I L E R F L A G S *****************************************************************************/ /***************************************************************************** * E X T E R N A L R E F E R E N C E S *****************************************************************************/ #include <mach/mt_clkmgr.h> #include <mach/mt_pm_ldo.h> #include <mach/pmic_mt6325_sw.h> #include <mach/upmu_common.h> #include <mach/upmu_hw.h> #include "AudDrv_Common.h" #include "AudDrv_Clk.h" #include "AudDrv_Afe.h" #include <linux/spinlock.h> #include <linux/delay.h> /***************************************************************************** * D A T A T Y P E S *****************************************************************************/ int Aud_Core_Clk_cntr = 0; int Aud_AFE_Clk_cntr = 0; int Aud_I2S_Clk_cntr = 0; int Aud_ADC_Clk_cntr = 0; int Aud_ADC2_Clk_cntr = 0; int Aud_ADC3_Clk_cntr = 0; int Aud_ANA_Clk_cntr = 0; int Aud_HDMI_Clk_cntr = 0; int Aud_APLL22M_Clk_cntr = 0; int Aud_APLL24M_Clk_cntr = 0; int Aud_APLL1_Tuner_cntr = 0; int Aud_APLL2_Tuner_cntr = 0; static int Aud_EMI_cntr = 0; static DEFINE_SPINLOCK(auddrv_Clk_lock); // protects audio clock registers and counters static DEFINE_MUTEX(auddrv_pmic_mutex); static DEFINE_MUTEX(audEMI_Clk_mutex); extern void disable_dpidle_by_bit(int id); extern void disable_soidle_by_bit(int id); extern void enable_dpidle_by_bit(int id); extern void enable_soidle_by_bit(int id); void AudDrv_Clk_AllOn(void) { unsigned long flags; printk("AudDrv_Clk_AllOn \n"); spin_lock_irqsave(&auddrv_Clk_lock, flags); Afe_Set_Reg(AUDIO_TOP_CON0, 0x00004000, 0xffffffff); spin_unlock_irqrestore(&auddrv_Clk_lock, flags); } void Auddrv_Bus_Init(void) { unsigned long flags; printk("%s \n", __func__); spin_lock_irqsave(&auddrv_Clk_lock, flags); Afe_Set_Reg(AUDIO_TOP_CON0, 0x00004000, 0x00004000); //must be set; otherwise the system defaults bit 14 to 0 spin_unlock_irqrestore(&auddrv_Clk_lock, flags); } /***************************************************************************** * FUNCTION * AudDrv_Clk_Power_On / AudDrv_Clk_Power_Off * * DESCRIPTION * Power on the audio block; all registers can then be accessed and set.
* ***************************************************************************** */ void AudDrv_Clk_Power_On(void) { volatile uint32 *AFE_Register = (volatile uint32 *)Get_Afe_Powertop_Pointer(); volatile uint32 val_tmp; printk("%s", __func__); val_tmp = 0xd; mt_reg_sync_writel(val_tmp, AFE_Register); } void AudDrv_Clk_Power_Off(void) { } /***************************************************************************** * FUNCTION * AudDrv_Clk_On / AudDrv_Clk_Off * * DESCRIPTION * Enable/Disable PLL(26M clock) \ AFE clock * ***************************************************************************** */ void AudDrv_Clk_On(void) { unsigned long flags; PRINTK_AUD_CLK("+AudDrv_Clk_On, Aud_AFE_Clk_cntr:%d \n",Aud_AFE_Clk_cntr); spin_lock_irqsave(&auddrv_Clk_lock, flags); if (Aud_AFE_Clk_cntr == 0) { printk("-----------AudDrv_Clk_On, Aud_AFE_Clk_cntr:%d \n", Aud_AFE_Clk_cntr); #ifdef PM_MANAGER_API if (enable_clock(MT_CG_INFRA_AUDIO, "AUDIO")) { xlog_printk(ANDROID_LOG_ERROR, "Sound", "Aud enable_clock MT_CG_INFRA_AUDIO fail !!!\n"); } if (enable_clock(MT_CG_AUDIO_AFE, "AUDIO")) { xlog_printk(ANDROID_LOG_ERROR, "Sound", "Aud enable_clock MT_CG_AUDIO_AFE fail !!!\n"); } #else SetInfraCfg(AUDIO_CG_CLR, 0x2000000, 0x2000000); //bit 25=0, without 133m master and 66m slave bus clock cg gating Afe_Set_Reg(AUDIO_TOP_CON0, 0x4000, 0x06004044); #endif } Aud_AFE_Clk_cntr++; spin_unlock_irqrestore(&auddrv_Clk_lock, flags); PRINTK_AUD_CLK("-AudDrv_Clk_On, Aud_AFE_Clk_cntr:%d \n",Aud_AFE_Clk_cntr); } void AudDrv_Clk_Off(void) { unsigned long flags; PRINTK_AUD_CLK("+!! AudDrv_Clk_Off, Aud_AFE_Clk_cntr:%d \n",Aud_AFE_Clk_cntr); spin_lock_irqsave(&auddrv_Clk_lock, flags); Aud_AFE_Clk_cntr--; if (Aud_AFE_Clk_cntr == 0) { printk("------------AudDrv_Clk_Off, Aud_AFE_Clk_cntr:%d \n", Aud_AFE_Clk_cntr); { // Disable AFE clock #ifdef PM_MANAGER_API if (disable_clock(MT_CG_AUDIO_AFE, "AUDIO")) { xlog_printk(ANDROID_LOG_ERROR, "Sound", "disable_clock MT_CG_AUDIO_AFE fail"); } if (disable_clock(MT_CG_INFRA_AUDIO, "AUDIO")) { xlog_printk(ANDROID_LOG_ERROR, "Sound", "disable_clock MT_CG_INFRA_AUDIO fail !!!\n"); } #else Afe_Set_Reg(AUDIO_TOP_CON0, 0x06000044, 0x06000044); SetInfraCfg(AUDIO_CG_SET, 0x2000000, 0x2000000); //bit25=1, with 133m mastesr and 66m slave bus clock cg gating #endif } } else if (Aud_AFE_Clk_cntr < 0) { PRINTK_AUD_ERROR("!! AudDrv_Clk_Off, Aud_AFE_Clk_cntr<0 (%d) \n", Aud_AFE_Clk_cntr); AUDIO_ASSERT(true); Aud_AFE_Clk_cntr = 0; } PRINTK_AUD_CLK("-!! AudDrv_Clk_Off, Aud_AFE_Clk_cntr:%d \n",Aud_AFE_Clk_cntr); spin_unlock_irqrestore(&auddrv_Clk_lock, flags); } /***************************************************************************** * FUNCTION * AudDrv_ANA_Clk_On / AudDrv_ANA_Clk_Off * * DESCRIPTION * Enable/Disable analog part clock * *****************************************************************************/ void AudDrv_ANA_Clk_On(void) { mutex_lock(&auddrv_pmic_mutex); if (Aud_ANA_Clk_cntr == 0) { PRINTK_AUD_CLK("+AudDrv_ANA_Clk_On, Aud_ANA_Clk_cntr:%d \n", Aud_ANA_Clk_cntr); } Aud_ANA_Clk_cntr++; mutex_unlock(&auddrv_pmic_mutex); //PRINTK_AUD_CLK("-AudDrv_ANA_Clk_Off, Aud_ANA_Clk_cntr:%d \n",Aud_ANA_Clk_cntr); } void AudDrv_ANA_Clk_Off(void) { //PRINTK_AUD_CLK("+AudDrv_ANA_Clk_Off, Aud_ADC_Clk_cntr:%d \n", Aud_ANA_Clk_cntr); mutex_lock(&auddrv_pmic_mutex); Aud_ANA_Clk_cntr--; if (Aud_ANA_Clk_cntr == 0) { PRINTK_AUD_CLK("+AudDrv_ANA_Clk_Off disable_clock Ana clk(%x)\n", Aud_ANA_Clk_cntr); // Disable ADC clock #ifdef PM_MANAGER_API #else // TODO:: open ADC clock.... 
#endif } else if (Aud_ANA_Clk_cntr < 0) { PRINTK_AUD_ERROR("!! AudDrv_ANA_Clk_Off, Aud_ADC_Clk_cntr<0 (%d) \n", Aud_ANA_Clk_cntr); AUDIO_ASSERT(true); Aud_ANA_Clk_cntr = 0; } mutex_unlock(&auddrv_pmic_mutex); //PRINTK_AUD_CLK("-AudDrv_ANA_Clk_Off, Aud_ADC_Clk_cntr:%d \n", Aud_ANA_Clk_cntr); } /***************************************************************************** * FUNCTION * AudDrv_ADC_Clk_On / AudDrv_ADC_Clk_Off * * DESCRIPTION * Enable/Disable analog part clock * *****************************************************************************/ void AudDrv_ADC_Clk_On(void) { //PRINTK_AUDDRV("+AudDrv_ADC_Clk_On, Aud_ADC_Clk_cntr:%d \n", Aud_ADC_Clk_cntr); mutex_lock(&auddrv_pmic_mutex); if (Aud_ADC_Clk_cntr == 0) { PRINTK_AUDDRV("+AudDrv_ADC_Clk_On enable_clock ADC clk(%x)\n", Aud_ADC_Clk_cntr); Afe_Set_Reg(AUDIO_TOP_CON0, 0 << 24 , 1 << 24); } Aud_ADC_Clk_cntr++; mutex_unlock(&auddrv_pmic_mutex); } void AudDrv_ADC_Clk_Off(void) { //PRINTK_AUDDRV("+AudDrv_ADC_Clk_Off, Aud_ADC_Clk_cntr:%d \n", Aud_ADC_Clk_cntr); mutex_lock(&auddrv_pmic_mutex); Aud_ADC_Clk_cntr--; if (Aud_ADC_Clk_cntr == 0) { PRINTK_AUDDRV("+AudDrv_ADC_Clk_On disable_clock ADC clk(%x)\n", Aud_ADC_Clk_cntr); Afe_Set_Reg(AUDIO_TOP_CON0, 1 << 24 , 1 << 24); } if (Aud_ADC_Clk_cntr < 0) { PRINTK_AUDDRV("!! AudDrv_ADC_Clk_Off, Aud_ADC_Clk_cntr<0 (%d) \n", Aud_ADC_Clk_cntr); Aud_ADC_Clk_cntr = 0; } mutex_unlock(&auddrv_pmic_mutex); //PRINTK_AUDDRV("-AudDrv_ADC_Clk_Off, Aud_ADC_Clk_cntr:%d \n", Aud_ADC_Clk_cntr); } /***************************************************************************** * FUNCTION * AudDrv_ADC2_Clk_On / AudDrv_ADC2_Clk_Off * * DESCRIPTION * Enable/Disable clock * *****************************************************************************/ void AudDrv_ADC2_Clk_On(void) { PRINTK_AUD_CLK("+%s %d \n", __func__, Aud_ADC2_Clk_cntr); mutex_lock(&auddrv_pmic_mutex); if (Aud_ADC2_Clk_cntr == 0) { PRINTK_AUDDRV("+%s enable_clock ADC clk(%x)\n", __func__, Aud_ADC2_Clk_cntr); #if 0 //K2 removed #ifdef PM_MANAGER_API if (enable_clock(MT_CG_AUDIO_ADDA2, "AUDIO")) { PRINTK_AUD_CLK("%s fail", __func__); } #else Afe_Set_Reg(AUDIO_TOP_CON0, 0 << 23 , 1 << 23); //temp hard code setting, after confirm with enable clock usage, this could be removed. #endif #endif } Aud_ADC2_Clk_cntr++; mutex_unlock(&auddrv_pmic_mutex); } void AudDrv_ADC2_Clk_Off(void) { //PRINTK_AUDDRV("+%s %d \n", __func__,Aud_ADC2_Clk_cntr); mutex_lock(&auddrv_pmic_mutex); Aud_ADC2_Clk_cntr--; if (Aud_ADC2_Clk_cntr == 0) { PRINTK_AUDDRV("+%s disable_clock ADC clk(%x)\n", __func__, Aud_ADC2_Clk_cntr); #if 0 //K2 removed #ifdef PM_MANAGER_API if (disable_clock(MT_CG_AUDIO_ADDA2, "AUDIO")) { PRINTK_AUD_CLK("%s fail", __func__); } #else Afe_Set_Reg(AUDIO_TOP_CON0, 1 << 23 , 1 << 23); //temp hard code setting, after confirm with enable clock usage, this could be removed. 
#endif #endif } if (Aud_ADC2_Clk_cntr < 0) { PRINTK_AUDDRV("%s <0 (%d) \n", __func__, Aud_ADC2_Clk_cntr); Aud_ADC2_Clk_cntr = 0; } mutex_unlock(&auddrv_pmic_mutex); //PRINTK_AUDDRV("-AudDrv_ADC_Clk_Off, Aud_ADC_Clk_cntr:%d \n", Aud_ADC_Clk_cntr); } /***************************************************************************** * FUNCTION * AudDrv_ADC3_Clk_On / AudDrv_ADC3_Clk_Off * * DESCRIPTION * Enable/Disable clock * *****************************************************************************/ void AudDrv_ADC3_Clk_On(void) { PRINTK_AUD_CLK("+%s %d \n", __func__, Aud_ADC3_Clk_cntr); mutex_lock(&auddrv_pmic_mutex); if (Aud_ADC3_Clk_cntr == 0) { PRINTK_AUDDRV("+%s enable_clock ADC clk(%x)\n", __func__, Aud_ADC3_Clk_cntr); #if 0 //K2 removed #ifdef PM_MANAGER_API if (enable_clock(MT_CG_AUDIO_ADDA3, "AUDIO")) { PRINTK_AUD_CLK("%s fail", __func__); } #endif #endif } Aud_ADC3_Clk_cntr++; mutex_unlock(&auddrv_pmic_mutex); } void AudDrv_ADC3_Clk_Off(void) { //PRINTK_AUDDRV("+%s %d \n", __func__,Aud_ADC3_Clk_cntr); mutex_lock(&auddrv_pmic_mutex); Aud_ADC3_Clk_cntr--; if (Aud_ADC3_Clk_cntr == 0) { PRINTK_AUDDRV("+%s disable_clock ADC clk(%x)\n", __func__, Aud_ADC3_Clk_cntr); #if 0 //K2 removed #ifdef PM_MANAGER_API if (disable_clock(MT_CG_AUDIO_ADDA3, "AUDIO")) { PRINTK_AUD_CLK("%s fail", __func__); } #endif #endif } if (Aud_ADC3_Clk_cntr < 0) { PRINTK_AUDDRV("%s <0 (%d) \n", __func__, Aud_ADC3_Clk_cntr); Aud_ADC3_Clk_cntr = 0; } mutex_unlock(&auddrv_pmic_mutex); //PRINTK_AUDDRV("-AudDrv_ADC_Clk_Off, Aud_ADC_Clk_cntr:%d \n", Aud_ADC_Clk_cntr); } /***************************************************************************** * FUNCTION * AudDrv_APLL22M_Clk_On / AudDrv_APLL22M_Clk_Off * * DESCRIPTION * Enable/Disable clock * *****************************************************************************/ void AudDrv_APLL22M_Clk_On(void) { PRINTK_AUD_CLK("+%s %d \n", __func__, Aud_APLL22M_Clk_cntr); mutex_lock(&auddrv_pmic_mutex); if (Aud_APLL22M_Clk_cntr == 0) { PRINTK_AUDDRV("+%s enable_clock ADC clk(%x)\n", __func__, Aud_APLL22M_Clk_cntr); #ifdef PM_MANAGER_API enable_mux(MT_MUX_AUD1, "AUDIO"); clkmux_sel(MT_MUX_AUD1, 1 , "AUDIO"); //select APLL1 if (enable_clock(MT_CG_AUDIO_22M, "AUDIO")) { PRINTK_AUD_CLK("%s fail", __func__); } if (enable_clock(MT_CG_AUDIO_APLL_TUNER, "AUDIO")) { PRINTK_AUD_CLK("%s fail", __func__); } #endif } Aud_APLL22M_Clk_cntr++; mutex_unlock(&auddrv_pmic_mutex); } void AudDrv_APLL22M_Clk_Off(void) { mutex_lock(&auddrv_pmic_mutex); Aud_APLL22M_Clk_cntr--; if (Aud_APLL22M_Clk_cntr == 0) { PRINTK_AUDDRV("+%s disable_clock ADC clk(%x)\n", __func__, Aud_APLL22M_Clk_cntr); #ifdef PM_MANAGER_API if (disable_clock(MT_CG_AUDIO_22M, "AUDIO")) { PRINTK_AUD_CLK("%s fail", __func__); } if (disable_clock(MT_CG_AUDIO_APLL_TUNER, "AUDIO")) { PRINTK_AUD_CLK("%s fail", __func__); } clkmux_sel(MT_MUX_AUD1, 0, "AUDIO"); //select 26M disable_mux(MT_MUX_AUD1, "AUDIO"); #endif } if (Aud_APLL22M_Clk_cntr < 0) { PRINTK_AUDDRV("%s <0 (%d) \n", __func__, Aud_APLL22M_Clk_cntr); Aud_APLL22M_Clk_cntr = 0; } mutex_unlock(&auddrv_pmic_mutex); } /***************************************************************************** * FUNCTION * AudDrv_APLL24M_Clk_On / AudDrv_APLL24M_Clk_Off * * DESCRIPTION * Enable/Disable clock * *****************************************************************************/ void AudDrv_APLL24M_Clk_On(void) { PRINTK_AUD_CLK("+%s %d \n", __func__, Aud_APLL24M_Clk_cntr); mutex_lock(&auddrv_pmic_mutex); if (Aud_APLL24M_Clk_cntr == 0) { PRINTK_AUDDRV("+%s enable_clock ADC clk(%x)\n",
__func__, Aud_APLL24M_Clk_cntr); #ifdef PM_MANAGER_API enable_mux(MT_MUX_AUD2, "AUDIO"); clkmux_sel(MT_MUX_AUD2, 1, "AUDIO"); //APLL2 if (enable_clock(MT_CG_AUDIO_24M, "AUDIO")) { PRINTK_AUD_CLK("%s fail", __func__); } if (enable_clock(MT_CG_AUDIO_APLL2_TUNER, "AUDIO")) { PRINTK_AUD_CLK("%s fail", __func__); } #endif } Aud_APLL24M_Clk_cntr++; mutex_unlock(&auddrv_pmic_mutex); } void AudDrv_APLL24M_Clk_Off(void) { mutex_lock(&auddrv_pmic_mutex); Aud_APLL24M_Clk_cntr--; if (Aud_APLL24M_Clk_cntr == 0) { PRINTK_AUDDRV("+%s disable_clock ADC clk(%x)\n", __func__, Aud_APLL24M_Clk_cntr); #ifdef PM_MANAGER_API if (disable_clock(MT_CG_AUDIO_24M, "AUDIO")) { PRINTK_AUD_CLK("%s fail", __func__); } if (disable_clock(MT_CG_AUDIO_APLL2_TUNER, "AUDIO")) { PRINTK_AUD_CLK("%s fail", __func__); } clkmux_sel(MT_MUX_AUD2, 0, "AUDIO"); //select 26M disable_mux(MT_MUX_AUD2, "AUDIO"); #endif } if (Aud_APLL24M_Clk_cntr < 0) { PRINTK_AUDDRV("%s <0 (%d) \n", __func__, Aud_APLL24M_Clk_cntr); Aud_APLL24M_Clk_cntr = 0; } mutex_unlock(&auddrv_pmic_mutex); } /***************************************************************************** * FUNCTION * AudDrv_I2S_Clk_On / AudDrv_I2S_Clk_Off * * DESCRIPTION * Enable/Disable analog part clock * *****************************************************************************/ void AudDrv_I2S_Clk_On(void) { unsigned long flags; //PRINTK_AUD_CLK("+AudDrv_I2S_Clk_On, Aud_I2S_Clk_cntr:%d \n", Aud_I2S_Clk_cntr); spin_lock_irqsave(&auddrv_Clk_lock, flags); if (Aud_I2S_Clk_cntr == 0) { #ifdef PM_MANAGER_API if (enable_clock(MT_CG_AUDIO_I2S, "AUDIO")) { PRINTK_AUD_ERROR("Aud enable_clock MT65XX_PDN_AUDIO_I2S fail !!!\n"); } #else Afe_Set_Reg(AUDIO_TOP_CON0, 0x00000000, 0x00000040); //power on I2S clock #endif } Aud_I2S_Clk_cntr++; spin_unlock_irqrestore(&auddrv_Clk_lock, flags); } void AudDrv_I2S_Clk_Off(void) { unsigned long flags; //PRINTK_AUD_CLK("+AudDrv_I2S_Clk_Off, Aud_I2S_Clk_cntr:%d \n", Aud_I2S_Clk_cntr); spin_lock_irqsave(&auddrv_Clk_lock, flags); Aud_I2S_Clk_cntr--; if (Aud_I2S_Clk_cntr == 0) { #ifdef PM_MANAGER_API if (disable_clock(MT_CG_AUDIO_I2S, "AUDIO")) { PRINTK_AUD_ERROR("disable_clock MT_CG_AUDIO_I2S fail"); } #else Afe_Set_Reg(AUDIO_TOP_CON0, 0x00000040, 0x00000040); //power off I2S clock #endif } else if (Aud_I2S_Clk_cntr < 0) { PRINTK_AUD_ERROR("!! 
AudDrv_I2S_Clk_Off, Aud_I2S_Clk_cntr<0 (%d) \n", Aud_I2S_Clk_cntr); AUDIO_ASSERT(true); Aud_I2S_Clk_cntr = 0; } spin_unlock_irqrestore(&auddrv_Clk_lock, flags); //PRINTK_AUD_CLK("-AudDrv_I2S_Clk_Off, Aud_I2S_Clk_cntr:%d \n",Aud_I2S_Clk_cntr); } /***************************************************************************** * FUNCTION * AudDrv_Core_Clk_On / AudDrv_Core_Clk_Off * * DESCRIPTION * Enable/Disable AFE core clock * *****************************************************************************/ void AudDrv_Core_Clk_On(void) { //PRINTK_AUD_CLK("+AudDrv_Core_Clk_On, Aud_Core_Clk_cntr:%d \n", Aud_Core_Clk_cntr); unsigned long flags; spin_lock_irqsave(&auddrv_Clk_lock, flags); if (Aud_Core_Clk_cntr == 0) { #ifdef PM_MANAGER_API if (enable_clock(MT_CG_AUDIO_AFE, "AUDIO")) { PRINTK_AUD_ERROR("AudDrv_Core_Clk_On Aud enable_clock MT_CG_AUDIO_AFE fail !!!\n"); } #endif } Aud_Core_Clk_cntr++; spin_unlock_irqrestore(&auddrv_Clk_lock, flags); //PRINTK_AUD_CLK("-AudDrv_Core_Clk_On, Aud_Core_Clk_cntr:%d \n", Aud_Core_Clk_cntr); } void AudDrv_Core_Clk_Off(void) { //PRINTK_AUD_CLK("+AudDrv_Core_Clk_Off, Aud_Core_Clk_cntr:%d \n", Aud_Core_Clk_cntr); unsigned long flags; spin_lock_irqsave(&auddrv_Clk_lock, flags); Aud_Core_Clk_cntr--; if (Aud_Core_Clk_cntr == 0) { #ifdef PM_MANAGER_API if (disable_clock(MT_CG_AUDIO_AFE, "AUDIO")) { PRINTK_AUD_ERROR("AudDrv_Core_Clk_Off Aud disable_clock MT_CG_AUDIO_AFE fail !!!\n"); } #endif } else if (Aud_Core_Clk_cntr < 0) { PRINTK_AUD_ERROR("!! AudDrv_Core_Clk_Off, Aud_Core_Clk_cntr<0 (%d) \n", Aud_Core_Clk_cntr); Aud_Core_Clk_cntr = 0; } spin_unlock_irqrestore(&auddrv_Clk_lock, flags); //PRINTK_AUD_CLK("-AudDrv_Core_Clk_Off, Aud_Core_Clk_cntr:%d \n", Aud_Core_Clk_cntr); } void AudDrv_APLL1Tuner_Clk_On(void) { unsigned long flags; spin_lock_irqsave(&auddrv_Clk_lock, flags); if (Aud_APLL1_Tuner_cntr == 0) { PRINTK_AUD_CLK("+AudDrv_APLLTuner_Clk_On, Aud_APLL1_Tuner_cntr:%d \n", Aud_APLL1_Tuner_cntr); Afe_Set_Reg(AUDIO_TOP_CON0, 0x0 << 19, 0x1 << 19); SetpllCfg(AP_PLL_CON5, 0x1, 0x1); } Aud_APLL1_Tuner_cntr++; spin_unlock_irqrestore(&auddrv_Clk_lock, flags); } void AudDrv_APLL1Tuner_Clk_Off(void) { unsigned long flags; spin_lock_irqsave(&auddrv_Clk_lock, flags); Aud_APLL1_Tuner_cntr--; if (Aud_APLL1_Tuner_cntr == 0) { Afe_Set_Reg(AUDIO_TOP_CON0, 0x1 << 19, 0x1 << 19); Afe_Set_Reg(AFE_APLL1_TUNER_CFG, 0x00000033, 0x1 << 19); SetpllCfg(AP_PLL_CON5, 0x0, 0x1); } // handle for clock error else if (Aud_APLL1_Tuner_cntr < 0) { PRINTK_AUD_ERROR("!! AudDrv_APLLTuner_Clk_Off, Aud_APLL1_Tuner_cntr<0 (%d) \n", Aud_APLL1_Tuner_cntr); Aud_APLL1_Tuner_cntr = 0; } spin_unlock_irqrestore(&auddrv_Clk_lock, flags); } void AudDrv_APLL2Tuner_Clk_On(void) { unsigned long flags; spin_lock_irqsave(&auddrv_Clk_lock, flags); if (Aud_APLL2_Tuner_cntr == 0) { PRINTK_AUD_CLK("+Aud_APLL2_Tuner_cntr, Aud_APLL2_Tuner_cntr:%d \n", Aud_APLL2_Tuner_cntr); Afe_Set_Reg(AUDIO_TOP_CON0, 0x0 << 20, 0x1 << 20); Afe_Set_Reg(AFE_APLL2_TUNER_CFG, 0x00000033, 0x1 << 19); SetpllCfg(AP_PLL_CON5, 0x1<<1, 0x1<<1); } Aud_APLL2_Tuner_cntr++; spin_unlock_irqrestore(&auddrv_Clk_lock, flags); } void AudDrv_APLL2Tuner_Clk_Off(void) { unsigned long flags; spin_lock_irqsave(&auddrv_Clk_lock, flags); Aud_APLL2_Tuner_cntr--; if (Aud_APLL2_Tuner_cntr == 0) { Afe_Set_Reg(AUDIO_TOP_CON0, 0x1 << 20, 0x1 << 20); SetpllCfg(AP_PLL_CON5, 0x0<<1, 0x1<<1); } // handle for clock error else if (Aud_APLL2_Tuner_cntr < 0) { PRINTK_AUD_ERROR("!! 
AudDrv_APLL2Tuner_Clk_Off, Aud_APLL1_Tuner_cntr<0 (%d) \n", Aud_APLL2_Tuner_cntr); Aud_APLL2_Tuner_cntr = 0; } spin_unlock_irqrestore(&auddrv_Clk_lock, flags); } /***************************************************************************** * FUNCTION * AudDrv_HDMI_Clk_On / AudDrv_HDMI_Clk_Off * * DESCRIPTION * Enable/Disable analog part clock * *****************************************************************************/ void AudDrv_HDMI_Clk_On(void) { PRINTK_AUD_CLK("+AudDrv_HDMI_Clk_On, Aud_I2S_Clk_cntr:%d \n", Aud_HDMI_Clk_cntr); if (Aud_HDMI_Clk_cntr == 0) { AudDrv_ANA_Clk_On(); AudDrv_Clk_On(); } Aud_HDMI_Clk_cntr++; } void AudDrv_HDMI_Clk_Off(void) { PRINTK_AUD_CLK("+AudDrv_HDMI_Clk_Off, Aud_I2S_Clk_cntr:%d \n", Aud_HDMI_Clk_cntr); Aud_HDMI_Clk_cntr--; if (Aud_HDMI_Clk_cntr == 0) { AudDrv_ANA_Clk_Off(); AudDrv_Clk_Off(); } else if (Aud_HDMI_Clk_cntr < 0) { PRINTK_AUD_ERROR("!! AudDrv_Linein_Clk_Off, Aud_I2S_Clk_cntr<0 (%d) \n", Aud_HDMI_Clk_cntr); AUDIO_ASSERT(true); Aud_HDMI_Clk_cntr = 0; } PRINTK_AUD_CLK("-AudDrv_I2S_Clk_Off, Aud_I2S_Clk_cntr:%d \n", Aud_HDMI_Clk_cntr); } /***************************************************************************** * FUNCTION * AudDrv_Suspend_Clk_Off / AudDrv_Suspend_Clk_On * * DESCRIPTION * Enable/Disable AFE clock for suspend * ***************************************************************************** */ void AudDrv_Suspend_Clk_Off(void) { unsigned long flags; spin_lock_irqsave(&auddrv_Clk_lock, flags); if (Aud_Core_Clk_cntr > 0) { #ifdef PM_MANAGER_API if (Aud_AFE_Clk_cntr > 0) { if (disable_clock(MT_CG_AUDIO_AFE, "AUDIO")) { xlog_printk(ANDROID_LOG_ERROR, "Sound", "Aud enable_clock MT_CG_AUDIO_AFE fail !!!\n"); } } if (Aud_I2S_Clk_cntr > 0) { if (disable_clock(MT_CG_AUDIO_I2S, "AUDIO")) { PRINTK_AUD_ERROR("disable_clock MT_CG_AUDIO_I2S fail"); } } if (Aud_ADC_Clk_cntr > 0) { Afe_Set_Reg(AUDIO_TOP_CON0, 1 << 24 , 1 << 24); } if (Aud_ADC2_Clk_cntr > 0) { #if 0 //K2 removed if (disable_clock(MT_CG_AUDIO_ADDA2, "AUDIO")) { PRINTK_AUD_CLK("%s fail", __func__); } #endif } if (Aud_ADC3_Clk_cntr > 0) { #if 0 //K2 removed if (disable_clock(MT_CG_AUDIO_ADDA3, "AUDIO")) { PRINTK_AUD_CLK("%s fail", __func__); } #endif } if (Aud_ANA_Clk_cntr > 0) { } if (Aud_HDMI_Clk_cntr > 0) { } if (Aud_APLL22M_Clk_cntr > 0) { if (disable_clock(MT_CG_AUDIO_22M, "AUDIO")) { PRINTK_AUD_CLK("%s fail", __func__); } if (disable_clock(MT_CG_AUDIO_APLL_TUNER, "AUDIO")) { PRINTK_AUD_CLK("%s fail", __func__); } clkmux_sel(MT_MUX_AUD1, 0, "AUDIO"); //select 26M disable_mux(MT_MUX_AUD1, "AUDIO"); } if (Aud_APLL24M_Clk_cntr > 0) { if (disable_clock(MT_CG_AUDIO_24M, "AUDIO")) { PRINTK_AUD_CLK("%s fail", __func__); } if (disable_clock(MT_CG_AUDIO_APLL2_TUNER, "AUDIO")) { PRINTK_AUD_CLK("%s fail", __func__); } clkmux_sel(MT_MUX_AUD2, 0, "AUDIO"); //select 26M disable_mux(MT_MUX_AUD2, "AUDIO"); } #endif } spin_unlock_irqrestore(&auddrv_Clk_lock, flags); } void AudDrv_Suspend_Clk_On(void) { unsigned long flags; spin_lock_irqsave(&auddrv_Clk_lock, flags); if (Aud_Core_Clk_cntr > 0) { #ifdef PM_MANAGER_API if (Aud_AFE_Clk_cntr > 0) { if (enable_clock(MT_CG_AUDIO_AFE, "AUDIO")) { xlog_printk(ANDROID_LOG_ERROR, "Sound", "Aud enable_clock MT_CG_AUDIO_AFE fail !!!\n"); } } if (Aud_I2S_Clk_cntr > 0) { if (enable_clock(MT_CG_AUDIO_I2S, "AUDIO")) { PRINTK_AUD_ERROR("enable_clock MT_CG_AUDIO_I2S fail"); } } if (Aud_ADC_Clk_cntr > 0) { Afe_Set_Reg(AUDIO_TOP_CON0, 0 << 24 , 1 << 24); } if (Aud_ADC2_Clk_cntr > 0) { #if 0 //K2 removed if (enable_clock(MT_CG_AUDIO_ADDA2, "AUDIO")) { 
PRINTK_AUD_CLK("%s fail", __func__); } #endif } if (Aud_ADC3_Clk_cntr > 0) { #if 0 //K2 removed if (enable_clock(MT_CG_AUDIO_ADDA3, "AUDIO")) { PRINTK_AUD_CLK("%s fail", __func__); } #endif } if (Aud_ANA_Clk_cntr > 0) { } if (Aud_HDMI_Clk_cntr > 0) { } if (Aud_APLL22M_Clk_cntr > 0) { enable_mux(MT_MUX_AUD1, "AUDIO"); clkmux_sel(MT_MUX_AUD1, 1 , "AUDIO"); //select APLL1 if (enable_clock(MT_CG_AUDIO_22M, "AUDIO")) { PRINTK_AUD_CLK("%s fail", __func__); } if (enable_clock(MT_CG_AUDIO_APLL_TUNER, "AUDIO")) { PRINTK_AUD_CLK("%s fail", __func__); } } if (Aud_APLL24M_Clk_cntr > 0) { enable_mux(MT_MUX_AUD2, "AUDIO"); clkmux_sel(MT_MUX_AUD2, 1, "AUDIO"); //APLL2 if (enable_clock(MT_CG_AUDIO_24M, "AUDIO")) { PRINTK_AUD_CLK("%s fail", __func__); } if (enable_clock(MT_CG_AUDIO_APLL2_TUNER, "AUDIO")) { PRINTK_AUD_CLK("%s fail", __func__); } } #endif } spin_unlock_irqrestore(&auddrv_Clk_lock, flags); } void AudDrv_Emi_Clk_On(void) { mutex_lock(&auddrv_pmic_mutex); if(Aud_EMI_cntr == 0) { #ifndef DENALI_FPGA_EARLYPORTING //george early porting disable disable_dpidle_by_bit(MT_CG_AUDIO_AFE); disable_soidle_by_bit(MT_CG_AUDIO_AFE); #endif } Aud_EMI_cntr++; mutex_unlock(&auddrv_pmic_mutex); } void AudDrv_Emi_Clk_Off(void) { mutex_lock(&auddrv_pmic_mutex); Aud_EMI_cntr--; if(Aud_EMI_cntr ==0) { #ifndef DENALI_FPGA_EARLYPORTING //george early porting disable enable_dpidle_by_bit(MT_CG_AUDIO_AFE); enable_soidle_by_bit(MT_CG_AUDIO_AFE); #endif } if(Aud_EMI_cntr <0 ) { Aud_EMI_cntr = 0; printk("Aud_EMI_cntr = %d \n",Aud_EMI_cntr); } mutex_unlock(&auddrv_pmic_mutex); } // export symbol for other module use EXPORT_SYMBOL(AudDrv_Clk_On); EXPORT_SYMBOL(AudDrv_Clk_Off); EXPORT_SYMBOL(AudDrv_ANA_Clk_On); EXPORT_SYMBOL(AudDrv_ANA_Clk_Off); EXPORT_SYMBOL(AudDrv_I2S_Clk_On); EXPORT_SYMBOL(AudDrv_I2S_Clk_Off);
bq/aquaris-E4
sound/soc/mediatek/mt_soc_audio_v3/AudDrv_Clk.c
C
gpl-2.0
28,643
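Every *_Clk_On/*_Clk_Off pair in the AudDrv_Clk.c file above follows the same reference-counted clock-gating pattern: the first On call ungates the clock, the last balanced Off call gates it again, and a counter that goes negative signals an unbalanced Off, which is logged and clamped back to zero. The minimal sketch below isolates that pattern; hw_clock_enable()/hw_clock_disable() are hypothetical stand-ins for the platform's enable_clock()/disable_clock() calls, and the spinlock or mutex the driver holds around each counter is omitted for brevity.

#include <stdio.h>

static int clk_refcount;

/* hypothetical hardware hooks; the driver above uses enable_clock()/disable_clock() */
static void hw_clock_enable(void)  { printf("clock gated on\n");  }
static void hw_clock_disable(void) { printf("clock gated off\n"); }

void clk_get_ref(void)
{
	if (clk_refcount == 0)
		hw_clock_enable();      /* first user powers the clock */
	clk_refcount++;
}

void clk_put_ref(void)
{
	clk_refcount--;
	if (clk_refcount == 0) {
		hw_clock_disable();     /* last user gates the clock */
	} else if (clk_refcount < 0) {
		/* unbalanced Off call: log and clamp, as the driver does */
		printf("clk_put_ref: refcount < 0 (%d)\n", clk_refcount);
		clk_refcount = 0;
	}
}

int main(void)
{
	clk_get_ref();  /* clock on  */
	clk_get_ref();  /* still on  */
	clk_put_ref();  /* still on  */
	clk_put_ref();  /* clock off */
	clk_put_ref();  /* underflow: clamped back to 0 */
	return 0;
}

The decrement-before-compare ordering in clk_put_ref() is what keeps the Off path symmetric with On; the driver's Off routines rely on the same ordering to release a clock exactly when its last user goes away.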
/* * drivers/gpu/ion/ion.c * * Copyright (C) 2011 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/device.h> #include <linux/file.h> #include <linux/freezer.h> #include <linux/fs.h> #include <linux/anon_inodes.h> #include <linux/ion.h> #include <linux/mtk_ion.h> #include <linux/kthread.h> #include <linux/list.h> #include <linux/memblock.h> #include <linux/miscdevice.h> #include <linux/export.h> #include <linux/mm.h> #include <linux/mm_types.h> #include <linux/rbtree.h> #include <linux/slab.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/debugfs.h> #include <linux/dma-buf.h> #include <linux/idr.h> #include "ion_priv.h" #include "ion_profile.h" #define DEBUG_HEAP_SHRINKER #if 0 //we move it to ion_priv.h. so we can dump every buffer info in ion_mm_heap.c /** * struct ion_device - the metadata of the ion device node * @dev: the actual misc device * @buffers: an rb tree of all the existing buffers * @buffer_lock: lock protecting the tree of buffers * @lock: rwsem protecting the tree of heaps and clients * @heaps: list of all the heaps in the system * @user_clients: list of all the clients created from userspace */ struct ion_device { struct miscdevice dev; struct rb_root buffers; struct mutex buffer_lock; struct rw_semaphore lock; struct plist_head heaps; long (*custom_ioctl) (struct ion_client *client, unsigned int cmd, unsigned long arg); struct rb_root clients; struct dentry *debug_root; }; /** * struct ion_client - a process/hw block local address space * @node: node in the tree of all clients * @dev: backpointer to ion device * @handles: an rb tree of all the handles in this client * @idr: an idr space for allocating handle ids * @lock: lock protecting the tree of handles * @name: used for debugging * @task: used for debugging * * A client represents a list of buffers this client may access. * The mutex stored here is used to protect both handles tree * as well as the handles themselves, and should be held while modifying either. */ struct ion_client { struct rb_node node; struct ion_device *dev; struct rb_root handles; struct idr idr; struct mutex lock; const char *name; struct task_struct *task; pid_t pid; struct dentry *debug_root; }; struct ion_handle_debug { pid_t pid; pid_t tgid; unsigned int backtrace[BACKTRACE_SIZE]; unsigned int backtrace_num; }; /** * ion_handle - a client local reference to a buffer * @ref: reference count * @client: back pointer to the client the buffer resides in * @buffer: pointer to the buffer * @node: node in the client's handle rbtree * @kmap_cnt: count of times this client has mapped to kernel * @id: client-unique id allocated by client->idr * * Modifications to node, map_cnt or mapping should be protected by the * lock in the client. Other fields are never changed after initialization. 
*/ struct ion_handle { struct kref ref; struct ion_client *client; struct ion_buffer *buffer; struct rb_node node; unsigned int kmap_cnt; int id; #if ION_RUNTIME_DEBUGGER struct ion_handle_debug dbg; #endif }; #endif bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer) { return ((buffer->flags & ION_FLAG_CACHED) && !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC)); } bool ion_buffer_cached(struct ion_buffer *buffer) { return !!(buffer->flags & ION_FLAG_CACHED); } static inline struct page *ion_buffer_page(struct page *page) { return (struct page *)((unsigned long)page & ~(1UL)); } static inline bool ion_buffer_page_is_dirty(struct page *page) { return !!((unsigned long)page & 1UL); } static inline void ion_buffer_page_dirty(struct page **page) { *page = (struct page *)((unsigned long)(*page) | 1UL); } static inline void ion_buffer_page_clean(struct page **page) { *page = (struct page *)((unsigned long)(*page) & ~(1UL)); } /* this function should only be called while dev->lock is held */ static void ion_buffer_add(struct ion_device *dev, struct ion_buffer *buffer) { struct rb_node **p = &dev->buffers.rb_node; struct rb_node *parent = NULL; struct ion_buffer *entry; while (*p) { parent = *p; entry = rb_entry(parent, struct ion_buffer, node); if (buffer < entry) { p = &(*p)->rb_left; } else if (buffer > entry) { p = &(*p)->rb_right; } else { pr_err("%s: buffer already found.", __func__); BUG(); } } rb_link_node(&buffer->node, parent, p); rb_insert_color(&buffer->node, &dev->buffers); } /* this function should only be called while dev->lock is held */ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, struct ion_device *dev, unsigned long len, unsigned long align, unsigned long flags) { struct ion_buffer *buffer; struct sg_table *table; struct scatterlist *sg; int i, ret; buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL); if (!buffer) return ERR_PTR(-ENOMEM); buffer->heap = heap; buffer->flags = flags; kref_init(&buffer->ref); ret = heap->ops->allocate(heap, buffer, len, align, flags); if (ret) { if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE)) goto err2; ion_heap_freelist_drain(heap, 0); ret = heap->ops->allocate(heap, buffer, len, align, flags); if (ret) goto err2; } buffer->dev = dev; buffer->size = len; table = heap->ops->map_dma(heap, buffer); if (WARN_ONCE(table == NULL, "heap->ops->map_dma should return ERR_PTR on error")) table = ERR_PTR(-EINVAL); if (IS_ERR(table)) { heap->ops->free(buffer); kfree(buffer); return ERR_PTR(PTR_ERR(table)); } buffer->sg_table = table; if (ion_buffer_fault_user_mappings(buffer)) { int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; struct scatterlist *sg; int i, j, k = 0; buffer->pages = vmalloc(sizeof(struct page *) * num_pages); if (!buffer->pages) { ret = -ENOMEM; goto err1; } for_each_sg(table->sgl, sg, table->nents, i) { struct page *page = sg_page(sg); for (j = 0; j < sg_dma_len(sg) / PAGE_SIZE; j++) buffer->pages[k++] = page++; } if (ret) goto err; } buffer->dev = dev; buffer->size = len; INIT_LIST_HEAD(&buffer->vmas); //log task pid for debug +by k.zhang { struct task_struct *task; task = current->group_leader; get_task_comm(buffer->task_comm, task); buffer->pid = task_pid_nr(task); } mutex_init(&buffer->lock); /* this will set up dma addresses for the sglist -- it is not technically correct as per the dma api -- a specific device isn't really taking ownership here. However, in practice on our systems the only dma_address space is physical addresses. 
Additionally, we can't afford the overhead of invalidating every allocation via dma_map_sg. The implicit contract here is that memory comming from the heaps is ready for dma, ie if it has a cached mapping that mapping has been invalidated */ for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) sg_dma_address(sg) = sg_phys(sg); mutex_lock(&dev->buffer_lock); ion_buffer_add(dev, buffer); mutex_unlock(&dev->buffer_lock); return buffer; err: heap->ops->unmap_dma(heap, buffer); heap->ops->free(buffer); err1: if (buffer->pages) vfree(buffer->pages); err2: kfree(buffer); return ERR_PTR(ret); } void ion_buffer_destroy(struct ion_buffer *buffer) { if (WARN_ON(buffer->kmap_cnt > 0)) buffer->heap->ops->unmap_kernel(buffer->heap, buffer); buffer->heap->ops->unmap_dma(buffer->heap, buffer); buffer->heap->ops->free(buffer); if (buffer->pages) vfree(buffer->pages); kfree(buffer); } static void _ion_buffer_destroy(struct kref *kref) { struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref); struct ion_heap *heap = buffer->heap; struct ion_device *dev = buffer->dev; mutex_lock(&dev->buffer_lock); rb_erase(&buffer->node, &dev->buffers); mutex_unlock(&dev->buffer_lock); if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) ion_heap_freelist_add(heap, buffer); else ion_buffer_destroy(buffer); } static void ion_buffer_get(struct ion_buffer *buffer) { kref_get(&buffer->ref); } static int ion_buffer_put(struct ion_buffer *buffer) { return kref_put(&buffer->ref, _ion_buffer_destroy); } static void ion_buffer_add_to_handle(struct ion_buffer *buffer) { mutex_lock(&buffer->lock); buffer->handle_count++; mutex_unlock(&buffer->lock); } static void ion_buffer_remove_from_handle(struct ion_buffer *buffer) { /* * when a buffer is removed from a handle, if it is not in * any other handles, copy the taskcomm and the pid of the * process it's being removed from into the buffer. At this * point there will be no way to track what processes this buffer is * being used by, it only exists as a dma_buf file descriptor. 
* The taskcomm and pid can provide a debug hint as to where this fd * is in the system */ mutex_lock(&buffer->lock); buffer->handle_count--; BUG_ON(buffer->handle_count < 0); if (!buffer->handle_count) { struct task_struct *task; task = current->group_leader; get_task_comm(buffer->task_comm, task); buffer->pid = task_pid_nr(task); } mutex_unlock(&buffer->lock); } static struct ion_handle *ion_handle_create(struct ion_client *client, struct ion_buffer *buffer) { struct ion_handle *handle; handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL); if (!handle) return ERR_PTR(-ENOMEM); kref_init(&handle->ref); rb_init_node(&handle->node); handle->client = client; ion_buffer_get(buffer); ion_buffer_add_to_handle(buffer); handle->buffer = buffer; return handle; } static void ion_handle_kmap_put(struct ion_handle *); static void ion_handle_destroy(struct kref *kref) { struct ion_handle *handle = container_of(kref, struct ion_handle, ref); struct ion_client *client = handle->client; struct ion_buffer *buffer = handle->buffer; mutex_lock(&buffer->lock); while (handle->kmap_cnt) ion_handle_kmap_put(handle); mutex_unlock(&buffer->lock); idr_remove(&client->idr, handle->id); if (!RB_EMPTY_NODE(&handle->node)) rb_erase(&handle->node, &client->handles); ion_buffer_remove_from_handle(buffer); ion_buffer_put(buffer); handle->buffer = NULL; handle->client = NULL; kfree(handle); } struct ion_buffer *ion_handle_buffer(struct ion_handle *handle) { return handle->buffer; } static void ion_handle_get(struct ion_handle *handle) { kref_get(&handle->ref); } static int ion_handle_put(struct ion_handle *handle) { return kref_put(&handle->ref, ion_handle_destroy); } static struct ion_handle *ion_handle_lookup(struct ion_client *client, struct ion_buffer *buffer) { struct rb_node *n = client->handles.rb_node; while (n) { struct ion_handle *entry = rb_entry(n, struct ion_handle, node); if (buffer < entry->buffer) n = n->rb_left; else if (buffer > entry->buffer) n = n->rb_right; else return entry; } return ERR_PTR(-EINVAL); } struct ion_handle *ion_uhandle_get(struct ion_client *client, int id) { return idr_find(&client->idr, id); } bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle) { return (ion_uhandle_get(client, handle->id) == handle); } static int ion_handle_add(struct ion_client *client, struct ion_handle *handle) { int rc; struct rb_node **p = &client->handles.rb_node; struct rb_node *parent = NULL; struct ion_handle *entry; do { int id; rc = idr_pre_get(&client->idr, GFP_KERNEL); if (!rc) return -ENOMEM; rc = idr_get_new_above(&client->idr, handle, 1, &id); handle->id = id; } while (rc == -EAGAIN); if (rc < 0) return rc; while (*p) { parent = *p; entry = rb_entry(parent, struct ion_handle, node); if (handle->buffer < entry->buffer) p = &(*p)->rb_left; else if (handle->buffer > entry->buffer) p = &(*p)->rb_right; else WARN(1, "%s: buffer already found.", __func__); } rb_link_node(&handle->node, parent, p); rb_insert_color(&handle->node, &client->handles); return 0; } struct ion_handle *ion_alloc(struct ion_client *client, size_t len, size_t align, unsigned int heap_id_mask, unsigned int flags) { struct ion_handle *handle; struct ion_device *dev = client->dev; struct ion_buffer *buffer = NULL; struct ion_heap *heap; int ret; pr_debug("%s: len %d align %d heap_id_mask %u flags %x\n", __func__, len, align, heap_id_mask, flags); /* * traverse the list of heaps available in this system in priority * order. 
 * If the heap type is supported by the client, and matches the
 * request of the caller allocate from it.  Repeat until allocate has
 * succeeded or all heaps have been tried
 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	/* reject allocations above 1GB; added by k.zhang */
	if (len > 1024 * 1024 * 1024) {
		IONMSG("%s error: size (%zu) is more than 1G !!\n", __func__, len);
		return ERR_PTR(-EINVAL);
	}

	MMProfileLogEx(ION_MMP_Events[PROFILE_ALLOC], MMProfileFlagStart, len, 0);
	len = PAGE_ALIGN(len);
	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);
	if (IS_ERR(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);
	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);
	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}
	mutex_unlock(&client->lock);

	MMProfileLogEx(ION_MMP_Events[PROFILE_ALLOC], MMProfileFlagEnd, buffer->size, 0);
	return handle;
}
EXPORT_SYMBOL(ion_alloc);

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	MMProfileLogEx(ION_MMP_Events[PROFILE_GET_PHYS], MMProfileFlagStart,
		       (unsigned int)client, (unsigned int)handle);
	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n", __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	MMProfileLogEx(ION_MMP_Events[PROFILE_GET_PHYS], MMProfileFlagEnd,
		       buffer->size, *addr);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		MMProfileLogEx(ION_MMP_Events[PROFILE_UNMAP_KERNEL],
			       MMProfileFlagStart, buffer->size, 0);
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		MMProfileLogEx(ION_MMP_Events[PROFILE_UNMAP_KERNEL],
			       MMProfileFlagEnd, buffer->size, 0);
buffer->vaddr = NULL; } } static void ion_handle_kmap_put(struct ion_handle *handle) { struct ion_buffer *buffer = handle->buffer; handle->kmap_cnt--; if (!handle->kmap_cnt) ion_buffer_kmap_put(buffer); } void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle) { struct ion_buffer *buffer; void *vaddr; mutex_lock(&client->lock); if (!ion_handle_validate(client, handle)) { pr_err("%s: invalid handle passed to map_kernel.\n", __func__); mutex_unlock(&client->lock); return ERR_PTR(-EINVAL); } buffer = handle->buffer; if (!handle->buffer->heap->ops->map_kernel) { pr_err("%s: map_kernel is not implemented by this heap.\n", __func__); mutex_unlock(&client->lock); return ERR_PTR(-ENODEV); } mutex_lock(&buffer->lock); vaddr = ion_handle_kmap_get(handle); mutex_unlock(&buffer->lock); mutex_unlock(&client->lock); return vaddr; } EXPORT_SYMBOL(ion_map_kernel); void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle) { struct ion_buffer *buffer; mutex_lock(&client->lock); buffer = handle->buffer; mutex_lock(&buffer->lock); ion_handle_kmap_put(handle); mutex_unlock(&buffer->lock); mutex_unlock(&client->lock); } EXPORT_SYMBOL(ion_unmap_kernel); static int ion_debug_client_show(struct seq_file *s, void *unused) { struct ion_client *client = s->private; struct rb_node *n; size_t sizes[ION_NUM_HEAP_IDS] = {0}; const char *names[ION_NUM_HEAP_IDS] = {0}; int i; mutex_lock(&client->lock); for (n = rb_first(&client->handles); n; n = rb_next(n)) { struct ion_handle *handle = rb_entry(n, struct ion_handle, node); unsigned int id = handle->buffer->heap->id; if (!names[id]) names[id] = handle->buffer->heap->name; sizes[id] += handle->buffer->size; } mutex_unlock(&client->lock); seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes"); for (i = 0; i < ION_NUM_HEAP_IDS; i++) { if (!names[i]) continue; seq_printf(s, "%16.16s: %16u\n", names[i], sizes[i]); } return 0; } static int ion_debug_client_open(struct inode *inode, struct file *file) { return single_open(file, ion_debug_client_show, inode->i_private); } static const struct file_operations debug_client_fops = { .open = ion_debug_client_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; struct ion_client *ion_client_create(struct ion_device *dev, const char *name) { struct ion_client *client; struct task_struct *task; struct rb_node **p; struct rb_node *parent = NULL; struct ion_client *entry; char debug_name[64]; pid_t pid; get_task_struct(current->group_leader); task_lock(current->group_leader); pid = task_pid_nr(current->group_leader); /* don't bother to store task struct for kernel threads, they can't be killed anyway */ if (current->group_leader->flags & PF_KTHREAD) { put_task_struct(current->group_leader); task = NULL; } else { task = current->group_leader; } task_unlock(current->group_leader); client = kzalloc(sizeof(struct ion_client), GFP_KERNEL); if (!client) { if (task) put_task_struct(current->group_leader); return ERR_PTR(-ENOMEM); } client->dev = dev; client->handles = RB_ROOT; idr_init(&client->idr); mutex_init(&client->lock); client->name = name; client->task = task; client->pid = pid; down_write(&dev->lock); p = &dev->clients.rb_node; while (*p) { parent = *p; entry = rb_entry(parent, struct ion_client, node); if (client < entry) p = &(*p)->rb_left; else if (client > entry) p = &(*p)->rb_right; } rb_link_node(&client->node, parent, p); rb_insert_color(&client->node, &dev->clients); snprintf(debug_name, 64, "%u", client->pid); client->debug_root = 
debugfs_create_file(debug_name, 0664, dev->debug_root, client, &debug_client_fops); up_write(&dev->lock); return client; } EXPORT_SYMBOL(ion_client_create); void ion_client_destroy(struct ion_client *client) { struct ion_device *dev = client->dev; struct rb_node *n; pr_debug("%s: %d\n", __func__, __LINE__); while ((n = rb_first(&client->handles))) { struct ion_handle *handle = rb_entry(n, struct ion_handle, node); mutex_lock(&client->lock); ion_handle_destroy(&handle->ref); mutex_unlock(&client->lock); } idr_remove_all(&client->idr); idr_destroy(&client->idr); down_write(&dev->lock); if (client->task) put_task_struct(client->task); rb_erase(&client->node, &dev->clients); debugfs_remove_recursive(client->debug_root); up_write(&dev->lock); kfree(client); } EXPORT_SYMBOL(ion_client_destroy); struct sg_table *ion_sg_table(struct ion_client *client, struct ion_handle *handle) { struct ion_buffer *buffer; struct sg_table *table; mutex_lock(&client->lock); if (!ion_handle_validate(client, handle)) { pr_err("%s: invalid handle passed to map_dma.\n", __func__); mutex_unlock(&client->lock); return ERR_PTR(-EINVAL); } buffer = handle->buffer; table = buffer->sg_table; mutex_unlock(&client->lock); return table; } EXPORT_SYMBOL(ion_sg_table); static void ion_buffer_sync_for_device(struct ion_buffer *buffer, struct device *dev, enum dma_data_direction direction); static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment, enum dma_data_direction direction) { struct dma_buf *dmabuf = attachment->dmabuf; struct ion_buffer *buffer = dmabuf->priv; ion_buffer_sync_for_device(buffer, attachment->dev, direction); return buffer->sg_table; } static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *table, enum dma_data_direction direction) { } struct ion_vma_list { struct list_head list; struct vm_area_struct *vma; }; static void ion_buffer_sync_for_device(struct ion_buffer *buffer, struct device *dev, enum dma_data_direction dir) { struct ion_vma_list *vma_list; int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; int i; pr_debug("%s: syncing for device %s\n", __func__, dev ? 
dev_name(dev) : "null"); if (!ion_buffer_fault_user_mappings(buffer)) return; mutex_lock(&buffer->lock); for (i = 0; i < pages; i++) { struct page *page = buffer->pages[i]; if (ion_buffer_page_is_dirty(page)) __dma_page_cpu_to_dev(page, 0, PAGE_SIZE, dir); ion_buffer_page_clean(buffer->pages + i); } list_for_each_entry(vma_list, &buffer->vmas, list) { struct vm_area_struct *vma = vma_list->vma; zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, NULL); } mutex_unlock(&buffer->lock); } int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct ion_buffer *buffer = vma->vm_private_data; int ret; mutex_lock(&buffer->lock); ion_buffer_page_dirty(buffer->pages + vmf->pgoff); BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]); ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, ion_buffer_page(buffer->pages[vmf->pgoff])); mutex_unlock(&buffer->lock); if (ret) return VM_FAULT_ERROR; return VM_FAULT_NOPAGE; } static void ion_vm_open(struct vm_area_struct *vma) { struct ion_buffer *buffer = vma->vm_private_data; struct ion_vma_list *vma_list; vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL); if (!vma_list) return; vma_list->vma = vma; mutex_lock(&buffer->lock); list_add(&vma_list->list, &buffer->vmas); mutex_unlock(&buffer->lock); pr_debug("%s: adding %p\n", __func__, vma); } static void ion_vm_close(struct vm_area_struct *vma) { struct ion_buffer *buffer = vma->vm_private_data; struct ion_vma_list *vma_list, *tmp; pr_debug("%s\n", __func__); mutex_lock(&buffer->lock); list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) { if (vma_list->vma != vma) continue; list_del(&vma_list->list); kfree(vma_list); pr_debug("%s: deleting %p\n", __func__, vma); break; } mutex_unlock(&buffer->lock); } struct vm_operations_struct ion_vma_ops = { .open = ion_vm_open, .close = ion_vm_close, .fault = ion_vm_fault, }; static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) { struct ion_buffer *buffer = dmabuf->priv; int ret = 0; MMProfileLogEx(ION_MMP_Events[PROFILE_MAP_USER], MMProfileFlagStart, buffer->size, vma->vm_start); if (!buffer->heap->ops->map_user) { pr_err("%s: this heap does not define a method for mapping " "to userspace\n", __func__); return -EINVAL; } if (ion_buffer_fault_user_mappings(buffer)) { vma->vm_private_data = buffer; vma->vm_ops = &ion_vma_ops; ion_vm_open(vma); return 0; } //if (!(buffer->flags & ION_FLAG_CACHED)) //vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); mutex_lock(&buffer->lock); /* now map it to userspace */ ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma); mutex_unlock(&buffer->lock); if (ret) pr_err("%s: failure mapping buffer to userspace\n", __func__); MMProfileLogEx(ION_MMP_Events[PROFILE_MAP_USER], MMProfileFlagEnd, buffer->size, vma->vm_start); return ret; } static void ion_dma_buf_release(struct dma_buf *dmabuf) { struct ion_buffer *buffer = dmabuf->priv; ion_buffer_put(buffer); } static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset) { struct ion_buffer *buffer = dmabuf->priv; return buffer->vaddr + offset * PAGE_SIZE; } static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset, void *ptr) { return; } static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len, enum dma_data_direction direction) { struct ion_buffer *buffer = dmabuf->priv; void *vaddr; if (!buffer->heap->ops->map_kernel) { pr_err("%s: map kernel is not implemented by this heap.\n", __func__); return -ENODEV; } mutex_lock(&buffer->lock); 
vaddr = ion_buffer_kmap_get(buffer); mutex_unlock(&buffer->lock); if (IS_ERR(vaddr)) return PTR_ERR(vaddr); return 0; } static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len, enum dma_data_direction direction) { struct ion_buffer *buffer = dmabuf->priv; mutex_lock(&buffer->lock); ion_buffer_kmap_put(buffer); mutex_unlock(&buffer->lock); } struct dma_buf_ops dma_buf_ops = { .map_dma_buf = ion_map_dma_buf, .unmap_dma_buf = ion_unmap_dma_buf, .mmap = ion_mmap, .release = ion_dma_buf_release, .begin_cpu_access = ion_dma_buf_begin_cpu_access, .end_cpu_access = ion_dma_buf_end_cpu_access, .kmap_atomic = ion_dma_buf_kmap, .kunmap_atomic = ion_dma_buf_kunmap, .kmap = ion_dma_buf_kmap, .kunmap = ion_dma_buf_kunmap, }; struct dma_buf *ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle) { struct ion_buffer *buffer; struct dma_buf *dmabuf; bool valid_handle; mutex_lock(&client->lock); valid_handle = ion_handle_validate(client, handle); mutex_unlock(&client->lock); if (!valid_handle) { WARN(1, "%s: invalid handle passed to share.\n", __func__); return ERR_PTR(-EINVAL); } buffer = handle->buffer; ion_buffer_get(buffer); dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR); if (IS_ERR(dmabuf)) { ion_buffer_put(buffer); return dmabuf; } return dmabuf; } EXPORT_SYMBOL(ion_share_dma_buf); int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle) { struct dma_buf *dmabuf; int fd; dmabuf = ion_share_dma_buf(client, handle); if (IS_ERR(dmabuf)) return PTR_ERR(dmabuf); fd = dma_buf_fd(dmabuf, O_CLOEXEC); if (fd < 0) dma_buf_put(dmabuf); return fd; } EXPORT_SYMBOL(ion_share_dma_buf_fd); struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd) { struct dma_buf *dmabuf; struct ion_buffer *buffer; struct ion_handle *handle; int ret; MMProfileLogEx(ION_MMP_Events[PROFILE_IMPORT], MMProfileFlagStart, 1, 1); dmabuf = dma_buf_get(fd); if (IS_ERR(dmabuf)) return ERR_PTR(PTR_ERR(dmabuf)); /* if this memory came from ion */ if (dmabuf->ops != &dma_buf_ops) { pr_err("%s: can not import dmabuf from another exporter\n", __func__); dma_buf_put(dmabuf); return ERR_PTR(-EINVAL); } buffer = dmabuf->priv; mutex_lock(&client->lock); /* if a handle exists for this buffer just take a reference to it */ handle = ion_handle_lookup(client, buffer); if (!IS_ERR(handle)) { ion_handle_get(handle); goto end; } handle = ion_handle_create(client, buffer); if (IS_ERR(handle)) goto end; ret = ion_handle_add(client, handle); if (ret) { ion_handle_put(handle); handle = ERR_PTR(ret); } end: mutex_unlock(&client->lock); dma_buf_put(dmabuf); MMProfileLogEx(ION_MMP_Events[PROFILE_IMPORT], MMProfileFlagEnd, 1, 1); return handle; } EXPORT_SYMBOL(ion_import_dma_buf); static int ion_sync_for_device(struct ion_client *client, int fd) { struct dma_buf *dmabuf; struct ion_buffer *buffer; dmabuf = dma_buf_get(fd); if (IS_ERR(dmabuf)) return PTR_ERR(dmabuf); /* if this memory came from ion */ if (dmabuf->ops != &dma_buf_ops) { pr_err("%s: can not sync dmabuf from another exporter\n", __func__); dma_buf_put(dmabuf); return -EINVAL; } buffer = dmabuf->priv; dma_sync_sg_for_device(NULL, buffer->sg_table->sgl, buffer->sg_table->nents, DMA_BIDIRECTIONAL); dma_buf_put(dmabuf); return 0; } static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct ion_client *client = filp->private_data; switch (cmd) { case ION_IOC_ALLOC: { struct ion_allocation_data data; struct ion_handle *handle; if (copy_from_user(&data, (void __user *)arg, 
sizeof(data))) return -EFAULT; handle = ion_alloc(client, data.len, data.align, data.heap_id_mask, data.flags); if (IS_ERR(handle)) return PTR_ERR(handle); data.handle = (struct ion_handle *)handle->id; if (copy_to_user((void __user *)arg, &data, sizeof(data))) { ion_free(client, handle); return -EFAULT; } break; } case ION_IOC_FREE: { struct ion_handle_data data; struct ion_handle *handle; if (copy_from_user(&data, (void __user *)arg, sizeof(struct ion_handle_data))) return -EFAULT; mutex_lock(&client->lock); handle = ion_uhandle_get(client, (int)data.handle); mutex_unlock(&client->lock); if(IS_ERR_OR_NULL(handle)) { pr_err("%s: handle invalid, handle_id=%d.\n", __FUNCTION__, (int)data.handle); return -EINVAL; } ion_free(client, handle); break; } case ION_IOC_SHARE: case ION_IOC_MAP: { struct ion_fd_data data; struct ion_handle *handle; if (copy_from_user(&data, (void __user *)arg, sizeof(data))) return -EFAULT; handle = ion_uhandle_get(client, (int)data.handle); if(IS_ERR_OR_NULL(handle)) { pr_err("%s: handle invalid, handle_id=%d\n", __FUNCTION__, (int)data.handle); return -EINVAL; } data.fd = ion_share_dma_buf_fd(client, handle); if (copy_to_user((void __user *)arg, &data, sizeof(data))) return -EFAULT; if (data.fd < 0) return data.fd; break; } case ION_IOC_IMPORT: { struct ion_fd_data data; struct ion_handle *handle; int ret = 0; if (copy_from_user(&data, (void __user *)arg, sizeof(struct ion_fd_data))) return -EFAULT; handle = ion_import_dma_buf(client, data.fd); if (IS_ERR(handle)) ret = PTR_ERR(handle); else data.handle = (struct ion_handle *)handle->id; if (copy_to_user((void __user *)arg, &data, sizeof(struct ion_fd_data))) return -EFAULT; if (ret < 0) return ret; break; } case ION_IOC_SYNC: { struct ion_fd_data data; if (copy_from_user(&data, (void __user *)arg, sizeof(struct ion_fd_data))) return -EFAULT; ion_sync_for_device(client, data.fd); break; } case ION_IOC_CUSTOM: { struct ion_device *dev = client->dev; struct ion_custom_data data; if (!dev->custom_ioctl) return -ENOTTY; if (copy_from_user(&data, (void __user *)arg, sizeof(struct ion_custom_data))) return -EFAULT; return dev->custom_ioctl(client, data.cmd, data.arg); } default: return -ENOTTY; } return 0; } static int ion_release(struct inode *inode, struct file *file) { struct ion_client *client = file->private_data; pr_debug("%s: %d\n", __func__, __LINE__); ion_client_destroy(client); return 0; } static int ion_open(struct inode *inode, struct file *file) { struct miscdevice *miscdev = file->private_data; struct ion_device *dev = container_of(miscdev, struct ion_device, dev); struct ion_client *client; pr_debug("%s: %d\n", __func__, __LINE__); client = ion_client_create(dev, "user"); if (IS_ERR(client)) return PTR_ERR(client); file->private_data = client; return 0; } static const struct file_operations ion_fops = { .owner = THIS_MODULE, .open = ion_open, .release = ion_release, .unlocked_ioctl = ion_ioctl, }; static size_t ion_debug_heap_total(struct ion_client *client, unsigned int id) { size_t size = 0; struct rb_node *n; mutex_lock(&client->lock); for (n = rb_first(&client->handles); n; n = rb_next(n)) { struct ion_handle *handle = rb_entry(n, struct ion_handle, node); if (handle->buffer->heap->id == id) size += handle->buffer->size; } mutex_unlock(&client->lock); return size; } static int ion_debug_heap_show(struct seq_file *s, void *unused) { struct ion_heap *heap = s->private; struct ion_device *dev = heap->dev; struct rb_node *n; size_t total_size = 0; size_t total_orphaned_size = 0; seq_printf(s, "%16.s %16.s 
%16.s\n", "client", "pid", "size"); seq_printf(s, "----------------------------------------------------\n"); down_read(&dev->lock); for (n = rb_first(&dev->clients); n; n = rb_next(n)) { struct ion_client *client = rb_entry(n, struct ion_client, node); size_t size = ion_debug_heap_total(client, heap->id); if (!size) continue; if (client->task) { char task_comm[TASK_COMM_LEN]; get_task_comm(task_comm, client->task); seq_printf(s, "%16.s %16u %16u\n", task_comm, client->pid, size); } else { seq_printf(s, "%16.s %16u %16u\n", client->name, client->pid, size); } } up_read(&dev->lock); seq_printf(s, "----------------------------------------------------\n"); seq_printf(s, "orphaned allocations (info is from last known client):" "\n"); mutex_lock(&dev->buffer_lock); for (n = rb_first(&dev->buffers); n; n = rb_next(n)) { struct ion_buffer *buffer = rb_entry(n, struct ion_buffer, node); if (buffer->heap->id != heap->id) continue; total_size += buffer->size; if (!buffer->handle_count) { seq_printf(s, "%16.s %16u %16u %d %d\n", buffer->task_comm, buffer->pid, buffer->size, buffer->kmap_cnt, atomic_read(&buffer->ref.refcount)); total_orphaned_size += buffer->size; } } mutex_unlock(&dev->buffer_lock); seq_printf(s, "----------------------------------------------------\n"); seq_printf(s, "%16.s %16u\n", "total orphaned", total_orphaned_size); seq_printf(s, "%16.s %16u\n", "total ", total_size); if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) seq_printf(s, "%16.s %16u\n", "deferred free", heap->free_list_size); seq_printf(s, "----------------------------------------------------\n"); if (heap->debug_show) heap->debug_show(heap, s, unused); return 0; } static int ion_debug_heap_open(struct inode *inode, struct file *file) { return single_open(file, ion_debug_heap_show, inode->i_private); } static const struct file_operations debug_heap_fops = { .open = ion_debug_heap_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; #ifdef DEBUG_HEAP_SHRINKER static int debug_shrink_set(void *data, u64 val) { struct ion_heap *heap = data; struct shrink_control sc; int objs; sc.gfp_mask = -1; sc.nr_to_scan = 0; if (!val) return 0; objs = heap->shrinker.shrink(&heap->shrinker, &sc); sc.nr_to_scan = objs; heap->shrinker.shrink(&heap->shrinker, &sc); return 0; } static int debug_shrink_get(void *data, u64 *val) { struct ion_heap *heap = data; struct shrink_control sc; int objs; sc.gfp_mask = -1; sc.nr_to_scan = 0; objs = heap->shrinker.shrink(&heap->shrinker, &sc); *val = objs; return 0; } DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get, debug_shrink_set, "%llu\n"); #endif void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap) { if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma || !heap->ops->unmap_dma) pr_err("%s: can not add heap with invalid ops struct.\n", __func__); if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) ion_heap_init_deferred_free(heap); heap->dev = dev; down_write(&dev->lock); /* use negative heap->id to reverse the priority -- when traversing the list later attempt higher id numbers first */ plist_node_init(&heap->node, -heap->id); plist_add(&heap->node, &dev->heaps); debugfs_create_file(heap->name, 0664, dev->debug_root, heap, &debug_heap_fops); #ifdef DEBUG_HEAP_SHRINKER if (heap->shrinker.shrink) { char debug_name[64]; snprintf(debug_name, 64, "%s_shrink", heap->name); debugfs_create_file(debug_name, 0644, dev->debug_root, heap, &debug_shrink_fops); } #endif up_write(&dev->lock); } struct ion_device *ion_device_create(long (*custom_ioctl) 
(struct ion_client *client, unsigned int cmd, unsigned long arg)) { struct ion_device *idev; int ret; idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL); if (!idev) return ERR_PTR(-ENOMEM); idev->dev.minor = MISC_DYNAMIC_MINOR; idev->dev.name = "ion"; idev->dev.fops = &ion_fops; idev->dev.parent = NULL; ret = misc_register(&idev->dev); if (ret) { pr_err("ion: failed to register misc device.\n"); return ERR_PTR(ret); } idev->debug_root = debugfs_create_dir("ion", NULL); if (!idev->debug_root) pr_err("ion: failed to create debug files.\n"); idev->custom_ioctl = custom_ioctl; idev->buffers = RB_ROOT; mutex_init(&idev->buffer_lock); init_rwsem(&idev->lock); plist_head_init(&idev->heaps); idev->clients = RB_ROOT; return idev; } void ion_device_destroy(struct ion_device *dev) { misc_deregister(&dev->dev); /* XXX need to free the heaps and clients ? */ kfree(dev); } void __init ion_reserve(struct ion_platform_data *data) { int i; for (i = 0; i < data->nr; i++) { if (data->heaps[i].size == 0) continue; IONMSG("reserve memory: base=0x%x, size=0x%x\n", data->heaps[i].base, data->heaps[i].size); if (data->heaps[i].base == 0) { phys_addr_t paddr; paddr = memblock_alloc_base(data->heaps[i].size, data->heaps[i].align, MEMBLOCK_ALLOC_ANYWHERE); if (!paddr) { pr_err("%s: error allocating memblock for " "heap %d\n", __func__, i); continue; } data->heaps[i].base = paddr; } else { int ret = memblock_reserve(data->heaps[i].base, data->heaps[i].size); if (ret) pr_err("memblock reserve of %x@%lx failed\n", data->heaps[i].size, data->heaps[i].base); } pr_info("%s: %s reserved base %lx size %d\n", __func__, data->heaps[i].name, data->heaps[i].base, data->heaps[i].size); } }
HelllGuest/boom_kernel_sprout_kk
drivers/gpu/ion/ion.c
C
gpl-2.0
48,833
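The ion_ioctl() handler near the end of the file above defines the user-space contract for this driver: ION_IOC_ALLOC returns a client-local handle, ION_IOC_SHARE (or ION_IOC_MAP) exports the buffer as a mappable dma-buf file descriptor, and ION_IOC_FREE drops the client's reference. The sketch below walks that flow from user space; it assumes this tree's uapi <linux/ion.h> exposes struct ion_allocation_data, ion_fd_data and ion_handle_data with the fields the handler reads, and the heap_id_mask of ~0 (try every heap) is a test-only choice, not something the driver mandates.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/ion.h>  /* ion_allocation_data, ion_fd_data, ION_IOC_* */

int main(void)
{
	struct ion_allocation_data alloc;
	struct ion_fd_data share;
	struct ion_handle_data free_data;
	int ion_fd;
	void *va;

	ion_fd = open("/dev/ion", O_RDONLY);
	if (ion_fd < 0) { perror("open /dev/ion"); return 1; }

	/* allocate one page; ~0 asks the kernel to try every heap */
	memset(&alloc, 0, sizeof(alloc));
	alloc.len = 4096;
	alloc.align = 4096;
	alloc.heap_id_mask = ~0u;
	alloc.flags = 0;
	if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc)) { perror("ION_IOC_ALLOC"); return 1; }

	/* export the buffer as a dma-buf fd, then map and touch it */
	share.handle = alloc.handle;
	if (ioctl(ion_fd, ION_IOC_SHARE, &share)) { perror("ION_IOC_SHARE"); return 1; }

	va = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, share.fd, 0);
	if (va == MAP_FAILED) { perror("mmap"); return 1; }
	memset(va, 0xAB, 4096);
	munmap(va, 4096);
	close(share.fd);

	/* drop the client handle; the buffer dies with its last reference */
	free_data.handle = alloc.handle;
	ioctl(ion_fd, ION_IOC_FREE, &free_data);
	close(ion_fd);
	return 0;
}

Because the buffer is reference counted (the handle, the exported dma-buf file, and any kernel mapping each hold a reference), the order of munmap/close/ION_IOC_FREE does not matter for correctness; the allocation is only destroyed when the last reference is dropped.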
/************************************************************************** * Copyright (c) 2007, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * **************************************************************************/ #include <drm/drmP.h> #ifdef CONFIG_DRM_VXD_BYT #include "vxd_drv.h" #else #include "psb_drv.h" #include "psb_reg.h" #endif #ifdef SUPPORT_VSP #include "vsp.h" #endif /* * Code for the MSVDX/TOPAZ MMU: */ /* * clflush on one processor only: * clflush should apparently flush the cache line on all processors in an * SMP system. */ /* * kmap atomic: * The usage of the slots must be completely encapsulated within a spinlock, and * no other functions that may be using the locks for other purposed may be * called from within the locked region. * Since the slots are per processor, this will guarantee that we are the only * user. */ /* * TODO: Inserting ptes from an interrupt handler: * This may be desirable for some SGX functionality where the GPU can fault in * needed pages. For that, we need to make an atomic insert_pages function, that * may fail. * If it fails, the caller need to insert the page using a workqueue function, * but on average it should be fast. */ struct psb_mmu_driver { /* protects driver- and pd structures. Always take in read mode * before taking the page table spinlock. */ struct rw_semaphore sem; /* protects page tables, directory tables and pt tables. * and pt structures. 
*/ spinlock_t lock; atomic_t needs_tlbflush; uint8_t __iomem *register_map; struct psb_mmu_pd *default_pd; /*uint32_t bif_ctrl;*/ int has_clflush; int clflush_add; unsigned long clflush_mask; struct drm_psb_private *dev_priv; enum mmu_type_t mmu_type; }; struct psb_mmu_pd; struct psb_mmu_pt { struct psb_mmu_pd *pd; uint32_t index; uint32_t count; struct page *p; uint32_t *v; }; struct psb_mmu_pd { struct psb_mmu_driver *driver; int hw_context; struct psb_mmu_pt **tables; struct page *p; struct page *dummy_pt; struct page *dummy_page; uint32_t pd_mask; uint32_t invalid_pde; uint32_t invalid_pte; }; static inline uint32_t psb_mmu_pt_index(uint32_t offset) { return (offset >> PSB_PTE_SHIFT) & 0x3FF; } static inline uint32_t psb_mmu_pd_index(uint32_t offset) { return offset >> PSB_PDE_SHIFT; } #if defined(CONFIG_X86) static inline void psb_clflush(volatile void *addr) { __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory"); } static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr) { if (!driver->has_clflush) return; mb(); psb_clflush(addr); mb(); } static void psb_page_clflush(struct psb_mmu_driver *driver, struct page* page) { uint32_t clflush_add = driver->clflush_add >> PAGE_SHIFT; uint32_t clflush_count = PAGE_SIZE / clflush_add; int i; uint8_t *clf; #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) clf = kmap_atomic(page, KM_USER0); #else clf = kmap_atomic(page); #endif mb(); for (i = 0; i < clflush_count; ++i) { psb_clflush(clf); clf += clflush_add; } mb(); #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) kunmap_atomic(clf, KM_USER0); #else kunmap_atomic(clf); #endif } static void psb_pages_clflush(struct psb_mmu_driver *driver, struct page *page[], unsigned long num_pages) { int i; if (!driver->has_clflush) return ; for (i = 0; i < num_pages; i++) psb_page_clflush(driver, *page++); } #else static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr) { ; } static void psb_pages_clflush(struct psb_mmu_driver *driver, struct page *page[], unsigned long num_pages) { printk("Dumy psb_pages_clflush\n"); } #endif static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force) { if (atomic_read(&driver->needs_tlbflush) || force) { if (!driver->dev_priv) goto out; if (driver->mmu_type == IMG_MMU) { atomic_set( &driver->dev_priv->msvdx_mmu_invaldc, 1); #ifndef CONFIG_DRM_VXD_BYT atomic_set( &driver->dev_priv->topaz_mmu_invaldc, 1); #endif } else if (driver->mmu_type == VSP_MMU) { #ifdef SUPPORT_VSP atomic_set(&driver->dev_priv->vsp_mmu_invaldc, 1); #endif } else { DRM_ERROR("MMU: invalid MMU type %d\n", driver->mmu_type); } } out: atomic_set(&driver->needs_tlbflush, 0); } #if 0 static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force) { down_write(&driver->sem); psb_mmu_flush_pd_locked(driver, force); up_write(&driver->sem); } #endif static void psb_virtual_addr_clflush(struct psb_mmu_driver *driver, void *vaddr, uint32_t num_pages) { int i, j; uint8_t *clf = (uint8_t*)vaddr; uint32_t clflush_add = (driver->clflush_add * sizeof(uint32_t)) >> PAGE_SHIFT; uint32_t clflush_count = PAGE_SIZE / clflush_add; DRM_INFO("clflush pages %d\n", num_pages); mb(); for (i = 0; i < num_pages; ++i) { for (j = 0; j < clflush_count; ++j) { psb_clflush(clf); clf += clflush_add; } } mb(); } void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot) { if (rc_prot) down_write(&driver->sem); if (!driver->dev_priv) goto out; if (driver->mmu_type == IMG_MMU) { atomic_set(&driver->dev_priv->msvdx_mmu_invaldc, 1); #ifndef CONFIG_DRM_VXD_BYT 
atomic_set(&driver->dev_priv->topaz_mmu_invaldc, 1); #endif } else if (driver->mmu_type == VSP_MMU) { #ifdef SUPPORT_VSP atomic_set(&driver->dev_priv->vsp_mmu_invaldc, 1); #endif } else { DRM_ERROR("MMU: invalid MMU type %d\n", driver->mmu_type); } out: if (rc_prot) up_write(&driver->sem); } void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context) { /*ttm_tt_cache_flush(&pd->p, 1);*/ psb_pages_clflush(pd->driver, &pd->p, 1); down_write(&pd->driver->sem); wmb(); psb_mmu_flush_pd_locked(pd->driver, 1); pd->hw_context = hw_context; up_write(&pd->driver->sem); } static inline unsigned long psb_pd_addr_end(unsigned long addr, unsigned long end) { addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK; return (addr < end) ? addr : end; } static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type) { uint32_t mask = PSB_PTE_VALID; if (type & PSB_MMU_CACHED_MEMORY) mask |= PSB_PTE_CACHED; if (type & PSB_MMU_RO_MEMORY) mask |= PSB_PTE_RO; if (type & PSB_MMU_WO_MEMORY) mask |= PSB_PTE_WO; return (pfn << PAGE_SHIFT) | mask; } #ifdef SUPPORT_VSP static inline uint32_t vsp_mmu_mask_pte(uint32_t pfn, int type) { return (pfn & VSP_PDE_MASK) | VSP_PTE_VALID; } #endif struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver, int trap_pagefaults, int invalid_type) { struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL); uint32_t *v; int i; if (!pd) return NULL; pd->p = alloc_page(GFP_DMA32); if (!pd->p) goto out_err1; pd->dummy_pt = alloc_page(GFP_DMA32); if (!pd->dummy_pt) goto out_err2; pd->dummy_page = alloc_page(GFP_DMA32); if (!pd->dummy_page) goto out_err3; if (!trap_pagefaults) { if (driver->mmu_type == IMG_MMU) { pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt), invalid_type); pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page), invalid_type); } else if (driver->mmu_type == VSP_MMU) { #ifdef SUPPORT_VSP pd->invalid_pde = vsp_mmu_mask_pte(page_to_pfn(pd->dummy_pt), invalid_type); pd->invalid_pte = vsp_mmu_mask_pte(page_to_pfn(pd->dummy_page), invalid_type); #endif } else { DRM_ERROR("MMU: invalid MMU type %d\n", driver->mmu_type); goto out_err4; } } else { pd->invalid_pde = 0; pd->invalid_pte = 0; } v = kmap(pd->dummy_pt); if (!v) goto out_err4; for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) v[i] = pd->invalid_pte; kunmap(pd->dummy_pt); v = kmap(pd->p); if (!v) goto out_err4; for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) v[i] = pd->invalid_pde; kunmap(pd->p); v = kmap(pd->dummy_page); if (!v) goto out_err4; clear_page(v); kunmap(pd->dummy_page); pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024); if (!pd->tables) goto out_err4; pd->hw_context = -1; pd->pd_mask = PSB_PTE_VALID; pd->driver = driver; return pd; out_err4: __free_page(pd->dummy_page); out_err3: __free_page(pd->dummy_pt); out_err2: __free_page(pd->p); out_err1: kfree(pd); return NULL; } void psb_mmu_free_pt(struct psb_mmu_pt *pt) { __free_page(pt->p); kfree(pt); } void psb_mmu_free_pagedir(struct psb_mmu_pd *pd) { struct psb_mmu_driver *driver = pd->driver; struct psb_mmu_pt *pt; int i; down_write(&driver->sem); if (pd->hw_context != -1) psb_mmu_flush_pd_locked(driver, 1); /* Should take the spinlock here, but we don't need to do that since we have the semaphore in write mode. 
*/ for (i = 0; i < 1024; ++i) { pt = pd->tables[i]; if (pt) psb_mmu_free_pt(pt); } vfree(pd->tables); __free_page(pd->dummy_page); __free_page(pd->dummy_pt); __free_page(pd->p); kfree(pd); up_write(&driver->sem); } static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd) { struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL); void *v; uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT; uint32_t clflush_count = PAGE_SIZE / clflush_add; spinlock_t *lock = &pd->driver->lock; uint8_t *clf; uint32_t *ptes; int i; if (!pt) return NULL; pt->p = alloc_page(GFP_DMA32); if (!pt->p) { kfree(pt); return NULL; } spin_lock(lock); #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) v = kmap_atomic(pt->p, KM_USER0); #else v = kmap_atomic(pt->p); #endif clf = (uint8_t *) v; ptes = (uint32_t *) v; for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) *ptes++ = pd->invalid_pte; #if defined(CONFIG_X86) if (pd->driver->has_clflush && pd->hw_context != -1) { mb(); for (i = 0; i < clflush_count; ++i) { psb_clflush(clf); clf += clflush_add; } mb(); } #endif #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) kunmap_atomic(v, KM_USER0); #else kunmap_atomic(v); #endif spin_unlock(lock); pt->count = 0; pt->pd = pd; pt->index = 0; return pt; } struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd, unsigned long addr) { uint32_t index = psb_mmu_pd_index(addr); struct psb_mmu_pt *pt; uint32_t *v; spinlock_t *lock = &pd->driver->lock; struct psb_mmu_driver *driver = pd->driver; spin_lock(lock); pt = pd->tables[index]; while (!pt) { spin_unlock(lock); pt = psb_mmu_alloc_pt(pd); if (!pt) return NULL; spin_lock(lock); if (pd->tables[index]) { spin_unlock(lock); psb_mmu_free_pt(pt); spin_lock(lock); pt = pd->tables[index]; continue; } #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) v = kmap_atomic(pd->p, KM_USER0); #else v = kmap_atomic(pd->p); #endif pd->tables[index] = pt; if (driver->mmu_type == IMG_MMU) v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask; #ifdef SUPPORT_VSP else if (driver->mmu_type == VSP_MMU) v[index] = (page_to_pfn(pt->p)); #endif else DRM_ERROR("MMU: invalid MMU type %d\n", driver->mmu_type); pt->index = index; #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) kunmap_atomic((void *) v, KM_USER0); #else kunmap_atomic((void *) v); #endif if (pd->hw_context != -1) { psb_mmu_clflush(pd->driver, (void *) &v[index]); atomic_set(&pd->driver->needs_tlbflush, 1); } } #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) pt->v = kmap_atomic(pt->p, KM_USER0); #else pt->v = kmap_atomic(pt->p); #endif return pt; } static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd, unsigned long addr) { uint32_t index = psb_mmu_pd_index(addr); struct psb_mmu_pt *pt; spinlock_t *lock = &pd->driver->lock; spin_lock(lock); pt = pd->tables[index]; if (!pt) { spin_unlock(lock); return NULL; } #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) pt->v = kmap_atomic(pt->p, KM_USER0); #else pt->v = kmap_atomic(pt->p); #endif return pt; } static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt) { struct psb_mmu_pd *pd = pt->pd; uint32_t *v; #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) kunmap_atomic(pt->v, KM_USER0); #else kunmap_atomic(pt->v); #endif if (pt->count == 0) { #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) v = kmap_atomic(pd->p, KM_USER0); #else v = kmap_atomic(pd->p); #endif v[pt->index] = pd->invalid_pde; pd->tables[pt->index] = NULL; if (pd->hw_context != -1) { psb_mmu_clflush(pd->driver, (void *) &v[pt->index]); atomic_set(&pd->driver->needs_tlbflush, 1); } #if 
(LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) kunmap_atomic(pt->v, KM_USER0); #else kunmap_atomic(pt->v); #endif spin_unlock(&pd->driver->lock); psb_mmu_free_pt(pt); return; } spin_unlock(&pd->driver->lock); } static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr, uint32_t pte) { pt->v[psb_mmu_pt_index(addr)] = pte; } static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt, unsigned long addr) { pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte; } #if 0 static uint32_t psb_mmu_check_pte_locked(struct psb_mmu_pd *pd, uint32_t mmu_offset) { uint32_t *v; uint32_t pfn; v = kmap_atomic(pd->p, KM_USER0); if (!v) { printk(KERN_INFO "Could not kmap pde page.\n"); return 0; } pfn = v[psb_mmu_pd_index(mmu_offset)]; /* printk(KERN_INFO "pde is 0x%08x\n",pfn); */ kunmap_atomic(v, KM_USER0); if (((pfn & 0x0F) != PSB_PTE_VALID)) { printk(KERN_INFO "Strange pde at 0x%08x: 0x%08x.\n", mmu_offset, pfn); } v = ioremap(pfn & 0xFFFFF000, 4096); if (!v) { printk(KERN_INFO "Could not kmap pte page.\n"); return 0; } pfn = v[psb_mmu_pt_index(mmu_offset)]; /* printk(KERN_INFO "pte is 0x%08x\n",pfn); */ iounmap(v); if (((pfn & 0x0F) != PSB_PTE_VALID)) { printk(KERN_INFO "Strange pte at 0x%08x: 0x%08x.\n", mmu_offset, pfn); } return pfn >> PAGE_SHIFT; } static void psb_mmu_check_mirrored_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset, uint32_t gtt_pages) { uint32_t start; uint32_t next; printk(KERN_INFO "Checking mirrored gtt 0x%08x %d\n", mmu_offset, gtt_pages); down_read(&pd->driver->sem); start = psb_mmu_check_pte_locked(pd, mmu_offset); mmu_offset += PAGE_SIZE; gtt_pages -= 1; while (gtt_pages--) { next = psb_mmu_check_pte_locked(pd, mmu_offset); if (next != start + 1) { printk(KERN_INFO "Ptes out of order: 0x%08x, 0x%08x.\n", start, next); } start = next; mmu_offset += PAGE_SIZE; } up_read(&pd->driver->sem); } void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset, uint32_t gtt_start, uint32_t gtt_pages) { uint32_t *v; uint32_t start = psb_mmu_pd_index(mmu_offset); struct psb_mmu_driver *driver = pd->driver; int num_pages = gtt_pages; down_read(&driver->sem); spin_lock(&driver->lock); v = kmap_atomic(pd->p, KM_USER0); v += start; while (gtt_pages--) { *v++ = gtt_start | pd->pd_mask; gtt_start += PAGE_SIZE; } /*ttm_tt_cache_flush(&pd->p, num_pages);*/ psb_pages_clflush(pd->driver, &pd->p, num_pages); kunmap_atomic(v, KM_USER0); spin_unlock(&driver->lock); if (pd->hw_context != -1) atomic_set(&pd->driver->needs_tlbflush, 1); up_read(&pd->driver->sem); psb_mmu_flush_pd(pd->driver, 0); } #endif struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver) { struct psb_mmu_pd *pd; /* down_read(&driver->sem); */ pd = driver->default_pd; /* up_read(&driver->sem); */ return pd; } /* Returns the physical address of the PD shared by sgx/msvdx */ uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver) { struct psb_mmu_pd *pd; pd = psb_mmu_get_default_pd(driver); return page_to_pfn(pd->p) << PAGE_SHIFT; } void psb_mmu_driver_takedown(struct psb_mmu_driver *driver) { psb_mmu_free_pagedir(driver->default_pd); kfree(driver); } struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers, int trap_pagefaults, int invalid_type, struct drm_psb_private *dev_priv, enum mmu_type_t mmu_type) { struct psb_mmu_driver *driver; driver = kmalloc(sizeof(*driver), GFP_KERNEL); if (!driver) return NULL; driver->dev_priv = dev_priv; driver->mmu_type = mmu_type; driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults, invalid_type); if (!driver->default_pd) goto 
out_err1; spin_lock_init(&driver->lock); init_rwsem(&driver->sem); down_write(&driver->sem); driver->register_map = registers; atomic_set(&driver->needs_tlbflush, 1); driver->has_clflush = 0; #if defined(CONFIG_X86) if (boot_cpu_has(X86_FEATURE_CLFLSH)) { uint32_t tfms, misc, cap0, cap4, clflush_size; /* * clflush size is determined at kernel setup for x86_64 * but not for i386. We have to do it here. */ cpuid(0x00000001, &tfms, &misc, &cap0, &cap4); clflush_size = ((misc >> 8) & 0xff) * 8; driver->has_clflush = 1; driver->clflush_add = PAGE_SIZE * clflush_size / sizeof(uint32_t); driver->clflush_mask = driver->clflush_add - 1; driver->clflush_mask = ~driver->clflush_mask; } #endif up_write(&driver->sem); return driver; out_err1: kfree(driver); return NULL; } #if defined(CONFIG_X86) static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address, uint32_t num_pages, uint32_t desired_tile_stride, uint32_t hw_tile_stride) { struct psb_mmu_pt *pt; uint32_t rows = 1; uint32_t i; unsigned long addr; unsigned long end; unsigned long next; unsigned long add; unsigned long row_add; unsigned long clflush_add = pd->driver->clflush_add; unsigned long clflush_mask = pd->driver->clflush_mask; if (!pd->driver->has_clflush) { /*ttm_tt_cache_flush(&pd->p, num_pages);*/ psb_pages_clflush(pd->driver, &pd->p, num_pages); return; } if (hw_tile_stride) rows = num_pages / desired_tile_stride; else desired_tile_stride = num_pages; add = desired_tile_stride << PAGE_SHIFT; row_add = hw_tile_stride << PAGE_SHIFT; mb(); for (i = 0; i < rows; ++i) { addr = address; end = addr + add; do { next = psb_pd_addr_end(addr, end); pt = psb_mmu_pt_map_lock(pd, addr); if (!pt) continue; do { psb_clflush(&pt->v [psb_mmu_pt_index(addr)]); } while (addr += clflush_add, (addr & clflush_mask) < next); psb_mmu_pt_unmap_unlock(pt); } while (addr = next, next != end); address += row_add; } mb(); } #else static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address, uint32_t num_pages, uint32_t desired_tile_stride, uint32_t hw_tile_stride) { drm_ttm_cache_flush(&pd->p, num_pages); } #endif void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd, unsigned long address, uint32_t num_pages) { struct psb_mmu_pt *pt; unsigned long addr; unsigned long end; unsigned long next; unsigned long f_address = address; down_read(&pd->driver->sem); addr = address; end = addr + (num_pages << PAGE_SHIFT); do { next = psb_pd_addr_end(addr, end); pt = psb_mmu_pt_alloc_map_lock(pd, addr); if (!pt) goto out; do { psb_mmu_invalidate_pte(pt, addr); --pt->count; } while (addr += PAGE_SIZE, addr < next); psb_mmu_pt_unmap_unlock(pt); } while (addr = next, next != end); out: if (pd->hw_context != -1) psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1); up_read(&pd->driver->sem); if (pd->hw_context != -1) psb_mmu_flush(pd->driver, 0); return; } void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address, uint32_t num_pages, uint32_t desired_tile_stride, uint32_t hw_tile_stride) { struct psb_mmu_pt *pt; uint32_t rows = 1; uint32_t i; unsigned long addr; unsigned long end; unsigned long next; unsigned long add; unsigned long row_add; unsigned long f_address = address; if (hw_tile_stride) rows = num_pages / desired_tile_stride; else desired_tile_stride = num_pages; add = desired_tile_stride << PAGE_SHIFT; row_add = hw_tile_stride << PAGE_SHIFT; /* down_read(&pd->driver->sem); */ /* Make sure we only need to flush this processor's cache */ for (i = 0; i < rows; ++i) { addr = address; end = addr + add; do { next = psb_pd_addr_end(addr, 
end); pt = psb_mmu_pt_map_lock(pd, addr); if (!pt) continue; do { psb_mmu_invalidate_pte(pt, addr); --pt->count; } while (addr += PAGE_SIZE, addr < next); psb_mmu_pt_unmap_unlock(pt); } while (addr = next, next != end); address += row_add; } if (pd->hw_context != -1) psb_mmu_flush_ptes(pd, f_address, num_pages, desired_tile_stride, hw_tile_stride); /* up_read(&pd->driver->sem); */ if (pd->hw_context != -1) psb_mmu_flush(pd->driver, 0); } int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn, unsigned long address, uint32_t num_pages, int type) { struct psb_mmu_pt *pt; struct psb_mmu_driver *driver = pd->driver; uint32_t pte; unsigned long addr; unsigned long end; unsigned long next; unsigned long f_address = address; int ret = 0; down_read(&pd->driver->sem); addr = address; end = addr + (num_pages << PAGE_SHIFT); do { next = psb_pd_addr_end(addr, end); pt = psb_mmu_pt_alloc_map_lock(pd, addr); if (!pt) { ret = -ENOMEM; goto out; } do { if (driver->mmu_type == IMG_MMU) { pte = psb_mmu_mask_pte(start_pfn++, type); #ifdef SUPPORT_VSP } else if (driver->mmu_type == VSP_MMU) { pte = vsp_mmu_mask_pte(start_pfn++, type); #endif } else { DRM_ERROR("MMU: mmu type invalid %d\n", driver->mmu_type); ret = -EINVAL; goto out; } psb_mmu_set_pte(pt, addr, pte); pt->count++; } while (addr += PAGE_SIZE, addr < next); psb_mmu_pt_unmap_unlock(pt); } while (addr = next, next != end); out: if (pd->hw_context != -1) psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1); up_read(&pd->driver->sem); if (pd->hw_context != -1) psb_mmu_flush(pd->driver, 1); return ret; } int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages, unsigned long address, uint32_t num_pages, uint32_t desired_tile_stride, uint32_t hw_tile_stride, int type) { struct psb_mmu_pt *pt; struct psb_mmu_driver *driver = pd->driver; uint32_t rows = 1; uint32_t i; uint32_t pte; unsigned long addr; unsigned long end; unsigned long next; unsigned long add; unsigned long row_add; unsigned long f_address = address; int ret = 0; if (hw_tile_stride) { if (num_pages % desired_tile_stride != 0) return -EINVAL; rows = num_pages / desired_tile_stride; } else { desired_tile_stride = num_pages; } add = desired_tile_stride << PAGE_SHIFT; row_add = hw_tile_stride << PAGE_SHIFT; down_read(&pd->driver->sem); for (i = 0; i < rows; ++i) { addr = address; end = addr + add; do { next = psb_pd_addr_end(addr, end); pt = psb_mmu_pt_alloc_map_lock(pd, addr); if (!pt) { ret = -ENOMEM; goto out; } do { if (driver->mmu_type == IMG_MMU) { pte = psb_mmu_mask_pte( page_to_pfn(*pages++), type); #ifdef SUPPORT_VSP } else if (driver->mmu_type == VSP_MMU) { pte = vsp_mmu_mask_pte( page_to_pfn(*pages++), type); #endif } else { DRM_ERROR("MMU: mmu type invalid %d\n", driver->mmu_type); ret = -EINVAL; goto out; } psb_mmu_set_pte(pt, addr, pte); pt->count++; } while (addr += PAGE_SIZE, addr < next); psb_mmu_pt_unmap_unlock(pt); } while (addr = next, next != end); address += row_add; } out: if (pd->hw_context != -1) psb_mmu_flush_ptes(pd, f_address, num_pages, desired_tile_stride, hw_tile_stride); up_read(&pd->driver->sem); if (pd->hw_context != -1) psb_mmu_flush(pd->driver, 1); return ret; } #if 0 /*comented out, only used in mmu test now*/ void psb_mmu_enable_requestor(struct psb_mmu_driver *driver, uint32_t mask) { mask &= _PSB_MMU_ER_MASK; psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) & ~mask, PSB_CR_BIF_CTRL); (void) psb_ioread32(driver, PSB_CR_BIF_CTRL); } void psb_mmu_disable_requestor(struct psb_mmu_driver *driver, uint32_t mask) { mask &= 
_PSB_MMU_ER_MASK; psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) | mask, PSB_CR_BIF_CTRL); (void) psb_ioread32(driver, PSB_CR_BIF_CTRL); } int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual, unsigned long *pfn) { int ret; struct psb_mmu_pt *pt; uint32_t tmp; spinlock_t *lock = &pd->driver->lock; down_read(&pd->driver->sem); pt = psb_mmu_pt_map_lock(pd, virtual); if (!pt) { uint32_t *v; spin_lock(lock); v = kmap_atomic(pd->p, KM_USER0); tmp = v[psb_mmu_pd_index(virtual)]; kunmap_atomic(v, KM_USER0); spin_unlock(lock); if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) || !(pd->invalid_pte & PSB_PTE_VALID)) { ret = -EINVAL; goto out; } ret = 0; *pfn = pd->invalid_pte >> PAGE_SHIFT; goto out; } tmp = pt->v[psb_mmu_pt_index(virtual)]; if (!(tmp & PSB_PTE_VALID)) { ret = -EINVAL; } else { ret = 0; *pfn = tmp >> PAGE_SHIFT; } psb_mmu_pt_unmap_unlock(pt); out: up_read(&pd->driver->sem); return ret; } void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset) { struct page *p; unsigned long pfn; int ret = 0; struct psb_mmu_pd *pd; uint32_t *v; uint32_t *vmmu; pd = driver->default_pd; if (!pd) printk(KERN_WARNING "Could not get default pd\n"); p = alloc_page(GFP_DMA32); if (!p) { printk(KERN_WARNING "Failed allocating page\n"); return; } v = kmap(p); memset(v, 0x67, PAGE_SIZE); pfn = (offset >> PAGE_SHIFT); ret = psb_mmu_insert_pages(pd, &p, pfn << PAGE_SHIFT, 1, 0, 0, 0); if (ret) { printk(KERN_WARNING "Failed inserting mmu page\n"); goto out_err1; } /* Ioremap the page through the GART aperture */ vmmu = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); if (!vmmu) { printk(KERN_WARNING "Failed ioremapping page\n"); goto out_err2; } /* Read from the page with mmu disabled. */ printk(KERN_INFO "Page first dword is 0x%08x\n", ioread32(vmmu)); /* Enable the mmu for host accesses and read again. 
*/ psb_mmu_enable_requestor(driver, _PSB_MMU_ER_HOST); printk(KERN_INFO "MMU Page first dword is (0x67676767) 0x%08x\n", ioread32(vmmu)); *v = 0x15243705; printk(KERN_INFO "MMU Page new dword is (0x15243705) 0x%08x\n", ioread32(vmmu)); iowrite32(0x16243355, vmmu); (void) ioread32(vmmu); printk(KERN_INFO "Page new dword is (0x16243355) 0x%08x\n", *v); printk(KERN_INFO "Int stat is 0x%08x\n", psb_ioread32(driver, PSB_CR_BIF_INT_STAT)); printk(KERN_INFO "Fault is 0x%08x\n", psb_ioread32(driver, PSB_CR_BIF_FAULT)); /* Disable MMU for host accesses and clear page fault register */ psb_mmu_disable_requestor(driver, _PSB_MMU_ER_HOST); iounmap(vmmu); out_err2: psb_mmu_remove_pages(pd, pfn << PAGE_SHIFT, 1, 0, 0); out_err1: kunmap(p); __free_page(p); } #endif /* void psb_mmu_pgtable_dump(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu); struct psb_mmu_pt *pt; int i, j; uint32_t flags; uint32_t *v; spinlock_t *lock = &pd->driver->lock; down_read(&pd->driver->sem); spin_lock_irqsave(lock, flags); v = kmap_atomic(pd->p, KM_USER0); if (!v) { printk(KERN_INFO "%s: Kmap pg fail, abort\n", __func__); return; } printk(KERN_INFO "%s: start dump mmu page table\n", __func__); for (i = 0; i < 1024; i++) { pt = pd->tables[i]; if (!pt) { printk(KERN_INFO "pt[%d] is NULL, 0x%08x\n", i, v[i]); continue; } printk(KERN_INFO "pt[%d] is 0x%08x\n", i, v[i]); pt->v = kmap_atomic(pt->p, KM_USER0); if (!(pt->v)) { printk(KERN_INFO "%s: Kmap fail, abort\n", __func__); break; } for (j = 0; j < 1024; j++) { if (!(j%16)) printk(KERN_INFO "pte%d:", j); uint32_t pte = pt->v[j]; printk("%08xh ", pte); //if ((j%16) == 15) //printk(KERN_INFO "\n"); } kunmap_atomic(pt->v, KM_USER0); } spin_unlock_irqrestore(lock, flags); up_read(&pd->driver->sem); kunmap_atomic((void *) v, KM_USER0); printk(KERN_INFO "%s: finish dump mmu page table\n", __func__); } */ int psb_ttm_bo_clflush(struct psb_mmu_driver *mmu, struct ttm_buffer_object *bo) { int ret = 0; bool is_iomem; void *addr; struct ttm_bo_kmap_obj bo_kmap; if (unlikely(!mmu || !bo)) { DRM_ERROR("NULL pointer, mmu:%p bo:%p\n", mmu, bo); return 1; } /*map surface parameters*/ ret = ttm_bo_kmap(bo, 0, bo->num_pages, &bo_kmap); if (ret) { DRM_ERROR("ttm_bo_kmap failed: %d.\n", ret); return ret; } addr = (void *)ttm_kmap_obj_virtual(&bo_kmap, &is_iomem); if (unlikely(!addr)) { DRM_ERROR("failed to ttm_kmap_obj_virtual\n"); ret = 1; } psb_virtual_addr_clflush(mmu, addr, bo->num_pages); ttm_bo_kunmap(&bo_kmap); return ret; }
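/*
 * Illustrative sketch (not part of the driver): how a two-level walk like
 * psb_mmu_insert_pfn_sequence() above turns a 32-bit MMU offset into a
 * page-directory index, a page-table index and a page offset.  The
 * 10/10/12 split below is an assumption inferred from the code above
 * (1024-entry tables, 4 KiB pages, 32-bit PTEs), not the driver's
 * authoritative psb_mmu_pd_index()/psb_mmu_pt_index() definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12                       /* 4 KiB pages */
#define SKETCH_PD_SHIFT   (SKETCH_PAGE_SHIFT + 10) /* 1024 PTEs per table */

static uint32_t sketch_pd_index(uint32_t addr) { return addr >> SKETCH_PD_SHIFT; }
static uint32_t sketch_pt_index(uint32_t addr) { return (addr >> SKETCH_PAGE_SHIFT) & 0x3FF; }

int main(void)
{
	uint32_t addr = 0x12345678;
	printf("addr 0x%08x -> pd %u, pt %u, page offset 0x%03x\n",
	       addr, sketch_pd_index(addr), sketch_pt_index(addr),
	       addr & ((1u << SKETCH_PAGE_SHIFT) - 1));
	return 0;
}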
/* --- source footer: BenzoPlayer/kernel_asus_fugu,
 *     drivers/staging/imgtec/intel/video/common/psb_mmu.c (C, gpl-2.0, 29,451 bytes) --- */
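/*
 * Illustrative arithmetic (not part of the driver): why
 * psb_mmu_driver_init() above sets
 * driver->clflush_add = PAGE_SIZE * clflush_size / sizeof(uint32_t).
 * One CLFLUSH covers clflush_size bytes of PTEs, and each 32-bit PTE maps
 * one PAGE_SIZE page, so flushing a single cache line of PTEs covers
 * (clflush_size / 4) pages of *virtual* address space -- the stride the
 * flush loop in psb_mmu_flush_ptes() advances by.  Constants below are
 * the usual x86 values rather than values read from CPUID.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned long page_size    = 4096; /* 4 KiB pages */
	const unsigned long clflush_size = 64;   /* typical x86 line size, bytes */

	unsigned long ptes_per_line = clflush_size / sizeof(uint32_t); /* 16 */
	unsigned long clflush_add   = page_size * ptes_per_line;       /* 64 KiB of VA */
	unsigned long clflush_mask  = ~(clflush_add - 1UL);

	printf("one clflush of PTEs covers %lu KiB of mapped VA (mask 0x%lx)\n",
	       clflush_add / 1024, clflush_mask);
	return 0;
}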
/* * Copyright (C) 2011-2014 MediaTek Inc. * * This program is free software: you can redistribute it and/or modify it under the terms of the * GNU General Public License version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along with this program. * If not, see <http://www.gnu.org/licenses/>. */ #include <accdet_hal.h> #include <mach/mt_boot.h> #include <cust_eint.h> #include <cust_gpio_usage.h> #include <mach/mt_gpio.h> //#include "accdet_drv.h" static struct platform_driver accdet_driver; static int debug_enable_drv = 1; #define ACCDET_DEBUG_DRV(format, args...) do{ \ if(debug_enable_drv) \ {\ printk(KERN_WARNING format,##args);\ }\ }while(0) static long accdet_unlocked_ioctl(struct file *file, unsigned int cmd,unsigned long arg) { return mt_accdet_unlocked_ioctl(cmd, arg); } static int accdet_open(struct inode *inode, struct file *file) { return 0; } static int accdet_release(struct inode *inode, struct file *file) { return 0; } static struct file_operations accdet_fops = { .owner = THIS_MODULE, .unlocked_ioctl = accdet_unlocked_ioctl, .open = accdet_open, .release = accdet_release, }; struct file_operations *accdet_get_fops(void) { return &accdet_fops; } static int accdet_probe(struct platform_device *dev) { mt_accdet_probe(); return 0; } static int accdet_remove(struct platform_device *dev) { mt_accdet_remove(); return 0; } static int accdet_suspend(struct device *device) // wake up { mt_accdet_suspend(); return 0; } static int accdet_resume(struct device *device) // wake up { mt_accdet_resume(); return 0; } /********************************************************************** //add for IPO-H need update headset state when resume ***********************************************************************/ #ifdef CONFIG_PM static int accdet_pm_restore_noirq(struct device *device) { mt_accdet_pm_restore_noirq(); return 0; } static struct dev_pm_ops accdet_pm_ops = { .suspend = accdet_suspend, .resume = accdet_resume, .restore_noirq = accdet_pm_restore_noirq, }; #endif static struct platform_driver accdet_driver = { .probe = accdet_probe, //.suspend = accdet_suspend, //.resume = accdet_resume, .remove = accdet_remove, .driver = { .name = "Accdet_Driver", #ifdef CONFIG_PM .pm = &accdet_pm_ops, #endif }, }; struct platform_driver accdet_driver_func(void) { return accdet_driver; } static int accdet_mod_init(void) { int ret = 0; ACCDET_DEBUG_DRV("[Accdet]accdet_mod_init begin!\n"); //------------------------------------------------------------------ // Accdet PM //------------------------------------------------------------------ ret = platform_driver_register(&accdet_driver); if (ret) { ACCDET_DEBUG_DRV("[Accdet]platform_driver_register error:(%d)\n", ret); return ret; } else { ACCDET_DEBUG_DRV("[Accdet]platform_driver_register done!\n"); } ACCDET_DEBUG_DRV("[Accdet]accdet_mod_init done!\n"); return 0; } static void accdet_mod_exit(void) { ACCDET_DEBUG_DRV("[Accdet]accdet_mod_exit\n"); platform_driver_unregister(&accdet_driver); ACCDET_DEBUG_DRV("[Accdet]accdet_mod_exit Done!\n"); } /*Patch for CR ALPS00804150 & ALPS00804802 PMIC temp not correct issue*/ int accdet_cable_type_state(void) { //ACCDET_DEBUG("[ACCDET] accdet_cable_type_state=%d\n",accdet_get_cable_type()); return 
accdet_get_cable_type();
}
EXPORT_SYMBOL(accdet_cable_type_state);
/* Patch for CR ALPS00804150 & ALPS00804802 PMIC temp not correct issue */

module_init(accdet_mod_init);
module_exit(accdet_mod_exit);
module_param(debug_enable_drv, int, 0644);

MODULE_DESCRIPTION("MTK MT6588 ACCDET driver");
MODULE_AUTHOR("Anny <Anny.Hu@mediatek.com>");
MODULE_LICENSE("GPL");
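/*
 * Minimal sketch of the registration pattern the accdet driver above
 * relies on: a platform_driver whose PM callbacks live in a dev_pm_ops
 * table hooked up via .driver.pm (instead of the legacy .suspend/.resume
 * fields the driver has commented out).  module_platform_driver() is the
 * idiomatic shorthand for the explicit register/unregister pair in
 * accdet_mod_init()/accdet_mod_exit().  All "demo_" names are invented
 * for the sketch; the int-returning .remove prototype matches kernels of
 * this era.
 */
#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)  { return 0; }
static int demo_remove(struct platform_device *pdev) { return 0; }

#ifdef CONFIG_PM
static int demo_suspend(struct device *dev) { return 0; }
static int demo_resume(struct device *dev)  { return 0; }

static const struct dev_pm_ops demo_pm_ops = {
	.suspend = demo_suspend,
	.resume  = demo_resume,
};
#endif

static struct platform_driver demo_driver = {
	.probe  = demo_probe,
	.remove = demo_remove,
	.driver = {
		.name = "demo_driver",
#ifdef CONFIG_PM
		.pm = &demo_pm_ops,
#endif
	},
};
module_platform_driver(demo_driver);

MODULE_DESCRIPTION("Sketch of the platform driver + dev_pm_ops pattern");
MODULE_LICENSE("GPL");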
/* --- source footer: vredniiy/sprout,
 *     drivers/misc/mediatek/accdet/accdet_drv.c (C, gpl-2.0, 4,055 bytes) --- */
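/*
 * Hypothetical user-space check of the module's debug knob.
 * module_param(debug_enable_drv, int, 0644) above exposes the value under
 * /sys/module/<module>/parameters/; the module name "accdet_drv" below is
 * an assumption taken from the file name and may differ in a real build.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/module/accdet_drv/parameters/debug_enable_drv";
	FILE *f = fopen(path, "r");
	int val;

	if (!f || fscanf(f, "%d", &val) != 1) {
		perror(path);
		if (f)
			fclose(f);
		return 1;
	}
	printf("debug_enable_drv = %d\n", val);
	fclose(f);
	return 0;
}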
/* * linux/kernel/seccomp.c * * Copyright 2004-2005 Andrea Arcangeli <andrea@cpushare.com> * * Copyright (C) 2012 Google, Inc. * Will Drewry <wad@chromium.org> * * This defines a simple but solid secure-computing facility. * * Mode 1 uses a fixed list of allowed system calls. * Mode 2 allows user-defined system call filters in the form * of Berkeley Packet Filters/Linux Socket Filters. */ #include <linux/atomic.h> #include <linux/audit.h> #include <linux/compat.h> #include <linux/sched.h> #include <linux/seccomp.h> #include <linux/slab.h> #include <linux/syscalls.h> /* #define SECCOMP_DEBUG 1 */ #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER #include <asm/syscall.h> #endif #ifdef CONFIG_SECCOMP_FILTER #include <linux/filter.h> #include <linux/pid.h> #include <linux/ptrace.h> #include <linux/security.h> #include <linux/tracehook.h> #include <linux/uaccess.h> /** * struct seccomp_filter - container for seccomp BPF programs * * @usage: reference count to manage the object lifetime. * get/put helpers should be used when accessing an instance * outside of a lifetime-guarded section. In general, this * is only needed for handling filters shared across tasks. * @prev: points to a previously installed, or inherited, filter * @len: the number of instructions in the program * @insns: the BPF program instructions to evaluate * * seccomp_filter objects are organized in a tree linked via the @prev * pointer. For any task, it appears to be a singly-linked list starting * with current->seccomp.filter, the most recently attached or inherited filter. * However, multiple filters may share a @prev node, by way of fork(), which * results in a unidirectional tree existing in memory. This is similar to * how namespaces work. * * seccomp_filter objects should never be modified after being attached * to a task_struct (other than @usage). */ struct seccomp_filter { atomic_t usage; struct seccomp_filter *prev; unsigned short len; /* Instruction count */ struct sock_filter insns[]; }; /* Limit any path through the tree to 256KB worth of instructions. */ #define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter)) /** * get_u32 - returns a u32 offset into data * @data: a unsigned 64 bit value * @index: 0 or 1 to return the first or second 32-bits * * This inline exists to hide the length of unsigned long. If a 32-bit * unsigned long is passed in, it will be extended and the top 32-bits will be * 0. If it is a 64-bit unsigned long, then whatever data is resident will be * properly returned. * * Endianness is explicitly ignored and left for BPF program authors to manage * as per the specific architecture. */ static inline u32 get_u32(u64 data, int index) { return ((u32 *)&data)[index]; } /* Helper for bpf_load below. */ #define BPF_DATA(_name) offsetof(struct seccomp_data, _name) /** * bpf_load: checks and returns a pointer to the requested offset * @off: offset into struct seccomp_data to load from * * Returns the requested 32-bits of data. * seccomp_check_filter() should assure that @off is 32-bit aligned * and not out of bounds. Failure to do so is a BUG. 
*/ u32 seccomp_bpf_load(int off) { struct pt_regs *regs = task_pt_regs(current); if (off == BPF_DATA(nr)) return syscall_get_nr(current, regs); if (off == BPF_DATA(arch)) return syscall_get_arch(); if (off >= BPF_DATA(args[0]) && off < BPF_DATA(args[6])) { unsigned long value; int arg = (off - BPF_DATA(args[0])) / sizeof(u64); int index = !!(off % sizeof(u64)); syscall_get_arguments(current, regs, arg, 1, &value); return get_u32(value, index); } if (off == BPF_DATA(instruction_pointer)) return get_u32(KSTK_EIP(current), 0); if (off == BPF_DATA(instruction_pointer) + sizeof(u32)) return get_u32(KSTK_EIP(current), 1); /* seccomp_check_filter should make this impossible. */ BUG(); } /** * seccomp_check_filter - verify seccomp filter code * @filter: filter to verify * @flen: length of filter * * Takes a previously checked filter (by sk_chk_filter) and * redirects all filter code that loads struct sk_buff data * and related data through seccomp_bpf_load. It also * enforces length and alignment checking of those loads. * * Returns 0 if the rule set is legal or -EINVAL if not. */ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen) { int pc; for (pc = 0; pc < flen; pc++) { struct sock_filter *ftest = &filter[pc]; u16 code = ftest->code; u32 k = ftest->k; switch (code) { case BPF_S_LD_W_ABS: ftest->code = BPF_S_ANC_SECCOMP_LD_W; /* 32-bit aligned and not out of bounds. */ if (k >= sizeof(struct seccomp_data) || k & 3) return -EINVAL; continue; case BPF_S_LD_W_LEN: ftest->code = BPF_S_LD_IMM; ftest->k = sizeof(struct seccomp_data); continue; case BPF_S_LDX_W_LEN: ftest->code = BPF_S_LDX_IMM; ftest->k = sizeof(struct seccomp_data); continue; /* Explicitly include allowed calls. */ case BPF_S_RET_K: case BPF_S_RET_A: case BPF_S_ALU_ADD_K: case BPF_S_ALU_ADD_X: case BPF_S_ALU_SUB_K: case BPF_S_ALU_SUB_X: case BPF_S_ALU_MUL_K: case BPF_S_ALU_MUL_X: case BPF_S_ALU_DIV_X: case BPF_S_ALU_AND_K: case BPF_S_ALU_AND_X: case BPF_S_ALU_OR_K: case BPF_S_ALU_OR_X: case BPF_S_ALU_XOR_K: case BPF_S_ALU_XOR_X: case BPF_S_ALU_LSH_K: case BPF_S_ALU_LSH_X: case BPF_S_ALU_RSH_K: case BPF_S_ALU_RSH_X: case BPF_S_ALU_NEG: case BPF_S_LD_IMM: case BPF_S_LDX_IMM: case BPF_S_MISC_TAX: case BPF_S_MISC_TXA: case BPF_S_ALU_DIV_K: case BPF_S_LD_MEM: case BPF_S_LDX_MEM: case BPF_S_ST: case BPF_S_STX: case BPF_S_JMP_JA: case BPF_S_JMP_JEQ_K: case BPF_S_JMP_JEQ_X: case BPF_S_JMP_JGE_K: case BPF_S_JMP_JGE_X: case BPF_S_JMP_JGT_K: case BPF_S_JMP_JGT_X: case BPF_S_JMP_JSET_K: case BPF_S_JMP_JSET_X: continue; default: return -EINVAL; } } return 0; } /** * seccomp_run_filters - evaluates all seccomp filters against @syscall * @syscall: number of the current system call * * Returns valid seccomp BPF response codes. */ static u32 seccomp_run_filters(void) { struct seccomp_filter *f = ACCESS_ONCE(current->seccomp.filter); u32 ret = SECCOMP_RET_ALLOW; /* Ensure unexpected behavior doesn't result in failing open. */ if (unlikely(WARN_ON(f == NULL))) return SECCOMP_RET_KILL; /* Make sure cross-thread synced filter points somewhere sane. */ smp_read_barrier_depends(); /* * All filters in the list are evaluated and the lowest BPF return * value always takes priority (ignoring the DATA). 
*/ for (; f; f = f->prev) { u32 cur_ret = sk_run_filter(NULL, f->insns); if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION)) ret = cur_ret; } return ret; } #endif /* CONFIG_SECCOMP_FILTER */ static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode) { assert_spin_locked(&current->sighand->siglock); if (current->seccomp.mode && current->seccomp.mode != seccomp_mode) return false; return true; } static inline void seccomp_assign_mode(struct task_struct *task, unsigned long seccomp_mode) { assert_spin_locked(&task->sighand->siglock); task->seccomp.mode = seccomp_mode; /* * Make sure TIF_SECCOMP cannot be set before the mode (and * filter) is set. */ smp_mb(); set_tsk_thread_flag(task, TIF_SECCOMP); } #ifdef CONFIG_SECCOMP_FILTER /* Returns 1 if the parent is an ancestor of the child. */ static int is_ancestor(struct seccomp_filter *parent, struct seccomp_filter *child) { /* NULL is the root ancestor. */ if (parent == NULL) return 1; for (; child; child = child->prev) if (child == parent) return 1; return 0; } /** * seccomp_can_sync_threads: checks if all threads can be synchronized * * Expects sighand and cred_guard_mutex locks to be held. * * Returns 0 on success, -ve on error, or the pid of a thread which was * either not in the correct seccomp mode or it did not have an ancestral * seccomp filter. */ static inline pid_t seccomp_can_sync_threads(void) { struct task_struct *thread, *caller; BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex)); assert_spin_locked(&current->sighand->siglock); /* Validate all threads being eligible for synchronization. */ caller = current; for_each_thread(caller, thread) { pid_t failed; /* Skip current, since it is initiating the sync. */ if (thread == caller) continue; if (thread->seccomp.mode == SECCOMP_MODE_DISABLED || (thread->seccomp.mode == SECCOMP_MODE_FILTER && is_ancestor(thread->seccomp.filter, caller->seccomp.filter))) continue; /* Return the first thread that cannot be synchronized. */ failed = task_pid_vnr(thread); /* If the pid cannot be resolved, then return -ESRCH */ if (unlikely(WARN_ON(failed == 0))) failed = -ESRCH; return failed; } return 0; } /** * seccomp_sync_threads: sets all threads to use current's filter * * Expects sighand and cred_guard_mutex locks to be held, and for * seccomp_can_sync_threads() to have returned success already * without dropping the locks. * */ static inline void seccomp_sync_threads(void) { struct task_struct *thread, *caller; BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex)); assert_spin_locked(&current->sighand->siglock); /* Synchronize all threads. */ caller = current; for_each_thread(caller, thread) { /* Skip current, since it needs no changes. */ if (thread == caller) continue; /* Get a task reference for the new leaf node. */ get_seccomp_filter(caller); /* * Drop the task reference to the shared ancestor since * current's path will hold a reference. (This also * allows a put before the assignment.) */ put_seccomp_filter(thread); smp_store_release(&thread->seccomp.filter, caller->seccomp.filter); /* * Opt the other thread into seccomp if needed. * As threads are considered to be trust-realm * equivalent (see ptrace_may_access), it is safe to * allow one thread to transition the other. */ if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) { /* * Don't let an unprivileged task work around * the no_new_privs restriction by creating * a thread that sets it up, enters seccomp, * then dies. 
*/ if (task_no_new_privs(caller)) task_set_no_new_privs(thread); seccomp_assign_mode(thread, SECCOMP_MODE_FILTER); } } } /** * seccomp_prepare_filter: Prepares a seccomp filter for use. * @fprog: BPF program to install * * Returns filter on success or an ERR_PTR on failure. */ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog) { struct seccomp_filter *filter; unsigned long fp_size = fprog->len * sizeof(struct sock_filter); unsigned long total_insns = fprog->len; long ret; if (fprog->len == 0 || fprog->len > BPF_MAXINSNS) return ERR_PTR(-EINVAL); BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter)); for (filter = current->seccomp.filter; filter; filter = filter->prev) total_insns += filter->len + 4; /* include a 4 instr penalty */ if (total_insns > MAX_INSNS_PER_PATH) return ERR_PTR(-ENOMEM); /* * Installing a seccomp filter requires that the task have * CAP_SYS_ADMIN in its namespace or be running with no_new_privs. * This avoids scenarios where unprivileged tasks can affect the * behavior of privileged children. */ if (!task_no_new_privs(current) && security_capable_noaudit(current_cred(), current_user_ns(), CAP_SYS_ADMIN) != 0) return ERR_PTR(-EACCES); /* Allocate a new seccomp_filter */ filter = kzalloc(sizeof(struct seccomp_filter) + fp_size, GFP_KERNEL|__GFP_NOWARN); if (!filter) return ERR_PTR(-ENOMEM);; atomic_set(&filter->usage, 1); filter->len = fprog->len; /* Copy the instructions from fprog. */ ret = -EFAULT; if (copy_from_user(filter->insns, fprog->filter, fp_size)) goto fail; /* Check and rewrite the fprog via the skb checker */ ret = sk_chk_filter(filter->insns, filter->len); if (ret) goto fail; /* Check and rewrite the fprog for seccomp use */ ret = seccomp_check_filter(filter->insns, filter->len); if (ret) goto fail; return filter; fail: kfree(filter); return ERR_PTR(ret); } /** * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog * @user_filter: pointer to the user data containing a sock_fprog. * * Returns 0 on success and non-zero otherwise. */ static struct seccomp_filter * seccomp_prepare_user_filter(const char __user *user_filter) { struct sock_fprog fprog; struct seccomp_filter *filter = ERR_PTR(-EFAULT); #ifdef CONFIG_COMPAT if (is_compat_task()) { struct compat_sock_fprog fprog32; if (copy_from_user(&fprog32, user_filter, sizeof(fprog32))) goto out; fprog.len = fprog32.len; fprog.filter = compat_ptr(fprog32.filter); } else /* falls through to the if below. */ #endif if (copy_from_user(&fprog, user_filter, sizeof(fprog))) goto out; filter = seccomp_prepare_filter(&fprog); out: return filter; } /** * seccomp_attach_filter: validate and attach filter * @flags: flags to change filter behavior * @filter: seccomp filter to add to the current process * * Caller must be holding current->sighand->siglock lock. * * Returns 0 on success, -ve on error. */ static long seccomp_attach_filter(unsigned int flags, struct seccomp_filter *filter) { unsigned long total_insns; struct seccomp_filter *walker; assert_spin_locked(&current->sighand->siglock); /* Validate resulting filter length. */ total_insns = filter->len; for (walker = current->seccomp.filter; walker; walker = walker->prev) total_insns += walker->len + 4; /* 4 instr penalty */ if (total_insns > MAX_INSNS_PER_PATH) return -ENOMEM; /* If thread sync has been requested, check that it is possible. 
*/ if (flags & SECCOMP_FILTER_FLAG_TSYNC) { int ret; ret = seccomp_can_sync_threads(); if (ret) return ret; } /* * If there is an existing filter, make it the prev and don't drop its * task reference. */ filter->prev = current->seccomp.filter; current->seccomp.filter = filter; /* Now that the new filter is in place, synchronize to all threads. */ if (flags & SECCOMP_FILTER_FLAG_TSYNC) seccomp_sync_threads(); return 0; } /* get_seccomp_filter - increments the reference count of the filter on @tsk */ void get_seccomp_filter(struct task_struct *tsk) { struct seccomp_filter *orig = tsk->seccomp.filter; if (!orig) return; /* Reference count is bounded by the number of total processes. */ atomic_inc(&orig->usage); } static inline void seccomp_filter_free(struct seccomp_filter *filter) { if (filter) { kfree(filter); } } /* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */ void put_seccomp_filter(struct task_struct *tsk) { struct seccomp_filter *orig = tsk->seccomp.filter; /* Clean up single-reference branches iteratively. */ while (orig && atomic_dec_and_test(&orig->usage)) { struct seccomp_filter *freeme = orig; orig = orig->prev; seccomp_filter_free(freeme); } } /** * seccomp_send_sigsys - signals the task to allow in-process syscall emulation * @syscall: syscall number to send to userland * @reason: filter-supplied reason code to send to userland (via si_errno) * * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info. */ static void seccomp_send_sigsys(int syscall, int reason) { struct siginfo info; memset(&info, 0, sizeof(info)); info.si_signo = SIGSYS; info.si_code = SYS_SECCOMP; info.si_call_addr = (void __user *)KSTK_EIP(current); info.si_errno = reason; info.si_arch = syscall_get_arch(); info.si_syscall = syscall; force_sig_info(SIGSYS, &info, current); } #endif /* CONFIG_SECCOMP_FILTER */ /* * Secure computing mode 1 allows only read/write/exit/sigreturn. * To be fully secure this must be combined with rlimit * to limit the stack allocations too. */ static int mode1_syscalls[] = { __NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn, 0, /* null terminated */ }; #ifdef CONFIG_COMPAT static int mode1_syscalls_32[] = { __NR_seccomp_read_32, __NR_seccomp_write_32, __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32, 0, /* null terminated */ }; #endif static void __secure_computing_strict(int this_syscall) { int *syscall_whitelist = mode1_syscalls; #ifdef CONFIG_COMPAT if (is_compat_task()) syscall_whitelist = mode1_syscalls_32; #endif do { if (*syscall_whitelist == this_syscall) return; } while (*++syscall_whitelist); #ifdef SECCOMP_DEBUG dump_stack(); #endif audit_seccomp(this_syscall, SIGKILL, SECCOMP_RET_KILL); do_exit(SIGKILL); } #ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER void secure_computing_strict(int this_syscall) { int mode = current->seccomp.mode; if (mode == 0) return; else if (mode == SECCOMP_MODE_STRICT) __secure_computing_strict(this_syscall); else BUG(); } #else int __secure_computing(void) { struct pt_regs *regs = task_pt_regs(current); int this_syscall = syscall_get_nr(current, regs); int exit_sig = 0; u32 ret; /* * Make sure that any changes to mode from another thread have * been seen after TIF_SECCOMP was seen. 
*/ rmb(); switch (current->seccomp.mode) { case SECCOMP_MODE_STRICT: __secure_computing_strict(this_syscall); return 0; #ifdef CONFIG_SECCOMP_FILTER case SECCOMP_MODE_FILTER: { int data; ret = seccomp_run_filters(); data = ret & SECCOMP_RET_DATA; ret &= SECCOMP_RET_ACTION; switch (ret) { case SECCOMP_RET_ERRNO: /* Set low-order bits as an errno, capped at MAX_ERRNO. */ if (data > MAX_ERRNO) data = MAX_ERRNO; syscall_set_return_value(current, regs, -data, 0); goto skip; case SECCOMP_RET_TRAP: /* Show the handler the original registers. */ syscall_rollback(current, regs); /* Let the filter pass back 16 bits of data. */ seccomp_send_sigsys(this_syscall, data); goto skip; case SECCOMP_RET_TRACE: /* Skip these calls if there is no tracer. */ if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) { syscall_set_return_value(current, regs, -ENOSYS, 0); goto skip; } /* Allow the BPF to provide the event message */ ptrace_event(PTRACE_EVENT_SECCOMP, data); /* * The delivery of a fatal signal during event * notification may silently skip tracer notification. * Terminating the task now avoids executing a system * call that may not be intended. */ if (fatal_signal_pending(current)) break; if (syscall_get_nr(current, regs) < 0) goto skip; /* Explicit request to skip. */ return 0; case SECCOMP_RET_ALLOW: return 0; case SECCOMP_RET_KILL: default: break; } exit_sig = SIGSYS; break; } #endif default: BUG(); } #ifdef SECCOMP_DEBUG dump_stack(); #endif audit_seccomp(this_syscall, exit_sig, ret); do_exit(exit_sig); #ifdef CONFIG_SECCOMP_FILTER skip: audit_seccomp(this_syscall, exit_sig, ret); return -1; #endif } #endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */ long prctl_get_seccomp(void) { return current->seccomp.mode; } /** * seccomp_set_mode_strict: internal function for setting strict seccomp * * Once current->seccomp.mode is non-zero, it may not be changed. * * Returns 0 on success or -EINVAL on failure. */ static long seccomp_set_mode_strict(void) { const unsigned long seccomp_mode = SECCOMP_MODE_STRICT; long ret = -EINVAL; spin_lock_irq(&current->sighand->siglock); if (!seccomp_may_assign_mode(seccomp_mode)) goto out; #ifdef TIF_NOTSC disable_TSC(); #endif seccomp_assign_mode(current, seccomp_mode); ret = 0; out: spin_unlock_irq(&current->sighand->siglock); return ret; } #ifdef CONFIG_SECCOMP_FILTER /** * seccomp_set_mode_filter: internal function for setting seccomp filter * @flags: flags to change filter behavior * @filter: struct sock_fprog containing filter * * This function may be called repeatedly to install additional filters. * Every filter successfully installed will be evaluated (in reverse order) * for each system call the task makes. * * Once current->seccomp.mode is non-zero, it may not be changed. * * Returns 0 on success or -EINVAL on failure. */ static long seccomp_set_mode_filter(unsigned int flags, const char __user *filter) { const unsigned long seccomp_mode = SECCOMP_MODE_FILTER; struct seccomp_filter *prepared = NULL; long ret = -EINVAL; /* Validate flags. */ if (flags & ~SECCOMP_FILTER_FLAG_MASK) return -EINVAL; /* Prepare the new filter before holding any locks. */ prepared = seccomp_prepare_user_filter(filter); if (IS_ERR(prepared)) return PTR_ERR(prepared); /* * Make sure we cannot change seccomp or nnp state via TSYNC * while another thread is in the middle of calling exec. 
*/ if (flags & SECCOMP_FILTER_FLAG_TSYNC && mutex_lock_killable(&current->signal->cred_guard_mutex)) goto out_free; spin_lock_irq(&current->sighand->siglock); if (!seccomp_may_assign_mode(seccomp_mode)) goto out; ret = seccomp_attach_filter(flags, prepared); if (ret) goto out; /* Do not free the successfully attached filter. */ prepared = NULL; seccomp_assign_mode(current, seccomp_mode); out: spin_unlock_irq(&current->sighand->siglock); if (flags & SECCOMP_FILTER_FLAG_TSYNC) mutex_unlock(&current->signal->cred_guard_mutex); out_free: seccomp_filter_free(prepared); return ret; } #else static inline long seccomp_set_mode_filter(unsigned int flags, const char __user *filter) { return -EINVAL; } #endif /* Common entry point for both prctl and syscall. */ static long do_seccomp(unsigned int op, unsigned int flags, const char __user *uargs) { switch (op) { case SECCOMP_SET_MODE_STRICT: if (flags != 0 || uargs != NULL) return -EINVAL; return seccomp_set_mode_strict(); case SECCOMP_SET_MODE_FILTER: return seccomp_set_mode_filter(flags, uargs); default: return -EINVAL; } } SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags, const char __user *, uargs) { return do_seccomp(op, flags, uargs); } /** * prctl_set_seccomp: configures current->seccomp.mode * @seccomp_mode: requested mode to use * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER * * Returns 0 on success or -EINVAL on failure. */ long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter) { unsigned int op; char __user *uargs; switch (seccomp_mode) { case SECCOMP_MODE_STRICT: op = SECCOMP_SET_MODE_STRICT; /* * Setting strict mode through prctl always ignored filter, * so make sure it is always NULL here to pass the internal * check in do_seccomp(). */ uargs = NULL; break; case SECCOMP_MODE_FILTER: op = SECCOMP_SET_MODE_FILTER; uargs = filter; break; default: return -EINVAL; } /* prctl interface doesn't have flags, so they are always zero. */ return do_seccomp(op, 0, uargs); }
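/*
 * User-space sketch exercising the filter mode implemented above: install
 * a classic-BPF program that answers SECCOMP_RET_ERRNO(EPERM) for one
 * syscall and SECCOMP_RET_ALLOW for everything else.  PR_SET_NO_NEW_PRIVS
 * is set first because seccomp_prepare_filter() above rejects
 * unprivileged installs otherwise.  Assumes reasonably recent UAPI
 * headers and x86-64 syscall numbers; a production filter would also
 * check seccomp_data.arch before trusting the syscall number.
 */
#include <errno.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct sock_filter insns[] = {
		/* A <- seccomp_data.nr (the syscall number) */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		/* if (nr == __NR_getpid) fall through to deny, else skip to allow */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET | BPF_K,
			 SECCOMP_RET_ERRNO | (EPERM & SECCOMP_RET_DATA)),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len    = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};

	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		perror("PR_SET_NO_NEW_PRIVS");
		return 1;
	}
	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) {
		perror("PR_SET_SECCOMP");
		return 1;
	}

	/* getpid() now fails with EPERM: the lowest action value wins. */
	errno = 0;
	printf("getpid() -> %ld (errno %d)\n", (long)getpid(), errno);
	return 0;
}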
/* --- source footer: Elite-Kernels/Elite_angler,
 *     kernel/seccomp.c (C, gpl-2.0, 22,899 bytes) --- */
/*--------------------------------------------------------------------*/ /*--- Platform-specific syscalls stuff. syswrap-ppc64-aix5.c ---*/ /*--------------------------------------------------------------------*/ /* This file is part of Valgrind, a dynamic binary instrumentation framework. Copyright (C) 2006-2010 OpenWorks LLP info@open-works.co.uk This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. The GNU General Public License is contained in the file COPYING. Neither the names of the U.S. Department of Energy nor the University of California nor the names of its contributors may be used to endorse or promote products derived from this software without prior written permission. */ #if defined(VGP_ppc64_aix5) #include "pub_core_basics.h" #include "pub_core_vki.h" #include "pub_core_vkiscnums.h" #include "pub_core_threadstate.h" #include "pub_core_debuglog.h" #include "pub_core_libcassert.h" #include "pub_core_libcprint.h" #include "pub_core_libcproc.h" #include "pub_core_options.h" #include "pub_core_scheduler.h" #include "pub_core_sigframe.h" // For VG_(sigframe_destroy)() #include "pub_core_signals.h" #include "pub_core_syscall.h" #include "pub_core_syswrap.h" #include "pub_core_tooliface.h" #include "priv_types_n_macros.h" #include "priv_syswrap-aix5.h" /* for decls of aix5-common wrappers */ #include "priv_syswrap-main.h" /* --------- HACKS --------- */ /* XXXXXXXXXXXX these HACKS are copies of stuff in syswrap-linux.c; check for duplication. */ /* HACK: is in syswrap-generic.c, but that doesn't get build on AIX. */ /* Dump out a summary, and a more detailed list, of open file descriptors. */ void VG_(show_open_fds) ( void ) { I_die_here; } static Bool i_am_the_only_thread ( void ) { Int c = VG_(count_living_threads)(); vg_assert(c >= 1); /* stay sane */ return c == 1; } void VG_(reap_threads)(ThreadId self) { while (!i_am_the_only_thread()) { /* Let other thread(s) run */ VG_(vg_yield)(); VG_(poll_signals)(self); } vg_assert(i_am_the_only_thread()); } void VG_(init_preopened_fds) ( void ) { I_die_here; } // Run a thread from beginning to end and return the thread's // scheduler-return-code. 
static VgSchedReturnCode thread_wrapper(Word /*ThreadId*/ tidW) { VgSchedReturnCode ret; ThreadId tid = (ThreadId)tidW; ThreadState* tst = VG_(get_ThreadState)(tid); VG_(debugLog)(1, "syswrap-aix64", "thread_wrapper(tid=%lld): entry\n", (ULong)tidW); vg_assert(tst->status == VgTs_Init); /* make sure we get the CPU lock before doing anything significant */ VG_(acquire_BigLock)(tid, "thread_wrapper(starting new thread)"); if (0) VG_(printf)("thread tid %d started: stack = %p\n", tid, &tid); VG_TRACK( pre_thread_first_insn, tid ); tst->os_state.lwpid = VG_(gettid)(); tst->os_state.threadgroup = VG_(getpid)(); /* Thread created with all signals blocked; scheduler will set the appropriate mask */ ret = VG_(scheduler)(tid); vg_assert(VG_(is_exiting)(tid)); vg_assert(tst->status == VgTs_Runnable); vg_assert(VG_(is_running_thread)(tid)); VG_(debugLog)(1, "syswrap-aix64", "thread_wrapper(tid=%lld): exit\n", (ULong)tidW); /* Return to caller, still holding the lock. */ return ret; } /* Run a thread all the way to the end, then do appropriate exit actions (this is the last-one-out-turn-off-the-lights bit). */ static void run_a_thread_NORETURN ( Word tidW ) { ThreadId tid = (ThreadId)tidW; VgSchedReturnCode src; Int c; VG_(debugLog)(1, "syswrap-aix64", "run_a_thread_NORETURN(tid=%lld): pre-thread_wrapper\n", (ULong)tidW); /* Run the thread all the way through. */ src = thread_wrapper(tid); VG_(debugLog)(1, "syswrap-aix64", "run_a_thread_NORETURN(tid=%lld): post-thread_wrapper\n", (ULong)tidW); c = VG_(count_living_threads)(); vg_assert(c >= 1); /* stay sane */ vg_assert(src == VgSrc_ExitThread || src == VgSrc_ExitProcess || src == VgSrc_FatalSig); if (c == 1 || src == VgSrc_ExitProcess) { VG_(debugLog)(1, "syswrap-aix64", "run_a_thread_NORETURN(tid=%lld): " "exit process (%d threads remaining)\n", (ULong)tidW, c); /* We are the last one standing. Keep hold of the lock and carry on to show final tool results, then exit the entire system. Use the continuation pointer set at startup in m_main. */ ( * VG_(address_of_m_main_shutdown_actions_NORETURN) ) (tid, src); } else { ThreadState *tst; VG_(debugLog)(1, "syswrap-aix64", "run_a_thread_NORETURN(tid=%lld): " "not last one standing\n", (ULong)tidW); /* OK, thread is dead, but others still exist. Just exit. */ vg_assert(c >= 2); tst = VG_(get_ThreadState)(tid); /* This releases the run lock */ VG_(exit_thread)(tid); vg_assert(tst->status == VgTs_Zombie); /* We have to use this sequence to terminate the thread to prevent a subtle race. If VG_(exit_thread)() had left the ThreadState as Empty, then it could have been reallocated, reusing the stack while we're doing these last cleanups. Instead, VG_(exit_thread) leaves it as Zombie to prevent reallocation. We need to make sure we don't touch the stack between marking it Empty and exiting. Hence the assembler. */ { ULong block[4]; vg_assert(sizeof(tst->status == 8)); vg_assert(__NR_AIX5_thread_terminate != __NR_AIX5_UNKNOWN); block[0] = (ULong)VgTs_Empty; block[1] = (ULong) & (tst->status); block[2] = (ULong) tst->os_state.exitcode; block[3] = __NR_AIX5_thread_terminate; asm volatile ( "mr 29,%0\n\t" /* r29 = &block[0] */ "ld 20, 0(29)\n\t" /* r20 = VgTs_Empty */ "ld 21, 8(29)\n\t" /* r21 = & (tst->status) */ "ld 22, 16(29)\n\t" /* r22 = tst->os_state.exitcode */ "ld 23, 24(29)\n\t" /* r23 = __NR_exit */ /* after this point we can't safely use the stack. 
*/ "std 20, 0(21)\n\t" /* tst->status = VgTs_Empty */ "mr 2,23\n\t" /* r2 = __NR_exit */ "mr 3,22\n\t" /* set r3 = tst->os_state.exitcode */ /* set up for syscall */ "crorc 6,6,6\n\t" ".long 0x48000005\n\t" /* "bl here+4" */ "mflr 29\n\t" "addi 29,29,16\n\t" "mtlr 29\n\t" "sc\n\t" /* exit(tst->os_state.exitcode) */ : : "b" (&block[0]) : "lr", "memory", "r2", "r3", "r20", "r21", "r22", "r23", "r29" ); } VG_(core_panic)("Thread exit failed?\n"); } /*NOTREACHED*/ vg_assert(0); } static Word start_thread_NORETURN ( void* arg ) { ThreadState* tst = (ThreadState*)arg; ThreadId tid = tst->tid; run_a_thread_NORETURN ( (Word)tid ); /*NOTREACHED*/ vg_assert(0); } /* Call f(arg1), but first switch stacks, using 'stack' as the new stack. f itself needs to never return. */ __attribute__((noreturn)) static void call_on_new_stack_0_1_NORETURN ( Addr stack, void (*f_NORETURN)(Word), Word arg1 ) { UWord* fdescr = (UWord*)f_NORETURN; volatile UWord block[5]; block[0] = fdescr[0]; /* nia */ block[1] = stack; /* r1 */ block[2] = fdescr[1]; /* r2 */ block[3] = arg1; /* r3 */ block[4] = fdescr[2]; /* r11 */ __asm__ __volatile__( "mr 4,%0\n\t" /* r4 = block */ "ld 1, 8(4)\n\t" "ld 2, 16(4)\n\t" "ld 3, 24(4)\n\t" "ld 11,32(4)\n\t" "ld 4, 0(4)\n\t" "mtctr 4\n\t" "bctr\n" : /*out*/ : /*in*/ "b"(&block[0]) ); /*NOTREACHED*/ __asm__ __volatile__("trap"); while (1) {} /* convince gcc that this really doesn't return */ } /* Allocate a stack for the main thread, and run it all the way to the end. Although we already have a working VgStack (VG_(interim_stack)) it's better to allocate a new one, so that overflow detection works uniformly for all threads. */ void VG_(main_thread_wrapper_NORETURN)(ThreadId tid) { Addr sp; VG_(debugLog)(1, "syswrap-aix64", "entering VG_(main_thread_wrapper_NORETURN)\n"); sp = ML_(allocstack)(tid); /* If we can't even allocate the first thread's stack, we're hosed. Give up. */ vg_assert2(sp != 0, "Cannot allocate main thread's stack."); /* shouldn't be any other threads around yet */ vg_assert( VG_(count_living_threads)() == 1 ); /* make a stack frame */ sp -= 16; sp &= ~0xF; *(UWord *)sp = 0; call_on_new_stack_0_1_NORETURN( (Addr)sp, /* stack */ run_a_thread_NORETURN, /* fn to call */ (Word)tid /* arg to give it */ ); /*NOTREACHED*/ vg_assert(0); } /* --------- end HACKS --------- */ /* --------------------------------------------------------------------- More thread stuff ------------------------------------------------------------------ */ void VG_(cleanup_thread) ( ThreadArchState* arch ) { } /* --------------------------------------------------------------------- PRE/POST wrappers for ppc64/AIX5-specific syscalls ------------------------------------------------------------------ */ /* --- !!! --- EXTERNAL HEADERS start --- !!! --- */ #include <sys/thread.h> /* --- !!! --- EXTERNAL HEADERS end --- !!! --- */ /* Add prototypes for the wrappers declared here, so that gcc doesn't harass us for not having prototypes. Really this is a kludge -- the right thing to do is to make these wrappers 'static' since they aren't visible outside this file, but that requires even more macro magic. 
*/ #define PRE(name) DEFN_PRE_TEMPLATE(ppc64_aix5, name) #define POST(name) DEFN_POST_TEMPLATE(ppc64_aix5, name) DECL_TEMPLATE(ppc64_aix5, sys__clock_gettime); DECL_TEMPLATE(ppc64_aix5, sys__fp_fpscrx64_); DECL_TEMPLATE(ppc64_aix5, sys_kload); DECL_TEMPLATE(ppc64_aix5, sys_kunload64); DECL_TEMPLATE(ppc64_aix5, sys_thread_setstate); DECL_TEMPLATE(ppc64_aix5, sys_FAKE_SIGRETURN); PRE(sys__clock_gettime) { /* Seems like ARG2 points at a destination buffer? */ /* _clock_gettime (UNDOCUMENTED) ( 0, 0xA, 0x2FF21808 ) */ PRINT("_clock_gettime (UNDOCUMENTED) ( %ld, %#lx, %#lx )", ARG1, ARG2, ARG3 ); PRE_REG_READ3(int, "_clock_gettime", int, arg1, int, arg2, void*, arg3); PRE_MEM_WRITE( "_clock_gettime(dst)", ARG2, sizeof(struct timespec) ); } POST(sys__clock_gettime) { vg_assert(SUCCESS); POST_MEM_WRITE( ARG2, sizeof(struct timespec) ); } PRE(sys__fp_fpscrx64_) { PRINT("_fp_fpscrx64_ (BOGUS HANDLER)"); } PRE(sys_kload) { PRINT("kload (UNDOCUMENTED)( %#lx(%s), %ld, %ld )", ARG1,(Char*)ARG1, ARG2, ARG3 ); PRE_REG_READ3(void*, "kload", char*, name, long, arg2, char*, arg3); } POST(sys_kload) { vg_assert(SUCCESS); if (0) VG_(printf)("kload result = %#lx\n", RES); if (RES) POST_MEM_WRITE( RES, 64 ); ML_(aix5_rescan_procmap_after_load_or_unload)(); } PRE(sys_kunload64) { PRINT("kunload64 (UNDOCUMENTED)( %#lx, %ld, %ld, %#lx )", ARG1, ARG2, ARG3, ARG4 ); PRE_REG_READ4(long, "kunload64", void*, arg1, long, arg2, long, arg3, void*, arg4); } POST(sys_kunload64) { vg_assert(SUCCESS); ML_(aix5_rescan_procmap_after_load_or_unload)(); } PRE(sys_thread_setstate) { UWord dst_lwpid = (UWord)ARG1; struct tstate* ats_new = (struct tstate*)ARG2; struct tstate* ats_old = (struct tstate*)ARG3; ThreadId dst_tid = VG_INVALID_THREADID; ThreadState* dst_ts = NULL; Int i; /* Arrgh. We MUST retain the lock during this syscall. Reason is that this is sometimes used for asynchronous thread cancellation (nuking other threads). If we don't have the lock during the syscall, then it's possible that the thread we're nuking might get the lock before it gets killed off, and so we can never re-acquire the lock after this syscall, and the system deadlocks. */ /* 10 July 06: above comment is a misdiagnosis. It appears that for thread cancellation (that is, with ->flags == TSTATE_INTR) the target thread is has its PC changed by the the kernel to something else, possibly to pthread_exit(), so that it can run its cancellation handlers and exit. Currently is unknown how the kernel knows what to set the target thread's PC to. I did establish that all the other data passed in the struct is not relevant: when ->flags == TSTATE_INTR, all the other words can be set to 0x0 or 0xFFFFFFFF and the syscall still works. So the address is not passed like that. Also I looked at args to thread_setmystate_fast, which is used when a thread sets its cancellation state, but none of those are code addresses. Also, it's ok for the kernel to simply change the target thread's PC to something else for async thread cancellation, but for deferred cancellation something else is needed, and I can't see how that would work either. Anyway, net result is, target thread ends up not running on the simulator (not dead), which is why it's necessary to hold onto the lock at this point. */ /* 30 July 06: added kludge to intercept attempts to cancel another thread and instead just force that thread to run pthread_exit(PTHREAD_CANCELED). This allows V to keep control. 
*/ PRINT("thread_setstate (BOGUS HANDLER) " "( %ld, %p,%p )", dst_lwpid, ats_new, ats_old); if (1 && VG_(clo_trace_syscalls) && ats_new) ML_(aix5debugstuff_show_tstate)((Addr)ats_new, "thread_setstate (NEW)"); /* Intercept and handle ourselves any attempts to cancel another thread (including this one). */ if (ats_new && (!ats_old) && ats_new->flags == TSTATE_INTR) { dst_ts = NULL; if (VG_(clo_trace_syscalls)) VG_(printf)("(INTR for lwpid %ld)", dst_lwpid); dst_tid = VG_INVALID_THREADID; for (i = 0; i < VG_N_THREADS; i++) { dst_ts = VG_(get_ThreadState)(i); if ((dst_ts->status == VgTs_Runnable || dst_ts->status == VgTs_Yielding || dst_ts->status == VgTs_WaitSys) && dst_ts->os_state.lwpid == dst_lwpid) { dst_tid = i; break; } } if (VG_(clo_trace_syscalls)) { if (dst_tid == VG_INVALID_THREADID) VG_(printf)("(== unknown tid)"); else VG_(printf)("(== tid %d)", (Int)dst_tid); } if (dst_tid != VG_INVALID_THREADID) { /* A cancel has been requested for ctid. If the target thread has cancellation enabled, honour it right now. If not, mark the thread as having a cancellation request, so that if it later enables cancellation then the cancellation will take effect. */ vg_assert(dst_ts); if (dst_ts->os_state.cancel_progress == Canc_NoRequest) { if (dst_ts->os_state.cancel_disabled) { if (VG_(clo_trace_syscalls)) VG_(printf)("(target has cancel disabled" "; request lodged)"); dst_ts->os_state.cancel_progress = Canc_Requested; } else { if (VG_(clo_trace_syscalls)) VG_(printf)("(forcing target into pthread_exit)"); dst_ts->os_state.cancel_progress = Canc_Actioned; Bool ok = ML_(aix5_force_thread_into_pthread_exit)(dst_tid); if (!ok) { /* now at serious risk of deadlock/livelock. Give up rather than continue. */ ML_(aix5_set_threadstate_for_emergency_exit) (tid, "pthread_cancel(case2-64): " "cannot find pthread_exit; aborting"); SET_STATUS_Success(0); return; } } } SET_STATUS_Success(0); return; } } /* Well, it's not a cancellation request. Maybe it is the initialisation of a previously created thread? */ if (ats_new && !ats_old) { dst_tid = VG_INVALID_THREADID; for (i = 0; i < VG_N_THREADS; i++) { dst_ts = VG_(get_ThreadState)(i); if (dst_ts->status == VgTs_Init && dst_ts->os_state.lwpid == dst_lwpid) { dst_tid = i; break; } } if (dst_tid != VG_INVALID_THREADID) { /* Found the associated child */ if (VG_(clo_trace_syscalls)) VG_(printf)("(initialised child tid %d)", (Int)dst_tid); dst_ts = VG_(get_ThreadState)(dst_tid); UWord* stack = (UWord*)ML_(allocstack)(dst_tid); /* XXX TODO: check allocstack failure */ /* copy the specified child register state into the guest slot (we need that context to run on the simulated CPU, not the real one) and put pointers to our own run-the-simulator function into what we'll hand off to the kernel instead. */ /* The guest thread is to start running whatever context this syscall showed up with. */ dst_ts->arch.vex.guest_GPR0 = ats_new->mst.gpr[0]; dst_ts->arch.vex.guest_GPR1 = ats_new->mst.gpr[1]; /* sp */ dst_ts->arch.vex.guest_GPR2 = ats_new->mst.gpr[2]; /* toc */ dst_ts->arch.vex.guest_GPR3 = ats_new->mst.gpr[3]; /* initarg */ dst_ts->arch.vex.guest_GPR4 = ats_new->mst.gpr[4]; dst_ts->arch.vex.guest_GPR5 = ats_new->mst.gpr[5]; dst_ts->arch.vex.guest_GPR6 = ats_new->mst.gpr[6]; dst_ts->arch.vex.guest_GPR7 = ats_new->mst.gpr[7]; dst_ts->arch.vex.guest_GPR8 = ats_new->mst.gpr[8]; dst_ts->arch.vex.guest_GPR9 = ats_new->mst.gpr[9]; dst_ts->arch.vex.guest_GPR10 = ats_new->mst.gpr[10]; dst_ts->arch.vex.guest_GPR11 = ats_new->mst.gpr[11]; /* ?? 
*/ dst_ts->arch.vex.guest_GPR12 = ats_new->mst.gpr[12]; dst_ts->arch.vex.guest_GPR13 = ats_new->mst.gpr[13]; dst_ts->arch.vex.guest_GPR14 = ats_new->mst.gpr[14]; dst_ts->arch.vex.guest_GPR15 = ats_new->mst.gpr[15]; dst_ts->arch.vex.guest_GPR16 = ats_new->mst.gpr[16]; dst_ts->arch.vex.guest_GPR17 = ats_new->mst.gpr[17]; dst_ts->arch.vex.guest_GPR18 = ats_new->mst.gpr[18]; dst_ts->arch.vex.guest_GPR19 = ats_new->mst.gpr[19]; dst_ts->arch.vex.guest_GPR20 = ats_new->mst.gpr[20]; dst_ts->arch.vex.guest_GPR21 = ats_new->mst.gpr[21]; dst_ts->arch.vex.guest_GPR22 = ats_new->mst.gpr[22]; dst_ts->arch.vex.guest_GPR23 = ats_new->mst.gpr[23]; dst_ts->arch.vex.guest_GPR24 = ats_new->mst.gpr[24]; dst_ts->arch.vex.guest_GPR25 = ats_new->mst.gpr[25]; dst_ts->arch.vex.guest_GPR26 = ats_new->mst.gpr[26]; dst_ts->arch.vex.guest_GPR27 = ats_new->mst.gpr[27]; dst_ts->arch.vex.guest_GPR28 = ats_new->mst.gpr[28]; dst_ts->arch.vex.guest_GPR29 = ats_new->mst.gpr[29]; dst_ts->arch.vex.guest_GPR30 = ats_new->mst.gpr[30]; dst_ts->arch.vex.guest_GPR31 = ats_new->mst.gpr[31]; dst_ts->arch.vex.guest_CIA = ats_new->mst.iar; /* pc */ dst_ts->arch.vex.guest_LR = ats_new->mst.lr; dst_ts->arch.vex.guest_CTR = ats_new->mst.ctr; LibVEX_GuestPPC64_put_CR( ats_new->mst.cr, &dst_ts->arch.vex ); LibVEX_GuestPPC64_put_XER( ats_new->mst.xer, &dst_ts->arch.vex ); /* Record what seems like the highest legitimate stack address for this thread, so that the stack unwinder works properly. It seems reasonable to use the R1 value supplied here. */ dst_ts->client_stack_highest_word = dst_ts->arch.vex.guest_GPR1; /* The host thread is to start running start_thread_NORETURN */ UWord* wrapper_fdescr = (UWord*) & start_thread_NORETURN; ats_new->mst.gpr[1] = (UWord)stack; ats_new->mst.gpr[2] = wrapper_fdescr[1]; ats_new->mst.iar = wrapper_fdescr[0]; ats_new->mst.gpr[3] = (UWord)dst_ts; /* Set initial cancellation status for the thread. */ dst_ts->os_state.cancel_async = False; dst_ts->os_state.cancel_disabled = False; dst_ts->os_state.cancel_progress = Canc_NoRequest; } } } POST(sys_thread_setstate) { if (ARG3) POST_MEM_WRITE( ARG3, sizeof(struct tstate) ); if (0 && VG_(clo_trace_syscalls) && ARG3) ML_(aix5debugstuff_show_tstate)(ARG3, "thread_setstate (OLD)"); } PRE(sys_FAKE_SIGRETURN) { /* See comments on PRE(sys_rt_sigreturn) in syswrap-amd64-linux.c for an explanation of what follows. */ /* This handles the fake signal-return system call created by sigframe-ppc64-aix5.c. */ PRINT("FAKE_SIGRETURN ( )"); vg_assert(VG_(is_valid_tid)(tid)); vg_assert(tid >= 1 && tid < VG_N_THREADS); vg_assert(VG_(is_running_thread)(tid)); /* Remove the signal frame from this thread's (guest) stack, in the process restoring the pre-signal guest state. */ VG_(sigframe_destroy)(tid, True); /* Tell the driver not to update the guest state with the "result", and set a bogus result to keep it happy. */ *flags |= SfNoWriteResult; SET_STATUS_Success(0); /* Check to see if any signals arose as a result of this. 
*/ *flags |= SfPollAfter; } /* --------------------------------------------------------------------- The ppc64/AIX5 syscall table ------------------------------------------------------------------ */ typedef struct { UInt* pSysNo; SyscallTableEntry wrappers; } AIX5SCTabEntry; #undef PLAXY #undef PLAX_ #define PLAXY(sysno, name) \ { & sysno, \ { & WRAPPER_PRE_NAME(ppc64_aix5, name), \ & WRAPPER_POST_NAME(ppc64_aix5, name) }} #define PLAX_(sysno, name) \ { & sysno, \ { & WRAPPER_PRE_NAME(ppc64_aix5, name), \ NULL }} static /* but not const */ AIX5SCTabEntry aix5_ppc64_syscall_table[] = { AIXXY(__NR_AIX5___libc_sbrk, sys___libc_sbrk), AIXX_(__NR_AIX5___msleep, sys___msleep), PLAXY(__NR_AIX5__clock_gettime, sys__clock_gettime), AIXX_(__NR_AIX5__exit, sys__exit), PLAX_(__NR_AIX5__fp_fpscrx64_, sys__fp_fpscrx64_), AIXX_(__NR_AIX5__getpid, sys__getpid), AIXXY(__NR_AIX5__nsleep, sys__nsleep), AIXX_(__NR_AIX5__pause, sys__pause), AIXXY(__NR_AIX5__poll, sys__poll), AIXX_(__NR_AIX5__select, sys__select), AIXX_(__NR_AIX5__sem_wait, sys__sem_wait), AIXXY(__NR_AIX5__sigaction, sys__sigaction), AIXX_(__NR_AIX5__thread_self, sys__thread_self), AIXX_(__NR_AIX5_access, sys_access), AIXX_(__NR_AIX5_accessx, sys_accessx), AIXXY(__NR_AIX5_appgetrlimit, sys_appgetrlimit), AIXXY(__NR_AIX5_appgetrusage, sys_appgetrusage), AIXX_(__NR_AIX5_appsetrlimit, sys_appsetrlimit), AIXX_(__NR_AIX5_appulimit, sys_appulimit), AIXX_(__NR_AIX5_bind, sys_bind), AIXX_(__NR_AIX5_chdir, sys_chdir), AIXX_(__NR_AIX5_chmod, sys_chmod), AIXX_(__NR_AIX5_chown, sys_chown), AIXX_(__NR_AIX5_close, sys_close), AIXX_(__NR_AIX5_connext, sys_connext), AIXX_(__NR_AIX5_execve, sys_execve), AIXXY(__NR_AIX5_finfo, sys_finfo), AIXXY(__NR_AIX5_fstatfs, sys_fstatfs), AIXXY(__NR_AIX5_fstatx, sys_fstatx), AIXXY(__NR_AIX5_getdirent, sys_getdirent), AIXXY(__NR_AIX5_getdirent64, sys_getdirent64), AIXXY(__NR_AIX5_getdomainname, sys_getdomainname), AIXX_(__NR_AIX5_getgidx, sys_getgidx), AIXXY(__NR_AIX5_gethostname, sys_gethostname), AIXXY(__NR_AIX5_getpriv, sys_getpriv), AIXXY(__NR_AIX5_getprocs, sys_getprocs), AIXXY(__NR_AIX5_getprocs64, sys_getprocs), /* XXX: correct? 
*/ AIXX_(__NR_AIX5_getrpid, sys_getrpid), AIXXY(__NR_AIX5_getsockopt, sys_getsockopt), AIXX_(__NR_AIX5_gettimerid, sys_gettimerid), AIXX_(__NR_AIX5_getuidx, sys_getuidx), AIXXY(__NR_AIX5_incinterval, sys_incinterval), AIXXY(__NR_AIX5_kfcntl, sys_kfcntl), AIXX_(__NR_AIX5_kfork, sys_kfork), AIXX_(__NR_AIX5_kill, sys_kill), AIXXY(__NR_AIX5_kioctl, sys_kioctl), PLAXY(__NR_AIX5_kload, sys_kload), AIXX_(__NR_AIX5_klseek, sys_klseek), AIXXY(__NR_AIX5_kread, sys_kread), AIXXY(__NR_AIX5_kreadv, sys_kreadv), AIXX_(__NR_AIX5_kthread_ctl, sys_kthread_ctl), AIXX_(__NR_AIX5_ktruncate, sys_ktruncate), PLAXY(__NR_AIX5_kunload64, sys_kunload64), AIXXY(__NR_AIX5_kwaitpid, sys_kwaitpid), AIXX_(__NR_AIX5_kwrite, sys_kwrite), AIXX_(__NR_AIX5_kwritev, sys_kwritev), AIXX_(__NR_AIX5_lseek, sys_lseek), AIXX_(__NR_AIX5_mkdir, sys_mkdir), AIXXY(__NR_AIX5_mmap, sys_mmap), AIXXY(__NR_AIX5_mntctl, sys_mntctl), AIXXY(__NR_AIX5_mprotect, sys_mprotect), AIXXY(__NR_AIX5_munmap, sys_munmap), AIXXY(__NR_AIX5_ngetpeername, sys_ngetpeername), AIXXY(__NR_AIX5_ngetsockname, sys_ngetsockname), AIXXY(__NR_AIX5_nrecvfrom, sys_nrecvfrom), AIXX_(__NR_AIX5_nrecvmsg, sys_nrecvmsg), AIXX_(__NR_AIX5_open, sys_open), AIXXY(__NR_AIX5_pipe, sys_pipe), AIXX_(__NR_AIX5_privcheck, sys_privcheck), AIXX_(__NR_AIX5_rename, sys_rename), AIXXY(__NR_AIX5_sbrk, sys_sbrk), AIXXY(__NR_AIX5_sem_init, sys_sem_init), AIXXY(__NR_AIX5_sem_post, sys_sem_post), AIXX_(__NR_AIX5_send, sys_send), AIXX_(__NR_AIX5_setgid, sys_setgid), AIXX_(__NR_AIX5_setsockopt, sys_setsockopt), AIXX_(__NR_AIX5_setuid, sys_setuid), AIXXY(__NR_AIX5_shmat, sys_shmat), AIXXY(__NR_AIX5_shmctl, sys_shmctl), AIXXY(__NR_AIX5_shmdt, sys_shmdt), AIXX_(__NR_AIX5_shmget, sys_shmget), AIXX_(__NR_AIX5_shutdown, sys_shutdown), AIXX_(__NR_AIX5_sigcleanup, sys_sigcleanup), AIXXY(__NR_AIX5_sigprocmask, sys_sigprocmask), AIXXY(__NR_AIX5_sys_parm, sys_sys_parm), AIXXY(__NR_AIX5_sysconfig, sys_sysconfig), AIXX_(__NR_AIX5_socket, sys_socket), AIXXY(__NR_AIX5_statx, sys_statx), AIXXY(__NR_AIX5_thread_create, sys_thread_create), AIXX_(__NR_AIX5_thread_init, sys_thread_init), AIXX_(__NR_AIX5_thread_kill, sys_thread_kill), AIXXY(__NR_AIX5_thread_setmystate, sys_thread_setmystate), AIXX_(__NR_AIX5_thread_setmystate_fast, sys_thread_setmystate_fast), PLAXY(__NR_AIX5_thread_setstate, sys_thread_setstate), AIXX_(__NR_AIX5_thread_terminate_unlock, sys_thread_terminate_unlock), AIXX_(__NR_AIX5_thread_tsleep, sys_thread_tsleep), AIXX_(__NR_AIX5_thread_twakeup, sys_thread_twakeup), AIXX_(__NR_AIX5_thread_unlock, sys_thread_unlock), AIXX_(__NR_AIX5_thread_waitlock_, sys_thread_waitlock_), AIXXY(__NR_AIX5_times, sys_times), AIXXY(__NR_AIX5_uname, sys_uname), AIXX_(__NR_AIX5_unlink, sys_unlink), AIXX_(__NR_AIX5_utimes, sys_utimes), AIXXY(__NR_AIX5_vmgetinfo, sys_vmgetinfo), AIXX_(__NR_AIX5_yield, sys_yield), PLAX_(__NR_AIX5_FAKE_SIGRETURN, sys_FAKE_SIGRETURN) }; SyscallTableEntry* ML_(get_ppc64_aix5_syscall_entry) ( UInt sysno ) { Int i; AIX5SCTabEntry tmp; const Int tab_size = sizeof(aix5_ppc64_syscall_table) / sizeof(aix5_ppc64_syscall_table[0]); for (i = 0; i < tab_size; i++) if (sysno == *(aix5_ppc64_syscall_table[i].pSysNo)) break; vg_assert(i >= 0 && i <= tab_size); if (i == tab_size) return NULL; /* can't find a wrapper */ /* Move found one a bit closer to the front, so as to make future searches cheaper. 
*/ if (i > 0) { tmp = aix5_ppc64_syscall_table[i-1]; aix5_ppc64_syscall_table[i-1] = aix5_ppc64_syscall_table[i]; aix5_ppc64_syscall_table[i] = tmp; i--; } vg_assert(i >= 0 && i < tab_size); return &aix5_ppc64_syscall_table[i].wrappers; } #endif // defined(VGP_ppc64_aix5) /*--------------------------------------------------------------------*/ /*--- end ---*/ /*--------------------------------------------------------------------*/
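/* A minimal, self-contained sketch (added for illustration; DemoEntry and
   demo_lookup are hypothetical names, not part of the file above) of the
   same "transpose" self-organising search that
   ML_(get_ppc64_aix5_syscall_entry) uses: every hit swaps the entry one
   slot toward the front, so frequently used keys drift to the head of the
   table and later lookups terminate earlier. */
#include <stddef.h>

typedef struct { unsigned key; void* payload; } DemoEntry;

static void* demo_lookup ( DemoEntry* tab, size_t n, unsigned key )
{
   size_t i;
   for (i = 0; i < n; i++)
      if (tab[i].key == key)
         break;
   if (i == n)
      return NULL;                /* no entry for this key */
   if (i > 0) {
      /* transpose heuristic: move the hit one step toward the front */
      DemoEntry tmp = tab[i-1];
      tab[i-1] = tab[i];
      tab[i]   = tmp;
      i--;
   }
   return tab[i].payload;
}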
MicroTrustRepos/microkernel
src/l4/pkg/valgrind/src/valgrind-3.6.0-svn/coregrind/m_syswrap/syswrap-ppc64-aix5.c
C
gpl-2.0
30,484
/*
 *  linux/arch/arm/mach-mmp/brownstone.c
 *
 *  Support for the Marvell Brownstone Development Platform.
 *
 *  Copyright (C) 2009-2010 Marvell International Ltd.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/gpio-pxa.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/max8649.h>
#include <linux/regulator/fixed.h>
#include <linux/mfd/max8925.h>
#include <linux/usb/phy.h>
#include <linux/usb/mv_usb2_phy.h>
#include <linux/platform_data/mv_usb.h>

#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/addr-map.h>
#include <mach/mfp-mmp2.h>
#include <mach/mmp2.h>
#include <mach/irqs.h>

#include "common.h"

#define BROWNSTONE_NR_IRQS	(MMP_NR_IRQS + 40)

#define GPIO_5V_ENABLE		(89)

static unsigned long brownstone_pin_config[] __initdata = {
	/* UART1 */
	GPIO29_UART1_RXD,
	GPIO30_UART1_TXD,

	/* UART3 */
	GPIO51_UART3_RXD,
	GPIO52_UART3_TXD,

	/* DFI */
	GPIO168_DFI_D0,
	GPIO167_DFI_D1,
	GPIO166_DFI_D2,
	GPIO165_DFI_D3,
	GPIO107_DFI_D4,
	GPIO106_DFI_D5,
	GPIO105_DFI_D6,
	GPIO104_DFI_D7,
	GPIO111_DFI_D8,
	GPIO164_DFI_D9,
	GPIO163_DFI_D10,
	GPIO162_DFI_D11,
	GPIO161_DFI_D12,
	GPIO110_DFI_D13,
	GPIO109_DFI_D14,
	GPIO108_DFI_D15,
	GPIO143_ND_nCS0,
	GPIO144_ND_nCS1,
	GPIO147_ND_nWE,
	GPIO148_ND_nRE,
	GPIO150_ND_ALE,
	GPIO149_ND_CLE,
	GPIO112_ND_RDY0,
	GPIO160_ND_RDY1,

	/* PMIC */
	PMIC_PMIC_INT | MFP_LPM_EDGE_FALL,

	/* MMC0 */
	GPIO131_MMC1_DAT3 | MFP_PULL_HIGH,
	GPIO132_MMC1_DAT2 | MFP_PULL_HIGH,
	GPIO133_MMC1_DAT1 | MFP_PULL_HIGH,
	GPIO134_MMC1_DAT0 | MFP_PULL_HIGH,
	GPIO136_MMC1_CMD | MFP_PULL_HIGH,
	GPIO139_MMC1_CLK,
	GPIO140_MMC1_CD | MFP_PULL_LOW,
	GPIO141_MMC1_WP | MFP_PULL_LOW,

	/* MMC1 */
	GPIO37_MMC2_DAT3 | MFP_PULL_HIGH,
	GPIO38_MMC2_DAT2 | MFP_PULL_HIGH,
	GPIO39_MMC2_DAT1 | MFP_PULL_HIGH,
	GPIO40_MMC2_DAT0 | MFP_PULL_HIGH,
	GPIO41_MMC2_CMD | MFP_PULL_HIGH,
	GPIO42_MMC2_CLK,

	/* MMC2 */
	GPIO165_MMC3_DAT7 | MFP_PULL_HIGH,
	GPIO162_MMC3_DAT6 | MFP_PULL_HIGH,
	GPIO166_MMC3_DAT5 | MFP_PULL_HIGH,
	GPIO163_MMC3_DAT4 | MFP_PULL_HIGH,
	GPIO167_MMC3_DAT3 | MFP_PULL_HIGH,
	GPIO164_MMC3_DAT2 | MFP_PULL_HIGH,
	GPIO168_MMC3_DAT1 | MFP_PULL_HIGH,
	GPIO111_MMC3_DAT0 | MFP_PULL_HIGH,
	GPIO112_MMC3_CMD | MFP_PULL_HIGH,
	GPIO151_MMC3_CLK,

	/* 5V regulator */
	GPIO89_GPIO,
};

static struct pxa_gpio_platform_data mmp2_gpio_pdata = {
	.irq_base	= MMP_GPIO_TO_IRQ(0),
};

static struct regulator_consumer_supply max8649_supply[] = {
	REGULATOR_SUPPLY("vcc_core", NULL),
};

static struct regulator_init_data max8649_init_data = {
	.constraints	= {
		.name		= "vcc_core range",
		.min_uV		= 1150000,
		.max_uV		= 1280000,
		.always_on	= 1,
		.boot_on	= 1,
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
	},
	.num_consumer_supplies	= 1,
	.consumer_supplies	= &max8649_supply[0],
};

static struct max8649_platform_data brownstone_max8649_info = {
	.mode		= 2,	/* VID1 = 1, VID0 = 0 */
	.extclk		= 0,
	.ramp_timing	= MAX8649_RAMP_32MV,
	.regulator	= &max8649_init_data,
};

static struct regulator_consumer_supply brownstone_v_5vp_supplies[] = {
	REGULATOR_SUPPLY("v_5vp", NULL),
};

static struct regulator_init_data brownstone_v_5vp_data = {
	.constraints	= {
		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
	},
	.num_consumer_supplies	= ARRAY_SIZE(brownstone_v_5vp_supplies),
	.consumer_supplies	= brownstone_v_5vp_supplies,
};

static struct fixed_voltage_config brownstone_v_5vp = {
	.supply_name	= "v_5vp",
	.microvolts	= 5000000,
	.gpio		=
GPIO_5V_ENABLE, .enable_high = 1, .enabled_at_boot = 1, .init_data = &brownstone_v_5vp_data, }; static struct platform_device brownstone_v_5vp_device = { .name = "reg-fixed-voltage", .id = 1, .dev = { .platform_data = &brownstone_v_5vp, }, }; static struct max8925_platform_data brownstone_max8925_info = { .irq_base = MMP_NR_IRQS, }; static struct i2c_board_info brownstone_twsi1_info[] = { [0] = { .type = "max8649", .addr = 0x60, .platform_data = &brownstone_max8649_info, }, [1] = { .type = "max8925", .addr = 0x3c, .irq = IRQ_MMP2_PMIC, .platform_data = &brownstone_max8925_info, }, }; static struct sdhci_pxa_platdata mmp2_sdh_platdata_mmc0 = { .clk_delay_cycles = 0x1f, }; static struct sdhci_pxa_platdata mmp2_sdh_platdata_mmc2 = { .clk_delay_cycles = 0x1f, .flags = PXA_FLAG_CARD_PERMANENT | PXA_FLAG_SD_8_BIT_CAPABLE_SLOT, }; static struct sram_platdata mmp2_asram_platdata = { .pool_name = "asram", .granularity = SRAM_GRANULARITY, }; static struct sram_platdata mmp2_isram_platdata = { .pool_name = "isram", .granularity = SRAM_GRANULARITY, }; #ifdef CONFIG_USB_SUPPORT #if defined(CONFIG_USB_MV_UDC) || defined(CONFIG_USB_EHCI_MV_U2O) static struct mv_usb_platform_data brownstone_usb_pdata = { .vbus = NULL, .mode = MV_USB_MODE_OTG, .otg_force_a_bus_req = 1, .set_vbus = NULL, }; #endif #endif static void __init brownstone_init(void) { mfp_config(ARRAY_AND_SIZE(brownstone_pin_config)); /* on-chip devices */ mmp2_add_uart(1); mmp2_add_uart(3); platform_device_add_data(&mmp2_device_gpio, &mmp2_gpio_pdata, sizeof(struct pxa_gpio_platform_data)); platform_device_register(&mmp2_device_gpio); mmp2_add_twsi(1, NULL, ARRAY_AND_SIZE(brownstone_twsi1_info)); mmp2_add_sdhost(0, &mmp2_sdh_platdata_mmc0); /* SD/MMC */ mmp2_add_sdhost(2, &mmp2_sdh_platdata_mmc2); /* eMMC */ mmp2_add_asram(&mmp2_asram_platdata); mmp2_add_isram(&mmp2_isram_platdata); /* enable 5v regulator */ platform_device_register(&brownstone_v_5vp_device); #ifdef CONFIG_USB_SUPPORT pxa_register_device(&mmp2_device_u2ophy, NULL, 0); #endif #ifdef CONFIG_USB_MV_UDC /* for usb2 phy */ usb_bind_phy(mmp2_device_u2o.drv_name, MV_USB2_PHY_INDEX, mmp2_device_u2ophy.drv_name); #ifdef CONFIG_USB_MV_OTG /* for usb2 otg phy */ usb_bind_phy(mmp2_device_u2o.drv_name, MV_USB2_OTG_PHY_INDEX, mmp2_device_u2ootg.drv_name); #endif pxa_register_device(&mmp2_device_u2o, &brownstone_usb_pdata, sizeof(brownstone_usb_pdata)); #endif #ifdef CONFIG_USB_EHCI_MV_U2O /* for usb2 phy */ usb_bind_phy(mmp2_device_u2oehci.dev_name, MV_USB2_PHY_INDEX, mmp2_device_u2ophy.dev_name); #ifdef CONFIG_USB_MV_OTG /* for usb2 otg phy */ usb_bind_phy(mmp2_device_u2oehci.drv_name, MV_USB2_OTG_PHY_INDEX, mmp2_device_u2ootg.drv_name); #endif pxa_register_device(&mmp2_device_u2oehci, &brownstone_usb_pdata, sizeof(brownstone_usb_pdata)); #endif #ifdef CONFIG_USB_MV_OTG /* for usb2 phy */ usb_bind_phy(mmp2_device_u2ootg.dev_name, MV_USB2_PHY_INDEX, mmp2_device_u2ophy.dev_name); pxa_register_device(&mmp2_device_u2ootg, &brownstone_usb_pdata, sizeof(brownstone_usb_pdata)); #endif } MACHINE_START(BROWNSTONE, "Brownstone Development Platform") /* Maintainer: Haojian Zhuang <haojian.zhuang@marvell.com> */ .map_io = mmp_map_io, .nr_irqs = BROWNSTONE_NR_IRQS, .init_irq = mmp2_init_irq, .init_time = mmp2_timer_init, .init_machine = brownstone_init, .restart = mmp_restart, MACHINE_END
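/* An illustrative sketch, not part of the original board file: the same
 * "reg-fixed-voltage" pattern used for v_5vp above can describe any
 * always-on rail.  All names here (demo_3v3*, id 2) are hypothetical; a
 * negative .gpio tells the fixed-regulator driver there is no enable GPIO.
 */
static struct regulator_init_data demo_3v3_data = {
	.constraints	= {
		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
	},
};

static struct fixed_voltage_config demo_3v3 = {
	.supply_name	= "v_3v3_demo",
	.microvolts	= 3300000,
	.gpio		= -EINVAL,	/* no enable GPIO: always on */
	.enabled_at_boot = 1,
	.init_data	= &demo_3v3_data,
};

static struct platform_device demo_3v3_device = {
	.name	= "reg-fixed-voltage",
	.id	= 2,		/* id 1 is taken by brownstone_v_5vp_device */
	.dev	= {
		.platform_data = &demo_3v3,
	},
};
/* ...registered from an init hook with
 * platform_device_register(&demo_3v3_device); */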
chase2534/gtab47.freekern
arch/arm/mach-mmp/brownstone.c
C
gpl-2.0
7,186
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Implementation of the Transmission Control Protocol(TCP). * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Mark Evans, <evansmp@uhura.aston.ac.uk> * Corey Minyard <wf-rch!minyard@relay.EU.net> * Florian La Roche, <flla@stud.uni-sb.de> * Charles Hedrick, <hedrick@klinzhai.rutgers.edu> * Linus Torvalds, <torvalds@cs.helsinki.fi> * Alan Cox, <gw4pts@gw4pts.ampr.org> * Matthew Dillon, <dillon@apollo.west.oic.com> * Arnt Gulbrandsen, <agulbra@nvg.unit.no> * Jorge Cwik, <jorge@laser.satlink.net> */ /* * Changes: * Pedro Roque : Fast Retransmit/Recovery. * Two receive queues. * Retransmit queue handled by TCP. * Better retransmit timer handling. * New congestion avoidance. * Header prediction. * Variable renaming. * * Eric : Fast Retransmit. * Randy Scott : MSS option defines. * Eric Schenk : Fixes to slow start algorithm. * Eric Schenk : Yet another double ACK bug. * Eric Schenk : Delayed ACK bug fixes. * Eric Schenk : Floyd style fast retrans war avoidance. * David S. Miller : Don't allow zero congestion window. * Eric Schenk : Fix retransmitter so that it sends * next packet on ack of previous packet. * Andi Kleen : Moved open_request checking here * and process RSTs for open_requests. * Andi Kleen : Better prune_queue, and other fixes. * Andrey Savochkin: Fix RTT measurements in the presence of * timestamps. * Andrey Savochkin: Check sequence numbers correctly when * removing SACKs due to in sequence incoming * data segments. * Andi Kleen: Make sure we never ack data there is not * enough room for. Also make this condition * a fatal error if it might still happen. * Andi Kleen: Add tcp_measure_rcv_mss to make * connections with MSS<min(MTU,ann. MSS) * work without delayed acks. * Andi Kleen: Process packets with PSH set in the * fast path. * J Hadi Salim: ECN support * Andrei Gurtov, * Pasi Sarolahti, * Panu Kuhlberg: Experimental audit of TCP (re)transmission * engine. Lots of bugs are found. * Pasi Sarolahti: F-RTO for dealing with spurious RTOs */ #define pr_fmt(fmt) "TCP: " fmt #include <linux/mm.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/sysctl.h> #include <linux/kernel.h> #include <net/dst.h> #include <net/tcp.h> #include <net/inet_common.h> #include <linux/ipsec.h> #include <asm/unaligned.h> #include <net/netdma.h> int sysctl_tcp_timestamps __read_mostly = 1; int sysctl_tcp_window_scaling __read_mostly = 1; int sysctl_tcp_sack __read_mostly = 1; int sysctl_tcp_fack __read_mostly = 1; int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH; EXPORT_SYMBOL(sysctl_tcp_reordering); int sysctl_tcp_dsack __read_mostly = 1; int sysctl_tcp_app_win __read_mostly = 31; int sysctl_tcp_adv_win_scale __read_mostly = 1; EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); /* rfc5961 challenge ack rate limiting */ int sysctl_tcp_challenge_ack_limit = 100; int sysctl_tcp_stdurg __read_mostly; int sysctl_tcp_rfc1337 __read_mostly; int sysctl_tcp_max_orphans __read_mostly = NR_FILE; int sysctl_tcp_frto __read_mostly = 2; int sysctl_tcp_thin_dupack __read_mostly; int sysctl_tcp_moderate_rcvbuf __read_mostly = 1; int sysctl_tcp_early_retrans __read_mostly = 3; int sysctl_tcp_default_init_rwnd __read_mostly = TCP_DEFAULT_INIT_RCVWND; #define FLAG_DATA 0x01 /* Incoming frame contained data. */ #define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. 
*/
#define FLAG_DATA_ACKED		0x04 /* This ACK acknowledged new data. */
#define FLAG_RETRANS_DATA_ACKED	0x08 /* "" "" some of which was retransmitted. */
#define FLAG_SYN_ACKED		0x10 /* This ACK acknowledged SYN. */
#define FLAG_DATA_SACKED	0x20 /* New SACK. */
#define FLAG_ECE		0x40 /* ECE in this ACK */
#define FLAG_SLOWPATH		0x100 /* Do not skip RFC checks for window update. */
#define FLAG_ORIG_SACK_ACKED	0x200 /* Never retransmitted data are (s)acked */
#define FLAG_SND_UNA_ADVANCED	0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
#define FLAG_DSACKING_ACK	0x800 /* SACK blocks contained D-SACK info */
#define FLAG_SACK_RENEGING	0x2000 /* snd_una advanced to a sacked seq */
#define FLAG_UPDATE_TS_RECENT	0x4000 /* tcp_replace_ts_recent() */

#define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
#define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
#define FLAG_CA_ALERT		(FLAG_DATA_SACKED|FLAG_ECE)
#define FLAG_FORWARD_PROGRESS	(FLAG_ACKED|FLAG_DATA_SACKED)

#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
#define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))

/* Adapt the MSS value used to make delayed ack decisions to the
 * real world.
 */
static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const unsigned int lss = icsk->icsk_ack.last_seg_size;
	unsigned int len;

	icsk->icsk_ack.last_seg_size = 0;

	/* skb->len may jitter because of SACKs, even if peer
	 * sends good full-sized frames.
	 */
	len = skb_shinfo(skb)->gso_size ? : skb->len;
	if (len >= icsk->icsk_ack.rcv_mss) {
		icsk->icsk_ack.rcv_mss = len;
	} else {
		/* Otherwise, we make a more careful check, taking into
		 * account that the size of a SACK block is variable.
		 *
		 * "len" is the invariant segment length, including TCP header.
		 */
		len += skb->data - skb_transport_header(skb);
		if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) ||
		    /* If PSH is not set, packet should be
		     * full sized, provided peer TCP is not badly broken.
		     * This observation (if it is correct 8)) allows
		     * us to handle super-low mtu links fairly.
		     */
		    (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
		     !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) {
			/* Subtract also the invariant part (if peer is RFC
			 * compliant): tcp header plus fixed timestamp option
			 * length.  Resulting "len" is MSS free of SACK jitter.
			 */
			len -= tcp_sk(sk)->tcp_header_len;
			icsk->icsk_ack.last_seg_size = len;
			if (len == lss) {
				icsk->icsk_ack.rcv_mss = len;
				return;
			}
		}
		if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)
			icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2;
		icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
	}
}

static void tcp_incr_quickack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);

	if (quickacks == 0)
		quickacks = 2;
	if (quickacks > icsk->icsk_ack.quick)
		icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
}

static void tcp_enter_quickack_mode(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	tcp_incr_quickack(sk);
	icsk->icsk_ack.pingpong = 0;
	icsk->icsk_ack.ato = TCP_ATO_MIN;
}

/* Send ACKs quickly, if "quick" count is not exhausted
 * and the session is not interactive.
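 * (Aside, added for clarity: "quick" is the budget set by
 *  tcp_incr_quickack() above -- roughly rcv_wnd / (2 * rcv_mss), clamped
 *  to TCP_MAX_QUICKACKS -- while a set "pingpong" flag marks the session
 *  as interactive, i.e. one using delayed ACKs.)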
 */
static inline bool tcp_in_quickack_mode(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
}

static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp)
{
	if (tp->ecn_flags & TCP_ECN_OK)
		tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
}

static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
{
	if (tcp_hdr(skb)->cwr)
		tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}

static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp)
{
	tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}

static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
{
	if (!(tp->ecn_flags & TCP_ECN_OK))
		return;

	switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
	case INET_ECN_NOT_ECT:
		/* Funny extension: if ECT is not set on a segment,
		 * and we have already seen ECT on a previous segment,
		 * it is probably a retransmit.
		 */
		if (tp->ecn_flags & TCP_ECN_SEEN)
			tcp_enter_quickack_mode((struct sock *)tp);
		break;
	case INET_ECN_CE:
		if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
			/* Better not delay acks, sender can have a very low cwnd */
			tcp_enter_quickack_mode((struct sock *)tp);
			tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
		}
		/* fall through */
	default:
		tp->ecn_flags |= TCP_ECN_SEEN;
	}
}

static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
{
	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
		tp->ecn_flags &= ~TCP_ECN_OK;
}

static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
{
	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
		tp->ecn_flags &= ~TCP_ECN_OK;
}

static bool TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
{
	if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
		return true;
	return false;
}

/* Buffer size and advertised window tuning.
 *
 * 1. Tuning sk->sk_sndbuf, when connection enters established state.
 */

static void tcp_fixup_sndbuf(struct sock *sk)
{
	int sndmem = SKB_TRUESIZE(tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER);

	sndmem *= TCP_INIT_CWND;
	if (sk->sk_sndbuf < sndmem)
		sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
}

/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
 *
 * All of tcp_full_space() is split into two parts: the "network" buffer,
 * allocated forward and advertised in the receiver window (tp->rcv_wnd),
 * and the "application buffer", required to isolate scheduling/application
 * latencies from the network.
 * window_clamp is the maximal advertised window. It can be less than
 * tcp_full_space(), in which case tcp_full_space() - window_clamp
 * is reserved for the "application" buffer. The smaller window_clamp is,
 * the smoother our behaviour from the network's viewpoint, but the lower
 * the throughput and the higher the connection's sensitivity to losses. 8)
 *
 * rcv_ssthresh is a stricter window_clamp used at the "slow start"
 * phase to predict further behaviour of this connection.
 * It is used for two goals:
 * - to enforce header prediction at sender, even when application
 *   requires some significant "application buffer". This is check #1.
 * - to prevent pruning of receive queue because of misprediction
 *   of receiver window. Check #2.
 *
 * The scheme does not work when sender sends good segments opening
 * window and then starts to feed us spaghetti. But it should work
 * in common situations. Otherwise, we have to rely on queue collapsing.
 */

/* Slow part of check#2. */
static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	/* Optimize this!
	 */
	int truesize = tcp_win_from_space(skb->truesize) >> 1;
	int window = tcp_win_from_space(sysctl_tcp_rmem[2]) >> 1;

	while (tp->rcv_ssthresh <= window) {
		if (truesize <= skb->len)
			return 2 * inet_csk(sk)->icsk_ack.rcv_mss;

		truesize >>= 1;
		window >>= 1;
	}
	return 0;
}

static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Check #1 */
	if (tp->rcv_ssthresh < tp->window_clamp &&
	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
	    !sk_under_memory_pressure(sk)) {
		int incr;

		/* Check #2. Increase window, if skb with such overhead
		 * will fit into rcvbuf in the future.
		 */
		if (tcp_win_from_space(skb->truesize) <= skb->len)
			incr = 2 * tp->advmss;
		else
			incr = __tcp_grow_window(sk, skb);

		if (incr) {
			incr = max_t(int, incr, 2 * skb->len);
			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
					       tp->window_clamp);
			inet_csk(sk)->icsk_ack.quick |= 1;
		}
	}
}

/* 3. Tuning rcvbuf, when connection enters established state. */
static void tcp_fixup_rcvbuf(struct sock *sk)
{
	u32 mss = tcp_sk(sk)->advmss;
	u32 icwnd = sysctl_tcp_default_init_rwnd;
	int rcvmem;

	/* Limit to 10 segments if mss <= 1460,
	 * or 14600/mss segments, with a minimum of two segments.
	 */
	if (mss > 1460)
		icwnd = max_t(u32, (1460 * icwnd) / mss, 2);

	rcvmem = SKB_TRUESIZE(mss + MAX_TCP_HEADER);
	while (tcp_win_from_space(rcvmem) < mss)
		rcvmem += 128;

	rcvmem *= icwnd;

	if (sk->sk_rcvbuf < rcvmem)
		sk->sk_rcvbuf = min(rcvmem, sysctl_tcp_rmem[2]);
}

/* 4. Try to fixup all. This is done immediately after connection enters
 *    established state.
 */
void tcp_init_buffer_space(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int maxwin;

	if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
		tcp_fixup_rcvbuf(sk);
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
		tcp_fixup_sndbuf(sk);

	tp->rcvq_space.space = tp->rcv_wnd;

	maxwin = tcp_full_space(sk);

	if (tp->window_clamp >= maxwin) {
		tp->window_clamp = maxwin;

		if (sysctl_tcp_app_win && maxwin > 4 * tp->advmss)
			tp->window_clamp = max(maxwin -
					       (maxwin >> sysctl_tcp_app_win),
					       4 * tp->advmss);
	}

	/* Force reservation of one segment. */
	if (sysctl_tcp_app_win &&
	    tp->window_clamp > 2 * tp->advmss &&
	    tp->window_clamp + tp->advmss > maxwin)
		tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss);

	tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

/* 5. Recalculate window clamp after socket hit its memory bounds. */
static void tcp_clamp_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_ack.quick = 0;

	if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
	    !sk_under_memory_pressure(sk) &&
	    sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
		sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
				    sysctl_tcp_rmem[2]);
	}
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
}

/* Initialize RCV_MSS value.
 * RCV_MSS is our guess about the MSS used by the peer.
 * We don't have any direct information about the MSS.
 * It's better to underestimate the RCV_MSS rather than overestimate.
 * Overestimations make us ACK less frequently than needed.
 * Underestimations are easier to detect and fix by tcp_measure_rcv_mss().
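 * (Worked example, added for clarity: with advmss = mss_cache = 1460 and
 *  rcv_wnd = 8192, the chain below gives min(1460, 4096) = 1460, then
 *  min(1460, TCP_MSS_DEFAULT = 536) = 536, and finally
 *  max(536, TCP_MIN_MSS = 88) = 536 -- a deliberately conservative initial
 *  guess until real segments are measured.)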
*/ void tcp_initialize_rcv_mss(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache); hint = min(hint, tp->rcv_wnd / 2); hint = min(hint, TCP_MSS_DEFAULT); hint = max(hint, TCP_MIN_MSS); inet_csk(sk)->icsk_ack.rcv_mss = hint; } EXPORT_SYMBOL(tcp_initialize_rcv_mss); /* Receiver "autotuning" code. * * The algorithm for RTT estimation w/o timestamps is based on * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL. * <http://public.lanl.gov/radiant/pubs.html#DRS> * * More detail on this code can be found at * <http://staff.psc.edu/jheffner/>, * though this reference is out of date. A new paper * is pending. */ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep) { u32 new_sample = tp->rcv_rtt_est.rtt; long m = sample; if (m == 0) m = 1; if (new_sample != 0) { /* If we sample in larger samples in the non-timestamp * case, we could grossly overestimate the RTT especially * with chatty applications or bulk transfer apps which * are stalled on filesystem I/O. * * Also, since we are only going for a minimum in the * non-timestamp case, we do not smooth things out * else with timestamps disabled convergence takes too * long. */ if (!win_dep) { m -= (new_sample >> 3); new_sample += m; } else { m <<= 3; if (m < new_sample) new_sample = m; } } else { /* No previous measure. */ new_sample = m << 3; } if (tp->rcv_rtt_est.rtt != new_sample) tp->rcv_rtt_est.rtt = new_sample; } static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp) { if (tp->rcv_rtt_est.time == 0) goto new_measure; if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) return; tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1); new_measure: tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd; tp->rcv_rtt_est.time = tcp_time_stamp; } static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, const struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); if (tp->rx_opt.rcv_tsecr && (TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0); } /* * This function should be called every time data is copied to user space. * It calculates the appropriate TCP receive buffer space. */ void tcp_rcv_space_adjust(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); int time; int space; if (tp->rcvq_space.time == 0) goto new_measure; time = tcp_time_stamp - tp->rcvq_space.time; if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0) return; space = 2 * (tp->copied_seq - tp->rcvq_space.seq); space = max(tp->rcvq_space.space, space); if (tp->rcvq_space.space != space) { int rcvmem; tp->rcvq_space.space = space; if (sysctl_tcp_moderate_rcvbuf && !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { int new_clamp = space; /* Receive space grows, normalize in order to * take into account packet headers and sk_buff * structure overhead. */ space /= tp->advmss; if (!space) space = 1; rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER); while (tcp_win_from_space(rcvmem) < tp->advmss) rcvmem += 128; space *= rcvmem; space = min(space, sysctl_tcp_rmem[2]); if (space > sk->sk_rcvbuf) { sk->sk_rcvbuf = space; /* Make the window clamp follow along. */ tp->window_clamp = new_clamp; } } } new_measure: tp->rcvq_space.seq = tp->copied_seq; tp->rcvq_space.time = tcp_time_stamp; } /* There is something which you must keep in mind when you analyze the * behavior of the tp->ato delayed ack timeout interval. When a * connection starts up, we want to ack as quickly as possible. 
The * problem is that "good" TCP's do slow start at the beginning of data * transmission. The means that until we send the first few ACK's the * sender will sit on his end and only queue most of his data, because * he can only send snd_cwnd unacked packets at any given time. For * each ACK we send, he increments snd_cwnd and transmits more of his * queue. -DaveM */ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); u32 now; inet_csk_schedule_ack(sk); tcp_measure_rcv_mss(sk, skb); tcp_rcv_rtt_measure(tp); now = tcp_time_stamp; if (!icsk->icsk_ack.ato) { /* The _first_ data packet received, initialize * delayed ACK engine. */ tcp_incr_quickack(sk); icsk->icsk_ack.ato = TCP_ATO_MIN; } else { int m = now - icsk->icsk_ack.lrcvtime; if (m <= TCP_ATO_MIN / 2) { /* The fastest case is the first. */ icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2; } else if (m < icsk->icsk_ack.ato) { icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m; if (icsk->icsk_ack.ato > icsk->icsk_rto) icsk->icsk_ack.ato = icsk->icsk_rto; } else if (m > icsk->icsk_rto) { /* Too long gap. Apparently sender failed to * restart window, so that we send ACKs quickly. */ tcp_incr_quickack(sk); sk_mem_reclaim(sk); } } icsk->icsk_ack.lrcvtime = now; TCP_ECN_check_ce(tp, skb); if (skb->len >= 128) tcp_grow_window(sk, skb); } /* Called to compute a smoothed rtt estimate. The data fed to this * routine either comes from timestamps, or from segments that were * known _not_ to have been retransmitted [see Karn/Partridge * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88 * piece by Van Jacobson. * NOTE: the next three routines used to be one big routine. * To save cycles in the RFC 1323 implementation it was better to break * it up into three procedures. -- erics */ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt) { struct tcp_sock *tp = tcp_sk(sk); long m = mrtt; /* RTT */ /* The following amusing code comes from Jacobson's * article in SIGCOMM '88. Note that rtt and mdev * are scaled versions of rtt and mean deviation. * This is designed to be as fast as possible * m stands for "measurement". * * On a 1990 paper the rto value is changed to: * RTO = rtt + 4 * mdev * * Funny. This algorithm seems to be very broken. * These formulae increase RTO, when it should be decreased, increase * too slowly, when it should be increased quickly, decrease too quickly * etc. I guess in BSD RTO takes ONE value, so that it is absolutely * does not matter how to _calculate_ it. Seems, it was trap * that VJ failed to avoid. 8) */ if (m == 0) m = 1; if (tp->srtt != 0) { m -= (tp->srtt >> 3); /* m is now error in rtt est */ tp->srtt += m; /* rtt = 7/8 rtt + 1/8 new */ if (m < 0) { m = -m; /* m is now abs(error) */ m -= (tp->mdev >> 2); /* similar update on mdev */ /* This is similar to one of Eifel findings. * Eifel blocks mdev updates when rtt decreases. * This solution is a bit different: we use finer gain * for mdev in this case (alpha*beta). * Like Eifel it also prevents growth of rto, * but also it limits too fast rto decreases, * happening in pure Eifel. 
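			 * (Aside, added for clarity: the net effect of the
			 *  "m >>= 3" just below is that on an RTT decrease
			 *  mdev moves with gain 1/32 (= 1/4 * 1/8, i.e.
			 *  beta * alpha) instead of the usual 1/4 -- that is
			 *  what "alpha*beta" above refers to.)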
			 */
			if (m > 0)
				m >>= 3;
		} else {
			m -= (tp->mdev >> 2);	/* similar update on mdev */
		}
		tp->mdev += m;		/* mdev = 3/4 mdev + 1/4 new */
		if (tp->mdev > tp->mdev_max) {
			tp->mdev_max = tp->mdev;
			if (tp->mdev_max > tp->rttvar)
				tp->rttvar = tp->mdev_max;
		}
		if (after(tp->snd_una, tp->rtt_seq)) {
			if (tp->mdev_max < tp->rttvar)
				tp->rttvar -= (tp->rttvar - tp->mdev_max) >> 2;
			tp->rtt_seq = tp->snd_nxt;
			tp->mdev_max = tcp_rto_min(sk);
		}
	} else {
		/* no previous measure. */
		tp->srtt = m << 3;	/* take the measured time to be rtt */
		tp->mdev = m << 1;	/* make sure rto = 3*rtt */
		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
		tp->rtt_seq = tp->snd_nxt;
	}
}

/* Set the sk_pacing_rate to allow proper sizing of TSO packets.
 * Note: TCP stack does not yet implement pacing.
 * FQ packet scheduler can be used to implement cheap but effective
 * TCP pacing, to smooth the burst on large writes when packets
 * in flight is significantly lower than cwnd (or rwin)
 */
static void tcp_update_pacing_rate(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u64 rate;

	/* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */
	rate = (u64)tp->mss_cache * 2 * (HZ << 3);

	rate *= max(tp->snd_cwnd, tp->packets_out);

	/* Correction for small srtt : minimum srtt being 8 (1 jiffy << 3),
	 * be conservative and assume srtt = 1 (125 us instead of 1.25 ms)
	 * We probably need usec resolution in the future.
	 * Note: This also takes care of the possible srtt=0 case,
	 * when tcp_rtt_estimator() was not yet called.
	 */
	if (tp->srtt > 8 + 2)
		do_div(rate, tp->srtt);

	sk->sk_pacing_rate = min_t(u64, rate, ~0U);
}

/* Calculate rto without backoff.  This is the second half of Van Jacobson's
 * routine referred to above.
 */
void tcp_set_rto(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	/* Old crap is replaced with new one. 8)
	 *
	 * More seriously:
	 * 1. If rtt variance happened to be less than 50msec, it is a
	 *    hallucination. It cannot be less due to utterly erratic
	 *    ACK generation made at least by solaris and freebsd.
	 *    "Erratic ACKs" have _nothing_ to do with delayed acks, because
	 *    at cwnd>2 true delack timeout is invisible. Actually, Linux-2.4
	 *    also generates erratic ACKs in some circumstances.
	 */
	inet_csk(sk)->icsk_rto = __tcp_set_rto(tp);

	/* 2. Fixups made earlier cannot be right.
	 *    If we do not estimate RTO correctly without them,
	 *    all the algo is pure shit and should be replaced
	 *    with a correct one. That is exactly what we pretend to do.
	 */

	/* NOTE: clamping at TCP_RTO_MIN is not required, current algo
	 * guarantees that rto is higher.
	 */
	tcp_bound_rto(sk);
}

__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
{
	__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);

	if (!cwnd)
		cwnd = TCP_INIT_CWND;
	return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
}

/*
 * Packet counting of FACK is based on in-order assumptions, therefore TCP
 * disables it when reordering is detected
 */
void tcp_disable_fack(struct tcp_sock *tp)
{
	/* RFC3517 uses different metric in lost marker => reset on change */
	if (tcp_is_fack(tp))
		tp->lost_skb_hint = NULL;
	tp->rx_opt.sack_ok &= ~TCP_FACK_ENABLED;
}

/* Take notice that the peer is sending D-SACKs */
static void tcp_dsack_seen(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
}

static void tcp_update_reordering(struct sock *sk, const int metric,
				  const int ts)
{
	struct tcp_sock *tp = tcp_sk(sk);
	if (metric > tp->reordering) {
		int mib_idx;

		tp->reordering = min(TCP_MAX_REORDERING, metric);

		/* This exciting event is worth remembering.
		 * 8) */
		if (ts)
			mib_idx = LINUX_MIB_TCPTSREORDER;
		else if (tcp_is_reno(tp))
			mib_idx = LINUX_MIB_TCPRENOREORDER;
		else if (tcp_is_fack(tp))
			mib_idx = LINUX_MIB_TCPFACKREORDER;
		else
			mib_idx = LINUX_MIB_TCPSACKREORDER;

		NET_INC_STATS_BH(sock_net(sk), mib_idx);
#if FASTRETRANS_DEBUG > 1
		pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
			 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
			 tp->reordering,
			 tp->fackets_out,
			 tp->sacked_out,
			 tp->undo_marker ? tp->undo_retrans : 0);
#endif
		tcp_disable_fack(tp);
	}

	if (metric > 0)
		tcp_disable_early_retrans(tp);
}

/* This must be called before lost_out is incremented */
static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
{
	if ((tp->retransmit_skb_hint == NULL) ||
	    before(TCP_SKB_CB(skb)->seq,
		   TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
		tp->retransmit_skb_hint = skb;

	if (!tp->lost_out ||
	    after(TCP_SKB_CB(skb)->end_seq, tp->retransmit_high))
		tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
}

static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb)
{
	if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
		tcp_verify_retransmit_hint(tp, skb);

		tp->lost_out += tcp_skb_pcount(skb);
		TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
	}
}

static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
					    struct sk_buff *skb)
{
	tcp_verify_retransmit_hint(tp, skb);

	if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
		tp->lost_out += tcp_skb_pcount(skb);
		TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
	}
}

/* This procedure tags the retransmission queue when SACKs arrive.
 *
 * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
 * Packets in queue with these bits set are counted in variables
 * sacked_out, retrans_out and lost_out, correspondingly.
 *
 * Valid combinations are:
 * Tag	InFlight	Description
 * 0	1		- orig segment is in flight.
 * S	0		- nothing flies, orig reached receiver.
 * L	0		- nothing flies, orig lost by net.
 * R	2		- both orig and retransmit are in flight.
 * L|R	1		- orig is lost, retransmit is in flight.
 * S|R	1		- orig reached receiver, retrans is still in flight.
 * (L|S|R is logically valid, it could occur when L|R is sacked,
 *  but it is equivalent to plain S and code short-circuits it to S.
 *  L|S is logically invalid, it would mean -1 packet in flight 8))
 *
 * These 6 states form a finite state machine, controlled by the following
 * events:
 * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
 * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
 * 3. Loss detection event of two flavors:
 *	A. Scoreboard estimator decided the packet is lost.
 *	   A'. Reno "three dupacks" marks head of queue lost.
 *	   A''. Its FACK modification, head until snd.fack is lost.
 *	B. SACK arrives sacking SND.NXT at the moment, when the
 *	   segment was retransmitted.
 * 4. D-SACK added new rule: D-SACK changes any tag to S.
 *
 * It is pleasant to note that the state diagram turns out to be commutative,
 * so that we are allowed not to be bothered by order of our actions,
 * when multiple events arrive simultaneously. (see the function below).
 *
 * Reordering detection.
 * --------------------
 * Reordering metric is the maximal distance by which a packet can be
 * displaced in the packet stream. With SACKs we can estimate it:
 *
 * 1. SACK fills old hole and the corresponding segment was not
 *    ever retransmitted -> reordering. Alas, we cannot use it
 *    when the segment was retransmitted.
 * 2. The last flaw is solved with D-SACK. D-SACK arrives
 *    for retransmitted and already SACKed segment -> reordering..
 * Both of these heuristics are not used in Loss state, when we cannot
 * account for retransmits accurately.
 *
 * SACK block validation.
 * ----------------------
 *
 * SACK block range validation checks that the received SACK block fits to
 * the expected sequence limits, i.e., it is between SND.UNA and SND.NXT.
 * Note that SND.UNA is not included in the range though being valid because
 * it means that the receiver is rather inconsistent with itself reporting
 * SACK reneging when it should advance SND.UNA. Such a SACK block is,
 * however, perfectly valid in light of RFC2018, which explicitly states
 * that "SACK block MUST reflect the newest segment. Even if the newest
 * segment is going to be discarded ...", not that it looks very clever
 * in the case of the head skb. Due to potential receiver-driven attacks, we
 * choose to avoid immediate execution of a walk in write queue due to
 * reneging and defer head skb's loss recovery to standard loss recovery
 * procedure that will eventually trigger (nothing forbids us doing this).
 *
 * This also implements blockage against start_seq wrap-around. The problem
 * lies in the fact that though start_seq (s) is before end_seq (i.e., not
 * reversed), there's no guarantee that it will be before snd_nxt (n). The
 * problem happens when start_seq resides between end_seq wrap (e_w) and
 * snd_nxt wrap (s_w):
 *
 *         <- outs wnd ->                          <- wrapzone ->
 *         u     e      n                         u_w   e_w  s n_w
 *         |     |      |                          |     |   |  |
 * |<------------+------+----- TCP seqno space --------------+---------->|
 * ...-- <2^31 ->|                                           |<--------...
 * ...---- >2^31 ------>|                                    |<--------...
 *
 * Current code wouldn't be vulnerable but it's better still to discard such
 * crazy SACK blocks. Doing this check for start_seq alone closes somewhat
 * similar case (end_seq after snd_nxt wrap) as earlier reversed check in
 * snd_nxt wrap -> snd_una region will then become "well defined", i.e.,
 * equal to the ideal case (infinite seqno space without wrap caused issues).
 *
 * With D-SACK the lower bound is extended to cover sequence space below
 * SND.UNA down to undo_marker, which is the last point of interest. Yet
 * again, a D-SACK block must not go across snd_una (for the same reason as
 * for the normal SACK blocks, explained above). But there all simplicity
 * ends, TCP might receive valid D-SACKs below that. As long as they reside
 * fully below undo_marker they do not affect behavior in any way and can
 * therefore be safely ignored. In rare cases (which are more or less
 * theoretical ones), the D-SACK will nicely cross that boundary due to skb
 * fragmentation and packet reordering past skb's retransmission. To consider
 * them correctly, the acceptable range must be extended even more, though
 * the exact amount is rather hard to quantify. However, tp->max_window can
 * be used as an exaggerated estimate.
 */
static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack,
				   u32 start_seq, u32 end_seq)
{
	/* Too far in future, or reversed (interpretation is ambiguous) */
	if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq))
		return false;

	/* Nasty start_seq wrap-around check (see comments above) */
	if (!before(start_seq, tp->snd_nxt))
		return false;

	/* In outstanding window? ...This is valid exit for D-SACKs too.
* start_seq == snd_una is non-sensical (see comments above) */ if (after(start_seq, tp->snd_una)) return true; if (!is_dsack || !tp->undo_marker) return false; /* ...Then it's D-SACK, and must reside below snd_una completely */ if (after(end_seq, tp->snd_una)) return false; if (!before(start_seq, tp->undo_marker)) return true; /* Too old */ if (!after(end_seq, tp->undo_marker)) return false; /* Undo_marker boundary crossing (overestimates a lot). Known already: * start_seq < undo_marker and end_seq >= undo_marker. */ return !before(start_seq, end_seq - tp->max_window); } /* Check for lost retransmit. This superb idea is borrowed from "ratehalving". * Event "B". Later note: FACK people cheated me again 8), we have to account * for reordering! Ugly, but should help. * * Search retransmitted skbs from write_queue that were sent when snd_nxt was * less than what is now known to be received by the other end (derived from * highest SACK block). Also calculate the lowest snd_nxt among the remaining * retransmitted skbs to avoid some costly processing per ACKs. */ static void tcp_mark_lost_retrans(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; int cnt = 0; u32 new_low_seq = tp->snd_nxt; u32 received_upto = tcp_highest_sack_seq(tp); if (!tcp_is_fack(tp) || !tp->retrans_out || !after(received_upto, tp->lost_retrans_low) || icsk->icsk_ca_state != TCP_CA_Recovery) return; tcp_for_write_queue(skb, sk) { u32 ack_seq = TCP_SKB_CB(skb)->ack_seq; if (skb == tcp_send_head(sk)) break; if (cnt == tp->retrans_out) break; if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) continue; if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)) continue; /* TODO: We would like to get rid of tcp_is_fack(tp) only * constraint here (see above) but figuring out that at * least tp->reordering SACK blocks reside between ack_seq * and received_upto is not easy task to do cheaply with * the available datastructures. * * Whether FACK should check here for tp->reordering segs * in-between one could argue for either way (it would be * rather simple to implement as we could count fack_count * during the walk and do tp->fackets_out - fack_count). */ if (after(received_upto, ack_seq)) { TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; tp->retrans_out -= tcp_skb_pcount(skb); tcp_skb_mark_lost_uncond_verify(tp, skb); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT); } else { if (before(ack_seq, new_low_seq)) new_low_seq = ack_seq; cnt += tcp_skb_pcount(skb); } } if (tp->retrans_out) tp->lost_retrans_low = new_low_seq; } static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, struct tcp_sack_block_wire *sp, int num_sacks, u32 prior_snd_una) { struct tcp_sock *tp = tcp_sk(sk); u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq); u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq); bool dup_sack = false; if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) { dup_sack = true; tcp_dsack_seen(tp); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV); } else if (num_sacks > 1) { u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq); u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq); if (!after(end_seq_0, end_seq_1) && !before(start_seq_0, start_seq_1)) { dup_sack = true; tcp_dsack_seen(tp); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKOFORECV); } } /* D-SACK for already forgotten data... Do dumb counting. 
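 * (Aside, added for clarity: each such D-SACK tells us one retransmission
 *  was unnecessary; the undo_retrans-- below is what lets the
 *  recovery-undo logic eventually conclude that every retransmit in the
 *  window was spurious.)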
*/ if (dup_sack && tp->undo_marker && tp->undo_retrans && !after(end_seq_0, prior_snd_una) && after(end_seq_0, tp->undo_marker)) tp->undo_retrans--; return dup_sack; } struct tcp_sacktag_state { int reord; int fack_count; int flag; }; /* Check if skb is fully within the SACK block. In presence of GSO skbs, * the incoming SACK may not exactly match but we can find smaller MSS * aligned portion of it that matches. Therefore we might need to fragment * which may fail and creates some hassle (caller must handle error case * returns). * * FIXME: this could be merged to shift decision code */ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, u32 start_seq, u32 end_seq) { int err; bool in_sack; unsigned int pkt_len; unsigned int mss; in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && !before(end_seq, TCP_SKB_CB(skb)->end_seq); if (tcp_skb_pcount(skb) > 1 && !in_sack && after(TCP_SKB_CB(skb)->end_seq, start_seq)) { mss = tcp_skb_mss(skb); in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq); if (!in_sack) { pkt_len = start_seq - TCP_SKB_CB(skb)->seq; if (pkt_len < mss) pkt_len = mss; } else { pkt_len = end_seq - TCP_SKB_CB(skb)->seq; if (pkt_len < mss) return -EINVAL; } /* Round if necessary so that SACKs cover only full MSSes * and/or the remaining small portion (if present) */ if (pkt_len > mss) { unsigned int new_len = (pkt_len / mss) * mss; if (!in_sack && new_len < pkt_len) { new_len += mss; if (new_len > skb->len) return 0; } pkt_len = new_len; } err = tcp_fragment(sk, skb, pkt_len, mss); if (err < 0) return err; } return in_sack; } /* Mark the given newly-SACKed range as such, adjusting counters and hints. */ static u8 tcp_sacktag_one(struct sock *sk, struct tcp_sacktag_state *state, u8 sacked, u32 start_seq, u32 end_seq, bool dup_sack, int pcount) { struct tcp_sock *tp = tcp_sk(sk); int fack_count = state->fack_count; /* Account D-SACK for retransmitted packet. */ if (dup_sack && (sacked & TCPCB_RETRANS)) { if (tp->undo_marker && tp->undo_retrans && after(end_seq, tp->undo_marker)) tp->undo_retrans--; if (sacked & TCPCB_SACKED_ACKED) state->reord = min(fack_count, state->reord); } /* Nothing to do; acked frame is about to be dropped (was ACKed). */ if (!after(end_seq, tp->snd_una)) return sacked; if (!(sacked & TCPCB_SACKED_ACKED)) { if (sacked & TCPCB_SACKED_RETRANS) { /* If the segment is not tagged as lost, * we do not clear RETRANS, believing * that retransmission is still in flight. */ if (sacked & TCPCB_LOST) { sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS); tp->lost_out -= pcount; tp->retrans_out -= pcount; } } else { if (!(sacked & TCPCB_RETRANS)) { /* New sack for not retransmitted frame, * which was in hole. It is reordering. */ if (before(start_seq, tcp_highest_sack_seq(tp))) state->reord = min(fack_count, state->reord); if (!after(end_seq, tp->high_seq)) state->flag |= FLAG_ORIG_SACK_ACKED; } if (sacked & TCPCB_LOST) { sacked &= ~TCPCB_LOST; tp->lost_out -= pcount; } } sacked |= TCPCB_SACKED_ACKED; state->flag |= FLAG_DATA_SACKED; tp->sacked_out += pcount; fack_count += pcount; /* Lost marker hint past SACKed? Tweak RFC3517 cnt */ if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) && before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq)) tp->lost_cnt_hint += pcount; if (fack_count > tp->fackets_out) tp->fackets_out = fack_count; } /* D-SACK. We can detect redundant retransmission in S|R and plain R * frames and clear it. undo_retrans is decreased above, L|R frames * are accounted above as well. 
*/ if (dup_sack && (sacked & TCPCB_SACKED_RETRANS)) { sacked &= ~TCPCB_SACKED_RETRANS; tp->retrans_out -= pcount; } return sacked; } /* Shift newly-SACKed bytes from this skb to the immediately previous * already-SACKed sk_buff. Mark the newly-SACKed bytes as such. */ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, struct tcp_sacktag_state *state, unsigned int pcount, int shifted, int mss, bool dup_sack) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *prev = tcp_write_queue_prev(sk, skb); u32 start_seq = TCP_SKB_CB(skb)->seq; /* start of newly-SACKed */ u32 end_seq = start_seq + shifted; /* end of newly-SACKed */ BUG_ON(!pcount); /* Adjust counters and hints for the newly sacked sequence * range but discard the return value since prev is already * marked. We must tag the range first because the seq * advancement below implicitly advances * tcp_highest_sack_seq() when skb is highest_sack. */ tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked, start_seq, end_seq, dup_sack, pcount); if (skb == tp->lost_skb_hint) tp->lost_cnt_hint += pcount; TCP_SKB_CB(prev)->end_seq += shifted; TCP_SKB_CB(skb)->seq += shifted; skb_shinfo(prev)->gso_segs += pcount; BUG_ON(skb_shinfo(skb)->gso_segs < pcount); skb_shinfo(skb)->gso_segs -= pcount; /* When we're adding to gso_segs == 1, gso_size will be zero, * in theory this shouldn't be necessary but as long as DSACK * code can come after this skb later on it's better to keep * setting gso_size to something. */ if (!skb_shinfo(prev)->gso_size) { skb_shinfo(prev)->gso_size = mss; skb_shinfo(prev)->gso_type = sk->sk_gso_type; } /* CHECKME: To clear or not to clear? Mimics normal skb currently */ if (skb_shinfo(skb)->gso_segs <= 1) { skb_shinfo(skb)->gso_size = 0; skb_shinfo(skb)->gso_type = 0; } /* Difference in this won't matter, both ACKed by the same cumul. ACK */ TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS); if (skb->len > 0) { BUG_ON(!tcp_skb_pcount(skb)); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED); return false; } /* Whole SKB was eaten :-) */ if (skb == tp->retransmit_skb_hint) tp->retransmit_skb_hint = prev; if (skb == tp->scoreboard_skb_hint) tp->scoreboard_skb_hint = prev; if (skb == tp->lost_skb_hint) { tp->lost_skb_hint = prev; tp->lost_cnt_hint -= tcp_skb_pcount(prev); } TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) TCP_SKB_CB(prev)->end_seq++; if (skb == tcp_highest_sack(sk)) tcp_advance_highest_sack(sk, skb); tcp_unlink_write_queue(skb, sk); sk_wmem_free_skb(sk, skb); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED); return true; } /* I wish gso_size would have a bit more sane initialization than * something-or-zero which complicates things */ static int tcp_skb_seglen(const struct sk_buff *skb) { return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb); } /* Shifting pages past head area doesn't work */ static int skb_can_shift(const struct sk_buff *skb) { return !skb_headlen(skb) && skb_is_nonlinear(skb); } /* Try collapsing SACK blocks spanning across multiple skbs to a single * skb. 
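 * (Aside, added for clarity: "collapsing" here means moving the
 *  newly-SACKed payload into the preceding, already-SACKed skb via
 *  skb_shift(), so that long SACKed runs coalesce into few skbs and later
 *  scoreboard walks stay cheap.)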
*/ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, struct tcp_sacktag_state *state, u32 start_seq, u32 end_seq, bool dup_sack) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *prev; int mss; int pcount = 0; int len; int in_sack; if (!sk_can_gso(sk)) goto fallback; /* Normally R but no L won't result in plain S */ if (!dup_sack && (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS) goto fallback; if (!skb_can_shift(skb)) goto fallback; /* This frame is about to be dropped (was ACKed). */ if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) goto fallback; /* Can only happen with delayed DSACK + discard craziness */ if (unlikely(skb == tcp_write_queue_head(sk))) goto fallback; prev = tcp_write_queue_prev(sk, skb); if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) goto fallback; in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && !before(end_seq, TCP_SKB_CB(skb)->end_seq); if (in_sack) { len = skb->len; pcount = tcp_skb_pcount(skb); mss = tcp_skb_seglen(skb); /* TODO: Fix DSACKs to not fragment already SACKed and we can * drop this restriction as unnecessary */ if (mss != tcp_skb_seglen(prev)) goto fallback; } else { if (!after(TCP_SKB_CB(skb)->end_seq, start_seq)) goto noop; /* CHECKME: This is non-MSS split case only?, this will * cause skipped skbs due to advancing loop btw, original * has that feature too */ if (tcp_skb_pcount(skb) <= 1) goto noop; in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq); if (!in_sack) { /* TODO: head merge to next could be attempted here * if (!after(TCP_SKB_CB(skb)->end_seq, end_seq)), * though it might not be worth of the additional hassle * * ...we can probably just fallback to what was done * previously. We could try merging non-SACKed ones * as well but it probably isn't going to buy off * because later SACKs might again split them, and * it would make skb timestamp tracking considerably * harder problem. */ goto fallback; } len = end_seq - TCP_SKB_CB(skb)->seq; BUG_ON(len < 0); BUG_ON(len > skb->len); /* MSS boundaries should be honoured or else pcount will * severely break even though it makes things bit trickier. 
* Optimize common case to avoid most of the divides */ mss = tcp_skb_mss(skb); /* TODO: Fix DSACKs to not fragment already SACKed and we can * drop this restriction as unnecessary */ if (mss != tcp_skb_seglen(prev)) goto fallback; if (len == mss) { pcount = 1; } else if (len < mss) { goto noop; } else { pcount = len / mss; len = pcount * mss; } } /* tcp_sacktag_one() won't SACK-tag ranges below snd_una */ if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una)) goto fallback; if (!skb_shift(prev, skb, len)) goto fallback; if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack)) goto out; /* Hole filled allows collapsing with the next as well, this is very * useful when hole on every nth skb pattern happens */ if (prev == tcp_write_queue_tail(sk)) goto out; skb = tcp_write_queue_next(sk, prev); if (!skb_can_shift(skb) || (skb == tcp_send_head(sk)) || ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) || (mss != tcp_skb_seglen(skb))) goto out; len = skb->len; if (skb_shift(prev, skb, len)) { pcount += tcp_skb_pcount(skb); tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0); } out: state->fack_count += pcount; return prev; noop: return skb; fallback: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK); return NULL; } static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, struct tcp_sack_block *next_dup, struct tcp_sacktag_state *state, u32 start_seq, u32 end_seq, bool dup_sack_in) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *tmp; tcp_for_write_queue_from(skb, sk) { int in_sack = 0; bool dup_sack = dup_sack_in; if (skb == tcp_send_head(sk)) break; /* queue is in-order => we can short-circuit the walk early */ if (!before(TCP_SKB_CB(skb)->seq, end_seq)) break; if ((next_dup != NULL) && before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) { in_sack = tcp_match_skb_to_sack(sk, skb, next_dup->start_seq, next_dup->end_seq); if (in_sack > 0) dup_sack = true; } /* skb reference here is a bit tricky to get right, since * shifting can eat and free both this skb and the next, * so not even _safe variant of the loop is enough. 
*/ if (in_sack <= 0) { tmp = tcp_shift_skb_data(sk, skb, state, start_seq, end_seq, dup_sack); if (tmp != NULL) { if (tmp != skb) { skb = tmp; continue; } in_sack = 0; } else { in_sack = tcp_match_skb_to_sack(sk, skb, start_seq, end_seq); } } if (unlikely(in_sack < 0)) break; if (in_sack) { TCP_SKB_CB(skb)->sacked = tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq, dup_sack, tcp_skb_pcount(skb)); if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) tcp_advance_highest_sack(sk, skb); } state->fack_count += tcp_skb_pcount(skb); } return skb; } /* Avoid all extra work that is being done by sacktag while walking in * a normal way */ static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk, struct tcp_sacktag_state *state, u32 skip_to_seq) { tcp_for_write_queue_from(skb, sk) { if (skb == tcp_send_head(sk)) break; if (after(TCP_SKB_CB(skb)->end_seq, skip_to_seq)) break; state->fack_count += tcp_skb_pcount(skb); } return skb; } static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb, struct sock *sk, struct tcp_sack_block *next_dup, struct tcp_sacktag_state *state, u32 skip_to_seq) { if (next_dup == NULL) return skb; if (before(next_dup->start_seq, skip_to_seq)) { skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq); skb = tcp_sacktag_walk(skb, sk, NULL, state, next_dup->start_seq, next_dup->end_seq, 1); } return skb; } static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache) { return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); } static int tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, u32 prior_snd_una) { struct tcp_sock *tp = tcp_sk(sk); const unsigned char *ptr = (skb_transport_header(ack_skb) + TCP_SKB_CB(ack_skb)->sacked); struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2); struct tcp_sack_block sp[TCP_NUM_SACKS]; struct tcp_sack_block *cache; struct tcp_sacktag_state state; struct sk_buff *skb; int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3); int used_sacks; bool found_dup_sack = false; int i, j; int first_sack_index; state.flag = 0; state.reord = tp->packets_out; if (!tp->sacked_out) { if (WARN_ON(tp->fackets_out)) tp->fackets_out = 0; tcp_highest_sack_reset(sk); } found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire, num_sacks, prior_snd_una); if (found_dup_sack) state.flag |= FLAG_DSACKING_ACK; /* Eliminate too old ACKs, but take into * account more or less fresh ones, they can * contain valid SACK info. 
*/ if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window)) return 0; if (!tp->packets_out) goto out; used_sacks = 0; first_sack_index = 0; for (i = 0; i < num_sacks; i++) { bool dup_sack = !i && found_dup_sack; sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq); sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq); if (!tcp_is_sackblock_valid(tp, dup_sack, sp[used_sacks].start_seq, sp[used_sacks].end_seq)) { int mib_idx; if (dup_sack) { if (!tp->undo_marker) mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO; else mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD; } else { /* Don't count olds caused by ACK reordering */ if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) && !after(sp[used_sacks].end_seq, tp->snd_una)) continue; mib_idx = LINUX_MIB_TCPSACKDISCARD; } NET_INC_STATS_BH(sock_net(sk), mib_idx); if (i == 0) first_sack_index = -1; continue; } /* Ignore very old stuff early */ if (!after(sp[used_sacks].end_seq, prior_snd_una)) continue; used_sacks++; } /* order SACK blocks to allow in order walk of the retrans queue */ for (i = used_sacks - 1; i > 0; i--) { for (j = 0; j < i; j++) { if (after(sp[j].start_seq, sp[j + 1].start_seq)) { swap(sp[j], sp[j + 1]); /* Track where the first SACK block goes to */ if (j == first_sack_index) first_sack_index = j + 1; } } } skb = tcp_write_queue_head(sk); state.fack_count = 0; i = 0; if (!tp->sacked_out) { /* It's already past, so skip checking against it */ cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); } else { cache = tp->recv_sack_cache; /* Skip empty blocks in at head of the cache */ while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq && !cache->end_seq) cache++; } while (i < used_sacks) { u32 start_seq = sp[i].start_seq; u32 end_seq = sp[i].end_seq; bool dup_sack = (found_dup_sack && (i == first_sack_index)); struct tcp_sack_block *next_dup = NULL; if (found_dup_sack && ((i + 1) == first_sack_index)) next_dup = &sp[i + 1]; /* Skip too early cached blocks */ while (tcp_sack_cache_ok(tp, cache) && !before(start_seq, cache->end_seq)) cache++; /* Can skip some work by looking recv_sack_cache? */ if (tcp_sack_cache_ok(tp, cache) && !dup_sack && after(end_seq, cache->start_seq)) { /* Head todo? */ if (before(start_seq, cache->start_seq)) { skb = tcp_sacktag_skip(skb, sk, &state, start_seq); skb = tcp_sacktag_walk(skb, sk, next_dup, &state, start_seq, cache->start_seq, dup_sack); } /* Rest of the block already fully processed? */ if (!after(end_seq, cache->end_seq)) goto advance_sp; skb = tcp_maybe_skipping_dsack(skb, sk, next_dup, &state, cache->end_seq); /* ...tail remains todo... */ if (tcp_highest_sack_seq(tp) == cache->end_seq) { /* ...but better entrypoint exists! 
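				 * (tcp_highest_sack() jumps straight to the
				 * highest SACKed skb and fack_count can be
				 * seeded from fackets_out, saving a re-walk
				 * over the already tagged region.)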
				 */
				skb = tcp_highest_sack(sk);
				if (skb == NULL)
					break;
				state.fack_count = tp->fackets_out;
				cache++;
				goto walk;
			}

			skb = tcp_sacktag_skip(skb, sk, &state, cache->end_seq);
			/* Check overlap against next cached too (past this one already) */
			cache++;
			continue;
		}

		if (!before(start_seq, tcp_highest_sack_seq(tp))) {
			skb = tcp_highest_sack(sk);
			if (skb == NULL)
				break;
			state.fack_count = tp->fackets_out;
		}
		skb = tcp_sacktag_skip(skb, sk, &state, start_seq);

walk:
		skb = tcp_sacktag_walk(skb, sk, next_dup, &state,
				       start_seq, end_seq, dup_sack);

advance_sp:
		i++;
	}

	/* Clear the head of the cache sack blocks so we can skip it next time */
	for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) {
		tp->recv_sack_cache[i].start_seq = 0;
		tp->recv_sack_cache[i].end_seq = 0;
	}
	for (j = 0; j < used_sacks; j++)
		tp->recv_sack_cache[i++] = sp[j];

	tcp_mark_lost_retrans(sk);

	tcp_verify_left_out(tp);

	if ((state.reord < tp->fackets_out) &&
	    ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker))
		tcp_update_reordering(sk, tp->fackets_out - state.reord, 0);

out:

#if FASTRETRANS_DEBUG > 0
	WARN_ON((int)tp->sacked_out < 0);
	WARN_ON((int)tp->lost_out < 0);
	WARN_ON((int)tp->retrans_out < 0);
	WARN_ON((int)tcp_packets_in_flight(tp) < 0);
#endif
	return state.flag;
}

/* Limits sacked_out so that sum with lost_out isn't ever larger than
 * packets_out. Returns false if sacked_out adjustment wasn't necessary.
 */
static bool tcp_limit_reno_sacked(struct tcp_sock *tp)
{
	u32 holes;

	holes = max(tp->lost_out, 1U);
	holes = min(holes, tp->packets_out);

	if ((tp->sacked_out + holes) > tp->packets_out) {
		tp->sacked_out = tp->packets_out - holes;
		return true;
	}
	return false;
}

/* If we receive more dupacks than we expected while counting segments
 * under the assumption of no reordering, interpret this as reordering.
 * The only other explanation is a bug in the receiver's TCP stack.
 */
static void tcp_check_reno_reordering(struct sock *sk, const int addend)
{
	struct tcp_sock *tp = tcp_sk(sk);
	if (tcp_limit_reno_sacked(tp))
		tcp_update_reordering(sk, tp->packets_out + addend, 0);
}

/* Emulate SACKs for a SACKless connection: account for a new dupack. */

static void tcp_add_reno_sack(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	tp->sacked_out++;
	tcp_check_reno_reordering(sk, 0);
	tcp_verify_left_out(tp);
}

/* Account for an ACK that acknowledges some data in the Reno Recovery phase. */

static void tcp_remove_reno_sacks(struct sock *sk, int acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (acked > 0) {
		/* One ACK acked the hole. The rest eat duplicate ACKs. */
		if (acked - 1 >= tp->sacked_out)
			tp->sacked_out = 0;
		else
			tp->sacked_out -= acked - 1;
	}
	tcp_check_reno_reordering(sk, acked);
	tcp_verify_left_out(tp);
}

static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
{
	tp->sacked_out = 0;
}

static void tcp_clear_retrans_partial(struct tcp_sock *tp)
{
	tp->retrans_out = 0;
	tp->lost_out = 0;

	tp->undo_marker = 0;
	tp->undo_retrans = 0;
}

void tcp_clear_retrans(struct tcp_sock *tp)
{
	tcp_clear_retrans_partial(tp);

	tp->fackets_out = 0;
	tp->sacked_out = 0;
}

/* Enter Loss state. If "how" is not zero, forget all SACK information
 * and reset tags completely, otherwise preserve SACKs. If the receiver
 * dropped its ofo queue, we will know this due to reneging detection.
 */
void tcp_enter_loss(struct sock *sk, int how)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	bool new_recovery = false;

	/* Reduce ssthresh if it has not yet been made inside this window.
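	 * E.g. only the first RTO for a given window lowers ssthresh (via the
	 * CA ops, typically to half the cwnd) and raises CA_EVENT_LOSS;
	 * repeated timeouts before snd_una passes high_seq skip the
	 * reduction, so back-to-back RTOs do not grind ssthresh down to the
	 * floor (a reading of the condition below, not new behaviour).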
	 */
	if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
	    !after(tp->high_seq, tp->snd_una) ||
	    (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
		new_recovery = true;
		tp->prior_ssthresh = tcp_current_ssthresh(sk);
		tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
		tcp_ca_event(sk, CA_EVENT_LOSS);
	}
	tp->snd_cwnd = 1;
	tp->snd_cwnd_cnt = 0;
	tp->snd_cwnd_stamp = tcp_time_stamp;

	tcp_clear_retrans_partial(tp);

	if (tcp_is_reno(tp))
		tcp_reset_reno_sack(tp);

	tp->undo_marker = tp->snd_una;
	if (how) {
		tp->sacked_out = 0;
		tp->fackets_out = 0;
	}
	tcp_clear_all_retrans_hints(tp);

	tcp_for_write_queue(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
			tp->undo_marker = 0;
		TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED;
		if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || how) {
			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
			TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
			tp->lost_out += tcp_skb_pcount(skb);
			tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
		}
	}
	tcp_verify_left_out(tp);

	tp->reordering = min_t(unsigned int, tp->reordering,
			       sysctl_tcp_reordering);
	tcp_set_ca_state(sk, TCP_CA_Loss);
	tp->high_seq = tp->snd_nxt;
	TCP_ECN_queue_cwr(tp);

	/* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
	 * loss recovery is underway except recurring timeout(s) on
	 * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing.
	 */
	tp->frto = sysctl_tcp_frto &&
		   (new_recovery || icsk->icsk_retransmits) &&
		   !inet_csk(sk)->icsk_mtup.probe_size;
}

/* If an ACK arrived pointing to a remembered SACK, it means that our
 * remembered SACKs do not reflect the real state of the receiver, i.e.
 * the receiver _host_ is heavily congested (or buggy).
 *
 * Do processing similar to an RTO timeout.
 */
static bool tcp_check_sack_reneging(struct sock *sk, int flag)
{
	if (flag & FLAG_SACK_RENEGING) {
		struct inet_connection_sock *icsk = inet_csk(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);

		tcp_enter_loss(sk, 1);
		icsk->icsk_retransmits++;
		tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  icsk->icsk_rto, TCP_RTO_MAX);
		return true;
	}
	return false;
}

static inline int tcp_fackets_out(const struct tcp_sock *tp)
{
	return tcp_is_reno(tp) ? tp->sacked_out + 1 : tp->fackets_out;
}

/* Heuristics to calculate the number of duplicate ACKs. There's no dupACK
 * counter when SACK is enabled (without SACK, sacked_out is used for
 * that purpose).
 *
 * Instead, with FACK TCP uses fackets_out that includes both SACKed
 * segments up to the highest received SACK block so far and holes in
 * between them.
 *
 * With reordering, holes may still be in flight, so RFC3517 recovery
 * uses pure sacked_out (total number of SACKed segments) even though
 * it violates the RFC, which uses duplicate ACKs. Often these are equal,
 * but when e.g. out-of-window ACKs or packet duplication occurs,
 * they differ. Since neither occurs due to loss, TCP should really
 * ignore them.
 */
static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
{
	return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1;
}

static bool tcp_pause_early_retransmit(struct sock *sk, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long delay;

	/* Delay early retransmit and entering fast recovery for
	 * max(RTT/4, 2msec) unless the ack has the ECE mark, no RTT samples
	 * are available, or the RTO is scheduled to fire first.
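	 * E.g. assuming HZ=1000 and a smoothed RTT of 80ms (tp->srtt stores
	 * 8x the RTT in jiffies, so srtt >> 5 is RTT/4), the fast retransmit
	 * below is postponed by max(20ms, 2ms) = 20ms, unless the RTO timer
	 * would fire before that (illustrative numbers only).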
	 */
	if (sysctl_tcp_early_retrans < 2 || sysctl_tcp_early_retrans > 3 ||
	    (flag & FLAG_ECE) || !tp->srtt)
		return false;

	delay = max_t(unsigned long, (tp->srtt >> 5), msecs_to_jiffies(2));
	if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay)))
		return false;

	inet_csk_reset_xmit_timer(sk, ICSK_TIME_EARLY_RETRANS, delay,
				  TCP_RTO_MAX);
	return true;
}

static inline int tcp_skb_timedout(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto;
}

static inline int tcp_head_timedout(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return tp->packets_out &&
	       tcp_skb_timedout(sk, tcp_write_queue_head(sk));
}

/* Linux NewReno/SACK/FACK/ECN state machine.
 * --------------------------------------
 *
 * "Open"	Normal state, no dubious events, fast path.
 * "Disorder"	In all respects it is "Open", but it requires a bit more
 *		attention. It is entered when we see some SACKs or dupacks.
 *		It is split off from "Open" mainly to move some processing
 *		from the fast path to the slow one.
 * "CWR"	CWND was reduced due to some Congestion Notification event.
 *		It can be ECN, ICMP source quench, local device congestion.
 * "Recovery"	CWND was reduced, we are fast-retransmitting.
 * "Loss"	CWND was reduced due to RTO timeout or SACK reneging.
 *
 * tcp_fastretrans_alert() is entered:
 * - on each incoming ACK, if state is not "Open"
 * - when an arrived ACK is unusual, namely:
 *	* SACK
 *	* Duplicate ACK.
 *	* ECN ECE.
 *
 * Counting packets in flight is pretty simple.
 *
 *	in_flight = packets_out - left_out + retrans_out
 *
 *	packets_out is SND.NXT-SND.UNA counted in packets.
 *
 *	retrans_out is the number of retransmitted segments.
 *
 *	left_out is the number of segments that left the network,
 *	but were not ACKed yet.
 *
 *		left_out = sacked_out + lost_out
 *
 *	(E.g. with packets_out = 10, sacked_out = 3, lost_out = 2 and
 *	retrans_out = 1: left_out = 5 and in_flight = 10 - 5 + 1 = 6;
 *	illustrative numbers only.)
 *
 *	sacked_out: Packets which arrived at the receiver out of order
 *		and hence were not ACKed. With SACKs this number is simply
 *		the amount of SACKed data. Even without SACKs it is easy
 *		to give a pretty reliable estimate of this number by
 *		counting duplicate ACKs.
 *
 *	lost_out: Packets lost by the network. TCP has no explicit
 *		"loss notification" feedback from the network (for now).
 *		It means that this number can only be _guessed_.
 *		Actually, it is the heuristics used to predict loss that
 *		distinguish the different algorithms.
 *
 *	F.e. after RTO, when all the queue is considered as lost,
 *	lost_out = packets_out and in_flight = retrans_out.
 *
 *		Essentially, we now have two algorithms for counting
 *		lost packets.
 *
 *		FACK: It is the simplest heuristic. As soon as we decide
 *		that something is lost, we decide that _all_ not SACKed
 *		packets until the most forward SACK are lost. I.e.
 *		lost_out = fackets_out - sacked_out and left_out = fackets_out.
 *		It is an absolutely correct estimate, if the network does not
 *		reorder packets. And it loses any connection to reality when
 *		reordering takes place. We use FACK by default until reordering
 *		is suspected on the path to this destination.
 *
 *		NewReno: when Recovery is entered, we assume that one segment
 *		is lost (classic Reno). While we are in Recovery and
 *		a partial ACK arrives, we assume that one more packet
 *		is lost (NewReno). This heuristic is the same in NewReno
 *		and SACK.
 *
 *  Imagine, that's all! Forget about all this shamanism about CWND inflation,
 *  deflation etc. CWND is a real congestion window, never inflated, and it
 *  changes only according to classic VJ rules.
 *
 *  The really tricky (and requiring careful tuning) part of the algorithm
 *  is hidden in the functions tcp_time_to_recover() and tcp_xmit_retransmit_queue().
* The first determines the moment _when_ we should reduce CWND and, * hence, slow down forward transmission. In fact, it determines the moment * when we decide that hole is caused by loss, rather than by a reorder. * * tcp_xmit_retransmit_queue() decides, _what_ we should retransmit to fill * holes, caused by lost packets. * * And the most logically complicated part of algorithm is undo * heuristics. We detect false retransmits due to both too early * fast retransmit (reordering) and underestimated RTO, analyzing * timestamps and D-SACKs. When we detect that some segments were * retransmitted by mistake and CWND reduction was wrong, we undo * window reduction and abort recovery phase. This logic is hidden * inside several functions named tcp_try_undo_<something>. */ /* This function decides, when we should leave Disordered state * and enter Recovery phase, reducing congestion window. * * Main question: may we further continue forward transmission * with the same cwnd? */ static bool tcp_time_to_recover(struct sock *sk, int flag) { struct tcp_sock *tp = tcp_sk(sk); __u32 packets_out; /* Trick#1: The loss is proven. */ if (tp->lost_out) return true; /* Not-A-Trick#2 : Classic rule... */ if (tcp_dupack_heuristics(tp) > tp->reordering) return true; /* Trick#3 : when we use RFC2988 timer restart, fast * retransmit can be triggered by timeout of queue head. */ if (tcp_is_fack(tp) && tcp_head_timedout(sk)) return true; /* Trick#4: It is still not OK... But will it be useful to delay * recovery more? */ packets_out = tp->packets_out; if (packets_out <= tp->reordering && tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) && !tcp_may_send_now(sk)) { /* We have nothing to send. This connection is limited * either by receiver window or by application. */ return true; } /* If a thin stream is detected, retransmit after first * received dupack. Employ only if SACK is supported in order * to avoid possible corner-case series of spurious retransmissions * Use only if there are no unsent data. */ if ((tp->thin_dupack || sysctl_tcp_thin_dupack) && tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 && tcp_is_sack(tp) && !tcp_send_head(sk)) return true; /* Trick#6: TCP early retransmit, per RFC5827. To avoid spurious * retransmissions due to small network reorderings, we implement * Mitigation A.3 in the RFC and delay the retransmission for a short * interval if appropriate. */ if (tp->do_early_retrans && !tp->retrans_out && tp->sacked_out && (tp->packets_out >= (tp->sacked_out + 1) && tp->packets_out < 4) && !tcp_may_send_now(sk)) return !tcp_pause_early_retransmit(sk, flag); return false; } /* New heuristics: it is possible only after we switched to restart timer * each time when something is ACKed. Hence, we can detect timed out packets * during fast retransmit without falling to slow start. * * Usefulness of this as is very questionable, since we should know which of * the segments is the next to timeout which is relatively expensive to find * in general case unless we add some data structure just for that. The * current approach certainly won't find the right one too often and when it * finally does find _something_ it usually marks large part of the window * right away (because a retransmission with a larger timestamp blocks the * loop from advancing). 
 *							-ij
 */
static void tcp_timeout_skbs(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	if (!tcp_is_fack(tp) || !tcp_head_timedout(sk))
		return;

	skb = tp->scoreboard_skb_hint;
	if (tp->scoreboard_skb_hint == NULL)
		skb = tcp_write_queue_head(sk);

	tcp_for_write_queue_from(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;
		if (!tcp_skb_timedout(sk, skb))
			break;

		tcp_skb_mark_lost(tp, skb);
	}

	tp->scoreboard_skb_hint = skb;

	tcp_verify_left_out(tp);
}

/* Detect loss in event "A" above by marking the head of the queue up as lost.
 * For FACK or non-SACK (Reno) senders, the first "packets" segments
 * are considered lost. For RFC3517 SACK, a segment is considered lost if it
 * has at least tp->reordering SACKed segments above it; "packets" refers to
 * the maximum SACKed segments to pass before reaching this limit.
 */
static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int cnt, oldcnt;
	int err;
	unsigned int mss;
	/* Use SACK to deduce losses of new sequences sent during recovery */
	const u32 loss_high = tcp_is_sack(tp) ? tp->snd_nxt : tp->high_seq;

	WARN_ON(packets > tp->packets_out);
	if (tp->lost_skb_hint) {
		skb = tp->lost_skb_hint;
		cnt = tp->lost_cnt_hint;
		/* Head already handled? */
		if (mark_head && skb != tcp_write_queue_head(sk))
			return;
	} else {
		skb = tcp_write_queue_head(sk);
		cnt = 0;
	}

	tcp_for_write_queue_from(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;
		/* TODO: do this better; refreshing the hints on every
		 * iteration is not the most efficient way to do this...
		 */
		tp->lost_skb_hint = skb;
		tp->lost_cnt_hint = cnt;

		if (after(TCP_SKB_CB(skb)->end_seq, loss_high))
			break;

		oldcnt = cnt;
		if (tcp_is_fack(tp) || tcp_is_reno(tp) ||
		    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
			cnt += tcp_skb_pcount(skb);

		if (cnt > packets) {
			if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
			    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) ||
			    (oldcnt >= packets))
				break;

			mss = skb_shinfo(skb)->gso_size;
			err = tcp_fragment(sk, skb, (packets - oldcnt) * mss, mss);
			if (err < 0)
				break;
			cnt = packets;
		}

		tcp_skb_mark_lost(tp, skb);

		if (mark_head)
			break;
	}
	tcp_verify_left_out(tp);
}

/* Account for newly detected lost packet(s) */

static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_is_reno(tp)) {
		tcp_mark_head_lost(sk, 1, 1);
	} else if (tcp_is_fack(tp)) {
		int lost = tp->fackets_out - tp->reordering;
		if (lost <= 0)
			lost = 1;
		tcp_mark_head_lost(sk, lost, 0);
	} else {
		int sacked_upto = tp->sacked_out - tp->reordering;
		if (sacked_upto >= 0)
			tcp_mark_head_lost(sk, sacked_upto, 0);
		else if (fast_rexmit)
			tcp_mark_head_lost(sk, 1, 1);
	}

	tcp_timeout_skbs(sk);
}

/* CWND moderation, preventing bursts due to too big ACKs
 * in dubious situations.
 */
static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
{
	tp->snd_cwnd = min(tp->snd_cwnd,
			   tcp_packets_in_flight(tp) + tcp_max_burst(tp));
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

/* Nothing was retransmitted, or the returned timestamp is less
 * than the timestamp of the first retransmission.
 */
static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
{
	return !tp->retrans_stamp ||
		(tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
		 before(tp->rx_opt.rcv_tsecr, tp->retrans_stamp));
}

/* Undo procedures.
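 * tcp_packet_delayed() above is the timestamp half of the undo test: if
 * the peer echoes a tsecr older than our first retransmission, the ACK
 * was triggered by the original transmission, the loss was a false
 * alarm, and the cwnd reduction may be rolled back (a sketch of the
 * idea; the precise rules live in tcp_may_undo() and friends below).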
*/ #if FASTRETRANS_DEBUG > 1 static void DBGUNDO(struct sock *sk, const char *msg) { struct tcp_sock *tp = tcp_sk(sk); struct inet_sock *inet = inet_sk(sk); if (sk->sk_family == AF_INET) { pr_debug("Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n", msg, &inet->inet_daddr, ntohs(inet->inet_dport), tp->snd_cwnd, tcp_left_out(tp), tp->snd_ssthresh, tp->prior_ssthresh, tp->packets_out); } #if IS_ENABLED(CONFIG_IPV6) else if (sk->sk_family == AF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", msg, &np->daddr, ntohs(inet->inet_dport), tp->snd_cwnd, tcp_left_out(tp), tp->snd_ssthresh, tp->prior_ssthresh, tp->packets_out); } #endif } #else #define DBGUNDO(x...) do { } while (0) #endif static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh) { struct tcp_sock *tp = tcp_sk(sk); if (tp->prior_ssthresh) { const struct inet_connection_sock *icsk = inet_csk(sk); if (icsk->icsk_ca_ops->undo_cwnd) tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk); else tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1); if (undo_ssthresh && tp->prior_ssthresh > tp->snd_ssthresh) { tp->snd_ssthresh = tp->prior_ssthresh; TCP_ECN_withdraw_cwr(tp); } } else { tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh); } tp->snd_cwnd_stamp = tcp_time_stamp; } static inline bool tcp_may_undo(const struct tcp_sock *tp) { return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp)); } /* People celebrate: "We love our President!" */ static bool tcp_try_undo_recovery(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); if (tcp_may_undo(tp)) { int mib_idx; /* Happy end! We did not retransmit anything * or our original transmission succeeded. */ DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans"); tcp_undo_cwr(sk, true); if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) mib_idx = LINUX_MIB_TCPLOSSUNDO; else mib_idx = LINUX_MIB_TCPFULLUNDO; NET_INC_STATS_BH(sock_net(sk), mib_idx); tp->undo_marker = 0; } if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { /* Hold old state until something *above* high_seq * is ACKed. For Reno it is MUST to prevent false * fast retransmits (RFC2582). SACK TCP is safe. */ tcp_moderate_cwnd(tp); return true; } tcp_set_ca_state(sk, TCP_CA_Open); return false; } /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */ static void tcp_try_undo_dsack(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); if (tp->undo_marker && !tp->undo_retrans) { DBGUNDO(sk, "D-SACK"); tcp_undo_cwr(sk, true); tp->undo_marker = 0; NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO); } } /* We can clear retrans_stamp when there are no retransmissions in the * window. It would seem that it is trivially available for us in * tp->retrans_out, however, that kind of assumptions doesn't consider * what will happen if errors occur when sending retransmission for the * second time. ...It could the that such segment has only * TCPCB_EVER_RETRANS set at the present time. It seems that checking * the head skb is enough except for some reneging corner cases that * are not worth the effort. * * Main reason for all this complexity is the fact that connection dying * time now depends on the validity of the retrans_stamp, in particular, * that successive retransmissions of a segment must not advance * retrans_stamp under any conditions. 
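 * E.g. if a segment is first retransmitted at time T and again at T+RTO,
 * retrans_stamp must keep the value T; refreshing it would make both the
 * timestamp-based undo test and the connection give-up timer measure
 * from the wrong epoch (an illustration of the rule above).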
 */
static bool tcp_any_retrans_done(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	if (tp->retrans_out)
		return true;

	skb = tcp_write_queue_head(sk);
	if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
		return true;

	return false;
}

/* Undo during fast recovery after a partial ACK. */
static int tcp_try_undo_partial(struct sock *sk, int acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	/* Partial ACK arrived. Force Hoe's retransmit. */
	int failed = tcp_is_reno(tp) || (tcp_fackets_out(tp) > tp->reordering);

	if (tcp_may_undo(tp)) {
		/* Plain luck! The hole is filled with a delayed
		 * packet, rather than with a retransmit.
		 */
		if (!tcp_any_retrans_done(sk))
			tp->retrans_stamp = 0;

		tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);

		DBGUNDO(sk, "Hoe");
		tcp_undo_cwr(sk, false);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);

		/* So... Do not make Hoe's retransmit yet.
		 * If the first packet was delayed, the rest
		 * are most probably delayed as well.
		 */
		failed = 0;
	}
	return failed;
}

/* Undo during loss recovery after a partial ACK or using F-RTO. */
static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (frto_undo || tcp_may_undo(tp)) {
		struct sk_buff *skb;
		tcp_for_write_queue(skb, sk) {
			if (skb == tcp_send_head(sk))
				break;
			TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
		}

		tcp_clear_all_retrans_hints(tp);

		DBGUNDO(sk, "partial loss");
		tp->lost_out = 0;
		tcp_undo_cwr(sk, true);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
		if (frto_undo)
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPSPURIOUSRTOS);
		inet_csk(sk)->icsk_retransmits = 0;
		tp->undo_marker = 0;
		if (frto_undo || tcp_is_sack(tp))
			tcp_set_ca_state(sk, TCP_CA_Open);
		return true;
	}
	return false;
}

/* The cwnd reduction in CWR and Recovery uses the PRR algorithm
 * https://datatracker.ietf.org/doc/draft-ietf-tcpm-proportional-rate-reduction/
 * It computes the number of packets to send (sndcnt) based on packets newly
 * delivered:
 *   1) If the number of packets in flight is larger than ssthresh, PRR
 *	spreads the cwnd reductions across a full RTT.
 *   2) If the number of packets in flight is lower than ssthresh (such as
 *	due to excess losses and/or application stalls), do not perform any
 *	further cwnd reductions, but instead slow start up to ssthresh.
 *
 * E.g. with prior_cwnd = 20 and ssthresh = 10, case 1 permits roughly one
 * packet out per two packets delivered, so cwnd drifts down to ssthresh
 * over about one RTT instead of collapsing at once.
 */
static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->high_seq = tp->snd_nxt;
	tp->tlp_high_seq = 0;
	tp->snd_cwnd_cnt = 0;
	tp->prior_cwnd = tp->snd_cwnd;
	tp->prr_delivered = 0;
	tp->prr_out = 0;
	if (set_ssthresh)
		tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
	TCP_ECN_queue_cwr(tp);
}

static void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked,
			       int fast_rexmit)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int sndcnt = 0;
	int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);

	tp->prr_delivered += newly_acked_sacked;
	if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
		u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
			       tp->prior_cwnd - 1;
		sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
	} else {
		sndcnt = min_t(int, delta,
			       max_t(int, tp->prr_delivered - tp->prr_out,
				     newly_acked_sacked) + 1);
	}

	sndcnt = max(sndcnt, (fast_rexmit ?
1 : 0)); tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt; } static inline void tcp_end_cwnd_reduction(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */ if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) { tp->snd_cwnd = tp->snd_ssthresh; tp->snd_cwnd_stamp = tcp_time_stamp; } tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); } /* Enter CWR state. Disable cwnd undo since congestion is proven with ECN */ void tcp_enter_cwr(struct sock *sk, const int set_ssthresh) { struct tcp_sock *tp = tcp_sk(sk); tp->prior_ssthresh = 0; if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { tp->undo_marker = 0; tcp_init_cwnd_reduction(sk, set_ssthresh); tcp_set_ca_state(sk, TCP_CA_CWR); } } static void tcp_try_keep_open(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); int state = TCP_CA_Open; if (tcp_left_out(tp) || tcp_any_retrans_done(sk)) state = TCP_CA_Disorder; if (inet_csk(sk)->icsk_ca_state != state) { tcp_set_ca_state(sk, state); tp->high_seq = tp->snd_nxt; } } static void tcp_try_to_open(struct sock *sk, int flag, int newly_acked_sacked) { struct tcp_sock *tp = tcp_sk(sk); tcp_verify_left_out(tp); if (!tcp_any_retrans_done(sk)) tp->retrans_stamp = 0; if (flag & FLAG_ECE) tcp_enter_cwr(sk, 1); if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) { tcp_try_keep_open(sk); if (inet_csk(sk)->icsk_ca_state != TCP_CA_Open) tcp_moderate_cwnd(tp); } else { tcp_cwnd_reduction(sk, newly_acked_sacked, 0); } } static void tcp_mtup_probe_failed(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1; icsk->icsk_mtup.probe_size = 0; } static void tcp_mtup_probe_success(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); /* FIXME: breaks with very large cwnd */ tp->prior_ssthresh = tcp_current_ssthresh(sk); tp->snd_cwnd = tp->snd_cwnd * tcp_mss_to_mtu(sk, tp->mss_cache) / icsk->icsk_mtup.probe_size; tp->snd_cwnd_cnt = 0; tp->snd_cwnd_stamp = tcp_time_stamp; tp->snd_ssthresh = tcp_current_ssthresh(sk); icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size; icsk->icsk_mtup.probe_size = 0; tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); } /* Do a simple retransmit without using the backoff mechanisms in * tcp_timer. This is used for path mtu discovery. * The socket is already locked here. */ void tcp_simple_retransmit(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; unsigned int mss = tcp_current_mss(sk); u32 prior_lost = tp->lost_out; tcp_for_write_queue(skb, sk) { if (skb == tcp_send_head(sk)) break; if (tcp_skb_seglen(skb) > mss && !(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; tp->retrans_out -= tcp_skb_pcount(skb); } tcp_skb_mark_lost_uncond_verify(tp, skb); } } tcp_clear_retrans_hints_partial(tp); if (prior_lost == tp->lost_out) return; if (tcp_is_reno(tp)) tcp_limit_reno_sacked(tp); tcp_verify_left_out(tp); /* Don't muck with the congestion window here. * Reason is that we do not increase amount of _data_ * in network, but units changed and effective * cwnd/ssthresh really reduced now. 
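	 * E.g. if the path MTU drops from 1500 to 1000 bytes, a cwnd of 10
	 * segments now covers roughly 10KB instead of 15KB, so in bytes the
	 * window has already shrunk by a third without snd_cwnd changing
	 * (illustrative numbers only).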
	 */
	if (icsk->icsk_ca_state != TCP_CA_Loss) {
		tp->high_seq = tp->snd_nxt;
		tp->snd_ssthresh = tcp_current_ssthresh(sk);
		tp->prior_ssthresh = 0;
		tp->undo_marker = 0;
		tcp_set_ca_state(sk, TCP_CA_Loss);
	}
	tcp_xmit_retransmit_queue(sk);
}
EXPORT_SYMBOL(tcp_simple_retransmit);

static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mib_idx;

	if (tcp_is_reno(tp))
		mib_idx = LINUX_MIB_TCPRENORECOVERY;
	else
		mib_idx = LINUX_MIB_TCPSACKRECOVERY;

	NET_INC_STATS_BH(sock_net(sk), mib_idx);

	tp->prior_ssthresh = 0;
	tp->undo_marker = tp->snd_una;
	tp->undo_retrans = tp->retrans_out;

	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
		if (!ece_ack)
			tp->prior_ssthresh = tcp_current_ssthresh(sk);
		tcp_init_cwnd_reduction(sk, true);
	}
	tcp_set_ca_state(sk, TCP_CA_Recovery);
}

/* Process an ACK in CA_Loss state. Move to CA_Open if the lost data are
 * recovered or the loss proves spurious. Otherwise retransmit more on
 * partial ACKs.
 */
static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	bool recovered = !before(tp->snd_una, tp->high_seq);

	if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
		/* Step 3.b. A timeout is spurious if not all data are
		 * lost, i.e., never-retransmitted data are (s)acked.
		 */
		if (tcp_try_undo_loss(sk, flag & FLAG_ORIG_SACK_ACKED))
			return;

		if (after(tp->snd_nxt, tp->high_seq) &&
		    (flag & FLAG_DATA_SACKED || is_dupack)) {
			tp->frto = 0; /* Loss was real: 2nd part of step 3.a */
		} else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) {
			tp->high_seq = tp->snd_nxt;
			__tcp_push_pending_frames(sk, tcp_current_mss(sk),
						  TCP_NAGLE_OFF);
			if (after(tp->snd_nxt, tp->high_seq))
				return; /* Step 2.b */
			tp->frto = 0;
		}
	}

	if (recovered) {
		/* F-RTO RFC5682 sec 3.1 step 2.a and 1st part of step 3.a */
		icsk->icsk_retransmits = 0;
		tcp_try_undo_recovery(sk);
		return;
	}
	if (flag & FLAG_DATA_ACKED)
		icsk->icsk_retransmits = 0;
	if (tcp_is_reno(tp)) {
		/* A Reno DUPACK means new data in F-RTO step 2.b above are
		 * delivered. Lower inflight to clock out (re)transmissions.
		 */
		if (after(tp->snd_nxt, tp->high_seq) && is_dupack)
			tcp_add_reno_sack(sk);
		else if (flag & FLAG_SND_UNA_ADVANCED)
			tcp_reset_reno_sack(tp);
	}
	if (tcp_try_undo_loss(sk, false))
		return;
	tcp_xmit_retransmit_queue(sk);
}

/* Process an event which can update packets-in-flight non-trivially.
 * The main goal of this function is to calculate a new estimate for
 * left_out, taking into account both packets sitting in the receiver's
 * buffer and packets lost by the network.
 *
 * Besides that it does CWND reduction, when packet loss is detected,
 * and changes the state of the machine.
 *
 * It does _not_ decide what to send, that is done in
 * tcp_xmit_retransmit_queue().
 */
static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
				  int prior_sacked, int prior_packets,
				  bool is_dupack, int flag)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
				    (tcp_fackets_out(tp) > tp->reordering));
	int newly_acked_sacked = 0;
	int fast_rexmit = 0;

	if (WARN_ON(!tp->packets_out && tp->sacked_out))
		tp->sacked_out = 0;
	if (WARN_ON(!tp->sacked_out && tp->fackets_out))
		tp->fackets_out = 0;

	/* Now the state machine starts.
	 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */
	if (flag & FLAG_ECE)
		tp->prior_ssthresh = 0;

	/* B. In all the states check for reneging SACKs. */
	if (tcp_check_sack_reneging(sk, flag))
		return;

	/* C. Check consistency of the current state.
*/ tcp_verify_left_out(tp); /* D. Check state exit conditions. State can be terminated * when high_seq is ACKed. */ if (icsk->icsk_ca_state == TCP_CA_Open) { WARN_ON(tp->retrans_out != 0); tp->retrans_stamp = 0; } else if (!before(tp->snd_una, tp->high_seq)) { switch (icsk->icsk_ca_state) { case TCP_CA_CWR: /* CWR is to be held something *above* high_seq * is ACKed for CWR bit to reach receiver. */ if (tp->snd_una != tp->high_seq) { tcp_end_cwnd_reduction(sk); tcp_set_ca_state(sk, TCP_CA_Open); } break; case TCP_CA_Recovery: if (tcp_is_reno(tp)) tcp_reset_reno_sack(tp); if (tcp_try_undo_recovery(sk)) return; tcp_end_cwnd_reduction(sk); break; } } /* E. Process state. */ switch (icsk->icsk_ca_state) { case TCP_CA_Recovery: if (!(flag & FLAG_SND_UNA_ADVANCED)) { if (tcp_is_reno(tp) && is_dupack) tcp_add_reno_sack(sk); } else do_lost = tcp_try_undo_partial(sk, pkts_acked); newly_acked_sacked = prior_packets - tp->packets_out + tp->sacked_out - prior_sacked; break; case TCP_CA_Loss: tcp_process_loss(sk, flag, is_dupack); if (icsk->icsk_ca_state != TCP_CA_Open) return; /* Fall through to processing in Open state. */ default: if (tcp_is_reno(tp)) { if (flag & FLAG_SND_UNA_ADVANCED) tcp_reset_reno_sack(tp); if (is_dupack) tcp_add_reno_sack(sk); } newly_acked_sacked = prior_packets - tp->packets_out + tp->sacked_out - prior_sacked; if (icsk->icsk_ca_state <= TCP_CA_Disorder) tcp_try_undo_dsack(sk); if (!tcp_time_to_recover(sk, flag)) { tcp_try_to_open(sk, flag, newly_acked_sacked); return; } /* MTU probe failure: don't reduce cwnd */ if (icsk->icsk_ca_state < TCP_CA_CWR && icsk->icsk_mtup.probe_size && tp->snd_una == tp->mtu_probe.probe_seq_start) { tcp_mtup_probe_failed(sk); /* Restores the reduction we did in tcp_mtup_probe() */ tp->snd_cwnd++; tcp_simple_retransmit(sk); return; } /* Otherwise enter Recovery state */ tcp_enter_recovery(sk, (flag & FLAG_ECE)); fast_rexmit = 1; } if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk))) tcp_update_scoreboard(sk, fast_rexmit); tcp_cwnd_reduction(sk, newly_acked_sacked, fast_rexmit); tcp_xmit_retransmit_queue(sk); } void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt) { tcp_rtt_estimator(sk, seq_rtt); tcp_set_rto(sk); inet_csk(sk)->icsk_backoff = 0; } EXPORT_SYMBOL(tcp_valid_rtt_meas); /* Read draft-ietf-tcplw-high-performance before mucking * with this code. (Supersedes RFC1323) */ static void tcp_ack_saw_tstamp(struct sock *sk, int flag) { /* RTTM Rule: A TSecr value received in a segment is used to * update the averaged RTT measurement only if the segment * acknowledges some new data, i.e., only if it advances the * left edge of the send window. * * See draft-ietf-tcplw-high-performance-00, section 3.3. * 1998/04/10 Andrey V. Savochkin <saw@msu.ru> * * Changed: reset backoff as soon as we see the first valid sample. * If we do not, we get strongly overestimated rto. With timestamps * samples are accepted even from very old segments: f.e., when rtt=1 * increases to 8, we retransmit 5 times and after 8 seconds delayed * answer arrives rto becomes 120 seconds! If at least one of segments * in window is lost... Voila. --ANK (010210) */ struct tcp_sock *tp = tcp_sk(sk); tcp_valid_rtt_meas(sk, tcp_time_stamp - tp->rx_opt.rcv_tsecr); } static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag) { /* We don't have a timestamp. Can only use * packets that are not retransmitted to determine * rtt estimates. Also, we must not reset the * backoff for rto until we get a non-retransmitted * packet. 
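	 * (An ACK for retransmitted data is ambiguous: it may answer the
	 * original transmission or the retransmit, so any RTT sample taken
	 * from it could be wildly off in either direction.)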
This allows us to deal with a situation * where the network delay has increased suddenly. * I.e. Karn's algorithm. (SIGCOMM '87, p5.) */ if (flag & FLAG_RETRANS_DATA_ACKED) return; tcp_valid_rtt_meas(sk, seq_rtt); } static inline void tcp_ack_update_rtt(struct sock *sk, const int flag, const s32 seq_rtt) { const struct tcp_sock *tp = tcp_sk(sk); /* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */ if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) tcp_ack_saw_tstamp(sk, flag); else if (seq_rtt >= 0) tcp_ack_no_tstamp(sk, seq_rtt, flag); } static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) { const struct inet_connection_sock *icsk = inet_csk(sk); icsk->icsk_ca_ops->cong_avoid(sk, ack, in_flight); tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp; } /* Restart timer after forward progress on connection. * RFC2988 recommends to restart timer to now+rto. */ void tcp_rearm_rto(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); /* If the retrans timer is currently being used by Fast Open * for SYN-ACK retrans purpose, stay put. */ if (tp->fastopen_rsk) return; if (!tp->packets_out) { inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); } else { u32 rto = inet_csk(sk)->icsk_rto; /* Offset the time elapsed after installing regular RTO */ if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { struct sk_buff *skb = tcp_write_queue_head(sk); const u32 rto_time_stamp = TCP_SKB_CB(skb)->when + rto; s32 delta = (s32)(rto_time_stamp - tcp_time_stamp); /* delta may not be positive if the socket is locked * when the retrans timer fires and is rescheduled. */ if (delta > 0) rto = delta; } inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, TCP_RTO_MAX); } } /* This function is called when the delayed ER timer fires. TCP enters * fast recovery and performs fast-retransmit. */ void tcp_resume_early_retransmit(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); tcp_rearm_rto(sk); /* Stop if ER is disabled after the delayed ER timer is scheduled */ if (!tp->do_early_retrans) return; tcp_enter_recovery(sk, false); tcp_update_scoreboard(sk, 1); tcp_xmit_retransmit_queue(sk); } /* If we get here, the whole TSO packet has not been acked. */ static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); u32 packets_acked; BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)); packets_acked = tcp_skb_pcount(skb); if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) return 0; packets_acked -= tcp_skb_pcount(skb); if (packets_acked) { BUG_ON(tcp_skb_pcount(skb) == 0); BUG_ON(!before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)); } return packets_acked; } /* Remove acknowledged frames from the retransmission queue. If our packet * is before the ack sequence we can discard it as it's confirmed to have * arrived at the other end. 
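 * E.g. a cumulative ACK for byte 5000 confirms every byte below 5000 no
 * matter which (re)transmission delivered it; the loop below unlinks
 * fully-acked skbs and trims a partially-acked TSO head via
 * tcp_tso_acked() (a summary of the code that follows, not new policy).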
*/ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, u32 prior_snd_una) { struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); struct sk_buff *skb; u32 now = tcp_time_stamp; int fully_acked = true; int flag = 0; u32 pkts_acked = 0; u32 reord = tp->packets_out; u32 prior_sacked = tp->sacked_out; s32 seq_rtt = -1; s32 ca_seq_rtt = -1; ktime_t last_ackt = net_invalid_timestamp(); while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) { struct tcp_skb_cb *scb = TCP_SKB_CB(skb); u32 acked_pcount; u8 sacked = scb->sacked; /* Determine how many packets and what bytes were acked, tso and else */ if (after(scb->end_seq, tp->snd_una)) { if (tcp_skb_pcount(skb) == 1 || !after(tp->snd_una, scb->seq)) break; acked_pcount = tcp_tso_acked(sk, skb); if (!acked_pcount) break; fully_acked = false; } else { acked_pcount = tcp_skb_pcount(skb); } if (sacked & TCPCB_RETRANS) { if (sacked & TCPCB_SACKED_RETRANS) tp->retrans_out -= acked_pcount; flag |= FLAG_RETRANS_DATA_ACKED; ca_seq_rtt = -1; seq_rtt = -1; } else { ca_seq_rtt = now - scb->when; last_ackt = skb->tstamp; if (seq_rtt < 0) { seq_rtt = ca_seq_rtt; } if (!(sacked & TCPCB_SACKED_ACKED)) reord = min(pkts_acked, reord); if (!after(scb->end_seq, tp->high_seq)) flag |= FLAG_ORIG_SACK_ACKED; } if (sacked & TCPCB_SACKED_ACKED) tp->sacked_out -= acked_pcount; if (sacked & TCPCB_LOST) tp->lost_out -= acked_pcount; tp->packets_out -= acked_pcount; pkts_acked += acked_pcount; /* Initial outgoing SYN's get put onto the write_queue * just like anything else we transmit. It is not * true data, and if we misinform our callers that * this ACK acks real data, we will erroneously exit * connection startup slow start one packet too * quickly. This is severely frowned upon behavior. */ if (!(scb->tcp_flags & TCPHDR_SYN)) { flag |= FLAG_DATA_ACKED; } else { flag |= FLAG_SYN_ACKED; tp->retrans_stamp = 0; } if (!fully_acked) break; tcp_unlink_write_queue(skb, sk); sk_wmem_free_skb(sk, skb); tp->scoreboard_skb_hint = NULL; if (skb == tp->retransmit_skb_hint) tp->retransmit_skb_hint = NULL; if (skb == tp->lost_skb_hint) tp->lost_skb_hint = NULL; } if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una))) tp->snd_up = tp->snd_una; if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) flag |= FLAG_SACK_RENEGING; if (flag & FLAG_ACKED) { const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; if (unlikely(icsk->icsk_mtup.probe_size && !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { tcp_mtup_probe_success(sk); } tcp_ack_update_rtt(sk, flag, seq_rtt); tcp_rearm_rto(sk); if (tcp_is_reno(tp)) { tcp_remove_reno_sacks(sk, pkts_acked); } else { int delta; /* Non-retransmitted hole got filled? That's reordering */ if (reord < prior_fackets) tcp_update_reordering(sk, tp->fackets_out - reord, 0); delta = tcp_is_fack(tp) ? pkts_acked : prior_sacked - tp->sacked_out; tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta); } tp->fackets_out -= min(pkts_acked, tp->fackets_out); if (ca_ops->pkts_acked) { s32 rtt_us = -1; /* Is the ACK triggering packet unambiguous? */ if (!(flag & FLAG_RETRANS_DATA_ACKED)) { /* High resolution needed and available? 
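				 * (Congestion modules that set
				 * TCP_CONG_RTT_STAMP, e.g. delay-based ones,
				 * want microsecond RTTs taken from skb
				 * timestamps; the jiffies-based ca_seq_rtt is
				 * the fallback.)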
*/ if (ca_ops->flags & TCP_CONG_RTT_STAMP && !ktime_equal(last_ackt, net_invalid_timestamp())) rtt_us = ktime_us_delta(ktime_get_real(), last_ackt); else if (ca_seq_rtt >= 0) rtt_us = jiffies_to_usecs(ca_seq_rtt); } ca_ops->pkts_acked(sk, pkts_acked, rtt_us); } } #if FASTRETRANS_DEBUG > 0 WARN_ON((int)tp->sacked_out < 0); WARN_ON((int)tp->lost_out < 0); WARN_ON((int)tp->retrans_out < 0); if (!tp->packets_out && tcp_is_sack(tp)) { icsk = inet_csk(sk); if (tp->lost_out) { pr_debug("Leak l=%u %d\n", tp->lost_out, icsk->icsk_ca_state); tp->lost_out = 0; } if (tp->sacked_out) { pr_debug("Leak s=%u %d\n", tp->sacked_out, icsk->icsk_ca_state); tp->sacked_out = 0; } if (tp->retrans_out) { pr_debug("Leak r=%u %d\n", tp->retrans_out, icsk->icsk_ca_state); tp->retrans_out = 0; } } #endif return flag; } static void tcp_ack_probe(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); /* Was it a usable window open? */ if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, tcp_wnd_end(tp))) { icsk->icsk_backoff = 0; inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0); /* Socket must be waked up by subsequent tcp_data_snd_check(). * This function is not for random using! */ } else { inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), TCP_RTO_MAX); } } static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag) { return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open; } static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag) { const struct tcp_sock *tp = tcp_sk(sk); return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) && !tcp_in_cwnd_reduction(sk); } /* Check that window update is acceptable. * The function assumes that snd_una<=ack<=snd_next. */ static inline bool tcp_may_update_window(const struct tcp_sock *tp, const u32 ack, const u32 ack_seq, const u32 nwin) { return after(ack, tp->snd_una) || after(ack_seq, tp->snd_wl1) || (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd); } /* Update our send window. * * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2 * and in FreeBSD. NetBSD's one is even worse.) is wrong. */ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack, u32 ack_seq) { struct tcp_sock *tp = tcp_sk(sk); int flag = 0; u32 nwin = ntohs(tcp_hdr(skb)->window); if (likely(!tcp_hdr(skb)->syn)) nwin <<= tp->rx_opt.snd_wscale; if (tcp_may_update_window(tp, ack, ack_seq, nwin)) { flag |= FLAG_WIN_UPDATE; tcp_update_wl(tp, ack_seq); if (tp->snd_wnd != nwin) { tp->snd_wnd = nwin; /* Note, it is the only place, where * fast path is recovered for sending TCP. 
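			 * (Zeroing pred_flags below forces subsequent
			 * segments through the slow path until
			 * tcp_fast_path_check() re-arms header prediction.)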
*/ tp->pred_flags = 0; tcp_fast_path_check(sk); if (nwin > tp->max_window) { tp->max_window = nwin; tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie); } } } tp->snd_una = ack; return flag; } /* RFC 5961 7 [ACK Throttling] */ static void tcp_send_challenge_ack(struct sock *sk) { /* unprotected vars, we dont care of overwrites */ static u32 challenge_timestamp; static unsigned int challenge_count; u32 now = jiffies / HZ; if (now != challenge_timestamp) { challenge_timestamp = now; challenge_count = 0; } if (++challenge_count <= sysctl_tcp_challenge_ack_limit) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK); tcp_send_ack(sk); } } static void tcp_store_ts_recent(struct tcp_sock *tp) { tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; tp->rx_opt.ts_recent_stamp = get_seconds(); } static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) { if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { /* PAWS bug workaround wrt. ACK frames, the PAWS discard * extra check below makes sure this can only happen * for pure ACK frames. -DaveM * * Not only, also it occurs for expired timestamps. */ if (tcp_paws_check(&tp->rx_opt, 0)) tcp_store_ts_recent(tp); } } /* This routine deals with acks during a TLP episode. * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe. */ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) { struct tcp_sock *tp = tcp_sk(sk); bool is_tlp_dupack = (ack == tp->tlp_high_seq) && !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP | FLAG_DATA_SACKED)); /* Mark the end of TLP episode on receiving TLP dupack or when * ack is after tlp_high_seq. */ if (is_tlp_dupack) { tp->tlp_high_seq = 0; return; } if (after(ack, tp->tlp_high_seq)) { tp->tlp_high_seq = 0; /* Don't reduce cwnd if DSACK arrives for TLP retrans. */ if (!(flag & FLAG_DSACKING_ACK)) { tcp_init_cwnd_reduction(sk, true); tcp_set_ca_state(sk, TCP_CA_CWR); tcp_end_cwnd_reduction(sk); tcp_try_keep_open(sk); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSPROBERECOVERY); } } } /* This routine deals with incoming acks, but not outgoing ones. */ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); u32 prior_snd_una = tp->snd_una; u32 ack_seq = TCP_SKB_CB(skb)->seq; u32 ack = TCP_SKB_CB(skb)->ack_seq; bool is_dupack = false; u32 prior_in_flight, prior_cwnd = tp->snd_cwnd, prior_rtt = tp->srtt; u32 prior_fackets; int prior_packets = tp->packets_out; int prior_sacked = tp->sacked_out; int pkts_acked = 0; int previous_packets_out = 0; /* If the ack is older than previous acks * then we can probably ignore it. */ if (before(ack, prior_snd_una)) { /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */ if (before(ack, prior_snd_una - tp->max_window)) { tcp_send_challenge_ack(sk); return -1; } goto old_ack; } /* If the ack includes data we haven't sent yet, discard * this segment (RFC793 Section 3.9). */ if (after(ack, tp->snd_nxt)) goto invalid_ack; if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) tcp_rearm_rto(sk); if (after(ack, prior_snd_una)) flag |= FLAG_SND_UNA_ADVANCED; prior_fackets = tp->fackets_out; prior_in_flight = tcp_packets_in_flight(tp); /* ts_recent update must be made after we are sure that the packet * is in window. */ if (flag & FLAG_UPDATE_TS_RECENT) tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) { /* Window is constant, pure forward advance. 
* No more checks are required. * Note, we use the fact that SND.UNA>=SND.WL2. */ tcp_update_wl(tp, ack_seq); tp->snd_una = ack; flag |= FLAG_WIN_UPDATE; tcp_ca_event(sk, CA_EVENT_FAST_ACK); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS); } else { if (ack_seq != TCP_SKB_CB(skb)->end_seq) flag |= FLAG_DATA; else NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS); flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); if (TCP_SKB_CB(skb)->sacked) flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb))) flag |= FLAG_ECE; tcp_ca_event(sk, CA_EVENT_SLOW_ACK); } /* We passed data and got it acked, remove any soft error * log. Something worked... */ sk->sk_err_soft = 0; icsk->icsk_probes_out = 0; tp->rcv_tstamp = tcp_time_stamp; if (!prior_packets) goto no_queue; /* See if we can take anything off of the retransmit queue. */ previous_packets_out = tp->packets_out; flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una); pkts_acked = previous_packets_out - tp->packets_out; if (tcp_ack_is_dubious(sk, flag)) { /* Advance CWND, if state allows this. */ if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag)) tcp_cong_avoid(sk, ack, prior_in_flight); is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, prior_packets, is_dupack, flag); } else { if (flag & FLAG_DATA_ACKED) tcp_cong_avoid(sk, ack, prior_in_flight); } if (tp->tlp_high_seq) tcp_process_tlp_ack(sk, ack, flag); if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) { struct dst_entry *dst = __sk_dst_get(sk); if (dst) dst_confirm(dst); } if (icsk->icsk_pending == ICSK_TIME_RETRANS) tcp_schedule_loss_probe(sk); if (tp->srtt != prior_rtt || tp->snd_cwnd != prior_cwnd) tcp_update_pacing_rate(sk); return 1; no_queue: /* If data was DSACKed, see if we can undo a cwnd reduction. */ if (flag & FLAG_DSACKING_ACK) tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, prior_packets, is_dupack, flag); /* If this ack opens up a zero window, clear backoff. It was * being used to time the probes, and is probably far higher than * it needs to be for normal retransmission. */ if (tcp_send_head(sk)) tcp_ack_probe(sk); if (tp->tlp_high_seq) tcp_process_tlp_ack(sk, ack, flag); return 1; invalid_ack: SOCK_DEBUG(sk, "Ack %u after %u:%u\n", ack, tp->snd_una, tp->snd_nxt); return -1; old_ack: /* If data was SACKed, tag it and see if we should send more data. * If data was DSACKed, see if we can undo a cwnd reduction. */ if (TCP_SKB_CB(skb)->sacked) { flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, prior_packets, is_dupack, flag); } SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt); return 0; } /* Look for tcp options. Normally only called on SYN and SYNACK packets. * But, this can also be called on packets in the established flow when * the fast version below fails. 
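 * E.g. a typical SYN carries MSS (4 bytes), SACK permitted (2),
 * timestamps (10), a NOP pad and window scale (3); the loop below walks
 * such kind/length pairs and skips unknown options by their length byte
 * (an illustrative layout, not a requirement).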
*/ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *opt_rx, int estab, struct tcp_fastopen_cookie *foc) { const unsigned char *ptr; const struct tcphdr *th = tcp_hdr(skb); int length = (th->doff * 4) - sizeof(struct tcphdr); ptr = (const unsigned char *)(th + 1); opt_rx->saw_tstamp = 0; while (length > 0) { int opcode = *ptr++; int opsize; switch (opcode) { case TCPOPT_EOL: return; case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */ length--; continue; default: opsize = *ptr++; if (opsize < 2) /* "silly options" */ return; if (opsize > length) return; /* don't parse partial options */ switch (opcode) { case TCPOPT_MSS: if (opsize == TCPOLEN_MSS && th->syn && !estab) { u16 in_mss = get_unaligned_be16(ptr); if (in_mss) { if (opt_rx->user_mss && opt_rx->user_mss < in_mss) in_mss = opt_rx->user_mss; opt_rx->mss_clamp = in_mss; } } break; case TCPOPT_WINDOW: if (opsize == TCPOLEN_WINDOW && th->syn && !estab && sysctl_tcp_window_scaling) { __u8 snd_wscale = *(__u8 *)ptr; opt_rx->wscale_ok = 1; if (snd_wscale > 14) { net_info_ratelimited("%s: Illegal window scaling value %d >14 received\n", __func__, snd_wscale); snd_wscale = 14; } opt_rx->snd_wscale = snd_wscale; } break; case TCPOPT_TIMESTAMP: if ((opsize == TCPOLEN_TIMESTAMP) && ((estab && opt_rx->tstamp_ok) || (!estab && sysctl_tcp_timestamps))) { opt_rx->saw_tstamp = 1; opt_rx->rcv_tsval = get_unaligned_be32(ptr); opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4); } break; case TCPOPT_SACK_PERM: if (opsize == TCPOLEN_SACK_PERM && th->syn && !estab && sysctl_tcp_sack) { opt_rx->sack_ok = TCP_SACK_SEEN; tcp_sack_reset(opt_rx); } break; case TCPOPT_SACK: if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) && !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) && opt_rx->sack_ok) { TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th; } break; #ifdef CONFIG_TCP_MD5SIG case TCPOPT_MD5SIG: /* * The MD5 Hash has already been * checked (see tcp_v{4,6}_do_rcv()). */ break; #endif case TCPOPT_EXP: /* Fast Open option shares code 254 using a * 16 bits magic number. It's valid only in * SYN or SYN-ACK with an even size. */ if (opsize < TCPOLEN_EXP_FASTOPEN_BASE || get_unaligned_be16(ptr) != TCPOPT_FASTOPEN_MAGIC || foc == NULL || !th->syn || (opsize & 1)) break; foc->len = opsize - TCPOLEN_EXP_FASTOPEN_BASE; if (foc->len >= TCP_FASTOPEN_COOKIE_MIN && foc->len <= TCP_FASTOPEN_COOKIE_MAX) memcpy(foc->val, ptr + 2, foc->len); else if (foc->len != 0) foc->len = -1; break; } ptr += opsize-2; length -= opsize; } } } EXPORT_SYMBOL(tcp_parse_options); static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th) { const __be32 *ptr = (const __be32 *)(th + 1); if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) { tp->rx_opt.saw_tstamp = 1; ++ptr; tp->rx_opt.rcv_tsval = ntohl(*ptr); ++ptr; if (*ptr) tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset; else tp->rx_opt.rcv_tsecr = 0; return true; } return false; } /* Fast parse options. This hopes to only see timestamps. * If it is wrong it falls back on tcp_parse_options(). */ static bool tcp_fast_parse_options(const struct sk_buff *skb, const struct tcphdr *th, struct tcp_sock *tp) { /* In the spirit of fast parsing, compare doff directly to constant * values. Because equality is used, short doff can be ignored here. 
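	 * E.g. a bare header gives doff == 5 (20 bytes); with the aligned
	 * timestamp option doff == 8 (20 + 12 bytes), the only other value
	 * the fast path accepts before punting to tcp_parse_options().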
*/ if (th->doff == (sizeof(*th) / 4)) { tp->rx_opt.saw_tstamp = 0; return false; } else if (tp->rx_opt.tstamp_ok && th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) { if (tcp_parse_aligned_timestamp(tp, th)) return true; } tcp_parse_options(skb, &tp->rx_opt, 1, NULL); if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) tp->rx_opt.rcv_tsecr -= tp->tsoffset; return true; } #ifdef CONFIG_TCP_MD5SIG /* * Parse MD5 Signature option */ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th) { int length = (th->doff << 2) - sizeof(*th); const u8 *ptr = (const u8 *)(th + 1); /* If the TCP option is too short, we can short cut */ if (length < TCPOLEN_MD5SIG) return NULL; while (length > 0) { int opcode = *ptr++; int opsize; switch(opcode) { case TCPOPT_EOL: return NULL; case TCPOPT_NOP: length--; continue; default: opsize = *ptr++; if (opsize < 2 || opsize > length) return NULL; if (opcode == TCPOPT_MD5SIG) return opsize == TCPOLEN_MD5SIG ? ptr : NULL; } ptr += opsize - 2; length -= opsize; } return NULL; } EXPORT_SYMBOL(tcp_parse_md5sig_option); #endif /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM * * It is not fatal. If this ACK does _not_ change critical state (seqs, window) * it can pass through stack. So, the following predicate verifies that * this segment is not used for anything but congestion avoidance or * fast retransmit. Moreover, we even are able to eliminate most of such * second order effects, if we apply some small "replay" window (~RTO) * to timestamp space. * * All these measures still do not guarantee that we reject wrapped ACKs * on networks with high bandwidth, when sequence space is recycled fastly, * but it guarantees that such events will be very rare and do not affect * connection seriously. This doesn't look nice, but alas, PAWS is really * buggy extension. * * [ Later note. Even worse! It is buggy for segments _with_ data. RFC * states that events when retransmit arrives after original data are rare. * It is a blatant lie. VJ forgot about fast retransmit! 8)8) It is * the biggest problem on large power networks even with minor reordering. * OK, let's give it small replay window. If peer clock is even 1hz, it is safe * up to bandwidth of 18Gigabit/sec. 8) ] */ static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb) { const struct tcp_sock *tp = tcp_sk(sk); const struct tcphdr *th = tcp_hdr(skb); u32 seq = TCP_SKB_CB(skb)->seq; u32 ack = TCP_SKB_CB(skb)->ack_seq; return (/* 1. Pure ACK with correct sequence number. */ (th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) && /* 2. ... and duplicate ACK. */ ack == tp->snd_una && /* 3. ... and does not update window. */ !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) && /* 4. ... and sits in replay window. */ (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ); } static inline bool tcp_paws_discard(const struct sock *sk, const struct sk_buff *skb) { const struct tcp_sock *tp = tcp_sk(sk); return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) && !tcp_disordered_ack(sk, skb); } /* Check segment sequence number for validity. * * Segment controls are considered valid, if the segment * fits to the window after truncation to the window. Acceptability * of data (and SYN, FIN, of course) is checked separately. * See tcp_data_queue(), for example. * * Also, controls (RST is main one) are accepted using RCV.WUP instead * of RCV.NXT. 
Peer still did not advance his SND.UNA when we * delayed ACK, so that his SND.UNA <= our RCV.WUP. * (borrowed from freebsd) */ static inline bool tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq) { return !before(end_seq, tp->rcv_wup) && !after(seq, tp->rcv_nxt + tcp_receive_window(tp)); } /* When we get a reset we do this. */ void tcp_reset(struct sock *sk) { /* We want the right error as BSD sees it (and indeed as we do). */ switch (sk->sk_state) { case TCP_SYN_SENT: sk->sk_err = ECONNREFUSED; break; case TCP_CLOSE_WAIT: sk->sk_err = EPIPE; break; case TCP_CLOSE: return; default: sk->sk_err = ECONNRESET; } /* This barrier is coupled with smp_rmb() in tcp_poll() */ smp_wmb(); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_error_report(sk); tcp_done(sk); } /* * Process the FIN bit. This now behaves as it is supposed to work * and the FIN takes effect when it is validly part of sequence * space, not earlier while we still have holes. * * If we are ESTABLISHED, a received FIN moves us to CLOSE-WAIT * (and thence onto LAST-ACK and finally, CLOSE; we never enter * TIME-WAIT). * * If we are in FINWAIT-1, a received FIN indicates simultaneous * close and we go into CLOSING (and later onto TIME-WAIT). * * If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT. */ static void tcp_fin(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); inet_csk_schedule_ack(sk); sk->sk_shutdown |= RCV_SHUTDOWN; sock_set_flag(sk, SOCK_DONE); switch (sk->sk_state) { case TCP_SYN_RECV: case TCP_ESTABLISHED: /* Move to CLOSE_WAIT */ tcp_set_state(sk, TCP_CLOSE_WAIT); inet_csk(sk)->icsk_ack.pingpong = 1; break; case TCP_CLOSE_WAIT: case TCP_CLOSING: /* Received a retransmission of the FIN, do * nothing. */ break; case TCP_LAST_ACK: /* RFC793: Remain in the LAST-ACK state. */ break; case TCP_FIN_WAIT1: /* This case occurs when a simultaneous close * happens; we must ACK the received FIN and * enter the CLOSING state. */ tcp_send_ack(sk); tcp_set_state(sk, TCP_CLOSING); break; case TCP_FIN_WAIT2: /* Received a FIN -- send ACK and enter TIME_WAIT. */ tcp_send_ack(sk); tcp_time_wait(sk, TCP_TIME_WAIT, 0); break; default: /* Only TCP_LISTEN and TCP_CLOSE are left; in these * cases we should never reach this piece of code. */ pr_err("%s: Impossible, sk->sk_state=%d\n", __func__, sk->sk_state); break; } /* It _is_ possible that we have something out-of-order _after_ FIN. * Probably, we should reset in this case. For now drop them. */ __skb_queue_purge(&tp->out_of_order_queue); if (tcp_is_sack(tp)) tcp_sack_reset(&tp->rx_opt); sk_mem_reclaim(sk); if (!sock_flag(sk, SOCK_DEAD)) { sk->sk_state_change(sk); /* Do not send POLL_HUP for half-duplex close.
*/ if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE) sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); else sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); } } static inline bool tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq) { if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) { if (before(seq, sp->start_seq)) sp->start_seq = seq; if (after(end_seq, sp->end_seq)) sp->end_seq = end_seq; return true; } return false; } static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) { struct tcp_sock *tp = tcp_sk(sk); if (tcp_is_sack(tp) && sysctl_tcp_dsack) { int mib_idx; if (before(seq, tp->rcv_nxt)) mib_idx = LINUX_MIB_TCPDSACKOLDSENT; else mib_idx = LINUX_MIB_TCPDSACKOFOSENT; NET_INC_STATS_BH(sock_net(sk), mib_idx); tp->rx_opt.dsack = 1; tp->duplicate_sack[0].start_seq = seq; tp->duplicate_sack[0].end_seq = end_seq; } } static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq) { struct tcp_sock *tp = tcp_sk(sk); if (!tp->rx_opt.dsack) tcp_dsack_set(sk, seq, end_seq); else tcp_sack_extend(tp->duplicate_sack, seq, end_seq); } static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); tcp_enter_quickack_mode(sk); if (tcp_is_sack(tp) && sysctl_tcp_dsack) { u32 end_seq = TCP_SKB_CB(skb)->end_seq; if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) end_seq = tp->rcv_nxt; tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq); } } tcp_send_ack(sk); } /* These routines update the SACK block as out-of-order packets arrive or * in-order packets close up the sequence space. */ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp) { int this_sack; struct tcp_sack_block *sp = &tp->selective_acks[0]; struct tcp_sack_block *swalk = sp + 1; /* See if the recent change to the first SACK eats into * or hits the sequence space of other SACK blocks, if so coalesce. */ for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) { if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) { int i; /* Zap SWALK, by moving every further SACK up by one slot. * Decrease num_sacks. */ tp->rx_opt.num_sacks--; for (i = this_sack; i < tp->rx_opt.num_sacks; i++) sp[i] = sp[i + 1]; continue; } this_sack++, swalk++; } } static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq) { struct tcp_sock *tp = tcp_sk(sk); struct tcp_sack_block *sp = &tp->selective_acks[0]; int cur_sacks = tp->rx_opt.num_sacks; int this_sack; if (!cur_sacks) goto new_sack; for (this_sack = 0; this_sack < cur_sacks; this_sack++, sp++) { if (tcp_sack_extend(sp, seq, end_seq)) { /* Rotate this_sack to the first one. */ for (; this_sack > 0; this_sack--, sp--) swap(*sp, *(sp - 1)); if (cur_sacks > 1) tcp_sack_maybe_coalesce(tp); return; } } /* Could not find an adjacent existing SACK, build a new one, * put it at the front, and shift everyone else down. We * always know there is at least one SACK present already here. * * If the sack array is full, forget about the last one. */ if (this_sack >= TCP_NUM_SACKS) { this_sack--; tp->rx_opt.num_sacks--; sp--; } for (; this_sack > 0; this_sack--, sp--) *sp = *(sp - 1); new_sack: /* Build the new head SACK, and we're done. */ sp->start_seq = seq; sp->end_seq = end_seq; tp->rx_opt.num_sacks++; } /* RCV.NXT advances, some SACKs should be eaten. 
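 * A SACK block is "eaten" once its start sequence is at or below RCV.NXT:
 * the data it advertised is then covered by the cumulative ACK, so
 * tcp_sack_remove() below drops exactly those blocks.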
*/ static void tcp_sack_remove(struct tcp_sock *tp) { struct tcp_sack_block *sp = &tp->selective_acks[0]; int num_sacks = tp->rx_opt.num_sacks; int this_sack; /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */ if (skb_queue_empty(&tp->out_of_order_queue)) { tp->rx_opt.num_sacks = 0; return; } for (this_sack = 0; this_sack < num_sacks;) { /* Check if the start of the sack is covered by RCV.NXT. */ if (!before(tp->rcv_nxt, sp->start_seq)) { int i; /* RCV.NXT must cover all the block! */ WARN_ON(before(tp->rcv_nxt, sp->end_seq)); /* Zap this SACK, by moving forward any other SACKS. */ for (i=this_sack+1; i < num_sacks; i++) tp->selective_acks[i-1] = tp->selective_acks[i]; num_sacks--; continue; } this_sack++; sp++; } tp->rx_opt.num_sacks = num_sacks; } /* This one checks to see if we can put data from the * out_of_order queue into the receive_queue. */ static void tcp_ofo_queue(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); __u32 dsack_high = tp->rcv_nxt; struct sk_buff *skb; while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) { if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) break; if (before(TCP_SKB_CB(skb)->seq, dsack_high)) { __u32 dsack = dsack_high; if (before(TCP_SKB_CB(skb)->end_seq, dsack_high)) dsack_high = TCP_SKB_CB(skb)->end_seq; tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack); } if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { SOCK_DEBUG(sk, "ofo packet was already received\n"); __skb_unlink(skb, &tp->out_of_order_queue); __kfree_skb(skb); continue; } SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n", tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); __skb_unlink(skb, &tp->out_of_order_queue); __skb_queue_tail(&sk->sk_receive_queue, skb); tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; if (tcp_hdr(skb)->fin) tcp_fin(sk); } } static bool tcp_prune_ofo_queue(struct sock *sk); static int tcp_prune_queue(struct sock *sk); static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb, unsigned int size) { if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || !sk_rmem_schedule(sk, skb, size)) { if (tcp_prune_queue(sk) < 0) return -1; if (!sk_rmem_schedule(sk, skb, size)) { if (!tcp_prune_ofo_queue(sk)) return -1; if (!sk_rmem_schedule(sk, skb, size)) return -1; } } return 0; } /** * tcp_try_coalesce - try to merge skb to prior one * @sk: socket * @to: prior buffer * @from: buffer to add in queue * @fragstolen: pointer to boolean * * Before queueing skb @from after @to, try to merge them * to reduce overall memory use and queue lengths, if cost is small. * Packets in ofo or receive queues can stay a long time. * Better try to coalesce them right now to avoid future collapses. 
* Returns true if caller should free @from instead of queueing it */ static bool tcp_try_coalesce(struct sock *sk, struct sk_buff *to, struct sk_buff *from, bool *fragstolen) { int delta; *fragstolen = false; if (tcp_hdr(from)->fin) return false; /* It's possible this segment overlaps with a prior segment in the queue */ if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq) return false; if (!skb_try_coalesce(to, from, fragstolen, &delta)) return false; atomic_add(delta, &sk->sk_rmem_alloc); sk_mem_charge(sk, delta); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq; TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq; return true; } static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb1; u32 seq, end_seq; TCP_ECN_check_ce(tp, skb); if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP); __kfree_skb(skb); return; } /* Disable header prediction. */ tp->pred_flags = 0; inet_csk_schedule_ack(sk); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOQUEUE); SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); skb1 = skb_peek_tail(&tp->out_of_order_queue); if (!skb1) { /* Initial out-of-order segment, build 1 SACK. */ if (tcp_is_sack(tp)) { tp->rx_opt.num_sacks = 1; tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq; tp->selective_acks[0].end_seq = TCP_SKB_CB(skb)->end_seq; } __skb_queue_head(&tp->out_of_order_queue, skb); goto end; } seq = TCP_SKB_CB(skb)->seq; end_seq = TCP_SKB_CB(skb)->end_seq; if (seq == TCP_SKB_CB(skb1)->end_seq) { bool fragstolen; if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) { __skb_queue_after(&tp->out_of_order_queue, skb1, skb); } else { kfree_skb_partial(skb, fragstolen); skb = NULL; } if (!tp->rx_opt.num_sacks || tp->selective_acks[0].end_seq != seq) goto add_sack; /* Common case: data arrives in order after a hole. */ tp->selective_acks[0].end_seq = end_seq; goto end; } /* Find a place to insert this segment. */ while (1) { if (!after(TCP_SKB_CB(skb1)->seq, seq)) break; if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) { skb1 = NULL; break; } skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1); } /* Does this skb overlap the previous one? */ if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) { if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { /* All the bits are present. Drop. */ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE); __kfree_skb(skb); skb = NULL; tcp_dsack_set(sk, seq, end_seq); goto add_sack; } if (after(seq, TCP_SKB_CB(skb1)->seq)) { /* Partial overlap. */ tcp_dsack_set(sk, seq, TCP_SKB_CB(skb1)->end_seq); } else { if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) skb1 = NULL; else skb1 = skb_queue_prev( &tp->out_of_order_queue, skb1); } } if (!skb1) __skb_queue_head(&tp->out_of_order_queue, skb); else __skb_queue_after(&tp->out_of_order_queue, skb1, skb); /* And clean up segments wholly covered by the new one.
*/ while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) { skb1 = skb_queue_next(&tp->out_of_order_queue, skb); if (!after(end_seq, TCP_SKB_CB(skb1)->seq)) break; if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, end_seq); break; } __skb_unlink(skb1, &tp->out_of_order_queue); tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE); __kfree_skb(skb1); } add_sack: if (tcp_is_sack(tp)) tcp_sack_new_ofo_skb(sk, seq, end_seq); end: if (skb) skb_set_owner_r(skb, sk); } static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen, bool *fragstolen) { int eaten; struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue); __skb_pull(skb, hdrlen); eaten = (tail && tcp_try_coalesce(sk, tail, skb, fragstolen)) ? 1 : 0; tcp_sk(sk)->rcv_nxt = TCP_SKB_CB(skb)->end_seq; if (!eaten) { __skb_queue_tail(&sk->sk_receive_queue, skb); skb_set_owner_r(skb, sk); } return eaten; } int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) { struct sk_buff *skb = NULL; struct tcphdr *th; bool fragstolen; if (size == 0) return 0; skb = alloc_skb(size + sizeof(*th), sk->sk_allocation); if (!skb) goto err; if (tcp_try_rmem_schedule(sk, skb, size + sizeof(*th))) goto err_free; th = (struct tcphdr *)skb_put(skb, sizeof(*th)); skb_reset_transport_header(skb); memset(th, 0, sizeof(*th)); if (memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size)) goto err_free; TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size; TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1; if (tcp_queue_rcv(sk, skb, sizeof(*th), &fragstolen)) { WARN_ON_ONCE(fragstolen); /* should not happen */ __kfree_skb(skb); } return size; err_free: kfree_skb(skb); err: return -ENOMEM; } static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) { const struct tcphdr *th = tcp_hdr(skb); struct tcp_sock *tp = tcp_sk(sk); int eaten = -1; bool fragstolen = false; if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) goto drop; skb_dst_drop(skb); __skb_pull(skb, th->doff * 4); TCP_ECN_accept_cwr(tp, skb); tp->rx_opt.dsack = 0; /* Queue data for delivery to the user. * Packets in sequence go to the receive queue. * Out of sequence packets to the out_of_order_queue. */ if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { if (tcp_receive_window(tp) == 0) goto out_of_window; /* Ok. In sequence. In window. */ if (tp->ucopy.task == current && tp->copied_seq == tp->rcv_nxt && tp->ucopy.len && sock_owned_by_user(sk) && !tp->urg_data) { int chunk = min_t(unsigned int, skb->len, tp->ucopy.len); __set_current_state(TASK_RUNNING); local_bh_enable(); if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) { tp->ucopy.len -= chunk; tp->copied_seq += chunk; eaten = (chunk == skb->len); tcp_rcv_space_adjust(sk); } local_bh_disable(); } if (eaten <= 0) { queue_and_out: if (eaten < 0 && tcp_try_rmem_schedule(sk, skb, skb->truesize)) goto drop; eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen); } tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; if (skb->len) tcp_event_data_recv(sk, skb); if (th->fin) tcp_fin(sk); if (!skb_queue_empty(&tp->out_of_order_queue)) { tcp_ofo_queue(sk); /* RFC2581. 4.2. SHOULD send immediate ACK, when * gap in queue is filled. 
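 * Clearing icsk_ack.pingpong below takes the socket out of delayed-ACK
 * (interactive) mode, so the ACK that confirms the newly filled gap is
 * not held back.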
*/ if (skb_queue_empty(&tp->out_of_order_queue)) inet_csk(sk)->icsk_ack.pingpong = 0; } if (tp->rx_opt.num_sacks) tcp_sack_remove(tp); tcp_fast_path_check(sk); if (eaten > 0) kfree_skb_partial(skb, fragstolen); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, 0); return; } if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { /* A retransmit, 2nd most common case. Force an immediate ack. */ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); out_of_window: tcp_enter_quickack_mode(sk); inet_csk_schedule_ack(sk); drop: __kfree_skb(skb); return; } /* Out of window. F.e. zero window probe. */ if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) goto out_of_window; tcp_enter_quickack_mode(sk); if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { /* Partial packet, seq < rcv_next < end_seq */ SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n", tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); /* If window is closed, drop tail of packet. But after * remembering D-SACK for its head made in previous line. */ if (!tcp_receive_window(tp)) goto out_of_window; goto queue_and_out; } tcp_data_queue_ofo(sk, skb); } static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *list) { struct sk_buff *next = NULL; if (!skb_queue_is_last(list, skb)) next = skb_queue_next(list, skb); __skb_unlink(skb, list); __kfree_skb(skb); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); return next; } /* Collapse contiguous sequence of skbs head..tail with * sequence numbers start..end. * * If tail is NULL, this means until the end of the list. * * Segments with FIN/SYN are not collapsed (only because this * simplifies code) */ static void tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct sk_buff *head, struct sk_buff *tail, u32 start, u32 end) { struct sk_buff *skb, *n; bool end_of_skbs; /* First, check that queue is collapsible and find * the point where collapsing can be useful. */ skb = head; restart: end_of_skbs = true; skb_queue_walk_from_safe(list, skb, n) { if (skb == tail) break; /* No new bits? It is possible on ofo queue. */ if (!before(start, TCP_SKB_CB(skb)->end_seq)) { skb = tcp_collapse_one(sk, skb, list); if (!skb) break; goto restart; } /* The first skb to collapse is: * - not SYN/FIN and * - bloated or contains data before "start" or * overlaps to the next one. */ if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin && (tcp_win_from_space(skb->truesize) > skb->len || before(TCP_SKB_CB(skb)->seq, start))) { end_of_skbs = false; break; } if (!skb_queue_is_last(list, skb)) { struct sk_buff *next = skb_queue_next(list, skb); if (next != tail && TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) { end_of_skbs = false; break; } } /* Decided to skip this, advance start seq. */ start = TCP_SKB_CB(skb)->end_seq; } if (end_of_skbs || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin) return; while (before(start, end)) { struct sk_buff *nskb; unsigned int header = skb_headroom(skb); int copy = SKB_MAX_ORDER(header, 0); /* Too big header? This can happen with IPv6. 
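 * SKB_MAX_ORDER(header, 0) is the payload room left in an order-0
 * allocation after reserving 'header' bytes of headroom; a large (e.g.
 * IPv6) header can make it negative, which the check below catches.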
*/ if (copy < 0) return; if (end - start < copy) copy = end - start; nskb = alloc_skb(copy + header, GFP_ATOMIC); if (!nskb) return; skb_set_mac_header(nskb, skb_mac_header(skb) - skb->head); skb_set_network_header(nskb, (skb_network_header(skb) - skb->head)); skb_set_transport_header(nskb, (skb_transport_header(skb) - skb->head)); skb_reserve(nskb, header); memcpy(nskb->head, skb->head, header); memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; __skb_queue_before(list, skb, nskb); skb_set_owner_r(nskb, sk); /* Copy data, releasing collapsed skbs. */ while (copy > 0) { int offset = start - TCP_SKB_CB(skb)->seq; int size = TCP_SKB_CB(skb)->end_seq - start; BUG_ON(offset < 0); if (size > 0) { size = min(copy, size); if (skb_copy_bits(skb, offset, skb_put(nskb, size), size)) BUG(); TCP_SKB_CB(nskb)->end_seq += size; copy -= size; start += size; } if (!before(start, TCP_SKB_CB(skb)->end_seq)) { skb = tcp_collapse_one(sk, skb, list); if (!skb || skb == tail || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin) return; } } } } /* Collapse ofo queue. Algorithm: select contiguous sequence of skbs * and tcp_collapse() them until all the queue is collapsed. */ static void tcp_collapse_ofo_queue(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb = skb_peek(&tp->out_of_order_queue); struct sk_buff *head; u32 start, end; if (skb == NULL) return; start = TCP_SKB_CB(skb)->seq; end = TCP_SKB_CB(skb)->end_seq; head = skb; for (;;) { struct sk_buff *next = NULL; if (!skb_queue_is_last(&tp->out_of_order_queue, skb)) next = skb_queue_next(&tp->out_of_order_queue, skb); skb = next; /* Segment is terminated when we see gap or when * we are at the end of all the queue. */ if (!skb || after(TCP_SKB_CB(skb)->seq, end) || before(TCP_SKB_CB(skb)->end_seq, start)) { tcp_collapse(sk, &tp->out_of_order_queue, head, skb, start, end); head = skb; if (!skb) break; /* Start new segment */ start = TCP_SKB_CB(skb)->seq; end = TCP_SKB_CB(skb)->end_seq; } else { if (before(TCP_SKB_CB(skb)->seq, start)) start = TCP_SKB_CB(skb)->seq; if (after(TCP_SKB_CB(skb)->end_seq, end)) end = TCP_SKB_CB(skb)->end_seq; } } } /* * Purge the out-of-order queue. * Return true if queue was pruned. */ static bool tcp_prune_ofo_queue(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); bool res = false; if (!skb_queue_empty(&tp->out_of_order_queue)) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED); __skb_queue_purge(&tp->out_of_order_queue); /* Reset SACK state. A conforming SACK implementation will * do the same at a timeout based retransmit. When a connection * is in a sad state like this, we care only about integrity * of the connection not performance. */ if (tp->rx_opt.sack_ok) tcp_sack_reset(&tp->rx_opt); sk_mem_reclaim(sk); res = true; } return res; } /* Reduce allocated memory if we can, trying to get * the socket within its memory limits again. * * Return less than zero if we should start dropping frames * until the socket owning process reads some of the data * to stabilize the situation. 
*/ static int tcp_prune_queue(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED); if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) tcp_clamp_window(sk); else if (sk_under_memory_pressure(sk)) tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); tcp_collapse_ofo_queue(sk); if (!skb_queue_empty(&sk->sk_receive_queue)) tcp_collapse(sk, &sk->sk_receive_queue, skb_peek(&sk->sk_receive_queue), NULL, tp->copied_seq, tp->rcv_nxt); sk_mem_reclaim(sk); if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) return 0; /* Collapsing did not help, destructive actions follow. * This must not ever occur. */ tcp_prune_ofo_queue(sk); if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) return 0; /* If we are really being abused, tell the caller to silently * drop receive data on the floor. It will get retransmitted * and hopefully then we'll have sufficient space. */ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED); /* Massive buffer overcommit. */ tp->pred_flags = 0; return -1; } /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto. * As additional protections, we do not touch cwnd in retransmission phases, * and if application hit its sndbuf limit recently. */ void tcp_cwnd_application_limited(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { /* Limited by application or receiver window. */ u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); u32 win_used = max(tp->snd_cwnd_used, init_win); if (win_used < tp->snd_cwnd) { tp->snd_ssthresh = tcp_current_ssthresh(sk); tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1; } tp->snd_cwnd_used = 0; } tp->snd_cwnd_stamp = tcp_time_stamp; } static bool tcp_should_expand_sndbuf(const struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); /* If the user specified a specific send buffer setting, do * not modify it. */ if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) return false; /* If we are under global TCP memory pressure, do not expand. */ if (sk_under_memory_pressure(sk)) return false; /* If we are under soft global TCP memory pressure, do not expand. */ if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0)) return false; /* If we filled the congestion window, do not expand. */ if (tp->packets_out >= tp->snd_cwnd) return false; return true; } /* When incoming ACK allowed to free some skb from write_queue, * we remember this event in flag SOCK_QUEUE_SHRUNK and wake up socket * on the exit from tcp input handler. * * PROBLEM: sndbuf expansion does not work well with largesend. */ static void tcp_new_space(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); if (tcp_should_expand_sndbuf(sk)) { int sndmem = SKB_TRUESIZE(max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) + MAX_TCP_HEADER); int demanded = max_t(unsigned int, tp->snd_cwnd, tp->reordering + 1); sndmem *= 2 * demanded; if (sndmem > sk->sk_sndbuf) sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]); tp->snd_cwnd_stamp = tcp_time_stamp; } sk->sk_write_space(sk); } static void tcp_check_space(struct sock *sk) { if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) { sock_reset_flag(sk, SOCK_QUEUE_SHRUNK); if (sk->sk_socket && test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) tcp_new_space(sk); } } static inline void tcp_data_snd_check(struct sock *sk) { tcp_push_pending_frames(sk); tcp_check_space(sk); } /* * Check if sending an ack is needed. 
*/ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) { struct tcp_sock *tp = tcp_sk(sk); /* More than one full frame received... */ if (((tp->rcv_nxt - tp->rcv_wup) > (inet_csk(sk)->icsk_ack.rcv_mss) * sysctl_tcp_delack_seg && /* ... and right edge of window advances far enough. * (tcp_recvmsg() will send ACK otherwise). Or... */ __tcp_select_window(sk) >= tp->rcv_wnd) || /* We ACK each frame or... */ tcp_in_quickack_mode(sk) || /* We have out-of-order data. */ (ofo_possible && skb_peek(&tp->out_of_order_queue))) { /* Then ACK it now */ tcp_send_ack(sk); } else { /* Else, send a delayed ACK. */ tcp_send_delayed_ack(sk); } } static inline void tcp_ack_snd_check(struct sock *sk) { if (!inet_csk_ack_scheduled(sk)) { /* We sent a data segment already. */ return; } __tcp_ack_snd_check(sk, 1); } /* * This routine is only called when we have urgent data * signaled. It's the 'slow' part of tcp_urg. It could be * moved inline now as tcp_urg is only called from one * place. We handle URGent data wrong. We have to - as * BSD still doesn't use the correction from RFC961. * For 1003.1g we should support a new option TCP_STDURG to permit * either form (or just set the sysctl tcp_stdurg). */ static void tcp_check_urg(struct sock *sk, const struct tcphdr *th) { struct tcp_sock *tp = tcp_sk(sk); u32 ptr = ntohs(th->urg_ptr); if (ptr && !sysctl_tcp_stdurg) ptr--; ptr += ntohl(th->seq); /* Ignore urgent data that we've already seen and read. */ if (after(tp->copied_seq, ptr)) return; /* Do not replay urg ptr. * * NOTE: interesting situation not covered by specs. * A misbehaving sender may send an urg ptr pointing to a segment * which we already have in the ofo queue. We are not able to fetch * such data and will stay in TCP_URG_NOTYET until it is eaten * by recvmsg(). It seems we are not obliged to handle such wicked * situations. But it is worth thinking about the possibility of some * DoSes using some hypothetical application-level deadlock. */ if (before(ptr, tp->rcv_nxt)) return; /* Do we already have a newer (or duplicate) urgent pointer? */ if (tp->urg_data && !after(ptr, tp->urg_seq)) return; /* Tell the world about our new urgent pointer. */ sk_send_sigurg(sk); /* We may be adding urgent data when the last byte read was * urgent. To do this requires some care. We cannot just ignore * tp->copied_seq since we would read the last urgent byte again * as data, nor can we alter copied_seq until this data arrives * or we break the semantics of SIOCATMARK (and thus sockatmark()) * * NOTE. Double Dutch. Rendering to plain English: the author of the * comment above did something like send("A", MSG_OOB); send("B", MSG_OOB); * and expected both A and B to disappear from the stream. This is _wrong_. * Though this happens in BSD with high probability, it is not guaranteed. * Any application relying on this is buggy. Note also that the fix "works" * only in this artificial test. Insert some normal data between A and B and we will * diverge from BSD again. Verdict: it is better to remove it and trap * buggy users. */ if (tp->urg_seq == tp->copied_seq && tp->urg_data && !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) { struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); tp->copied_seq++; if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) { __skb_unlink(skb, &sk->sk_receive_queue); __kfree_skb(skb); } } tp->urg_data = TCP_URG_NOTYET; tp->urg_seq = ptr; /* Disable header prediction. */ tp->pred_flags = 0; } /* This is the 'fast' part of urgent handling.
*/ static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th) { struct tcp_sock *tp = tcp_sk(sk); /* Check if we get a new urgent pointer - normally not. */ if (th->urg) tcp_check_urg(sk, th); /* Do we wait for any urgent data? - normally not... */ if (tp->urg_data == TCP_URG_NOTYET) { u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) - th->syn; /* Is the urgent pointer pointing into this packet? */ if (ptr < skb->len) { u8 tmp; if (skb_copy_bits(skb, ptr, &tmp, 1)) BUG(); tp->urg_data = TCP_URG_VALID | tmp; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, 0); } } } static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) { struct tcp_sock *tp = tcp_sk(sk); int chunk = skb->len - hlen; int err; local_bh_enable(); if (skb_csum_unnecessary(skb)) err = skb_copy_datagram_iovec(skb, hlen, tp->ucopy.iov, chunk); else err = skb_copy_and_csum_datagram_iovec(skb, hlen, tp->ucopy.iov); if (!err) { tp->ucopy.len -= chunk; tp->copied_seq += chunk; tcp_rcv_space_adjust(sk); } local_bh_disable(); return err; } static __sum16 __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb) { __sum16 result; if (sock_owned_by_user(sk)) { local_bh_enable(); result = __tcp_checksum_complete(skb); local_bh_disable(); } else { result = __tcp_checksum_complete(skb); } return result; } static inline bool tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb) { return !skb_csum_unnecessary(skb) && __tcp_checksum_complete_user(sk, skb); } #ifdef CONFIG_NET_DMA static bool tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen) { struct tcp_sock *tp = tcp_sk(sk); int chunk = skb->len - hlen; int dma_cookie; bool copied_early = false; if (tp->ucopy.wakeup) return false; if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) tp->ucopy.dma_chan = net_dma_find_channel(); if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) { dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan, skb, hlen, tp->ucopy.iov, chunk, tp->ucopy.pinned_list); if (dma_cookie < 0) goto out; tp->ucopy.dma_cookie = dma_cookie; copied_early = true; tp->ucopy.len -= chunk; tp->copied_seq += chunk; tcp_rcv_space_adjust(sk); if ((tp->ucopy.len == 0) || (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) || (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) { tp->ucopy.wakeup = 1; sk->sk_data_ready(sk, 0); } } else if (chunk > 0) { tp->ucopy.wakeup = 1; sk->sk_data_ready(sk, 0); } out: return copied_early; } #endif /* CONFIG_NET_DMA */ /* Does PAWS and seqno based validation of an incoming segment, flags will * play significant role here. */ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th, int syn_inerr) { struct tcp_sock *tp = tcp_sk(sk); /* RFC1323: H1. Apply PAWS check first. */ if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && tcp_paws_discard(sk, skb)) { if (!th->rst) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); tcp_send_dupack(sk, skb); goto discard; } /* Reset is accepted even if it did not pass PAWS. */ } /* Step 1: check sequence number */ if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { /* RFC793, page 37: "In all states except SYN-SENT, all reset * (RST) segments are validated by checking their SEQ-fields." * And page 69: "If an incoming segment is not acceptable, * an acknowledgment should be sent in reply (unless the RST * bit is set, if so drop the segment and return)". 
*/ if (!th->rst) { if (th->syn) goto syn_challenge; tcp_send_dupack(sk, skb); } goto discard; } /* Step 2: check RST bit */ if (th->rst) { /* RFC 5961 3.2: * If sequence number exactly matches RCV.NXT, then * RESET the connection * else * Send a challenge ACK */ if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) tcp_reset(sk); else tcp_send_challenge_ack(sk); goto discard; } /* step 3: check security and precedence [ignored] */ /* step 4: check for a SYN * RFC 5961 4.2: Send a challenge ACK */ if (th->syn) { syn_challenge: if (syn_inerr) TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE); tcp_send_challenge_ack(sk); goto discard; } return true; discard: __kfree_skb(skb); return false; } /* * TCP receive function for the ESTABLISHED state. * * It is split into a fast path and a slow path. The fast path is * disabled when: * - A zero window was announced from us - zero window probing * is only handled properly in the slow path. * - Out-of-order segments arrived. * - Urgent data is expected. * - There is no buffer space left. * - Unexpected TCP flags/window values/header lengths are received * (detected by checking the TCP header against pred_flags) * - Data is sent in both directions. Fast path only supports pure senders * or pure receivers (this means either the sequence number or the ack * value must stay constant) * - Unexpected TCP option. * * When these conditions are not satisfied it drops into a standard * receive procedure patterned after RFC793 to handle all cases. * The first three cases are guaranteed by proper pred_flags setting, * the rest is checked inline. Fast processing is turned on in * tcp_data_queue when everything is OK. */ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th, unsigned int len) { struct tcp_sock *tp = tcp_sk(sk); if (unlikely(sk->sk_rx_dst == NULL)) inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); /* * Header prediction. * The code loosely follows the one in the famous * "30 instruction TCP receive" Van Jacobson mail. * * Van's trick is to deposit buffers into socket queue * on a device interrupt, to call tcp_recv function * on the receive process context and checksum and copy * the buffer to user space. smart... * * Our current scheme is not silly either but we take the * extra cost of the net_bh soft interrupt processing... * We do checksum and copy also but from device to kernel. */ tp->rx_opt.saw_tstamp = 0; /* pred_flags is 0xS?10 << 16 + snd_wnd * if header_prediction is to be made * 'S' will always be tp->tcp_header_len >> 2 * '?' will be 0 for the fast path, otherwise pred_flags is 0 to * turn it off (when there are holes in the receive * space for instance) * PSH flag is ignored. */ if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags && TCP_SKB_CB(skb)->seq == tp->rcv_nxt && !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) { int tcp_header_len = tp->tcp_header_len; /* Timestamp header prediction: tcp_header_len * is automatically equal to th->doff*4 due to pred_flags * match. */ /* Check timestamp */ if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) { /* No? Slow path! */ if (!tcp_parse_aligned_timestamp(tp, th)) goto slow_path; /* If PAWS failed, check it more carefully in slow path */ if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0) goto slow_path; /* DO NOT update ts_recent here: if the checksum fails * and the timestamp was the corrupted part, it would result * in a hung connection since we would drop all * future packets due to the PAWS test.
*/ } if (len <= tcp_header_len) { /* Bulk data transfer: sender */ if (len == tcp_header_len) { /* Predicted packet is in window by definition. * seq == rcv_nxt and rcv_wup <= rcv_nxt. * Hence, check seq<=rcv_wup reduces to: */ if (tcp_header_len == (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && tp->rcv_nxt == tp->rcv_wup) tcp_store_ts_recent(tp); /* We know that such packets are checksummed * on entry. */ tcp_ack(sk, skb, 0); __kfree_skb(skb); tcp_data_snd_check(sk); return 0; } else { /* Header too small */ TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); goto discard; } } else { int eaten = 0; int copied_early = 0; bool fragstolen = false; if (tp->copied_seq == tp->rcv_nxt && len - tcp_header_len <= tp->ucopy.len) { #ifdef CONFIG_NET_DMA if (tp->ucopy.task == current && sock_owned_by_user(sk) && tcp_dma_try_early_copy(sk, skb, tcp_header_len)) { copied_early = 1; eaten = 1; } #endif if (tp->ucopy.task == current && sock_owned_by_user(sk) && !copied_early) { __set_current_state(TASK_RUNNING); if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) eaten = 1; } if (eaten) { /* Predicted packet is in window by definition. * seq == rcv_nxt and rcv_wup <= rcv_nxt. * Hence, check seq<=rcv_wup reduces to: */ if (tcp_header_len == (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && tp->rcv_nxt == tp->rcv_wup) tcp_store_ts_recent(tp); tcp_rcv_rtt_measure_ts(sk, skb); __skb_pull(skb, tcp_header_len); tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER); } if (copied_early) tcp_cleanup_rbuf(sk, skb->len); } if (!eaten) { if (tcp_checksum_complete_user(sk, skb)) goto csum_error; if ((int)skb->truesize > sk->sk_forward_alloc) goto step5; /* Predicted packet is in window by definition. * seq == rcv_nxt and rcv_wup <= rcv_nxt. * Hence, check seq<=rcv_wup reduces to: */ if (tcp_header_len == (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && tp->rcv_nxt == tp->rcv_wup) tcp_store_ts_recent(tp); tcp_rcv_rtt_measure_ts(sk, skb); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS); /* Bulk data transfer: receiver */ eaten = tcp_queue_rcv(sk, skb, tcp_header_len, &fragstolen); } tcp_event_data_recv(sk, skb); if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) { /* Well, only one small jumplet in fast path... */ tcp_ack(sk, skb, FLAG_DATA); tcp_data_snd_check(sk); if (!inet_csk_ack_scheduled(sk)) goto no_ack; } if (!copied_early || tp->rcv_nxt != tp->rcv_wup) __tcp_ack_snd_check(sk, 0); no_ack: #ifdef CONFIG_NET_DMA if (copied_early) __skb_queue_tail(&sk->sk_async_wait_queue, skb); else #endif if (eaten) kfree_skb_partial(skb, fragstolen); sk->sk_data_ready(sk, 0); return 0; } } slow_path: if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb)) goto csum_error; if (!th->ack && !th->rst && !th->syn) goto discard; /* * Standard slow path. */ if (!tcp_validate_incoming(sk, skb, th, 1)) return 0; step5: if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0) goto discard; tcp_rcv_rtt_measure_ts(sk, skb); /* Process urgent data. 
*/ tcp_urg(sk, skb, th); /* step 7: process the segment text */ tcp_data_queue(sk, skb); tcp_data_snd_check(sk); tcp_ack_snd_check(sk); return 0; csum_error: TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS); TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); discard: __kfree_skb(skb); return 0; } EXPORT_SYMBOL(tcp_rcv_established); void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); tcp_set_state(sk, TCP_ESTABLISHED); if (skb != NULL) { icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); security_inet_conn_established(sk, skb); } /* Make sure socket is routed, for correct metrics. */ icsk->icsk_af_ops->rebuild_header(sk); tcp_init_metrics(sk); tcp_init_congestion_control(sk); /* Prevent spurious tcp_cwnd_restart() on first data * packet. */ tp->lsndtime = tcp_time_stamp; tcp_init_buffer_space(sk); if (sock_flag(sk, SOCK_KEEPOPEN)) inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); if (!tp->rx_opt.snd_wscale) __tcp_fast_path_on(tp, tp->snd_wnd); else tp->pred_flags = 0; if (!sock_flag(sk, SOCK_DEAD)) { sk->sk_state_change(sk); sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); } } static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, struct tcp_fastopen_cookie *cookie) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *data = tp->syn_data ? tcp_write_queue_head(sk) : NULL; u16 mss = tp->rx_opt.mss_clamp; bool syn_drop; if (mss == tp->rx_opt.user_mss) { struct tcp_options_received opt; /* Get original SYNACK MSS value if user MSS sets mss_clamp */ tcp_clear_options(&opt); opt.user_mss = opt.mss_clamp = 0; tcp_parse_options(synack, &opt, 0, NULL); mss = opt.mss_clamp; } if (!tp->syn_fastopen) /* Ignore an unsolicited cookie */ cookie->len = -1; /* The SYN-ACK neither has cookie nor acknowledges the data. Presumably * the remote receives only the retransmitted (regular) SYNs: either * the original SYN-data or the corresponding SYN-ACK is lost. */ syn_drop = (cookie->len <= 0 && data && tp->total_retrans); tcp_fastopen_cache_set(sk, mss, cookie, syn_drop); if (data) { /* Retransmit unacked data in SYN */ tcp_for_write_queue_from(data, sk) { if (data == tcp_send_head(sk) || __tcp_retransmit_skb(sk, data)) break; } tcp_rearm_rto(sk); return true; } tp->syn_data_acked = tp->syn_data; return false; } static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th, unsigned int len) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct tcp_fastopen_cookie foc = { .len = -1 }; int saved_clamp = tp->rx_opt.mss_clamp; tcp_parse_options(skb, &tp->rx_opt, 0, &foc); if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) tp->rx_opt.rcv_tsecr -= tp->tsoffset; if (th->ack) { /* rfc793: * "If the state is SYN-SENT then * first check the ACK bit * If the ACK bit is set * If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send * a reset (unless the RST bit is set, if so drop * the segment and return)" */ if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) || after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) goto reset_and_undo; if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp, tcp_time_stamp)) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED); goto reset_and_undo; } /* Now ACK is acceptable. * * "If the RST bit is set * If the ACK was acceptable then signal the user "error: * connection reset", drop the segment, enter CLOSED state, * delete TCB, and return." 
*/ if (th->rst) { tcp_reset(sk); goto discard; } /* rfc793: * "fifth, if neither of the SYN or RST bits is set then * drop the segment and return." * * See note below! * --ANK(990513) */ if (!th->syn) goto discard_and_undo; /* rfc793: * "If the SYN bit is on ... * are acceptable then ... * (our SYN has been ACKed), change the connection * state to ESTABLISHED..." */ TCP_ECN_rcv_synack(tp, th); tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); tcp_ack(sk, skb, FLAG_SLOWPATH); /* Ok.. it's good. Set up sequence numbers and * move to established. */ tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; /* RFC1323: The window in SYN & SYN/ACK segments is * never scaled. */ tp->snd_wnd = ntohs(th->window); if (!tp->rx_opt.wscale_ok) { tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0; tp->window_clamp = min(tp->window_clamp, 65535U); } if (tp->rx_opt.saw_tstamp) { tp->rx_opt.tstamp_ok = 1; tp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; tcp_store_ts_recent(tp); } else { tp->tcp_header_len = sizeof(struct tcphdr); } if (tcp_is_sack(tp) && sysctl_tcp_fack) tcp_enable_fack(tp); tcp_mtup_init(sk); tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); tcp_initialize_rcv_mss(sk); /* Remember, tcp_poll() does not lock socket! * Change state from SYN-SENT only after copied_seq * is initialized. */ tp->copied_seq = tp->rcv_nxt; smp_mb(); tcp_finish_connect(sk, skb); if ((tp->syn_fastopen || tp->syn_data) && tcp_rcv_fastopen_synack(sk, skb, &foc)) return -1; if (sk->sk_write_pending || icsk->icsk_accept_queue.rskq_defer_accept || icsk->icsk_ack.pingpong) { /* Save one ACK. Data will be ready after * several ticks, if write_pending is set. * * It may be deleted, but with this feature tcpdumps * look so _wonderfully_ clever, that I was not able * to stand against the temptation 8) --ANK */ inet_csk_schedule_ack(sk); icsk->icsk_ack.lrcvtime = tcp_time_stamp; tcp_enter_quickack_mode(sk); inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX, TCP_RTO_MAX); discard: __kfree_skb(skb); return 0; } else { tcp_send_ack(sk); } return -1; } /* No ACK in the segment */ if (th->rst) { /* rfc793: * "If the RST bit is set * * Otherwise (no ACK) drop the segment and return." */ goto discard_and_undo; } /* PAWS check. */ if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && tcp_paws_reject(&tp->rx_opt, 0)) goto discard_and_undo; if (th->syn) { /* We see SYN without ACK. It is attempt of * simultaneous connect with crossed SYNs. * Particularly, it can be connect to self. */ tcp_set_state(sk, TCP_SYN_RECV); if (tp->rx_opt.saw_tstamp) { tp->rx_opt.tstamp_ok = 1; tcp_store_ts_recent(tp); tp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; } else { tp->tcp_header_len = sizeof(struct tcphdr); } tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; /* RFC1323: The window in SYN & SYN/ACK segments is * never scaled. */ tp->snd_wnd = ntohs(th->window); tp->snd_wl1 = TCP_SKB_CB(skb)->seq; tp->max_window = tp->snd_wnd; TCP_ECN_rcv_syn(tp, th); tcp_mtup_init(sk); tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); tcp_initialize_rcv_mss(sk); tcp_send_synack(sk); #if 0 /* Note, we could accept data and URG from this segment. * There are no obstacles to make this (except that we must * either change tcp_recvmsg() to prevent it from returning data * before 3WHS completes per RFC793, or employ TCP Fast Open). * * However, if we ignore data in ACKless segments sometimes, * we have no reasons to accept it sometimes. 
* Also, it seems the code doing it in step6 of tcp_rcv_state_process * is not flawless. So, discard the packet for sanity. * Uncomment this return to process the data. */ return -1; #else goto discard; #endif } /* "fifth, if neither of the SYN or RST bits is set then * drop the segment and return." */ discard_and_undo: tcp_clear_options(&tp->rx_opt); tp->rx_opt.mss_clamp = saved_clamp; goto discard; reset_and_undo: tcp_clear_options(&tp->rx_opt); tp->rx_opt.mss_clamp = saved_clamp; return 1; } /* * This function implements the receiving procedure of RFC 793 for * all states except ESTABLISHED and TIME_WAIT. * It's called from both tcp_v4_rcv and tcp_v6_rcv and should be * address independent. */ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th, unsigned int len) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); struct request_sock *req; int queued = 0; tp->rx_opt.saw_tstamp = 0; switch (sk->sk_state) { case TCP_CLOSE: goto discard; case TCP_LISTEN: if (th->ack) return 1; if (th->rst) goto discard; if (th->syn) { if (th->fin) goto discard; if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) return 1; /* Now we have several options: In theory there is * nothing else in the frame. KA9Q has an option to * send data with the syn, BSD accepts data with the * syn up to the [to be] advertised window and * Solaris 2.1 gives you a protocol error. For now * we just ignore it, that fits the spec precisely * and avoids incompatibilities. It would be nice in * future to drop through and process the data. * * Now that TTCP is starting to be used we ought to * queue this data. * But, this leaves one open to an easy denial of * service attack, and SYN cookies can't defend * against this problem. So, we drop the data * in the interest of security over speed unless * it's still in use. */ kfree_skb(skb); return 0; } goto discard; case TCP_SYN_SENT: queued = tcp_rcv_synsent_state_process(sk, skb, th, len); if (queued >= 0) return queued; /* Do step6 onward by hand. */ tcp_urg(sk, skb, th); __kfree_skb(skb); tcp_data_snd_check(sk); return 0; } req = tp->fastopen_rsk; if (req != NULL) { WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && sk->sk_state != TCP_FIN_WAIT1); if (tcp_check_req(sk, skb, req, NULL, true) == NULL) goto discard; } if (!th->ack && !th->rst && !th->syn) goto discard; if (!tcp_validate_incoming(sk, skb, th, 0)) return 0; /* step 5: check the ACK field */ if (true) { int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) > 0; switch (sk->sk_state) { case TCP_SYN_RECV: if (acceptable) { /* Once we leave TCP_SYN_RECV, we no longer * need req so release it. */ if (req) { tcp_synack_rtt_meas(sk, req); tp->total_retrans = req->num_retrans; reqsk_fastopen_remove(sk, req, false); } else { /* Make sure socket is routed, for * correct metrics. */ icsk->icsk_af_ops->rebuild_header(sk); tcp_init_congestion_control(sk); tcp_mtup_init(sk); tcp_init_buffer_space(sk); tp->copied_seq = tp->rcv_nxt; } smp_mb(); tcp_set_state(sk, TCP_ESTABLISHED); sk->sk_state_change(sk); /* Note that this wakeup is only for the marginal * crossed-SYN case. Passively open sockets * are not woken up, because sk->sk_sleep == * NULL and sk->sk_socket == NULL.
*/ if (sk->sk_socket) sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); tp->snd_una = TCP_SKB_CB(skb)->ack_seq; tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale; tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); if (tp->rx_opt.tstamp_ok) tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; if (req) { /* Re-arm the timer because data may * have been sent out. This is similar * to the regular data transmission case * when new data has just been ack'ed. * * (TFO) - we could try to be more * aggressive and retransmit any data * sooner based on when it was sent * out. */ tcp_rearm_rto(sk); } else tcp_init_metrics(sk); tcp_update_pacing_rate(sk); /* Prevent spurious tcp_cwnd_restart() on * first data packet. */ tp->lsndtime = tcp_time_stamp; tcp_initialize_rcv_mss(sk); tcp_fast_path_on(tp); } else { return 1; } break; case TCP_FIN_WAIT1: /* If we enter the TCP_FIN_WAIT1 state and we are a * Fast Open socket and this is the first acceptable * ACK we have received, this would have acknowledged * our SYNACK so stop the SYNACK timer. */ if (req != NULL) { /* Return RST if ack_seq is invalid. * Note that RFC793 only says to generate a * DUPACK for it but for TCP Fast Open it seems * better to treat this case like TCP_SYN_RECV * above. */ if (!acceptable) return 1; /* We no longer need the request sock. */ reqsk_fastopen_remove(sk, req, false); tcp_rearm_rto(sk); } if (tp->snd_una == tp->write_seq) { struct dst_entry *dst; tcp_set_state(sk, TCP_FIN_WAIT2); sk->sk_shutdown |= SEND_SHUTDOWN; dst = __sk_dst_get(sk); if (dst) dst_confirm(dst); if (!sock_flag(sk, SOCK_DEAD)) /* Wake up lingering close() */ sk->sk_state_change(sk); else { int tmo; if (tp->linger2 < 0 || (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) { tcp_done(sk); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); return 1; } tmo = tcp_fin_time(sk); if (tmo > TCP_TIMEWAIT_LEN) { inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); } else if (th->fin || sock_owned_by_user(sk)) { /* Bad case. We could lose such FIN otherwise. * It is not a big problem, but it looks confusing * and is not so rare an event. We can still lose it now, * if it spins in bh_lock_sock(), but it is really * a marginal case. */ inet_csk_reset_keepalive_timer(sk, tmo); } else { tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); goto discard; } } } break; case TCP_CLOSING: if (tp->snd_una == tp->write_seq) { tcp_time_wait(sk, TCP_TIME_WAIT, 0); goto discard; } break; case TCP_LAST_ACK: if (tp->snd_una == tp->write_seq) { tcp_update_metrics(sk); tcp_done(sk); goto discard; } break; } } /* step 6: check the URG bit */ tcp_urg(sk, skb, th); /* step 7: process the segment text */ switch (sk->sk_state) { case TCP_CLOSE_WAIT: case TCP_CLOSING: case TCP_LAST_ACK: if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) break; case TCP_FIN_WAIT1: case TCP_FIN_WAIT2: /* RFC 793 says to queue data in these states, * RFC 1122 says we MUST send a reset. * BSD 4.4 also does reset. */ if (sk->sk_shutdown & RCV_SHUTDOWN) { if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); tcp_reset(sk); return 1; } } /* Fall through */ case TCP_ESTABLISHED: tcp_data_queue(sk, skb); queued = 1; break; } /* tcp_data could move socket to TIME-WAIT */ if (sk->sk_state != TCP_CLOSE) { tcp_data_snd_check(sk); tcp_ack_snd_check(sk); } if (!queued) { discard: __kfree_skb(skb); } return 0; } EXPORT_SYMBOL(tcp_rcv_state_process);
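/*
 * Illustrative addendum -- not part of the kernel file above. A minimal
 * userspace sketch of the option-walk discipline that tcp_parse_options()
 * and tcp_parse_md5sig_option() implement: EOL (kind 0) ends parsing,
 * NOP (kind 1) has no length byte, and every other option carries a
 * length byte that must be >= 2 and must not run past the option area.
 * The DEMO_* constants mirror the standard IANA TCP option kinds; the
 * function and buffer names are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_TCPOPT_EOL    0
#define DEMO_TCPOPT_NOP    1
#define DEMO_TCPOPT_MSS    2
#define DEMO_TCPOPT_WINDOW 3

static void demo_walk_options(const uint8_t *ptr, int length)
{
	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		switch (opcode) {
		case DEMO_TCPOPT_EOL:
			return;			/* end of option list */
		case DEMO_TCPOPT_NOP:
			length--;		/* single-byte padding */
			continue;
		default:
			opsize = *ptr++;
			if (opsize < 2 || opsize > length)
				return;		/* silly or truncated option */
			printf("option kind %d, %d bytes\n", opcode, opsize);
		}
		ptr += opsize - 2;
		length -= opsize;
	}
}

int main(void)
{
	/* MSS 1460, NOP, window scale 7 -- a typical SYN option block */
	const uint8_t opts[] = { DEMO_TCPOPT_MSS, 4, 0x05, 0xb4,
				 DEMO_TCPOPT_NOP,
				 DEMO_TCPOPT_WINDOW, 3, 7 };

	demo_walk_options(opts, (int)sizeof(opts));
	return 0;
}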
GameTheory-/android_kernel_g4stylusn2
net/ipv4/tcp_input.c
C
gpl-2.0
170,266
/* mpq_div -- divide two rational numbers. Copyright 1991, 1994, 1995, 1996, 2000, 2001 Free Software Foundation, Inc. This file is part of the GNU MP Library. The GNU MP Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. The GNU MP Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU MP Library. If not, see http://www.gnu.org/licenses/. */ #include "gmp.h" #include "gmp-impl.h" void mpq_div (mpq_ptr quot, mpq_srcptr op1, mpq_srcptr op2) { mpz_t gcd1, gcd2; mpz_t tmp1, tmp2; mpz_t numtmp; mp_size_t op1_num_size; mp_size_t op1_den_size; mp_size_t op2_num_size; mp_size_t op2_den_size; mp_size_t alloc; TMP_DECL; op2_num_size = ABSIZ(NUM(op2)); if (UNLIKELY (op2_num_size == 0)) DIVIDE_BY_ZERO; op1_num_size = ABSIZ(NUM(op1)); if (op1_num_size == 0) { /* We special case this to simplify allocation logic; gcd(0,x) = x is a singular case for the allocations. */ SIZ(NUM(quot)) = 0; PTR(DEN(quot))[0] = 1; SIZ(DEN(quot)) = 1; return; } op2_den_size = SIZ(DEN(op2)); op1_den_size = SIZ(DEN(op1)); TMP_MARK; alloc = MIN (op1_num_size, op2_num_size); MPZ_TMP_INIT (gcd1, alloc); alloc = MIN (op1_den_size, op2_den_size); MPZ_TMP_INIT (gcd2, alloc); alloc = MAX (op1_num_size, op2_num_size); MPZ_TMP_INIT (tmp1, alloc); alloc = MAX (op1_den_size, op2_den_size); MPZ_TMP_INIT (tmp2, alloc); alloc = op1_num_size + op2_den_size; MPZ_TMP_INIT (numtmp, alloc); /* QUOT might be identical to either operand, so don't store the result there until we are finished with the input operands. We can overwrite the numerator of QUOT when we are finished with the numerators of OP1 and OP2. */ mpz_gcd (gcd1, NUM(op1), NUM(op2)); mpz_gcd (gcd2, DEN(op2), DEN(op1)); mpz_divexact_gcd (tmp1, NUM(op1), gcd1); mpz_divexact_gcd (tmp2, DEN(op2), gcd2); mpz_mul (numtmp, tmp1, tmp2); mpz_divexact_gcd (tmp1, NUM(op2), gcd1); mpz_divexact_gcd (tmp2, DEN(op1), gcd2); mpz_mul (DEN(quot), tmp1, tmp2); /* We needed to go via NUMTMP to take care of QUOT being the same as OP2. Now move NUMTMP to QUOT->_mp_num. */ mpz_set (NUM(quot), numtmp); /* Keep the denominator positive. */ if (SIZ(DEN(quot)) < 0) { SIZ(DEN(quot)) = -SIZ(DEN(quot)); SIZ(NUM(quot)) = -SIZ(NUM(quot)); } TMP_FREE; }
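/*
 * Illustrative addendum -- not part of the GMP source above. A minimal
 * caller of mpq_div() through the public GMP API (link with -lgmp). The
 * values are arbitrary; note the mpq_canonicalize() calls after
 * mpq_set_si(), since mpq_div() assumes canonical operands.
 */
#include <stdio.h>
#include <gmp.h>

int main(void)
{
	mpq_t a, b, q;

	mpq_inits(a, b, q, NULL);

	mpq_set_si(a, 3, 4);	/* a = 3/4 */
	mpq_set_si(b, -2, 5);	/* b = -2/5 */
	mpq_canonicalize(a);
	mpq_canonicalize(b);

	/* q = a / b = (3*5) / (4*-2) = -15/8; the sign moves to the
	   numerator because mpq_div keeps the denominator positive. */
	mpq_div(q, a, b);
	gmp_printf("(%Qd) / (%Qd) = %Qd\n", a, b, q);

	mpq_clears(a, b, q, NULL);
	return 0;
}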
carthy/beard.gmp
mpq/div.c
C
gpl-3.0
2,833
/* * ref.c: reference counting * * Copyright (C) 2009-2011 David Lutterkort * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Author: David Lutterkort <lutter@redhat.com> */ #include <config.h> #include "ref.h" #include <stdlib.h> int ref_make_ref(void *ptrptr, size_t size, size_t ref_ofs) { *(void**) ptrptr = calloc(1, size); if (*(void **)ptrptr == NULL) { return -1; } else { void *ptr = *(void **)ptrptr; *((ref_t *) ((char*) ptr + ref_ofs)) = 1; return 0; } } /* * Local variables: * indent-tabs-mode: nil * c-indent-level: 4 * c-basic-offset: 4 * tab-width: 4 * End: */
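/*
 * Illustrative addendum -- not part of the library source above. A sketch
 * of how ref_make_ref() is meant to be driven: the caller passes the
 * address of its pointer, the object size, and the byte offset of the
 * embedded ref_t counter (ref_t comes from ref.h). The struct and helper
 * below are hypothetical; real users typically go through the helper
 * macros declared in ref.h rather than calling ref_make_ref() directly.
 */
#include <stddef.h>
#include "ref.h"

struct demo_node {
	ref_t ref;		/* embedded reference counter */
	int value;
};

static struct demo_node *demo_node_new(void)
{
	struct demo_node *node = NULL;

	/* Allocates and zeroes the object, sets node->ref to 1;
	 * returns -1 (and leaves node NULL) on allocation failure. */
	if (ref_make_ref(&node, sizeof(*node),
			 offsetof(struct demo_node, ref)) < 0)
		return NULL;
	return node;
}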
camptocamp/augeas-debian
src/ref.c
C
lgpl-2.1
1,339
/** ****************************************************************************** * @file stm32h7xx_hal_sram.c * @author MCD Application Team * @brief SRAM HAL module driver. * This file provides generic firmware to drive SRAM memories * mounted as an external device. * @verbatim ============================================================================== ##### How to use this driver ##### ============================================================================== [..] This driver is a generic layered driver which contains a set of APIs used to control SRAM memories. It uses the FMC layer functions to interface with SRAM devices. The following sequence should be followed to configure the FMC to interface with SRAM/PSRAM memories: (#) Declare a SRAM_HandleTypeDef handle structure, for example: SRAM_HandleTypeDef hsram; and: (++) Fill the SRAM_HandleTypeDef handle "Init" field with the allowed values of the structure member. (++) Fill the SRAM_HandleTypeDef handle "Instance" field with a predefined base register instance for the NOR or SRAM device (++) Fill the SRAM_HandleTypeDef handle "Extended" field with a predefined base register instance for NOR or SRAM extended mode (#) Declare two FMC_NORSRAM_TimingTypeDef structures, for both normal and extended mode timings; for example: FMC_NORSRAM_TimingTypeDef Timing and FMC_NORSRAM_TimingTypeDef ExTiming; and fill their fields with the allowed values of the structure member. (#) Initialize the SRAM Controller by calling the function HAL_SRAM_Init(). This function performs the following sequence: (##) MSP hardware layer configuration using the function HAL_SRAM_MspInit() (##) Control register configuration using the FMC NORSRAM interface function FMC_NORSRAM_Init() (##) Timing register configuration using the FMC NORSRAM interface function FMC_NORSRAM_Timing_Init() (##) Extended mode Timing register configuration using the FMC NORSRAM interface function FMC_NORSRAM_Extended_Timing_Init() (##) Enable the SRAM device using the macro __FMC_NORSRAM_ENABLE() (#) At this stage you can perform read/write accesses from/to the memory connected to the NOR/SRAM Bank. You can perform either polling or DMA transfer using the following APIs: (++) HAL_SRAM_Read()/HAL_SRAM_Write() for polling read/write access (++) HAL_SRAM_Read_DMA()/HAL_SRAM_Write_DMA() for DMA read/write transfer (#) You can also control the SRAM device by calling the control APIs HAL_SRAM_WriteOperation_Enable()/ HAL_SRAM_WriteOperation_Disable() to respectively enable/disable the SRAM write operation (#) You can continuously monitor the SRAM device HAL state by calling the function HAL_SRAM_GetState() *** Callback registration *** ============================================= [..] The compilation define USE_HAL_SRAM_REGISTER_CALLBACKS, when set to 1, allows the user to configure the driver callbacks dynamically. Use the function @ref HAL_SRAM_RegisterCallback() to register a user callback; it allows registering the following callbacks: (+) MspInitCallback : SRAM MspInit. (+) MspDeInitCallback : SRAM MspDeInit. This function takes as parameters the HAL peripheral handle, the Callback ID and a pointer to the user callback function. Use the function @ref HAL_SRAM_UnRegisterCallback() to reset a callback to the default weak (surcharged) function. It allows resetting the following callbacks: (+) MspInitCallback : SRAM MspInit. (+) MspDeInitCallback : SRAM MspDeInit. This function takes as parameters the HAL peripheral handle and the Callback ID.
By default, after the @ref HAL_SRAM_Init and if the state is HAL_SRAM_STATE_RESET all callbacks are reset to the corresponding legacy weak (surcharged) functions. An exception is made for the MspInit and MspDeInit callbacks, which are respectively reset to the legacy weak (surcharged) functions in the @ref HAL_SRAM_Init and @ref HAL_SRAM_DeInit only when these callbacks are null (not registered beforehand). If MspInit or MspDeInit are not null, the @ref HAL_SRAM_Init and @ref HAL_SRAM_DeInit keep and use the user MspInit/MspDeInit callbacks (registered beforehand). Callbacks can be registered/unregistered in READY state only. An exception is made for the MspInit/MspDeInit callbacks, which can be registered/unregistered in READY or RESET state, thus registered (user) MspInit/DeInit callbacks can be used during the Init/DeInit. In that case first register the MspInit/MspDeInit user callbacks using @ref HAL_SRAM_RegisterCallback before calling @ref HAL_SRAM_DeInit or @ref HAL_SRAM_Init function. When the compilation define USE_HAL_SRAM_REGISTER_CALLBACKS is set to 0 or not defined, the callback registering feature is not available and weak (surcharged) callbacks are used. @endverbatim ****************************************************************************** * @attention * * <h2><center>&copy; Copyright (c) 2017 STMicroelectronics. * All rights reserved.</center></h2> * * This software component is licensed by ST under BSD 3-Clause license, * the "License"; You may not use this file except in compliance with the * License. You may obtain a copy of the License at: * opensource.org/licenses/BSD-3-Clause * ****************************************************************************** */ /* Includes ------------------------------------------------------------------*/ #include "stm32h7xx_hal.h" /** @addtogroup STM32H7xx_HAL_Driver * @{ */ #ifdef HAL_SRAM_MODULE_ENABLED /** @defgroup SRAM SRAM * @brief SRAM driver modules * @{ */ /* Private typedef -----------------------------------------------------------*/ /* Private define ------------------------------------------------------------*/ /* Private macro -------------------------------------------------------------*/ /* Private variables ---------------------------------------------------------*/ /* Private function prototypes -----------------------------------------------*/ static void SRAM_DMACplt(MDMA_HandleTypeDef *hmdma); static void SRAM_DMACpltProt(MDMA_HandleTypeDef *hmdma); static void SRAM_DMAError(MDMA_HandleTypeDef *hmdma); /* Exported functions --------------------------------------------------------*/ /** @defgroup SRAM_Exported_Functions SRAM Exported Functions * @{ */ /** @defgroup SRAM_Exported_Functions_Group1 Initialization and de-initialization functions * @brief Initialization and Configuration functions. * @verbatim ============================================================================== ##### SRAM Initialization and de-initialization functions ##### ============================================================================== [..] This section provides functions allowing to initialize/de-initialize the SRAM memory @endverbatim * @{ */ /** * @brief Performs the SRAM device initialization sequence * @param hsram pointer to a SRAM_HandleTypeDef structure that contains * the configuration information for SRAM module.
* @param Timing Pointer to SRAM control timing structure * @param ExtTiming Pointer to SRAM extended mode timing structure * @retval HAL status */ HAL_StatusTypeDef HAL_SRAM_Init(SRAM_HandleTypeDef *hsram, FMC_NORSRAM_TimingTypeDef *Timing, FMC_NORSRAM_TimingTypeDef *ExtTiming) { /* Check the SRAM handle parameter */ if (hsram == NULL) { return HAL_ERROR; } if (hsram->State == HAL_SRAM_STATE_RESET) { /* Allocate lock resource and initialize it */ hsram->Lock = HAL_UNLOCKED; #if (USE_HAL_SRAM_REGISTER_CALLBACKS == 1) if (hsram->MspInitCallback == NULL) { hsram->MspInitCallback = HAL_SRAM_MspInit; } hsram->DmaXferCpltCallback = HAL_SRAM_DMA_XferCpltCallback; hsram->DmaXferErrorCallback = HAL_SRAM_DMA_XferErrorCallback; /* Init the low level hardware */ hsram->MspInitCallback(hsram); #else /* Initialize the low level hardware (MSP) */ HAL_SRAM_MspInit(hsram); #endif /* USE_HAL_SRAM_REGISTER_CALLBACKS */ } /* Initialize SRAM control Interface */ (void)FMC_NORSRAM_Init(hsram->Instance, &(hsram->Init)); /* Initialize SRAM timing Interface */ (void)FMC_NORSRAM_Timing_Init(hsram->Instance, Timing, hsram->Init.NSBank); /* Initialize SRAM extended mode timing Interface */ (void)FMC_NORSRAM_Extended_Timing_Init(hsram->Extended, ExtTiming, hsram->Init.NSBank, hsram->Init.ExtendedMode); /* Enable the NORSRAM device */ __FMC_NORSRAM_ENABLE(hsram->Instance, hsram->Init.NSBank); /* Enable FMC Peripheral */ __FMC_ENABLE(); /* Initialize the SRAM controller state */ hsram->State = HAL_SRAM_STATE_READY; return HAL_OK; } /** * @brief Performs the SRAM device De-initialization sequence. * @param hsram pointer to a SRAM_HandleTypeDef structure that contains * the configuration information for SRAM module. * @retval HAL status */ HAL_StatusTypeDef HAL_SRAM_DeInit(SRAM_HandleTypeDef *hsram) { #if (USE_HAL_SRAM_REGISTER_CALLBACKS == 1) if (hsram->MspDeInitCallback == NULL) { hsram->MspDeInitCallback = HAL_SRAM_MspDeInit; } /* DeInit the low level hardware */ hsram->MspDeInitCallback(hsram); #else /* De-Initialize the low level hardware (MSP) */ HAL_SRAM_MspDeInit(hsram); #endif /* USE_HAL_SRAM_REGISTER_CALLBACKS */ /* Configure the SRAM registers with their reset values */ (void)FMC_NORSRAM_DeInit(hsram->Instance, hsram->Extended, hsram->Init.NSBank); /* Reset the SRAM controller state */ hsram->State = HAL_SRAM_STATE_RESET; /* Release Lock */ __HAL_UNLOCK(hsram); return HAL_OK; } /** * @brief SRAM MSP Init. * @param hsram pointer to a SRAM_HandleTypeDef structure that contains * the configuration information for SRAM module. * @retval None */ __weak void HAL_SRAM_MspInit(SRAM_HandleTypeDef *hsram) { /* Prevent unused argument(s) compilation warning */ UNUSED(hsram); /* NOTE : This function should not be modified. When the callback is needed, the HAL_SRAM_MspInit could be implemented in the user file */ } /** * @brief SRAM MSP DeInit. * @param hsram pointer to a SRAM_HandleTypeDef structure that contains * the configuration information for SRAM module. * @retval None */ __weak void HAL_SRAM_MspDeInit(SRAM_HandleTypeDef *hsram) { /* Prevent unused argument(s) compilation warning */ UNUSED(hsram); /* NOTE : This function should not be modified. When the callback is needed, the HAL_SRAM_MspDeInit could be implemented in the user file */ } /** * @brief DMA transfer complete callback. * @param hmdma pointer to an MDMA_HandleTypeDef structure that contains * the configuration information for the MDMA channel.
* @retval None */ __weak void HAL_SRAM_DMA_XferCpltCallback(MDMA_HandleTypeDef *hmdma) { /* Prevent unused argument(s) compilation warning */ UNUSED(hmdma); /* NOTE : This function should not be modified. When the callback is needed, the HAL_SRAM_DMA_XferCpltCallback could be implemented in the user file */ } /** * @brief DMA transfer error callback. * @param hmdma pointer to an MDMA_HandleTypeDef structure that contains * the configuration information for the MDMA channel. * @retval None */ __weak void HAL_SRAM_DMA_XferErrorCallback(MDMA_HandleTypeDef *hmdma) { /* Prevent unused argument(s) compilation warning */ UNUSED(hmdma); /* NOTE : This function should not be modified. When the callback is needed, the HAL_SRAM_DMA_XferErrorCallback could be implemented in the user file */ } /** * @} */ /** @defgroup SRAM_Exported_Functions_Group2 Input Output and memory control functions * @brief Input Output and memory control functions * @verbatim ============================================================================== ##### SRAM Input and Output functions ##### ============================================================================== [..] This section provides functions allowing to use and control the SRAM memory @endverbatim * @{ */ /** * @brief Reads 8-bit buffer from SRAM memory. * @param hsram pointer to a SRAM_HandleTypeDef structure that contains * the configuration information for SRAM module. * @param pAddress Pointer to read start address * @param pDstBuffer Pointer to destination buffer * @param BufferSize Size of the buffer to read from memory * @retval HAL status */ HAL_StatusTypeDef HAL_SRAM_Read_8b(SRAM_HandleTypeDef *hsram, uint32_t *pAddress, uint8_t *pDstBuffer, uint32_t BufferSize) { uint32_t size; __IO uint8_t *psramaddress = (uint8_t *)pAddress; uint8_t *pdestbuff = pDstBuffer; HAL_SRAM_StateTypeDef state = hsram->State; /* Check the SRAM controller state */ if ((state == HAL_SRAM_STATE_READY) || (state == HAL_SRAM_STATE_PROTECTED)) { /* Process Locked */ __HAL_LOCK(hsram); /* Update the SRAM controller state */ hsram->State = HAL_SRAM_STATE_BUSY; /* Read data from memory */ for (size = BufferSize; size != 0U; size--) { *pdestbuff = *psramaddress; pdestbuff++; psramaddress++; } /* Update the SRAM controller state */ hsram->State = state; /* Process unlocked */ __HAL_UNLOCK(hsram); } else { return HAL_ERROR; } return HAL_OK; } /** * @brief Writes 8-bit buffer to SRAM memory. * @param hsram pointer to a SRAM_HandleTypeDef structure that contains * the configuration information for SRAM module. * @param pAddress Pointer to write start address * @param pSrcBuffer Pointer to source buffer to write * @param BufferSize Size of the buffer to write to memory * @retval HAL status */ HAL_StatusTypeDef HAL_SRAM_Write_8b(SRAM_HandleTypeDef *hsram, uint32_t *pAddress, uint8_t *pSrcBuffer, uint32_t BufferSize) { uint32_t size; __IO uint8_t *psramaddress = (uint8_t *)pAddress; uint8_t *psrcbuff = pSrcBuffer; /* Check the SRAM controller state */ if (hsram->State == HAL_SRAM_STATE_READY) { /* Process Locked */ __HAL_LOCK(hsram); /* Update the SRAM controller state */ hsram->State = HAL_SRAM_STATE_BUSY; /* Write data to memory */ for (size = BufferSize; size != 0U; size--) { *psramaddress = *psrcbuff; psrcbuff++; psramaddress++; } /* Update the SRAM controller state */ hsram->State = HAL_SRAM_STATE_READY; /* Process unlocked */ __HAL_UNLOCK(hsram); } else { return HAL_ERROR; } return HAL_OK; } /** * @brief Reads 16-bit buffer from SRAM memory.
* @param hsram pointer to a SRAM_HandleTypeDef structure that contains * the configuration information for SRAM module. * @param pAddress Pointer to read start address * @param pDstBuffer Pointer to destination buffer * @param BufferSize Size of the buffer to read from memory * @retval HAL status */ HAL_StatusTypeDef HAL_SRAM_Read_16b(SRAM_HandleTypeDef *hsram, uint32_t *pAddress, uint16_t *pDstBuffer, uint32_t BufferSize) { uint32_t size; __IO uint32_t *psramaddress = pAddress; uint16_t *pdestbuff = pDstBuffer; uint8_t limit; HAL_SRAM_StateTypeDef state = hsram->State; /* Check the SRAM controller state */ if ((state == HAL_SRAM_STATE_READY) || (state == HAL_SRAM_STATE_PROTECTED)) { /* Process Locked */ __HAL_LOCK(hsram); /* Update the SRAM controller state */ hsram->State = HAL_SRAM_STATE_BUSY; /* Check if the size is a multiple of 32 bits */ limit = (((BufferSize % 2U) != 0U) ? 1U : 0U); /* Read data from memory */ for (size = BufferSize; size != limit; size -= 2U) { *pdestbuff = (uint16_t)((*psramaddress) & 0x0000FFFFU); pdestbuff++; *pdestbuff = (uint16_t)(((*psramaddress) & 0xFFFF0000U) >> 16U); pdestbuff++; psramaddress++; } /* Read the last 16 bits if the size is not a multiple of 32 bits */ if (limit != 0U) { *pdestbuff = (uint16_t)((*psramaddress) & 0x0000FFFFU); } /* Update the SRAM controller state */ hsram->State = state; /* Process unlocked */ __HAL_UNLOCK(hsram); } else { return HAL_ERROR; } return HAL_OK; } /** * @brief Writes 16-bit buffer to SRAM memory. * @param hsram pointer to a SRAM_HandleTypeDef structure that contains * the configuration information for SRAM module. * @param pAddress Pointer to write start address * @param pSrcBuffer Pointer to source buffer to write * @param BufferSize Size of the buffer to write to memory * @retval HAL status */ HAL_StatusTypeDef HAL_SRAM_Write_16b(SRAM_HandleTypeDef *hsram, uint32_t *pAddress, uint16_t *pSrcBuffer, uint32_t BufferSize) { uint32_t size; __IO uint32_t *psramaddress = pAddress; uint16_t *psrcbuff = pSrcBuffer; uint8_t limit; /* Check the SRAM controller state */ if (hsram->State == HAL_SRAM_STATE_READY) { /* Process Locked */ __HAL_LOCK(hsram); /* Update the SRAM controller state */ hsram->State = HAL_SRAM_STATE_BUSY; /* Check if the size is a multiple of 32 bits */ limit = (((BufferSize % 2U) != 0U) ? 1U : 0U); /* Write data to memory */ for (size = BufferSize; size != limit; size -= 2U) { *psramaddress = (uint32_t)(*psrcbuff); psrcbuff++; *psramaddress |= ((uint32_t)(*psrcbuff) << 16U); psrcbuff++; psramaddress++; } /* Write the last 16 bits if the size is not a multiple of 32 bits */ if (limit != 0U) { *psramaddress = ((uint32_t)(*psrcbuff) & 0x0000FFFFU) | ((*psramaddress) & 0xFFFF0000U); } /* Update the SRAM controller state */ hsram->State = HAL_SRAM_STATE_READY; /* Process unlocked */ __HAL_UNLOCK(hsram); } else { return HAL_ERROR; } return HAL_OK; } /** * @brief Reads 32-bit buffer from SRAM memory. * @param hsram pointer to a SRAM_HandleTypeDef structure that contains * the configuration information for SRAM module.
* @param pAddress Pointer to read start address * @param pDstBuffer Pointer to destination buffer * @param BufferSize Size of the buffer to read from memory * @retval HAL status */ HAL_StatusTypeDef HAL_SRAM_Read_32b(SRAM_HandleTypeDef *hsram, uint32_t *pAddress, uint32_t *pDstBuffer, uint32_t BufferSize) { uint32_t size; __IO uint32_t *psramaddress = pAddress; uint32_t *pdestbuff = pDstBuffer; HAL_SRAM_StateTypeDef state = hsram->State; /* Check the SRAM controller state */ if ((state == HAL_SRAM_STATE_READY) || (state == HAL_SRAM_STATE_PROTECTED)) { /* Process Locked */ __HAL_LOCK(hsram); /* Update the SRAM controller state */ hsram->State = HAL_SRAM_STATE_BUSY; /* Read data from memory */ for (size = BufferSize; size != 0U; size--) { *pdestbuff = *psramaddress; pdestbuff++; psramaddress++; } /* Update the SRAM controller state */ hsram->State = state; /* Process unlocked */ __HAL_UNLOCK(hsram); } else { return HAL_ERROR; } return HAL_OK; } /** * @brief Writes 32-bit buffer to SRAM memory. * @param hsram pointer to a SRAM_HandleTypeDef structure that contains * the configuration information for SRAM module. * @param pAddress Pointer to write start address * @param pSrcBuffer Pointer to source buffer to write * @param BufferSize Size of the buffer to write to memory * @retval HAL status */ HAL_StatusTypeDef HAL_SRAM_Write_32b(SRAM_HandleTypeDef *hsram, uint32_t *pAddress, uint32_t *pSrcBuffer, uint32_t BufferSize) { uint32_t size; __IO uint32_t *psramaddress = pAddress; uint32_t *psrcbuff = pSrcBuffer; /* Check the SRAM controller state */ if (hsram->State == HAL_SRAM_STATE_READY) { /* Process Locked */ __HAL_LOCK(hsram); /* Update the SRAM controller state */ hsram->State = HAL_SRAM_STATE_BUSY; /* Write data to memory */ for (size = BufferSize; size != 0U; size--) { *psramaddress = *psrcbuff; psrcbuff++; psramaddress++; } /* Update the SRAM controller state */ hsram->State = HAL_SRAM_STATE_READY; /* Process unlocked */ __HAL_UNLOCK(hsram); } else { return HAL_ERROR; } return HAL_OK; } /** * @brief Reads words of data from the SRAM memory using DMA transfer. * @param hsram pointer to a SRAM_HandleTypeDef structure that contains * the configuration information for SRAM module. * @param pAddress Pointer to read start address * @param pDstBuffer Pointer to destination buffer * @param BufferSize Size of the buffer to read from memory * @retval HAL status */ HAL_StatusTypeDef HAL_SRAM_Read_DMA(SRAM_HandleTypeDef *hsram, uint32_t *pAddress, uint32_t *pDstBuffer, uint32_t BufferSize) { HAL_StatusTypeDef status; HAL_SRAM_StateTypeDef state = hsram->State; /* Check the SRAM controller state */ if ((state == HAL_SRAM_STATE_READY) || (state == HAL_SRAM_STATE_PROTECTED)) { /* Process Locked */ __HAL_LOCK(hsram); /* Update the SRAM controller state */ hsram->State = HAL_SRAM_STATE_BUSY; /* Configure DMA user callbacks */ if (state == HAL_SRAM_STATE_READY) { hsram->hmdma->XferCpltCallback = SRAM_DMACplt; } else { hsram->hmdma->XferCpltCallback = SRAM_DMACpltProt; } hsram->hmdma->XferErrorCallback = SRAM_DMAError; /* Enable the DMA Stream */ status = HAL_MDMA_Start_IT(hsram->hmdma, (uint32_t)pAddress, (uint32_t)pDstBuffer, (uint32_t)(BufferSize * 4U), 1); /* Process unlocked */ __HAL_UNLOCK(hsram); } else { status = HAL_ERROR; } return status; } /** * @brief Writes words of data to the SRAM memory using DMA transfer. * @param hsram pointer to a SRAM_HandleTypeDef structure that contains * the configuration information for SRAM module.
* @param pAddress Pointer to write start address * @param pSrcBuffer Pointer to source buffer to write * @param BufferSize Size of the buffer to write to memory * @retval HAL status */ HAL_StatusTypeDef HAL_SRAM_Write_DMA(SRAM_HandleTypeDef *hsram, uint32_t *pAddress, uint32_t *pSrcBuffer, uint32_t BufferSize) { HAL_StatusTypeDef status; /* Check the SRAM controller state */ if (hsram->State == HAL_SRAM_STATE_READY) { /* Process Locked */ __HAL_LOCK(hsram); /* Update the SRAM controller state */ hsram->State = HAL_SRAM_STATE_BUSY; /* Configure DMA user callbacks */ hsram->hmdma->XferCpltCallback = SRAM_DMACplt; hsram->hmdma->XferErrorCallback = SRAM_DMAError; /* Enable the DMA Stream */ status = HAL_MDMA_Start_IT(hsram->hmdma, (uint32_t)pSrcBuffer, (uint32_t)pAddress, (uint32_t)(BufferSize * 4U), 1); /* Process unlocked */ __HAL_UNLOCK(hsram); } else { status = HAL_ERROR; } return status; } #if (USE_HAL_SRAM_REGISTER_CALLBACKS == 1) /** * @brief Register a User SRAM Callback * To be used instead of the weak (surcharged) predefined callback * @param hsram : SRAM handle * @param CallbackId : ID of the callback to be registered * This parameter can be one of the following values: * @arg @ref HAL_SRAM_MSP_INIT_CB_ID SRAM MspInit callback ID * @arg @ref HAL_SRAM_MSP_DEINIT_CB_ID SRAM MspDeInit callback ID * @param pCallback : pointer to the Callback function * @retval status */ HAL_StatusTypeDef HAL_SRAM_RegisterCallback(SRAM_HandleTypeDef *hsram, HAL_SRAM_CallbackIDTypeDef CallbackId, pSRAM_CallbackTypeDef pCallback) { HAL_StatusTypeDef status = HAL_OK; HAL_SRAM_StateTypeDef state; if (pCallback == NULL) { return HAL_ERROR; } /* Process locked */ __HAL_LOCK(hsram); state = hsram->State; if ((state == HAL_SRAM_STATE_READY) || (state == HAL_SRAM_STATE_RESET) || (state == HAL_SRAM_STATE_PROTECTED)) { switch (CallbackId) { case HAL_SRAM_MSP_INIT_CB_ID : hsram->MspInitCallback = pCallback; break; case HAL_SRAM_MSP_DEINIT_CB_ID : hsram->MspDeInitCallback = pCallback; break; default : /* update return status */ status = HAL_ERROR; break; } } else { /* update return status */ status = HAL_ERROR; } /* Release Lock */ __HAL_UNLOCK(hsram); return status; } /** * @brief Unregister a User SRAM Callback * SRAM Callback is redirected to the weak (surcharged) predefined callback * @param hsram : SRAM handle * @param CallbackId : ID of the callback to be unregistered * This parameter can be one of the following values: * @arg @ref HAL_SRAM_MSP_INIT_CB_ID SRAM MspInit callback ID * @arg @ref HAL_SRAM_MSP_DEINIT_CB_ID SRAM MspDeInit callback ID * @arg @ref HAL_SRAM_DMA_XFER_CPLT_CB_ID SRAM DMA Xfer Complete callback ID * @arg @ref HAL_SRAM_DMA_XFER_ERR_CB_ID SRAM DMA Xfer Error callback ID * @retval status */ HAL_StatusTypeDef HAL_SRAM_UnRegisterCallback(SRAM_HandleTypeDef *hsram, HAL_SRAM_CallbackIDTypeDef CallbackId) { HAL_StatusTypeDef status = HAL_OK; HAL_SRAM_StateTypeDef state; /* Process locked */ __HAL_LOCK(hsram); state = hsram->State; if ((state == HAL_SRAM_STATE_READY) || (state == HAL_SRAM_STATE_PROTECTED)) { switch (CallbackId) { case HAL_SRAM_MSP_INIT_CB_ID : hsram->MspInitCallback = HAL_SRAM_MspInit; break; case HAL_SRAM_MSP_DEINIT_CB_ID : hsram->MspDeInitCallback = HAL_SRAM_MspDeInit; break; case HAL_SRAM_DMA_XFER_CPLT_CB_ID : hsram->DmaXferCpltCallback = HAL_SRAM_DMA_XferCpltCallback; break; case HAL_SRAM_DMA_XFER_ERR_CB_ID : hsram->DmaXferErrorCallback = HAL_SRAM_DMA_XferErrorCallback; break; default : /* update return status */ status = HAL_ERROR; break; } } else if (state == 
HAL_SRAM_STATE_RESET) { switch (CallbackId) { case HAL_SRAM_MSP_INIT_CB_ID : hsram->MspInitCallback = HAL_SRAM_MspInit; break; case HAL_SRAM_MSP_DEINIT_CB_ID : hsram->MspDeInitCallback = HAL_SRAM_MspDeInit; break; default : /* update return status */ status = HAL_ERROR; break; } } else { /* update return status */ status = HAL_ERROR; } /* Release Lock */ __HAL_UNLOCK(hsram); return status; } /** * @brief Register a User SRAM Callback for DMA transfers * To be used instead of the weak (surcharged) predefined callback * @param hsram : SRAM handle * @param CallbackId : ID of the callback to be registered * This parameter can be one of the following values: * @arg @ref HAL_SRAM_DMA_XFER_CPLT_CB_ID SRAM DMA Xfer Complete callback ID * @arg @ref HAL_SRAM_DMA_XFER_ERR_CB_ID SRAM DMA Xfer Error callback ID * @param pCallback : pointer to the Callback function * @retval status */ HAL_StatusTypeDef HAL_SRAM_RegisterDmaCallback(SRAM_HandleTypeDef *hsram, HAL_SRAM_CallbackIDTypeDef CallbackId, pSRAM_DmaCallbackTypeDef pCallback) { HAL_StatusTypeDef status = HAL_OK; HAL_SRAM_StateTypeDef state; if (pCallback == NULL) { return HAL_ERROR; } /* Process locked */ __HAL_LOCK(hsram); state = hsram->State; if ((state == HAL_SRAM_STATE_READY) || (state == HAL_SRAM_STATE_PROTECTED)) { switch (CallbackId) { case HAL_SRAM_DMA_XFER_CPLT_CB_ID : hsram->DmaXferCpltCallback = pCallback; break; case HAL_SRAM_DMA_XFER_ERR_CB_ID : hsram->DmaXferErrorCallback = pCallback; break; default : /* update return status */ status = HAL_ERROR; break; } } else { /* update return status */ status = HAL_ERROR; } /* Release Lock */ __HAL_UNLOCK(hsram); return status; } #endif /* USE_HAL_SRAM_REGISTER_CALLBACKS */ /** * @} */ /** @defgroup SRAM_Exported_Functions_Group3 Control functions * @brief Control functions * @verbatim ============================================================================== ##### SRAM Control functions ##### ============================================================================== [..] This subsection provides a set of functions allowing to control dynamically the SRAM interface. @endverbatim * @{ */ /** * @brief Enables dynamically SRAM write operation. * @param hsram pointer to a SRAM_HandleTypeDef structure that contains * the configuration information for SRAM module. * @retval HAL status */ HAL_StatusTypeDef HAL_SRAM_WriteOperation_Enable(SRAM_HandleTypeDef *hsram) { /* Check the SRAM controller state */ if (hsram->State == HAL_SRAM_STATE_PROTECTED) { /* Process Locked */ __HAL_LOCK(hsram); /* Update the SRAM controller state */ hsram->State = HAL_SRAM_STATE_BUSY; /* Enable write operation */ (void)FMC_NORSRAM_WriteOperation_Enable(hsram->Instance, hsram->Init.NSBank); /* Update the SRAM controller state */ hsram->State = HAL_SRAM_STATE_READY; /* Process unlocked */ __HAL_UNLOCK(hsram); } else { return HAL_ERROR; } return HAL_OK; } /** * @brief Disables dynamically SRAM write operation. * @param hsram pointer to a SRAM_HandleTypeDef structure that contains * the configuration information for SRAM module. 
* @retval HAL status */ HAL_StatusTypeDef HAL_SRAM_WriteOperation_Disable(SRAM_HandleTypeDef *hsram) { /* Check the SRAM controller state */ if (hsram->State == HAL_SRAM_STATE_READY) { /* Process Locked */ __HAL_LOCK(hsram); /* Update the SRAM controller state */ hsram->State = HAL_SRAM_STATE_BUSY; /* Disable write operation */ (void)FMC_NORSRAM_WriteOperation_Disable(hsram->Instance, hsram->Init.NSBank); /* Update the SRAM controller state */ hsram->State = HAL_SRAM_STATE_PROTECTED; /* Process unlocked */ __HAL_UNLOCK(hsram); } else { return HAL_ERROR; } return HAL_OK; } /** * @} */ /** @defgroup SRAM_Exported_Functions_Group4 Peripheral State functions * @brief Peripheral State functions * @verbatim ============================================================================== ##### SRAM State functions ##### ============================================================================== [..] This subsection permits to get in run-time the status of the SRAM controller and the data flow. @endverbatim * @{ */ /** * @brief Returns the SRAM controller state * @param hsram pointer to a SRAM_HandleTypeDef structure that contains * the configuration information for SRAM module. * @retval HAL state */ HAL_SRAM_StateTypeDef HAL_SRAM_GetState(SRAM_HandleTypeDef *hsram) { return hsram->State; } /** * @} */ /** * @} */ /** * @brief MDMA SRAM process complete callback. * @param hmdma : MDMA handle * @retval None */ static void SRAM_DMACplt(MDMA_HandleTypeDef *hmdma) { SRAM_HandleTypeDef *hsram = (SRAM_HandleTypeDef *)(hmdma->Parent); /* Disable the MDMA channel */ __HAL_MDMA_DISABLE(hmdma); /* Update the SRAM controller state */ hsram->State = HAL_SRAM_STATE_READY; #if (USE_HAL_SRAM_REGISTER_CALLBACKS == 1) hsram->DmaXferCpltCallback(hmdma); #else HAL_SRAM_DMA_XferCpltCallback(hmdma); #endif /* USE_HAL_SRAM_REGISTER_CALLBACKS */ } /** * @brief MDMA SRAM process complete callback. * @param hmdma : MDMA handle * @retval None */ static void SRAM_DMACpltProt(MDMA_HandleTypeDef *hmdma) { SRAM_HandleTypeDef *hsram = (SRAM_HandleTypeDef *)(hmdma->Parent); /* Disable the MDMA channel */ __HAL_MDMA_DISABLE(hmdma); /* Update the SRAM controller state */ hsram->State = HAL_SRAM_STATE_PROTECTED; #if (USE_HAL_SRAM_REGISTER_CALLBACKS == 1) hsram->DmaXferCpltCallback(hmdma); #else HAL_SRAM_DMA_XferCpltCallback(hmdma); #endif /* USE_HAL_SRAM_REGISTER_CALLBACKS */ } /** * @brief MDMA SRAM error callback. * @param hmdma : MDMA handle * @retval None */ static void SRAM_DMAError(MDMA_HandleTypeDef *hmdma) { SRAM_HandleTypeDef *hsram = (SRAM_HandleTypeDef *)(hmdma->Parent); /* Disable the MDMA channel */ __HAL_MDMA_DISABLE(hmdma); /* Update the SRAM controller state */ hsram->State = HAL_SRAM_STATE_ERROR; #if (USE_HAL_SRAM_REGISTER_CALLBACKS == 1) hsram->DmaXferErrorCallback(hmdma); #else HAL_SRAM_DMA_XferErrorCallback(hmdma); #endif /* USE_HAL_SRAM_REGISTER_CALLBACKS */ } /** * @} */ #endif /* HAL_SRAM_MODULE_ENABLED */ /** * @} */ /************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
adfernandes/mbed
targets/TARGET_STM/TARGET_STM32H7/STM32Cube_FW/STM32H7xx_HAL_Driver/stm32h7xx_hal_sram.c
C
apache-2.0
33,824
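The driver's own how-to collapses into very little code. Below is a minimal polling-mode bring-up sketch assuming extended mode is disabled (so the ExtTiming argument may be NULL); FMC_NORSRAM_DEVICE, FMC_NORSRAM_BANK1, the 0x60000000 bank base, and the zeroed timing struct are typical-value placeholders to be filled from the external memory's datasheet, not values taken from this file.

#include "stm32h7xx_hal.h"

static SRAM_HandleTypeDef hsram;

HAL_StatusTypeDef sram_bringup(void)
{
    FMC_NORSRAM_TimingTypeDef timing = {0}; /* fill from the memory datasheet */
    uint32_t pattern = 0xA5A5A5A5U;

    hsram.Instance = FMC_NORSRAM_DEVICE;          /* control register block */
    hsram.Extended = FMC_NORSRAM_EXTENDED_DEVICE; /* extended-mode registers */
    hsram.Init.NSBank = FMC_NORSRAM_BANK1;        /* plus the remaining Init fields */

    /* runs MspInit, FMC config, timing config, then enables the bank */
    if (HAL_SRAM_Init(&hsram, &timing, NULL) != HAL_OK)
    {
        return HAL_ERROR;
    }

    /* polling write of one 32-bit word at the bank 1 base address */
    return HAL_SRAM_Write_32b(&hsram, (uint32_t *)0x60000000U, &pattern, 1U);
}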
/* This testcase is part of GDB, the GNU debugger. Copyright 2014-2016 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ extern int shlib_1_func (void); int main () { /* We need a reference to shlib_1_func to make sure its shlib is not discarded from the link. This happens on Windows. */ int x = shlib_1_func (); return 0; }
swigger/gdb-ios
gdb/testsuite/gdb.base/symtab-search-order.c
C
gpl-2.0
974
// // Copyright(C) 1993-1996 Id Software, Inc. // Copyright(C) 2005-2014 Simon Howard // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // DESCRIPTION: // Mission begin melt/wipe screen special effect. // #include <string.h> #include "z_zone.h" #include "i_video.h" #include "v_video.h" #include "m_random.h" #include "doomtype.h" #include "f_wipe.h" // // SCREEN WIPE PACKAGE // // when zero, stop the wipe static boolean go = 0; static byte* wipe_scr_start; static byte* wipe_scr_end; static byte* wipe_scr; void wipe_shittyColMajorXform ( short* array, int width, int height ) { int x; int y; short* dest; dest = (short*) Z_Malloc(width*height*2, PU_STATIC, 0); for(y=0;y<height;y++) for(x=0;x<width;x++) dest[x*height+y] = array[y*width+x]; memcpy(array, dest, width*height*2); Z_Free(dest); } int wipe_initColorXForm ( int width, int height, int ticks ) { memcpy(wipe_scr, wipe_scr_start, width*height); return 0; } int wipe_doColorXForm ( int width, int height, int ticks ) { boolean changed; byte* w; byte* e; int newval; changed = false; w = wipe_scr; e = wipe_scr_end; while (w!=wipe_scr+width*height) { if (*w != *e) { if (*w > *e) { newval = *w - ticks; if (newval < *e) *w = *e; else *w = newval; changed = true; } else if (*w < *e) { newval = *w + ticks; if (newval > *e) *w = *e; else *w = newval; changed = true; } } w++; e++; } return !changed; } int wipe_exitColorXForm ( int width, int height, int ticks ) { return 0; } static int* y; int wipe_initMelt ( int width, int height, int ticks ) { int i, r; // copy start screen to main screen memcpy(wipe_scr, wipe_scr_start, width*height); // makes this wipe faster (in theory) // to have stuff in column-major format wipe_shittyColMajorXform((short*)wipe_scr_start, width/2, height); wipe_shittyColMajorXform((short*)wipe_scr_end, width/2, height); // setup initial column positions // (y<0 => not ready to scroll yet) y = (int *) Z_Malloc(width*sizeof(int), PU_STATIC, 0); y[0] = -(M_Random()%16); for (i=1;i<width;i++) { r = (M_Random()%3) - 1; y[i] = y[i-1] + r; if (y[i] > 0) y[i] = 0; else if (y[i] == -16) y[i] = -15; } return 0; } int wipe_doMelt ( int width, int height, int ticks ) { int i; int j; int dy; int idx; short* s; short* d; boolean done = true; width/=2; while (ticks--) { for (i=0;i<width;i++) { if (y[i]<0) { y[i]++; done = false; } else if (y[i] < height) { dy = (y[i] < 16) ? 
y[i]+1 : 8; if (y[i]+dy >= height) dy = height - y[i]; s = &((short *)wipe_scr_end)[i*height+y[i]]; d = &((short *)wipe_scr)[y[i]*width+i]; idx = 0; for (j=dy;j;j--) { d[idx] = *(s++); idx += width; } y[i] += dy; s = &((short *)wipe_scr_start)[i*height]; d = &((short *)wipe_scr)[y[i]*width+i]; idx = 0; for (j=height-y[i];j;j--) { d[idx] = *(s++); idx += width; } done = false; } } } return done; } int wipe_exitMelt ( int width, int height, int ticks ) { Z_Free(y); Z_Free(wipe_scr_start); Z_Free(wipe_scr_end); return 0; } int wipe_StartScreen ( int x, int y, int width, int height ) { wipe_scr_start = Z_Malloc(SCREENWIDTH * SCREENHEIGHT, PU_STATIC, NULL); I_ReadScreen(wipe_scr_start); return 0; } int wipe_EndScreen ( int x, int y, int width, int height ) { wipe_scr_end = Z_Malloc(SCREENWIDTH * SCREENHEIGHT, PU_STATIC, NULL); I_ReadScreen(wipe_scr_end); V_DrawBlock(x, y, width, height, wipe_scr_start); // restore start scr. return 0; } int wipe_ScreenWipe ( int wipeno, int x, int y, int width, int height, int ticks ) { int rc; static int (*wipes[])(int, int, int) = { wipe_initColorXForm, wipe_doColorXForm, wipe_exitColorXForm, wipe_initMelt, wipe_doMelt, wipe_exitMelt }; // initial stuff if (!go) { go = 1; // wipe_scr = (byte *) Z_Malloc(width*height, PU_STATIC, 0); // DEBUG wipe_scr = I_VideoBuffer; (*wipes[wipeno*3])(width, height, ticks); } // do a piece of wipe-in V_MarkRect(0, 0, width, height); rc = (*wipes[wipeno*3+1])(width, height, ticks); // V_DrawBlock(x, y, 0, width, height, wipe_scr); // DEBUG // final stuff if (rc) { go = 0; (*wipes[wipeno*3+2])(width, height, ticks); } return !go; }
WarlockD/crispy-doom
stm32/chocolate/chocdoom/f_wipe.c
C
gpl-2.0
5,076
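The three-entry function-pointer groups in wipe_ScreenWipe() make the caller trivial: snapshot both frames, then pump the do_ step with elapsed tics until it reports completion. A sketch of that driving loop follows; wipe number 1 selecting the melt triple (wipes[3..5]) matches the table layout above, while I_FinishUpdate() stands in for whatever routine the port uses to push the video buffer to the display.

#include "i_video.h"  /* SCREENWIDTH, SCREENHEIGHT */
#include "f_wipe.h"   /* wipe_StartScreen, wipe_EndScreen, wipe_ScreenWipe */

extern void I_FinishUpdate(void); /* assumed: presents the frame */

void run_melt_wipe(void)
{
    int done;

    wipe_StartScreen(0, 0, SCREENWIDTH, SCREENHEIGHT); /* snapshot the old frame */
    /* ... render the new frame into the video buffer here ... */
    wipe_EndScreen(0, 0, SCREENWIDTH, SCREENHEIGHT);   /* snapshot the new frame */

    do
    {
        /* advance the melt by one tic; returns nonzero when finished */
        done = wipe_ScreenWipe(1, 0, 0, SCREENWIDTH, SCREENHEIGHT, 1);
        I_FinishUpdate();
    } while (!done);
}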
/** * @file * * AMD Family_10 Hydra Logical ID Table * * @xrefitem bom "File Content Label" "Release Content" * @e project: AGESA * @e sub-project: CPU/FAMILY/0x10 * @e \$Revision: 6261 $ @e \$Date: 2008-06-04 17:38:17 -0500 (Wed, 04 Jun 2008) $ * */ /* ****************************************************************************** * * Copyright (c) 2011, Advanced Micro Devices, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Advanced Micro Devices, Inc. nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* ****************************************************************************** */ /*---------------------------------------------------------------------------------------- * M O D U L E S U S E D *---------------------------------------------------------------------------------------- */ #include "AGESA.h" #include "cpuRegisters.h" #include "Filecode.h" #define FILECODE PROC_CPU_FAMILY_0X10_REVD_HY_F10HYLOGICALIDTABLES_FILECODE /*---------------------------------------------------------------------------------------- * D E F I N I T I O N S A N D M A C R O S *---------------------------------------------------------------------------------------- */ /*---------------------------------------------------------------------------------------- * T Y P E D E F S A N D S T R U C T U R E S *---------------------------------------------------------------------------------------- */ /*---------------------------------------------------------------------------------------- * P R O T O T Y P E S O F L O C A L F U N C T I O N S *---------------------------------------------------------------------------------------- */ /*---------------------------------------------------------------------------------------- * E X P O R T E D F U N C T I O N S *---------------------------------------------------------------------------------------- */ VOID GetF10HyLogicalIdAndRev ( OUT CONST CPU_LOGICAL_ID_XLAT **HyIdPtr, OUT UINT8 *NumberOfElements, OUT UINT64 *LogicalFamily, IN OUT AMD_CONFIG_PARAMS *StdHeader ); STATIC CONST CPU_LOGICAL_ID_XLAT ROMDATA CpuF10HyLogicalIdAndRevArray[] = { { 0x1080, AMD_F10_HY_SCM_D0 }, { 0x1090, AMD_F10_HY_MCM_D0 }, { 0x1081, AMD_F10_HY_SCM_D1 }, { 0x1091, AMD_F10_HY_MCM_D1 } }; VOID GetF10HyLogicalIdAndRev ( OUT CONST CPU_LOGICAL_ID_XLAT **HyIdPtr, OUT UINT8 *NumberOfElements, OUT UINT64 *LogicalFamily, IN OUT AMD_CONFIG_PARAMS *StdHeader ) { *NumberOfElements = (sizeof (CpuF10HyLogicalIdAndRevArray) / sizeof (CPU_LOGICAL_ID_XLAT)); *HyIdPtr = CpuF10HyLogicalIdAndRevArray; *LogicalFamily = AMD_FAMILY_10_HY; }
hustcalm/coreboot-hacking
src/vendorcode/amd/agesa/f10/Proc/CPU/Family/0x10/RevD/HY/F10HyLogicalIdTables.c
C
gpl-2.0
4,284
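Callers of GetF10HyLogicalIdAndRev() consume the returned array as a linear key-to-revision lookup. A standalone sketch of that pattern using plain C types; the field names and the meaning of the 16-bit key (0x1080 and friends encoding package and stepping) are assumptions inferred from the table contents, not the AGESA definitions.

#include <stdint.h>
#include <stddef.h>

typedef struct {
    uint16_t RawKey;      /* e.g. 0x1080 */
    uint64_t LogicalRev;  /* e.g. the value behind AMD_F10_HY_SCM_D0 */
} xlat_entry;

/* scan the translation table for a matching raw key */
static uint64_t lookup_logical_rev(const xlat_entry *table, size_t n,
                                   uint16_t key)
{
    for (size_t i = 0; i < n; i++) {
        if (table[i].RawKey == key) {
            return table[i].LogicalRev;
        }
    }
    return 0; /* unknown revision */
}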
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */ /* * vmx_fault.c: handling VMX architecture-related VM exits * Copyright (c) 2005, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * * Xiaoyan Feng (Fleming Feng) <fleming.feng@intel.com> * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) */ #include <xen/config.h> #include <xen/lib.h> #include <xen/errno.h> #include <xen/sched.h> #include <xen/smp.h> #include <asm/ptrace.h> #include <xen/delay.h> #include <linux/efi.h> /* FOR EFI_UNIMPLEMENTED */ #include <asm/sal.h> /* FOR struct ia64_sal_retval */ #include <asm/system.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/desc.h> #include <asm/vlsapic.h> #include <xen/irq.h> #include <xen/event.h> #include <asm/regionreg.h> #include <asm/privop.h> #include <asm/ia64_int.h> #include <asm/debugger.h> #include <asm/dom_fw.h> #include <asm/vmx_vcpu.h> #include <asm/kregs.h> #include <asm/vmx.h> #include <asm/vmmu.h> #include <asm/vmx_mm_def.h> #include <asm/vmx_phy_mode.h> #include <xen/mm.h> #include <asm/vmx_pal.h> #include <asm/shadow.h> #include <asm/sioemu.h> #include <public/arch-ia64/sioemu.h> #include <xen/hvm/irq.h> /* reset all PSR field to 0, except up,mfl,mfh,pk,dt,rt,mc,it */ #define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034 extern unsigned long handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr); #define DOMN_PAL_REQUEST 0x110000 #define DOMN_SAL_REQUEST 0x110001 static const u16 vec2off[68] = {0x0,0x400,0x800,0xc00,0x1000,0x1400,0x1800, 0x1c00,0x2000,0x2400,0x2800,0x2c00,0x3000,0x3400,0x3800,0x3c00,0x4000, 0x4400,0x4800,0x4c00,0x5000,0x5100,0x5200,0x5300,0x5400,0x5500,0x5600, 0x5700,0x5800,0x5900,0x5a00,0x5b00,0x5c00,0x5d00,0x5e00,0x5f00,0x6000, 0x6100,0x6200,0x6300,0x6400,0x6500,0x6600,0x6700,0x6800,0x6900,0x6a00, 0x6b00,0x6c00,0x6d00,0x6e00,0x6f00,0x7000,0x7100,0x7200,0x7300,0x7400, 0x7500,0x7600,0x7700,0x7800,0x7900,0x7a00,0x7b00,0x7c00,0x7d00,0x7e00, 0x7f00 }; void vmx_lazy_load_fpu(struct vcpu *vcpu) { if (FP_PSR(vcpu) & IA64_PSR_DFH) { FP_PSR(vcpu) = IA64_PSR_MFH; if (__ia64_per_cpu_var(fp_owner) != vcpu) __ia64_load_fpu(vcpu->arch._thread.fph); } } void vmx_reflect_interruption(u64 ifa, u64 isr, u64 iim, u64 vec, REGS *regs) { u64 status, vector; VCPU *vcpu = current; u64 vpsr = VCPU(vcpu, vpsr); vector = vec2off[vec]; switch (vec) { case 5: // IA64_DATA_NESTED_TLB_VECTOR break; case 22: // IA64_INST_ACCESS_RIGHTS_VECTOR if (!(vpsr & IA64_PSR_IC)) goto nested_fault; if (vhpt_access_rights_fixup(vcpu, ifa, 0)) return; break; case 25: // IA64_DISABLED_FPREG_VECTOR if (!(vpsr & IA64_PSR_IC)) goto nested_fault; vmx_lazy_load_fpu(vcpu); if (!(VCPU(vcpu, vpsr) & IA64_PSR_DFH)) { regs->cr_ipsr &= ~IA64_PSR_DFH; return; } break; case 32: // IA64_FP_FAULT_VECTOR if (!(vpsr & IA64_PSR_IC)) goto nested_fault; // handle fpswa emulation // fp fault status = handle_fpu_swa(1, regs, isr); if (!status) { 
vcpu_increment_iip(vcpu); return; } break; case 33: // IA64_FP_TRAP_VECTOR if (!(vpsr & IA64_PSR_IC)) goto nested_fault; //fp trap status = handle_fpu_swa(0, regs, isr); if (!status) return; break; case 29: // IA64_DEBUG_VECTOR case 35: // IA64_TAKEN_BRANCH_TRAP_VECTOR case 36: // IA64_SINGLE_STEP_TRAP_VECTOR if (vmx_guest_kernel_mode(regs) && current->domain->debugger_attached) { domain_pause_for_debugger(); return; } if (!(vpsr & IA64_PSR_IC)) goto nested_fault; break; default: if (!(vpsr & IA64_PSR_IC)) goto nested_fault; break; } VCPU(vcpu,isr) = isr; VCPU(vcpu,iipa) = regs->cr_iip; if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR) VCPU(vcpu,iim) = iim; else set_ifa_itir_iha(vcpu, ifa, 1, 1, 1); inject_guest_interruption(vcpu, vector); return; nested_fault: panic_domain(regs, "Guest nested fault vector=%lx!\n", vector); } IA64FAULT vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim) { struct domain *d = current->domain; struct vcpu *v = current; perfc_incr(vmx_ia64_handle_break); #ifdef CRASH_DEBUG if ((iim == 0 || iim == CDB_BREAK_NUM) && !vmx_user_mode(regs) && IS_VMM_ADDRESS(regs->cr_iip)) { if (iim == 0) show_registers(regs); debugger_trap_fatal(0 /* don't care */, regs); regs_increment_iip(regs); return IA64_NO_FAULT; } #endif if (!vmx_user_mode(regs)) { show_registers(regs); gdprintk(XENLOG_DEBUG, "%s:%d imm %lx\n", __func__, __LINE__, iim); ia64_fault(11 /* break fault */, isr, ifa, iim, 0 /* cr.itir */, 0, 0, 0, (unsigned long)regs); } if (ia64_psr(regs)->cpl == 0) { /* Allow hypercalls only when cpl = 0. */ /* Only common hypercalls are handled by vmx_break_fault. */ if (iim == d->arch.breakimm) { ia64_hypercall(regs); vcpu_increment_iip(v); return IA64_NO_FAULT; } /* normal hypercalls are handled by vmx_break_fault */ BUG_ON(iim == d->arch.breakimm); if (iim == DOMN_PAL_REQUEST) { pal_emul(v); vcpu_increment_iip(v); return IA64_NO_FAULT; } else if (iim == DOMN_SAL_REQUEST) { if (d->arch.is_sioemu) sioemu_sal_assist(v); else { sal_emul(v); vcpu_increment_iip(v); } return IA64_NO_FAULT; } } vmx_reflect_interruption(ifa, isr, iim, 11, regs); return IA64_NO_FAULT; } void save_banked_regs_to_vpd(VCPU *v, REGS *regs) { unsigned long i=0UL, * src,* dst, *sunat, *dunat; IA64_PSR vpsr; src = &regs->r16; sunat = &regs->eml_unat; vpsr.val = VCPU(v, vpsr); if (vpsr.bn) { dst = &VCPU(v, vgr[0]); dunat =&VCPU(v, vnat); __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;; \ dep %2 = %0, %2, 0, 16;; \ st8 [%3] = %2;;" ::"r"(i),"r"(*sunat),"r"(*dunat),"r"(dunat),"i"(IA64_PT_REGS_R16_SLOT):"memory"); } else { dst = &VCPU(v, vbgr[0]); // dunat =&VCPU(v, vbnat); // __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;; // dep %2 = %0, %2, 16, 16;; // st8 [%3] = %2;;" // ::"r"(i),"r"(*sunat),"r"(*dunat),"r"(dunat),"i"(IA64_PT_REGS_R16_SLOT):"memory"); } for (i = 0; i < 16; i++) *dst++ = *src++; } // ONLY gets called from ia64_leave_kernel // ONLY call with interrupts disabled?? (else might miss one?) // NEVER successful if already reflecting a trap/fault because psr.i==0 void leave_hypervisor_tail(void) { struct domain *d = current->domain; struct vcpu *v = current; /* FIXME: can this happen ? */ if (is_idle_domain(current->domain)) return; // A softirq may generate an interrupt. So call softirq early. local_irq_enable(); do_softirq(); local_irq_disable(); // FIXME: Will this work properly if doing an RFI??? 
if (d->arch.is_sioemu) { if (local_events_need_delivery()) { sioemu_deliver_event(); } } else if (v->vcpu_id == 0) { unsigned long callback_irq = d->arch.hvm_domain.params[HVM_PARAM_CALLBACK_IRQ]; if (v->arch.arch_vmx.pal_init_pending) { /* inject INIT interruption to guest pal */ v->arch.arch_vmx.pal_init_pending = 0; deliver_pal_init(v); return; } /* * val[63:56] == 1: val[55:0] is a delivery PCI INTx line: * Domain = val[47:32], Bus = val[31:16], * DevFn = val[15: 8], IntX = val[ 1: 0] * val[63:56] == 0: val[55:0] is a delivery as GSI */ if (callback_irq != 0 && local_events_need_delivery()) { /* change level for para-device callback irq */ /* use level irq to send discrete event */ if ((uint8_t)(callback_irq >> 56) == 1) { /* case of using PCI INTx line as callback irq */ int pdev = (callback_irq >> 11) & 0x1f; int pintx = callback_irq & 3; viosapic_set_pci_irq(d, pdev, pintx, 1); viosapic_set_pci_irq(d, pdev, pintx, 0); } else { /* case of using GSI as callback irq */ viosapic_set_irq(d, callback_irq, 1); viosapic_set_irq(d, callback_irq, 0); } } } rmb(); if (xchg(&v->arch.irq_new_pending, 0)) { v->arch.irq_new_condition = 0; vmx_check_pending_irq(v); } else if (v->arch.irq_new_condition) { v->arch.irq_new_condition = 0; vhpi_detection(v); } } static int vmx_handle_lds(REGS* regs) { regs->cr_ipsr |= IA64_PSR_ED; return IA64_FAULT; } static inline int unimpl_phys_addr (u64 paddr) { return (pa_clear_uc(paddr) >> MAX_PHYS_ADDR_BITS) != 0; } /* We came here because the H/W VHPT walker failed to find an entry */ IA64FAULT vmx_hpw_miss(u64 vadr, u64 vec, REGS* regs) { IA64_PSR vpsr; int type; u64 vhpt_adr, gppa, pteval, rr, itir; ISR misr; PTA vpta; thash_data_t *data; VCPU *v = current; vpsr.val = VCPU(v, vpsr); misr.val = VMX(v,cr_isr); if (vec == 1 || vec == 3) type = ISIDE_TLB; else if (vec == 2 || vec == 4) type = DSIDE_TLB; else panic_domain(regs, "wrong vec:%lx\n", vec); /* Physical mode. */ if (type == ISIDE_TLB) { if (!vpsr.it) { if (unlikely(unimpl_phys_addr(vadr))) { unimpl_iaddr_trap(v, vadr); return IA64_FAULT; } physical_tlb_miss(v, vadr, type); return IA64_FAULT; } } else { /* DTLB miss. */ if (!misr.rs) { if (!vpsr.dt) { u64 pte; if (misr.sp) /* Refer to SDM Vol2 Table 4-11,4-12 */ return vmx_handle_lds(regs); if (unlikely(unimpl_phys_addr(vadr))) { unimpl_daddr(v); return IA64_FAULT; } pte = lookup_domain_mpa(v->domain, pa_clear_uc(vadr), NULL); if (v->domain != dom0 && (pte & _PAGE_IO)) { emulate_io_inst(v, pa_clear_uc(vadr), 4, pte_pfn(__pte(pte))); return IA64_FAULT; } physical_tlb_miss(v, vadr, type); return IA64_FAULT; } } else { /* RSE fault. */ if (!vpsr.rt) { if (unlikely(unimpl_phys_addr(vadr))) { unimpl_daddr(v); return IA64_FAULT; } physical_tlb_miss(v, vadr, type); return IA64_FAULT; } } } try_again: /* Search in VTLB. */ data = vtlb_lookup(v, vadr, type); if (data != 0) { /* Found. */ if (v->domain != dom0 && type == DSIDE_TLB) { u64 pte; if (misr.sp) { /* Refer to SDM Vol2 Table 4-10,4-12 */ if ((data->ma == VA_MATTR_UC) || (data->ma == VA_MATTR_UCE)) return vmx_handle_lds(regs); } gppa = thash_translate(data, vadr); pte = lookup_domain_mpa(v->domain, gppa, NULL); if (pte & _PAGE_IO) { if (misr.sp) panic_domain(NULL, "ld.s on I/O page not with UC attr." 
" pte=0x%lx\n", data->page_flags); if (data->pl >= ((regs->cr_ipsr >> IA64_PSR_CPL0_BIT) & 3)) emulate_io_inst(v, gppa, data->ma, pte_pfn(__pte(pte))); else { vcpu_set_isr(v, misr.val); data_access_rights(v, vadr); } return IA64_FAULT; } } thash_vhpt_insert(v, data->page_flags, data->itir, vadr, type); return IA64_NO_FAULT; } if (type == DSIDE_TLB) { struct opt_feature* optf = &(v->domain->arch.opt_feature); if (misr.sp) return vmx_handle_lds(regs); vcpu_get_rr(v, vadr, &rr); itir = rr & (RR_RID_MASK | RR_PS_MASK); if (!vhpt_enabled(v, vadr, misr.rs ? RSE_REF : DATA_REF)) { /* windows use region 4 and 5 for identity mapping */ if ((optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG4_FLG) && REGION_NUMBER(vadr) == 4 && !(regs->cr_ipsr & IA64_PSR_CPL) && REGION_OFFSET(vadr) <= _PAGE_PPN_MASK) { pteval = PAGEALIGN(REGION_OFFSET(vadr), itir_ps(itir)) | optf->im_reg4.pgprot; if (thash_purge_and_insert(v, pteval, itir, vadr, type)) goto try_again; return IA64_NO_FAULT; } if ((optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG5_FLG) && REGION_NUMBER(vadr) == 5 && !(regs->cr_ipsr & IA64_PSR_CPL) && REGION_OFFSET(vadr) <= _PAGE_PPN_MASK) { pteval = PAGEALIGN(REGION_OFFSET(vadr), itir_ps(itir)) | optf->im_reg5.pgprot; if (thash_purge_and_insert(v, pteval, itir, vadr, type)) goto try_again; return IA64_NO_FAULT; } if (vpsr.ic) { vcpu_set_isr(v, misr.val); alt_dtlb(v, vadr); } else { nested_dtlb(v); } return IA64_FAULT; } vpta.val = vmx_vcpu_get_pta(v); if (vpta.vf) { /* Long format is not yet supported. */ goto inject_dtlb_fault; } /* avoid recursively walking (short format) VHPT */ if (!(optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG4_FLG) && !(optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG5_FLG) && (((vadr ^ vpta.val) << 3) >> (vpta.size + 3)) == 0) { goto inject_dtlb_fault; } vhpt_adr = vmx_vcpu_thash(v, vadr); if (!guest_vhpt_lookup(vhpt_adr, &pteval)) { /* VHPT successfully read. */ if (!(pteval & _PAGE_P)) { goto inject_dtlb_fault; } else if ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST) { thash_purge_and_insert(v, pteval, itir, vadr, DSIDE_TLB); return IA64_NO_FAULT; } goto inject_dtlb_fault; } else { /* Can't read VHPT. */ if (vpsr.ic) { vcpu_set_isr(v, misr.val); dvhpt_fault(v, vadr); return IA64_FAULT; } else { nested_dtlb(v); return IA64_FAULT; } } } else if (type == ISIDE_TLB) { if (!vpsr.ic) misr.ni = 1; /* Don't bother with PHY_D mode (will require rr0+rr4 switches, and certainly used only within nested TLB handler (hence TR mapped and ic=0). */ if (!vpsr.dt) goto inject_itlb_fault; if (!vhpt_enabled(v, vadr, INST_REF)) { vcpu_set_isr(v, misr.val); alt_itlb(v, vadr); return IA64_FAULT; } vpta.val = vmx_vcpu_get_pta(v); if (vpta.vf) { /* Long format is not yet supported. */ goto inject_itlb_fault; } vhpt_adr = vmx_vcpu_thash(v, vadr); if (!guest_vhpt_lookup(vhpt_adr, &pteval)) { /* VHPT successfully read. 
*/ if (pteval & _PAGE_P) { if ((pteval & _PAGE_MA_MASK) == _PAGE_MA_ST) { goto inject_itlb_fault; } vcpu_get_rr(v, vadr, &rr); itir = rr & (RR_RID_MASK | RR_PS_MASK); thash_purge_and_insert(v, pteval, itir, vadr, ISIDE_TLB); return IA64_NO_FAULT; } else { vcpu_set_isr(v, misr.val); inst_page_not_present(v, vadr); return IA64_FAULT; } } else { vcpu_set_isr(v, misr.val); ivhpt_fault(v, vadr); return IA64_FAULT; } } return IA64_NO_FAULT; inject_dtlb_fault: if (vpsr.ic) { vcpu_set_isr(v, misr.val); dtlb_fault(v, vadr); } else nested_dtlb(v); return IA64_FAULT; inject_itlb_fault: vcpu_set_isr(v, misr.val); itlb_fault(v, vadr); return IA64_FAULT; } void vmx_ia64_shadow_fault(u64 ifa, u64 isr, u64 mpa, REGS *regs) { struct vcpu *v = current; struct domain *d = v->domain; u64 gpfn, pte; thash_data_t *data; if (!shadow_mode_enabled(d)) goto inject_dirty_bit; gpfn = get_gpfn_from_mfn(mpa >> PAGE_SHIFT); data = vhpt_lookup(ifa); if (data) { pte = data->page_flags; // BUG_ON((pte ^ mpa) & (_PAGE_PPN_MASK & PAGE_MASK)); if (!(pte & _PAGE_VIRT_D)) goto inject_dirty_bit; data->page_flags = pte | _PAGE_D; } else { data = vtlb_lookup(v, ifa, DSIDE_TLB); if (data) { if (!(data->page_flags & _PAGE_VIRT_D)) goto inject_dirty_bit; } pte = 0; } /* Set the dirty bit in the bitmap. */ shadow_mark_page_dirty(d, gpfn); /* Retry */ atomic64_inc(&d->arch.shadow_fault_count); ia64_ptcl(ifa, PAGE_SHIFT << 2); return; inject_dirty_bit: /* Reflect. no need to purge. */ VCPU(v, isr) = isr; set_ifa_itir_iha (v, ifa, 1, 1, 1); inject_guest_interruption(v, IA64_DIRTY_BIT_VECTOR); return; }
jamesbulpin/xcp-xen-4.1
xen/arch/ia64/vmx/vmx_fault.c
C
gpl-2.0
19,082
/* $OpenBSD: strdup.c,v 1.6 2005/08/08 08:05:37 espie Exp $ */ /* * Copyright (c) 1988, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "sys/types.h" #include "stdlib.h" #include "string.h" char *strdup(const char *str) { size_t siz; char *copy; siz = strlen(str) + 1; if ((copy = malloc(siz)) == NULL) return(NULL); (void)memcpy(copy, str, siz); return(copy); }
amenglar/freenos
lib/libc/string/strdup.c
C
gpl-3.0
1,906
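Usage is the standard allocate-copy-free contract: the caller owns the returned buffer and must free() it, and NULL signals allocation failure. A small self-contained example:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    char *copy = strdup("hello");

    if (copy == NULL)
        return 1;          /* malloc failed */
    copy[0] = 'H';         /* safe: the copy is writable, unlike a string literal */
    printf("%s\n", copy);
    free(copy);            /* caller owns the duplicate */
    return 0;
}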
/* * Copyright (c) 2016 Thomas Pornin <pornin@bolet.org> * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "inner.h" /* see bearssl_rsa.h */ uint32_t br_rsa_ssl_decrypt(br_rsa_private core, const br_rsa_private_key *sk, unsigned char *data, size_t len) { uint32_t x; size_t u; /* * A first check on length. Since this test works only on the * buffer length, it need not (and cannot) be constant-time. */ if (len < 59 || len != (sk->n_bitlen + 7) >> 3) { return 0; } x = core(data, sk); x &= EQ(data[0], 0x00); x &= EQ(data[1], 0x02); for (u = 2; u < (len - 49); u ++) { x &= NEQ(data[u], 0); } x &= EQ(data[len - 49], 0x00); memmove(data, data + len - 48, 48); return x; }
Themaister/RetroArch
deps/bearssl-0.6/src/rsa/rsa_ssl_decrypt.c
C
gpl-3.0
1,751
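The constant-time discipline extends to the call site: the caller should consume the returned 0/1 flag without branching into an early error path (for instance by substituting a random premaster secret on failure), or the Bleichenbacher oracle this padding check is written to close reopens. A call sketch follows; br_rsa_i31_private is one of BearSSL's private-key engines, and the key and ciphertext are assumed to be already in hand.

#include <stdint.h>
#include <string.h>
#include "bearssl_rsa.h"

/* ct: RSA ciphertext, exactly (sk->n_bitlen + 7) / 8 bytes long */
static uint32_t get_premaster(const br_rsa_private_key *sk,
                              unsigned char *ct, size_t ct_len,
                              unsigned char premaster[48])
{
    uint32_t ok = br_rsa_ssl_decrypt(&br_rsa_i31_private, sk, ct, ct_len);

    /* on success the 48-byte premaster secret sits at the start of ct;
       copy unconditionally so control flow does not depend on 'ok' */
    memcpy(premaster, ct, 48);
    return ok; /* 1 = padding valid, 0 = caller must take the decoy path */
}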
/* * QEMU System Emulator block driver * * Copyright (c) 2003 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "config-host.h" #include "qemu-common.h" #include "trace.h" #include "block/block_int.h" #include "block/blockjob.h" #include "qemu/module.h" #include "qapi/qmp/qjson.h" #include "sysemu/block-backend.h" #include "sysemu/sysemu.h" #include "qemu/notify.h" #include "block/coroutine.h" #include "block/qapi.h" #include "qmp-commands.h" #include "qemu/timer.h" #include "qapi-event.h" #ifdef CONFIG_BSD #include <sys/types.h> #include <sys/stat.h> #include <sys/ioctl.h> #include <sys/queue.h> #ifndef __DragonFly__ #include <sys/disk.h> #endif #endif #ifdef _WIN32 #include <windows.h> #endif struct BdrvDirtyBitmap { HBitmap *bitmap; QLIST_ENTRY(BdrvDirtyBitmap) list; }; #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */ static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BlockCompletionFunc *cb, void *opaque); static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BlockCompletionFunc *cb, void *opaque); static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *iov); static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *iov); static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags); static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags); static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque, bool is_write); static void coroutine_fn bdrv_co_do_rw(void *opaque); static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs, int64_t sector_num, int nb_sectors, BdrvRequestFlags flags); static QTAILQ_HEAD(, BlockDriverState) bdrv_states = QTAILQ_HEAD_INITIALIZER(bdrv_states); static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states = QTAILQ_HEAD_INITIALIZER(graph_bdrv_states); static QLIST_HEAD(, BlockDriver) bdrv_drivers = QLIST_HEAD_INITIALIZER(bdrv_drivers); /* If non-zero, use only whitelisted block drivers */ static int use_bdrv_whitelist; #ifdef _WIN32 static int is_windows_drive_prefix(const char 
*filename) { return (((filename[0] >= 'a' && filename[0] <= 'z') || (filename[0] >= 'A' && filename[0] <= 'Z')) && filename[1] == ':'); } int is_windows_drive(const char *filename) { if (is_windows_drive_prefix(filename) && filename[2] == '\0') return 1; if (strstart(filename, "\\\\.\\", NULL) || strstart(filename, "//./", NULL)) return 1; return 0; } #endif /* throttling disk I/O limits */ void bdrv_set_io_limits(BlockDriverState *bs, ThrottleConfig *cfg) { int i; throttle_config(&bs->throttle_state, cfg); for (i = 0; i < 2; i++) { qemu_co_enter_next(&bs->throttled_reqs[i]); } } /* this function drains all the throttled I/Os */ static bool bdrv_start_throttled_reqs(BlockDriverState *bs) { bool drained = false; bool enabled = bs->io_limits_enabled; int i; bs->io_limits_enabled = false; for (i = 0; i < 2; i++) { while (qemu_co_enter_next(&bs->throttled_reqs[i])) { drained = true; } } bs->io_limits_enabled = enabled; return drained; } void bdrv_io_limits_disable(BlockDriverState *bs) { bs->io_limits_enabled = false; bdrv_start_throttled_reqs(bs); throttle_destroy(&bs->throttle_state); } static void bdrv_throttle_read_timer_cb(void *opaque) { BlockDriverState *bs = opaque; qemu_co_enter_next(&bs->throttled_reqs[0]); } static void bdrv_throttle_write_timer_cb(void *opaque) { BlockDriverState *bs = opaque; qemu_co_enter_next(&bs->throttled_reqs[1]); } /* should be called before bdrv_set_io_limits if a limit is set */ void bdrv_io_limits_enable(BlockDriverState *bs) { assert(!bs->io_limits_enabled); throttle_init(&bs->throttle_state, bdrv_get_aio_context(bs), QEMU_CLOCK_VIRTUAL, bdrv_throttle_read_timer_cb, bdrv_throttle_write_timer_cb, bs); bs->io_limits_enabled = true; } /* This function makes an I/O wait if needed * * @bytes: the number of bytes of the I/O * @is_write: is the I/O a write */ static void bdrv_io_limits_intercept(BlockDriverState *bs, unsigned int bytes, bool is_write) { /* does this I/O need to wait? */ bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write); /* if it must wait, or any request of this type is already throttled, queue the I/O */ if (must_wait || !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) { qemu_co_queue_wait(&bs->throttled_reqs[is_write]); } /* the I/O will be executed, so do the accounting */ throttle_account(&bs->throttle_state, is_write, bytes); /* if the next request must wait -> do nothing */ if (throttle_schedule_timer(&bs->throttle_state, is_write)) { return; } /* else queue the next request for execution */ qemu_co_queue_next(&bs->throttled_reqs[is_write]); } size_t bdrv_opt_mem_align(BlockDriverState *bs) { if (!bs || !bs->drv) { /* 4k should be on the safe side */ return 4096; } return bs->bl.opt_mem_alignment; } /* check if the path starts with "<protocol>:" */ static int path_has_protocol(const char *path) { const char *p; #ifdef _WIN32 if (is_windows_drive(path) || is_windows_drive_prefix(path)) { return 0; } p = path + strcspn(path, ":/\\"); #else p = path + strcspn(path, ":/"); #endif return *p == ':'; } int path_is_absolute(const char *path) { #ifdef _WIN32 /* specific case for names like: "\\.\d:" */ if (is_windows_drive(path) || is_windows_drive_prefix(path)) { return 1; } return (*path == '/' || *path == '\\'); #else return (*path == '/'); #endif } /* if filename is absolute, just copy it to dest. Otherwise, build a path to it by treating it as relative to base_path. URLs are supported.
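   For example (illustrative values, not from the original comment):
   combining base_path "/images/base.qcow2" with filename "snap.qcow2"
   yields "/images/snap.qcow2", while an absolute filename such as
   "/tmp/overlay.qcow2" is copied to dest unchanged.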
*/ void path_combine(char *dest, int dest_size, const char *base_path, const char *filename) { const char *p, *p1; int len; if (dest_size <= 0) return; if (path_is_absolute(filename)) { pstrcpy(dest, dest_size, filename); } else { p = strchr(base_path, ':'); if (p) p++; else p = base_path; p1 = strrchr(base_path, '/'); #ifdef _WIN32 { const char *p2; p2 = strrchr(base_path, '\\'); if (!p1 || p2 > p1) p1 = p2; } #endif if (p1) p1++; else p1 = base_path; if (p1 > p) p = p1; len = p - base_path; if (len > dest_size - 1) len = dest_size - 1; memcpy(dest, base_path, len); dest[len] = '\0'; pstrcat(dest, dest_size, filename); } } void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz) { if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) { pstrcpy(dest, sz, bs->backing_file); } else { path_combine(dest, sz, bs->filename, bs->backing_file); } } void bdrv_register(BlockDriver *bdrv) { /* Block drivers without coroutine functions need emulation */ if (!bdrv->bdrv_co_readv) { bdrv->bdrv_co_readv = bdrv_co_readv_em; bdrv->bdrv_co_writev = bdrv_co_writev_em; /* bdrv_co_readv_em()/brdv_co_writev_em() work in terms of aio, so if * the block driver lacks aio we need to emulate that too. */ if (!bdrv->bdrv_aio_readv) { /* add AIO emulation layer */ bdrv->bdrv_aio_readv = bdrv_aio_readv_em; bdrv->bdrv_aio_writev = bdrv_aio_writev_em; } } QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list); } BlockDriverState *bdrv_new_root(void) { BlockDriverState *bs = bdrv_new(); QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list); return bs; } BlockDriverState *bdrv_new(void) { BlockDriverState *bs; int i; bs = g_new0(BlockDriverState, 1); QLIST_INIT(&bs->dirty_bitmaps); for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) { QLIST_INIT(&bs->op_blockers[i]); } bdrv_iostatus_disable(bs); notifier_list_init(&bs->close_notifiers); notifier_with_return_list_init(&bs->before_write_notifiers); qemu_co_queue_init(&bs->throttled_reqs[0]); qemu_co_queue_init(&bs->throttled_reqs[1]); bs->refcnt = 1; bs->aio_context = qemu_get_aio_context(); return bs; } void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify) { notifier_list_add(&bs->close_notifiers, notify); } BlockDriver *bdrv_find_format(const char *format_name) { BlockDriver *drv1; QLIST_FOREACH(drv1, &bdrv_drivers, list) { if (!strcmp(drv1->format_name, format_name)) { return drv1; } } return NULL; } static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only) { static const char *whitelist_rw[] = { CONFIG_BDRV_RW_WHITELIST }; static const char *whitelist_ro[] = { CONFIG_BDRV_RO_WHITELIST }; const char **p; if (!whitelist_rw[0] && !whitelist_ro[0]) { return 1; /* no whitelist, anything goes */ } for (p = whitelist_rw; *p; p++) { if (!strcmp(drv->format_name, *p)) { return 1; } } if (read_only) { for (p = whitelist_ro; *p; p++) { if (!strcmp(drv->format_name, *p)) { return 1; } } } return 0; } BlockDriver *bdrv_find_whitelisted_format(const char *format_name, bool read_only) { BlockDriver *drv = bdrv_find_format(format_name); return drv && bdrv_is_whitelisted(drv, read_only) ? 
drv : NULL; } typedef struct CreateCo { BlockDriver *drv; char *filename; QemuOpts *opts; int ret; Error *err; } CreateCo; static void coroutine_fn bdrv_create_co_entry(void *opaque) { Error *local_err = NULL; int ret; CreateCo *cco = opaque; assert(cco->drv); ret = cco->drv->bdrv_create(cco->filename, cco->opts, &local_err); if (local_err) { error_propagate(&cco->err, local_err); } cco->ret = ret; } int bdrv_create(BlockDriver *drv, const char* filename, QemuOpts *opts, Error **errp) { int ret; Coroutine *co; CreateCo cco = { .drv = drv, .filename = g_strdup(filename), .opts = opts, .ret = NOT_DONE, .err = NULL, }; if (!drv->bdrv_create) { error_setg(errp, "Driver '%s' does not support image creation", drv->format_name); ret = -ENOTSUP; goto out; } if (qemu_in_coroutine()) { /* Fast-path if already in coroutine context */ bdrv_create_co_entry(&cco); } else { co = qemu_coroutine_create(bdrv_create_co_entry); qemu_coroutine_enter(co, &cco); while (cco.ret == NOT_DONE) { aio_poll(qemu_get_aio_context(), true); } } ret = cco.ret; if (ret < 0) { if (cco.err) { error_propagate(errp, cco.err); } else { error_setg_errno(errp, -ret, "Could not create image"); } } out: g_free(cco.filename); return ret; } int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp) { BlockDriver *drv; Error *local_err = NULL; int ret; drv = bdrv_find_protocol(filename, true); if (drv == NULL) { error_setg(errp, "Could not find protocol for file '%s'", filename); return -ENOENT; } ret = bdrv_create(drv, filename, opts, &local_err); if (local_err) { error_propagate(errp, local_err); } return ret; } void bdrv_refresh_limits(BlockDriverState *bs, Error **errp) { BlockDriver *drv = bs->drv; Error *local_err = NULL; memset(&bs->bl, 0, sizeof(bs->bl)); if (!drv) { return; } /* Take some limits from the children as a default */ if (bs->file) { bdrv_refresh_limits(bs->file, &local_err); if (local_err) { error_propagate(errp, local_err); return; } bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length; bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment; } else { bs->bl.opt_mem_alignment = 512; } if (bs->backing_hd) { bdrv_refresh_limits(bs->backing_hd, &local_err); if (local_err) { error_propagate(errp, local_err); return; } bs->bl.opt_transfer_length = MAX(bs->bl.opt_transfer_length, bs->backing_hd->bl.opt_transfer_length); bs->bl.opt_mem_alignment = MAX(bs->bl.opt_mem_alignment, bs->backing_hd->bl.opt_mem_alignment); } /* Then let the driver override it */ if (drv->bdrv_refresh_limits) { drv->bdrv_refresh_limits(bs, errp); } } /* * Create a uniquely-named empty temporary file. * Return 0 upon success, otherwise a negative errno value. */ int get_tmp_filename(char *filename, int size) { #ifdef _WIN32 char temp_dir[MAX_PATH]; /* GetTempFileName requires that its output buffer (4th param) have length MAX_PATH or greater. */ assert(size >= MAX_PATH); return (GetTempPath(MAX_PATH, temp_dir) && GetTempFileName(temp_dir, "qem", 0, filename) ? 0 : -GetLastError()); #else int fd; const char *tmpdir; tmpdir = getenv("TMPDIR"); if (!tmpdir) { tmpdir = "/var/tmp"; } if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) { return -EOVERFLOW; } fd = mkstemp(filename); if (fd < 0) { return -errno; } if (close(fd) != 0) { unlink(filename); return -errno; } return 0; #endif } /* * Detect host devices. By convention, /dev/cdrom[N] is always * recognized as a host CDROM. 
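 * Detection sketch: every registered driver that implements
 * bdrv_probe_device() is offered the candidate filename and returns a
 * score; the loop below simply keeps the highest-scoring driver.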
*/ static BlockDriver *find_hdev_driver(const char *filename) { int score_max = 0, score; BlockDriver *drv = NULL, *d; QLIST_FOREACH(d, &bdrv_drivers, list) { if (d->bdrv_probe_device) { score = d->bdrv_probe_device(filename); if (score > score_max) { score_max = score; drv = d; } } } return drv; } BlockDriver *bdrv_find_protocol(const char *filename, bool allow_protocol_prefix) { BlockDriver *drv1; char protocol[128]; int len; const char *p; /* TODO Drivers without bdrv_file_open must be specified explicitly */ /* * XXX(hch): we really should not let host device detection * override an explicit protocol specification, but moving this * later breaks access to device names with colons in them. * Thanks to the brain-dead persistent naming schemes on udev- * based Linux systems those actually are quite common. */ drv1 = find_hdev_driver(filename); if (drv1) { return drv1; } if (!path_has_protocol(filename) || !allow_protocol_prefix) { return bdrv_find_format("file"); } p = strchr(filename, ':'); assert(p != NULL); len = p - filename; if (len > sizeof(protocol) - 1) len = sizeof(protocol) - 1; memcpy(protocol, filename, len); protocol[len] = '\0'; QLIST_FOREACH(drv1, &bdrv_drivers, list) { if (drv1->protocol_name && !strcmp(drv1->protocol_name, protocol)) { return drv1; } } return NULL; } static int find_image_format(BlockDriverState *bs, const char *filename, BlockDriver **pdrv, Error **errp) { int score, score_max; BlockDriver *drv1, *drv; uint8_t buf[2048]; int ret = 0; /* Return the raw BlockDriver * to scsi-generic devices or empty drives */ if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) { drv = bdrv_find_format("raw"); if (!drv) { error_setg(errp, "Could not find raw image format"); ret = -ENOENT; } *pdrv = drv; return ret; } ret = bdrv_pread(bs, 0, buf, sizeof(buf)); if (ret < 0) { error_setg_errno(errp, -ret, "Could not read image for determining its " "format"); *pdrv = NULL; return ret; } score_max = 0; drv = NULL; QLIST_FOREACH(drv1, &bdrv_drivers, list) { if (drv1->bdrv_probe) { score = drv1->bdrv_probe(buf, ret, filename); if (score > score_max) { score_max = score; drv = drv1; } } } if (!drv) { error_setg(errp, "Could not determine image format: No compatible " "driver found"); ret = -ENOENT; } *pdrv = drv; return ret; } /** * Set the current 'total_sectors' value * Return 0 on success, -errno on error. */ static int refresh_total_sectors(BlockDriverState *bs, int64_t hint) { BlockDriver *drv = bs->drv; /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */ if (bs->sg) return 0; /* query actual device if possible, otherwise just trust the hint */ if (drv->bdrv_getlength) { int64_t length = drv->bdrv_getlength(bs); if (length < 0) { return length; } hint = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE); } bs->total_sectors = hint; return 0; } /** * Set open flags for a given discard mode * * Return 0 on success, -1 if the discard mode was invalid. */ int bdrv_parse_discard_flags(const char *mode, int *flags) { *flags &= ~BDRV_O_UNMAP; if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) { /* do nothing */ } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) { *flags |= BDRV_O_UNMAP; } else { return -1; } return 0; } /** * Set open flags for a given cache mode * * Return 0 on success, -1 if the cache mode was invalid. 
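 * For reference, the mode-to-flag mapping implemented below is:
 *   off/none     -> BDRV_O_NOCACHE | BDRV_O_CACHE_WB
 *   directsync   -> BDRV_O_NOCACHE
 *   writeback    -> BDRV_O_CACHE_WB
 *   unsafe       -> BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH
 *   writethrough -> (no extra flags; this is the default)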
*/ int bdrv_parse_cache_flags(const char *mode, int *flags) { *flags &= ~BDRV_O_CACHE_MASK; if (!strcmp(mode, "off") || !strcmp(mode, "none")) { *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB; } else if (!strcmp(mode, "directsync")) { *flags |= BDRV_O_NOCACHE; } else if (!strcmp(mode, "writeback")) { *flags |= BDRV_O_CACHE_WB; } else if (!strcmp(mode, "unsafe")) { *flags |= BDRV_O_CACHE_WB; *flags |= BDRV_O_NO_FLUSH; } else if (!strcmp(mode, "writethrough")) { /* this is the default */ } else { return -1; } return 0; } /** * The copy-on-read flag is actually a reference count so multiple users may * use the feature without worrying about clobbering its previous state. * Copy-on-read stays enabled until all users have disabled it. */ void bdrv_enable_copy_on_read(BlockDriverState *bs) { bs->copy_on_read++; } void bdrv_disable_copy_on_read(BlockDriverState *bs) { assert(bs->copy_on_read > 0); bs->copy_on_read--; } /* * Returns the flags that a temporary snapshot should get, based on the * originally requested flags (the originally requested image will have flags * like a backing file) */ static int bdrv_temp_snapshot_flags(int flags) { return (flags & ~BDRV_O_SNAPSHOT) | BDRV_O_TEMPORARY; } /* * Returns the flags that bs->file should get, based on the given flags for * the parent BDS */ static int bdrv_inherited_flags(int flags) { /* Enable protocol handling, disable format probing for bs->file */ flags |= BDRV_O_PROTOCOL; /* Our block drivers take care to send flushes and respect unmap policy, * so we can enable both unconditionally on lower layers. */ flags |= BDRV_O_CACHE_WB | BDRV_O_UNMAP; /* Clear flags that only apply to the top layer */ flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_COPY_ON_READ); return flags; } /* * Returns the flags that bs->backing_hd should get, based on the given flags * for the parent BDS */ static int bdrv_backing_flags(int flags) { /* backing files are always opened read-only */ flags &= ~(BDRV_O_RDWR | BDRV_O_COPY_ON_READ); /* snapshot=on is handled on the top layer */ flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_TEMPORARY); return flags; } static int bdrv_open_flags(BlockDriverState *bs, int flags) { int open_flags = flags | BDRV_O_CACHE_WB; /* * Clear flags that are internal to the block layer before opening the * image. */ open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_PROTOCOL); /* * Snapshots should be writable. */ if (flags & BDRV_O_TEMPORARY) { open_flags |= BDRV_O_RDWR; } return open_flags; } static void bdrv_assign_node_name(BlockDriverState *bs, const char *node_name, Error **errp) { if (!node_name) { return; } /* Check for empty string or invalid characters */ if (!id_wellformed(node_name)) { error_setg(errp, "Invalid node name"); return; } /* takes care of avoiding namespace collisions */ if (blk_by_name(node_name)) { error_setg(errp, "node-name=%s conflicts with a device id", node_name); return; } /* takes care of avoiding duplicate node names */ if (bdrv_find_node(node_name)) { error_setg(errp, "Duplicate node name"); return; } /* copy node name into the bs and insert it into the graph list */ pstrcpy(bs->node_name, sizeof(bs->node_name), node_name); QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs, node_list); } /* * Common part for opening disk images and files * * Removes all processed options from *options.
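 * In particular, "node-name" is consumed here, and "filename" is
 * consulted when no protocol-level 'file' BDS is supplied.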
*/ static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file, QDict *options, int flags, BlockDriver *drv, Error **errp) { int ret, open_flags; const char *filename; const char *node_name = NULL; Error *local_err = NULL; assert(drv != NULL); assert(bs->file == NULL); assert(options != NULL && bs->options != options); if (file != NULL) { filename = file->filename; } else { filename = qdict_get_try_str(options, "filename"); } if (drv->bdrv_needs_filename && !filename) { error_setg(errp, "The '%s' block driver requires a file name", drv->format_name); return -EINVAL; } trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name); node_name = qdict_get_try_str(options, "node-name"); bdrv_assign_node_name(bs, node_name, &local_err); if (local_err) { error_propagate(errp, local_err); return -EINVAL; } qdict_del(options, "node-name"); /* bdrv_open() with directly using a protocol as drv. This layer is already * opened, so assign it to bs (while file becomes a closed BlockDriverState) * and return immediately. */ if (file != NULL && drv->bdrv_file_open) { bdrv_swap(file, bs); return 0; } bs->open_flags = flags; bs->guest_block_size = 512; bs->request_alignment = 512; bs->zero_beyond_eof = true; open_flags = bdrv_open_flags(bs, flags); bs->read_only = !(open_flags & BDRV_O_RDWR); bs->growable = !!(flags & BDRV_O_PROTOCOL); if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) { error_setg(errp, !bs->read_only && bdrv_is_whitelisted(drv, true) ? "Driver '%s' can only be used for read-only devices" : "Driver '%s' is not whitelisted", drv->format_name); return -ENOTSUP; } assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */ if (flags & BDRV_O_COPY_ON_READ) { if (!bs->read_only) { bdrv_enable_copy_on_read(bs); } else { error_setg(errp, "Can't use copy-on-read on read-only device"); return -EINVAL; } } if (filename != NULL) { pstrcpy(bs->filename, sizeof(bs->filename), filename); } else { bs->filename[0] = '\0'; } pstrcpy(bs->exact_filename, sizeof(bs->exact_filename), bs->filename); bs->drv = drv; bs->opaque = g_malloc0(drv->instance_size); bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB); /* Open the image, either directly or using a protocol */ if (drv->bdrv_file_open) { assert(file == NULL); assert(!drv->bdrv_needs_filename || filename != NULL); ret = drv->bdrv_file_open(bs, options, open_flags, &local_err); } else { if (file == NULL) { error_setg(errp, "Can't use '%s' as a block driver for the " "protocol level", drv->format_name); ret = -EINVAL; goto free_and_fail; } bs->file = file; ret = drv->bdrv_open(bs, options, open_flags, &local_err); } if (ret < 0) { if (local_err) { error_propagate(errp, local_err); } else if (bs->filename[0]) { error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename); } else { error_setg_errno(errp, -ret, "Could not open image"); } goto free_and_fail; } ret = refresh_total_sectors(bs, bs->total_sectors); if (ret < 0) { error_setg_errno(errp, -ret, "Could not refresh total sector count"); goto free_and_fail; } bdrv_refresh_limits(bs, &local_err); if (local_err) { error_propagate(errp, local_err); ret = -EINVAL; goto free_and_fail; } assert(bdrv_opt_mem_align(bs) != 0); assert((bs->request_alignment != 0) || bs->sg); return 0; free_and_fail: bs->file = NULL; g_free(bs->opaque); bs->opaque = NULL; bs->drv = NULL; return ret; } static QDict *parse_json_filename(const char *filename, Error **errp) { QObject *options_obj; QDict *options; int ret; ret = strstart(filename, "json:", &filename); assert(ret); 
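/* strstart() has advanced 'filename' past the "json:" prefix; what
 * remains must parse as a JSON object, e.g. (illustrative)
 * {"driver": "qcow2", "file.filename": "/path/to/img"}, which
 * qdict_flatten() below reduces to flat dotted keys. */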
options_obj = qobject_from_json(filename); if (!options_obj) { error_setg(errp, "Could not parse the JSON options"); return NULL; } if (qobject_type(options_obj) != QTYPE_QDICT) { qobject_decref(options_obj); error_setg(errp, "Invalid JSON object given"); return NULL; } options = qobject_to_qdict(options_obj); qdict_flatten(options); return options; } /* * Fills in default options for opening images and converts the legacy * filename/flags pair to option QDict entries. */ static int bdrv_fill_options(QDict **options, const char **pfilename, int flags, BlockDriver *drv, Error **errp) { const char *filename = *pfilename; const char *drvname; bool protocol = flags & BDRV_O_PROTOCOL; bool parse_filename = false; Error *local_err = NULL; /* Parse json: pseudo-protocol */ if (filename && g_str_has_prefix(filename, "json:")) { QDict *json_options = parse_json_filename(filename, &local_err); if (local_err) { error_propagate(errp, local_err); return -EINVAL; } /* Options given in the filename have lower priority than options * specified directly */ qdict_join(*options, json_options, false); QDECREF(json_options); *pfilename = filename = NULL; } /* Fetch the file name from the options QDict if necessary */ if (protocol && filename) { if (!qdict_haskey(*options, "filename")) { qdict_put(*options, "filename", qstring_from_str(filename)); parse_filename = true; } else { error_setg(errp, "Can't specify 'file' and 'filename' options at " "the same time"); return -EINVAL; } } /* Find the right block driver */ filename = qdict_get_try_str(*options, "filename"); drvname = qdict_get_try_str(*options, "driver"); if (drv) { if (drvname) { error_setg(errp, "Driver specified twice"); return -EINVAL; } drvname = drv->format_name; qdict_put(*options, "driver", qstring_from_str(drvname)); } else { if (!drvname && protocol) { if (filename) { drv = bdrv_find_protocol(filename, parse_filename); if (!drv) { error_setg(errp, "Unknown protocol"); return -EINVAL; } drvname = drv->format_name; qdict_put(*options, "driver", qstring_from_str(drvname)); } else { error_setg(errp, "Must specify either driver or file"); return -EINVAL; } } else if (drvname) { drv = bdrv_find_format(drvname); if (!drv) { error_setg(errp, "Unknown driver '%s'", drvname); return -ENOENT; } } } assert(drv || !protocol); /* Driver-specific filename parsing */ if (drv && drv->bdrv_parse_filename && parse_filename) { drv->bdrv_parse_filename(filename, *options, &local_err); if (local_err) { error_propagate(errp, local_err); return -EINVAL; } if (!drv->bdrv_needs_filename) { qdict_del(*options, "filename"); } } return 0; } void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd) { if (bs->backing_hd) { assert(bs->backing_blocker); bdrv_op_unblock_all(bs->backing_hd, bs->backing_blocker); } else if (backing_hd) { error_setg(&bs->backing_blocker, "device is used as backing hd of '%s'", bdrv_get_device_name(bs)); } bs->backing_hd = backing_hd; if (!backing_hd) { error_free(bs->backing_blocker); bs->backing_blocker = NULL; goto out; } bs->open_flags &= ~BDRV_O_NO_BACKING; pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_hd->filename); pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_hd->drv ? 
backing_hd->drv->format_name : ""); bdrv_op_block_all(bs->backing_hd, bs->backing_blocker); /* Otherwise we won't be able to commit due to check in bdrv_commit */ bdrv_op_unblock(bs->backing_hd, BLOCK_OP_TYPE_COMMIT, bs->backing_blocker); out: bdrv_refresh_limits(bs, NULL); } /* * Opens the backing file for a BlockDriverState if not yet open * * options is a QDict of options to pass to the block drivers, or NULL for an * empty set of options. The reference to the QDict is transferred to this * function (even on failure), so if the caller intends to reuse the dictionary, * it needs to use QINCREF() before calling bdrv_open_backing_file(). */ int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp) { char *backing_filename = g_malloc0(PATH_MAX); int ret = 0; BlockDriver *back_drv = NULL; BlockDriverState *backing_hd; Error *local_err = NULL; if (bs->backing_hd != NULL) { QDECREF(options); goto free_exit; } /* NULL means an empty set of options */ if (options == NULL) { options = qdict_new(); } bs->open_flags &= ~BDRV_O_NO_BACKING; if (qdict_haskey(options, "file.filename")) { backing_filename[0] = '\0'; } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) { QDECREF(options); goto free_exit; } else { bdrv_get_full_backing_filename(bs, backing_filename, PATH_MAX); } if (!bs->drv || !bs->drv->supports_backing) { ret = -EINVAL; error_setg(errp, "Driver doesn't support backing files"); QDECREF(options); goto free_exit; } backing_hd = bdrv_new(); if (bs->backing_format[0] != '\0') { back_drv = bdrv_find_format(bs->backing_format); } assert(bs->backing_hd == NULL); ret = bdrv_open(&backing_hd, *backing_filename ? backing_filename : NULL, NULL, options, bdrv_backing_flags(bs->open_flags), back_drv, &local_err); if (ret < 0) { bdrv_unref(backing_hd); backing_hd = NULL; bs->open_flags |= BDRV_O_NO_BACKING; error_setg(errp, "Could not open backing file: %s", error_get_pretty(local_err)); error_free(local_err); goto free_exit; } bdrv_set_backing_hd(bs, backing_hd); free_exit: g_free(backing_filename); return ret; } /* * Opens a disk image whose options are given as BlockdevRef in another block * device's options. * * If allow_none is true, no image will be opened if filename is NULL and no * BlockdevRef is given. *pbs will remain unchanged and 0 will be returned. * * bdref_key specifies the key for the image's BlockdevRef in the options QDict. * That QDict has to be flattened; therefore, if the BlockdevRef is a QDict * itself, all options starting with "${bdref_key}." are considered part of the * BlockdevRef. * * The BlockdevRef will be removed from the options QDict. * * To conform with the behavior of bdrv_open(), *pbs has to be NULL.
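 * For example (hypothetical option values): with bdref_key "file", the
 * flattened entries "file.driver=file" and "file.filename=/some/img"
 * are extracted into the image's own options QDict as "driver" and
 * "filename"; alternatively, a plain "file=<node-name>" entry may
 * reference an already existing block device.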
*/ int bdrv_open_image(BlockDriverState **pbs, const char *filename, QDict *options, const char *bdref_key, int flags, bool allow_none, Error **errp) { QDict *image_options; int ret; char *bdref_key_dot; const char *reference; assert(pbs); assert(*pbs == NULL); bdref_key_dot = g_strdup_printf("%s.", bdref_key); qdict_extract_subqdict(options, &image_options, bdref_key_dot); g_free(bdref_key_dot); reference = qdict_get_try_str(options, bdref_key); if (!filename && !reference && !qdict_size(image_options)) { if (allow_none) { ret = 0; } else { error_setg(errp, "A block device must be specified for \"%s\"", bdref_key); ret = -EINVAL; } QDECREF(image_options); goto done; } ret = bdrv_open(pbs, filename, reference, image_options, flags, NULL, errp); done: qdict_del(options, bdref_key); return ret; } int bdrv_append_temp_snapshot(BlockDriverState *bs, int flags, Error **errp) { /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */ char *tmp_filename = g_malloc0(PATH_MAX + 1); int64_t total_size; BlockDriver *bdrv_qcow2; QemuOpts *opts = NULL; QDict *snapshot_options; BlockDriverState *bs_snapshot; Error *local_err; int ret; /* if snapshot, we create a temporary backing file and open it instead of opening 'filename' directly */ /* Get the required size from the image */ total_size = bdrv_getlength(bs); if (total_size < 0) { ret = total_size; error_setg_errno(errp, -total_size, "Could not get image size"); goto out; } /* Create the temporary image */ ret = get_tmp_filename(tmp_filename, PATH_MAX + 1); if (ret < 0) { error_setg_errno(errp, -ret, "Could not get temporary filename"); goto out; } bdrv_qcow2 = bdrv_find_format("qcow2"); opts = qemu_opts_create(bdrv_qcow2->create_opts, NULL, 0, &error_abort); qemu_opt_set_number(opts, BLOCK_OPT_SIZE, total_size); ret = bdrv_create(bdrv_qcow2, tmp_filename, opts, &local_err); qemu_opts_del(opts); if (ret < 0) { error_setg_errno(errp, -ret, "Could not create temporary overlay " "'%s': %s", tmp_filename, error_get_pretty(local_err)); error_free(local_err); goto out; } /* Prepare a new options QDict for the temporary file */ snapshot_options = qdict_new(); qdict_put(snapshot_options, "file.driver", qstring_from_str("file")); qdict_put(snapshot_options, "file.filename", qstring_from_str(tmp_filename)); bs_snapshot = bdrv_new(); ret = bdrv_open(&bs_snapshot, NULL, NULL, snapshot_options, flags, bdrv_qcow2, &local_err); if (ret < 0) { error_propagate(errp, local_err); goto out; } bdrv_append(bs_snapshot, bs); out: g_free(tmp_filename); return ret; } /* * Opens a disk image (raw, qcow2, vmdk, ...) * * options is a QDict of options to pass to the block drivers, or NULL for an * empty set of options. The reference to the QDict belongs to the block layer * after the call (even on failure), so if the caller intends to reuse the * dictionary, it needs to use QINCREF() before calling bdrv_open. * * If *pbs is NULL, a new BDS will be created with a pointer to it stored there. * If it is not NULL, the referenced BDS will be reused. * * The reference parameter may be used to specify an existing block device which * should be opened. If specified, neither options nor a filename may be given, * nor can an existing BDS be reused (that is, *pbs has to be NULL). 
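 * Usage sketch for the reference case ("node0" is a hypothetical node
 * name, and bs must start out NULL):
 *   ret = bdrv_open(&bs, NULL, "node0", NULL, 0, NULL, &local_err);
 * This looks up the existing node, takes a reference on it, and stores
 * it in bs.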
*/ int bdrv_open(BlockDriverState **pbs, const char *filename, const char *reference, QDict *options, int flags, BlockDriver *drv, Error **errp) { int ret; BlockDriverState *file = NULL, *bs; const char *drvname; Error *local_err = NULL; int snapshot_flags = 0; assert(pbs); if (reference) { bool options_non_empty = options ? qdict_size(options) : false; QDECREF(options); if (*pbs) { error_setg(errp, "Cannot reuse an existing BDS when referencing " "another block device"); return -EINVAL; } if (filename || options_non_empty) { error_setg(errp, "Cannot reference an existing block device with " "additional options or a new filename"); return -EINVAL; } bs = bdrv_lookup_bs(reference, reference, errp); if (!bs) { return -ENODEV; } bdrv_ref(bs); *pbs = bs; return 0; } if (*pbs) { bs = *pbs; } else { bs = bdrv_new(); } /* NULL means an empty set of options */ if (options == NULL) { options = qdict_new(); } ret = bdrv_fill_options(&options, &filename, flags, drv, &local_err); if (local_err) { goto fail; } /* Find the right image format driver */ drv = NULL; drvname = qdict_get_try_str(options, "driver"); if (drvname) { drv = bdrv_find_format(drvname); qdict_del(options, "driver"); if (!drv) { error_setg(errp, "Unknown driver: '%s'", drvname); ret = -EINVAL; goto fail; } } assert(drvname || !(flags & BDRV_O_PROTOCOL)); if (drv && !drv->bdrv_file_open) { /* If the user explicitly wants a format driver here, we'll need to add * another layer for the protocol in bs->file */ flags &= ~BDRV_O_PROTOCOL; } bs->options = options; options = qdict_clone_shallow(options); /* Open image file without format layer */ if ((flags & BDRV_O_PROTOCOL) == 0) { if (flags & BDRV_O_RDWR) { flags |= BDRV_O_ALLOW_RDWR; } if (flags & BDRV_O_SNAPSHOT) { snapshot_flags = bdrv_temp_snapshot_flags(flags); flags = bdrv_backing_flags(flags); } assert(file == NULL); ret = bdrv_open_image(&file, filename, options, "file", bdrv_inherited_flags(flags), true, &local_err); if (ret < 0) { goto fail; } } /* Image format probing */ if (!drv && file) { ret = find_image_format(file, filename, &drv, &local_err); if (ret < 0) { goto fail; } } else if (!drv) { error_setg(errp, "Must specify either driver or file"); ret = -EINVAL; goto fail; } /* Open the image */ ret = bdrv_open_common(bs, file, options, flags, drv, &local_err); if (ret < 0) { goto fail; } if (file && (bs->file != file)) { bdrv_unref(file); file = NULL; } /* If there is a backing file, use it */ if ((flags & BDRV_O_NO_BACKING) == 0) { QDict *backing_options; qdict_extract_subqdict(options, &backing_options, "backing."); ret = bdrv_open_backing_file(bs, backing_options, &local_err); if (ret < 0) { goto close_and_fail; } } bdrv_refresh_filename(bs); /* For snapshot=on, create a temporary qcow2 overlay. bs points to the * temporary snapshot afterwards. 
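 * The overlay is a qcow2 image created at a temporary path (see
 * bdrv_append_temp_snapshot() above) and placed on top with
 * bdrv_append().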
*/ if (snapshot_flags) { ret = bdrv_append_temp_snapshot(bs, snapshot_flags, &local_err); if (local_err) { goto close_and_fail; } } /* Check if any unknown options were used */ if (options && (qdict_size(options) != 0)) { const QDictEntry *entry = qdict_first(options); if (flags & BDRV_O_PROTOCOL) { error_setg(errp, "Block protocol '%s' doesn't support the option " "'%s'", drv->format_name, entry->key); } else { error_setg(errp, "Block format '%s' used by device '%s' doesn't " "support the option '%s'", drv->format_name, bdrv_get_device_name(bs), entry->key); } ret = -EINVAL; goto close_and_fail; } if (!bdrv_key_required(bs)) { if (bs->blk) { blk_dev_change_media_cb(bs->blk, true); } } else if (!runstate_check(RUN_STATE_PRELAUNCH) && !runstate_check(RUN_STATE_INMIGRATE) && !runstate_check(RUN_STATE_PAUSED)) { /* HACK */ error_setg(errp, "Guest must be stopped for opening of encrypted image"); ret = -EBUSY; goto close_and_fail; } QDECREF(options); *pbs = bs; return 0; fail: if (file != NULL) { bdrv_unref(file); } QDECREF(bs->options); QDECREF(options); bs->options = NULL; if (!*pbs) { /* If *pbs is NULL, a new BDS has been created in this function and needs to be freed now. Otherwise, it does not need to be closed, since it has not really been opened yet. */ bdrv_unref(bs); } if (local_err) { error_propagate(errp, local_err); } return ret; close_and_fail: /* See fail path, but now the BDS always has to be closed */ if (*pbs) { bdrv_close(bs); } else { bdrv_unref(bs); } QDECREF(options); if (local_err) { error_propagate(errp, local_err); } return ret; } typedef struct BlockReopenQueueEntry { bool prepared; BDRVReopenState state; QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry; } BlockReopenQueueEntry; /* * Adds a BlockDriverState to a simple queue for an atomic, transactional * reopen of multiple devices. * * bs_queue can either be an existing BlockReopenQueue that has had QSIMPLEQ_INIT * already performed, or alternatively may be NULL, in which case a new * BlockReopenQueue will be created and initialized. This newly created * BlockReopenQueue should be passed back in for subsequent calls that are * intended to be of the same atomic 'set'. * * bs is the BlockDriverState to add to the reopen queue. * * flags contains the open flags for the associated bs * * returns a pointer to bs_queue, which is either the newly allocated * bs_queue, or the existing bs_queue being used. * */ BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue, BlockDriverState *bs, int flags) { assert(bs != NULL); BlockReopenQueueEntry *bs_entry; if (bs_queue == NULL) { bs_queue = g_new0(BlockReopenQueue, 1); QSIMPLEQ_INIT(bs_queue); } /* bdrv_open() masks this flag out */ flags &= ~BDRV_O_PROTOCOL; if (bs->file) { bdrv_reopen_queue(bs_queue, bs->file, bdrv_inherited_flags(flags)); } bs_entry = g_new0(BlockReopenQueueEntry, 1); QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry); bs_entry->state.bs = bs; bs_entry->state.flags = flags; return bs_queue; } /* * Reopen multiple BlockDriverStates atomically & transactionally. * * The queue passed in (bs_queue) must have been built up previously * via bdrv_reopen_queue(). * * Reopens all BDS specified in the queue, with the appropriate * flags. All devices are prepared for reopen, and failure of any * device will cause all device changes to be abandoned, and intermediate * data cleaned up. * * If all devices prepare successfully, then the changes are committed * to all devices.
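 * Typical usage is the single-device wrapper bdrv_reopen() below:
 *   queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);
 *   ret = bdrv_reopen_multiple(queue, &local_err);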
* */ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp) { int ret = -1; BlockReopenQueueEntry *bs_entry, *next; Error *local_err = NULL; assert(bs_queue != NULL); bdrv_drain_all(); QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) { if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) { error_propagate(errp, local_err); goto cleanup; } bs_entry->prepared = true; } /* If we reach this point, we have success and just need to apply the * changes */ QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) { bdrv_reopen_commit(&bs_entry->state); } ret = 0; cleanup: QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) { if (ret && bs_entry->prepared) { bdrv_reopen_abort(&bs_entry->state); } g_free(bs_entry); } g_free(bs_queue); return ret; } /* Reopen a single BlockDriverState with the specified flags. */ int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp) { int ret = -1; Error *local_err = NULL; BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags); ret = bdrv_reopen_multiple(queue, &local_err); if (local_err != NULL) { error_propagate(errp, local_err); } return ret; } /* * Prepares a BlockDriverState for reopen. All changes are staged in the * 'opaque' field of the BDRVReopenState, which is used and allocated by * the block driver layer .bdrv_reopen_prepare() * * bs is the BlockDriverState to reopen * flags are the new open flags * queue is the reopen queue * * Returns 0 on success, non-zero on error. On error errp will be set * as well. * * On failure, bdrv_reopen_abort() will be called to clean up any data. * It is the responsibility of the caller to then call the abort() or * commit() for any other BDS that have been left in a prepare() state * */ int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue, Error **errp) { int ret = -1; Error *local_err = NULL; BlockDriver *drv; assert(reopen_state != NULL); assert(reopen_state->bs->drv != NULL); drv = reopen_state->bs->drv; /* if we are to stay read-only, do not allow permission change * to r/w */ if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) && reopen_state->flags & BDRV_O_RDWR) { error_set(errp, QERR_DEVICE_IS_READ_ONLY, bdrv_get_device_name(reopen_state->bs)); goto error; } ret = bdrv_flush(reopen_state->bs); if (ret) { error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive", strerror(-ret)); goto error; } if (drv->bdrv_reopen_prepare) { ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err); if (ret) { if (local_err != NULL) { error_propagate(errp, local_err); } else { error_setg(errp, "failed while preparing to reopen image '%s'", reopen_state->bs->filename); } goto error; } } else { /* It is currently mandatory to have a bdrv_reopen_prepare() * handler for each supported drv. */ error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED, drv->format_name, bdrv_get_device_name(reopen_state->bs), "reopening of file"); ret = -1; goto error; } ret = 0; error: return ret; } /* * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and * makes them final by swapping the staging BlockDriverState contents into * the active BlockDriverState contents. 
*/ void bdrv_reopen_commit(BDRVReopenState *reopen_state) { BlockDriver *drv; assert(reopen_state != NULL); drv = reopen_state->bs->drv; assert(drv != NULL); /* If there are any driver level actions to take */ if (drv->bdrv_reopen_commit) { drv->bdrv_reopen_commit(reopen_state); } /* set BDS specific flags now */ reopen_state->bs->open_flags = reopen_state->flags; reopen_state->bs->enable_write_cache = !!(reopen_state->flags & BDRV_O_CACHE_WB); reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR); bdrv_refresh_limits(reopen_state->bs, NULL); } /* * Abort the reopen, and delete and free the staged changes in * reopen_state */ void bdrv_reopen_abort(BDRVReopenState *reopen_state) { BlockDriver *drv; assert(reopen_state != NULL); drv = reopen_state->bs->drv; assert(drv != NULL); if (drv->bdrv_reopen_abort) { drv->bdrv_reopen_abort(reopen_state); } } void bdrv_close(BlockDriverState *bs) { BdrvAioNotifier *ban, *ban_next; if (bs->job) { block_job_cancel_sync(bs->job); } bdrv_drain_all(); /* complete I/O */ bdrv_flush(bs); bdrv_drain_all(); /* in case flush left pending I/O */ notifier_list_notify(&bs->close_notifiers, bs); if (bs->drv) { if (bs->backing_hd) { BlockDriverState *backing_hd = bs->backing_hd; bdrv_set_backing_hd(bs, NULL); bdrv_unref(backing_hd); } bs->drv->bdrv_close(bs); g_free(bs->opaque); bs->opaque = NULL; bs->drv = NULL; bs->copy_on_read = 0; bs->backing_file[0] = '\0'; bs->backing_format[0] = '\0'; bs->total_sectors = 0; bs->encrypted = 0; bs->valid_key = 0; bs->sg = 0; bs->growable = 0; bs->zero_beyond_eof = false; QDECREF(bs->options); bs->options = NULL; QDECREF(bs->full_open_options); bs->full_open_options = NULL; if (bs->file != NULL) { bdrv_unref(bs->file); bs->file = NULL; } } if (bs->blk) { blk_dev_change_media_cb(bs->blk, false); } /*throttling disk I/O limits*/ if (bs->io_limits_enabled) { bdrv_io_limits_disable(bs); } QLIST_FOREACH_SAFE(ban, &bs->aio_notifiers, list, ban_next) { g_free(ban); } QLIST_INIT(&bs->aio_notifiers); } void bdrv_close_all(void) { BlockDriverState *bs; QTAILQ_FOREACH(bs, &bdrv_states, device_list) { AioContext *aio_context = bdrv_get_aio_context(bs); aio_context_acquire(aio_context); bdrv_close(bs); aio_context_release(aio_context); } } /* Check if any requests are in-flight (including throttled requests) */ static bool bdrv_requests_pending(BlockDriverState *bs) { if (!QLIST_EMPTY(&bs->tracked_requests)) { return true; } if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) { return true; } if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) { return true; } if (bs->file && bdrv_requests_pending(bs->file)) { return true; } if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) { return true; } return false; } /* * Wait for pending requests to complete across all BlockDriverStates * * This function does not flush data to disk, use bdrv_flush_all() for that * after calling this function. * * Note that completion of an asynchronous I/O operation can trigger any * number of other I/O operations on other devices---for example a coroutine * can be arbitrarily complex and a constant flow of I/O can come until the * coroutine is complete. Because of this, it is not possible to have a * function to drain a single device's I/O queue. 
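 * Instead, bdrv_drain_all() below keeps polling every AioContext until
 * no BlockDriverState reports pending or throttled requests.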
*/ void bdrv_drain_all(void) { /* Always run first iteration so any pending completion BHs run */ bool busy = true; BlockDriverState *bs; while (busy) { busy = false; QTAILQ_FOREACH(bs, &bdrv_states, device_list) { AioContext *aio_context = bdrv_get_aio_context(bs); bool bs_busy; aio_context_acquire(aio_context); bdrv_flush_io_queue(bs); bdrv_start_throttled_reqs(bs); bs_busy = bdrv_requests_pending(bs); bs_busy |= aio_poll(aio_context, bs_busy); aio_context_release(aio_context); busy |= bs_busy; } } } /* make a BlockDriverState anonymous by removing from bdrv_state and * graph_bdrv_state list. Also, NULL terminate the device_name to prevent double remove */ void bdrv_make_anon(BlockDriverState *bs) { /* * Take care to remove bs from bdrv_states only when it's actually * in it. Note that bs->device_list.tqe_prev is initially null, * and gets set to non-null by QTAILQ_INSERT_TAIL(). Establish * the useful invariant "bs in bdrv_states iff bs->tqe_prev" by * resetting it to null on remove. */ if (bs->device_list.tqe_prev) { QTAILQ_REMOVE(&bdrv_states, bs, device_list); bs->device_list.tqe_prev = NULL; } if (bs->node_name[0] != '\0') { QTAILQ_REMOVE(&graph_bdrv_states, bs, node_list); } bs->node_name[0] = '\0'; } static void bdrv_rebind(BlockDriverState *bs) { if (bs->drv && bs->drv->bdrv_rebind) { bs->drv->bdrv_rebind(bs); } } static void bdrv_move_feature_fields(BlockDriverState *bs_dest, BlockDriverState *bs_src) { /* move some fields that need to stay attached to the device */ /* dev info */ bs_dest->guest_block_size = bs_src->guest_block_size; bs_dest->copy_on_read = bs_src->copy_on_read; bs_dest->enable_write_cache = bs_src->enable_write_cache; /* i/o throttled req */ memcpy(&bs_dest->throttle_state, &bs_src->throttle_state, sizeof(ThrottleState)); bs_dest->throttled_reqs[0] = bs_src->throttled_reqs[0]; bs_dest->throttled_reqs[1] = bs_src->throttled_reqs[1]; bs_dest->io_limits_enabled = bs_src->io_limits_enabled; /* r/w error */ bs_dest->on_read_error = bs_src->on_read_error; bs_dest->on_write_error = bs_src->on_write_error; /* i/o status */ bs_dest->iostatus_enabled = bs_src->iostatus_enabled; bs_dest->iostatus = bs_src->iostatus; /* dirty bitmap */ bs_dest->dirty_bitmaps = bs_src->dirty_bitmaps; /* reference count */ bs_dest->refcnt = bs_src->refcnt; /* job */ bs_dest->job = bs_src->job; /* keep the same entry in bdrv_states */ bs_dest->device_list = bs_src->device_list; bs_dest->blk = bs_src->blk; memcpy(bs_dest->op_blockers, bs_src->op_blockers, sizeof(bs_dest->op_blockers)); } /* * Swap bs contents for two image chains while they are live, * while keeping required fields on the BlockDriverState that is * actually attached to a device. * * This will modify the BlockDriverState fields, and swap contents * between bs_new and bs_old. Both bs_new and bs_old are modified. * * bs_new must not be attached to a BlockBackend. * * This function does not create any image files. */ void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old) { BlockDriverState tmp; /* The code needs to swap the node_name but simply swapping node_list won't * work so first remove the nodes from the graph list, do the swap then * insert them back if needed. 
*/ if (bs_new->node_name[0] != '\0') { QTAILQ_REMOVE(&graph_bdrv_states, bs_new, node_list); } if (bs_old->node_name[0] != '\0') { QTAILQ_REMOVE(&graph_bdrv_states, bs_old, node_list); } /* bs_new must be unattached and shouldn't have anything fancy enabled */ assert(!bs_new->blk); assert(QLIST_EMPTY(&bs_new->dirty_bitmaps)); assert(bs_new->job == NULL); assert(bs_new->io_limits_enabled == false); assert(!throttle_have_timer(&bs_new->throttle_state)); tmp = *bs_new; *bs_new = *bs_old; *bs_old = tmp; /* there are some fields that should not be swapped, move them back */ bdrv_move_feature_fields(&tmp, bs_old); bdrv_move_feature_fields(bs_old, bs_new); bdrv_move_feature_fields(bs_new, &tmp); /* bs_new must remain unattached */ assert(!bs_new->blk); /* Check a few fields that should remain attached to the device */ assert(bs_new->job == NULL); assert(bs_new->io_limits_enabled == false); assert(!throttle_have_timer(&bs_new->throttle_state)); /* insert the nodes back into the graph node list if needed */ if (bs_new->node_name[0] != '\0') { QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_new, node_list); } if (bs_old->node_name[0] != '\0') { QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_old, node_list); } bdrv_rebind(bs_new); bdrv_rebind(bs_old); } /* * Add new bs contents at the top of an image chain while the chain is * live, while keeping required fields on the top layer. * * This will modify the BlockDriverState fields, and swap contents * between bs_new and bs_top. Both bs_new and bs_top are modified. * * bs_new must not be attached to a BlockBackend. * * This function does not create any image files. */ void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top) { bdrv_swap(bs_new, bs_top); /* The contents of 'tmp' will become bs_top, as we are * swapping bs_new and bs_top contents. */ bdrv_set_backing_hd(bs_top, bs_new); } static void bdrv_delete(BlockDriverState *bs) { assert(!bs->job); assert(bdrv_op_blocker_is_empty(bs)); assert(!bs->refcnt); assert(QLIST_EMPTY(&bs->dirty_bitmaps)); bdrv_close(bs); /* remove from list, if necessary */ bdrv_make_anon(bs); g_free(bs); } /* * Run consistency checks on an image * * Returns 0 if the check could be completed (it doesn't mean that the image is * free of errors) or -errno when an internal error occurred. The results of the * check are stored in res. */ int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix) { if (bs->drv == NULL) { return -ENOMEDIUM; } if (bs->drv->bdrv_check == NULL) { return -ENOTSUP; } memset(res, 0, sizeof(*res)); return bs->drv->bdrv_check(bs, res, fix); } #define COMMIT_BUF_SECTORS 2048 /* commit COW file into the raw image */ int bdrv_commit(BlockDriverState *bs) { BlockDriver *drv = bs->drv; int64_t sector, total_sectors, length, backing_length; int n, ro, open_flags; int ret = 0; uint8_t *buf = NULL; char filename[PATH_MAX]; if (!drv) return -ENOMEDIUM; if (!bs->backing_hd) { return -ENOTSUP; } if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT, NULL) || bdrv_op_is_blocked(bs->backing_hd, BLOCK_OP_TYPE_COMMIT, NULL)) { return -EBUSY; } ro = bs->backing_hd->read_only; /* Use pstrcpy (not strncpy): filename must be NUL-terminated. 
*/ pstrcpy(filename, sizeof(filename), bs->backing_hd->filename); open_flags = bs->backing_hd->open_flags; if (ro) { if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) { return -EACCES; } } length = bdrv_getlength(bs); if (length < 0) { ret = length; goto ro_cleanup; } backing_length = bdrv_getlength(bs->backing_hd); if (backing_length < 0) { ret = backing_length; goto ro_cleanup; } /* If our top snapshot is larger than the backing file image, * grow the backing file image if possible. If not possible, * we must return an error */ if (length > backing_length) { ret = bdrv_truncate(bs->backing_hd, length); if (ret < 0) { goto ro_cleanup; } } total_sectors = length >> BDRV_SECTOR_BITS; /* qemu_try_blockalign() for bs will choose an alignment that works for * bs->backing_hd as well, so no need to compare the alignment manually. */ buf = qemu_try_blockalign(bs, COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE); if (buf == NULL) { ret = -ENOMEM; goto ro_cleanup; } for (sector = 0; sector < total_sectors; sector += n) { ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n); if (ret < 0) { goto ro_cleanup; } if (ret) { ret = bdrv_read(bs, sector, buf, n); if (ret < 0) { goto ro_cleanup; } ret = bdrv_write(bs->backing_hd, sector, buf, n); if (ret < 0) { goto ro_cleanup; } } } if (drv->bdrv_make_empty) { ret = drv->bdrv_make_empty(bs); if (ret < 0) { goto ro_cleanup; } bdrv_flush(bs); } /* * Make sure all data we wrote to the backing device is actually * stable on disk. */ if (bs->backing_hd) { bdrv_flush(bs->backing_hd); } ret = 0; ro_cleanup: qemu_vfree(buf); if (ro) { /* ignoring error return here */ bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL); } return ret; } int bdrv_commit_all(void) { BlockDriverState *bs; QTAILQ_FOREACH(bs, &bdrv_states, device_list) { AioContext *aio_context = bdrv_get_aio_context(bs); aio_context_acquire(aio_context); if (bs->drv && bs->backing_hd) { int ret = bdrv_commit(bs); if (ret < 0) { aio_context_release(aio_context); return ret; } } aio_context_release(aio_context); } return 0; } /** * Remove an active request from the tracked requests list * * This function should be called when a tracked request is completing. 
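 * Lifecycle sketch: tracked_request_begin() links the request into
 * bs->tracked_requests, wait_serialising_requests() may block it on
 * overlapping serialising requests, and tracked_request_end() unlinks
 * it and restarts any coroutines queued on its wait_queue.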
*/ static void tracked_request_end(BdrvTrackedRequest *req) { if (req->serialising) { req->bs->serialising_in_flight--; } QLIST_REMOVE(req, list); qemu_co_queue_restart_all(&req->wait_queue); } /** * Add an active request to the tracked requests list */ static void tracked_request_begin(BdrvTrackedRequest *req, BlockDriverState *bs, int64_t offset, unsigned int bytes, bool is_write) { *req = (BdrvTrackedRequest){ .bs = bs, .offset = offset, .bytes = bytes, .is_write = is_write, .co = qemu_coroutine_self(), .serialising = false, .overlap_offset = offset, .overlap_bytes = bytes, }; qemu_co_queue_init(&req->wait_queue); QLIST_INSERT_HEAD(&bs->tracked_requests, req, list); } static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align) { int64_t overlap_offset = req->offset & ~(align - 1); unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align) - overlap_offset; if (!req->serialising) { req->bs->serialising_in_flight++; req->serialising = true; } req->overlap_offset = MIN(req->overlap_offset, overlap_offset); req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes); } /** * Round a region to cluster boundaries */ void bdrv_round_to_clusters(BlockDriverState *bs, int64_t sector_num, int nb_sectors, int64_t *cluster_sector_num, int *cluster_nb_sectors) { BlockDriverInfo bdi; if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) { *cluster_sector_num = sector_num; *cluster_nb_sectors = nb_sectors; } else { int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE; *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c); *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num + nb_sectors, c); } } static int bdrv_get_cluster_size(BlockDriverState *bs) { BlockDriverInfo bdi; int ret; ret = bdrv_get_info(bs, &bdi); if (ret < 0 || bdi.cluster_size == 0) { return bs->request_alignment; } else { return bdi.cluster_size; } } static bool tracked_request_overlaps(BdrvTrackedRequest *req, int64_t offset, unsigned int bytes) { /* aaaa bbbb */ if (offset >= req->overlap_offset + req->overlap_bytes) { return false; } /* bbbb aaaa */ if (req->overlap_offset >= offset + bytes) { return false; } return true; } static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self) { BlockDriverState *bs = self->bs; BdrvTrackedRequest *req; bool retry; bool waited = false; if (!bs->serialising_in_flight) { return false; } do { retry = false; QLIST_FOREACH(req, &bs->tracked_requests, list) { if (req == self || (!req->serialising && !self->serialising)) { continue; } if (tracked_request_overlaps(req, self->overlap_offset, self->overlap_bytes)) { /* Hitting this means there was a reentrant request, for * example, a block driver issuing nested requests. This must * never happen since it means deadlock. */ assert(qemu_coroutine_self() != req->co); /* If the request is already (indirectly) waiting for us, or * will wait for us as soon as it wakes up, then just go on * (instead of producing a deadlock in the former case). 
*/ if (!req->waiting_for) { self->waiting_for = req; qemu_co_queue_wait(&req->wait_queue); self->waiting_for = NULL; retry = true; waited = true; break; } } } } while (retry); return waited; } /* * Return values: * 0 - success * -EINVAL - backing format specified, but no file * -ENOSPC - can't update the backing file because no space is left in the * image file header * -ENOTSUP - format driver doesn't support changing the backing file */ int bdrv_change_backing_file(BlockDriverState *bs, const char *backing_file, const char *backing_fmt) { BlockDriver *drv = bs->drv; int ret; /* Backing file format doesn't make sense without a backing file */ if (backing_fmt && !backing_file) { return -EINVAL; } if (drv->bdrv_change_backing_file != NULL) { ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt); } else { ret = -ENOTSUP; } if (ret == 0) { pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: ""); pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: ""); } return ret; } /* * Finds the image layer in the chain that has 'bs' as its backing file. * * active is the current topmost image. * * Returns NULL if bs is not found in active's image chain, * or if active == bs. * * Returns the bottommost base image if bs == NULL. */ BlockDriverState *bdrv_find_overlay(BlockDriverState *active, BlockDriverState *bs) { while (active && bs != active->backing_hd) { active = active->backing_hd; } return active; } /* Given a BDS, searches for the base layer. */ BlockDriverState *bdrv_find_base(BlockDriverState *bs) { return bdrv_find_overlay(bs, NULL); } typedef struct BlkIntermediateStates { BlockDriverState *bs; QSIMPLEQ_ENTRY(BlkIntermediateStates) entry; } BlkIntermediateStates; /* * Drops images above 'base' up to and including 'top', and sets the image * above 'top' to have base as its backing file. * * Requires that the overlay to 'top' is opened r/w, so that the backing file * information in 'bs' can be properly updated. * * E.g., this will convert the following chain: * bottom <- base <- intermediate <- top <- active * * to * * bottom <- base <- active * * It is allowed for bottom==base, in which case it converts: * * base <- intermediate <- top <- active * * to * * base <- active * * If backing_file_str is non-NULL, it will be used when modifying top's * overlay image metadata. 
* * Error conditions: * if active == top, that is considered an error * */ int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top, BlockDriverState *base, const char *backing_file_str) { BlockDriverState *intermediate; BlockDriverState *base_bs = NULL; BlockDriverState *new_top_bs = NULL; BlkIntermediateStates *intermediate_state, *next; int ret = -EIO; QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete; QSIMPLEQ_INIT(&states_to_delete); if (!top->drv || !base->drv) { goto exit; } new_top_bs = bdrv_find_overlay(active, top); if (new_top_bs == NULL) { /* we could not find the image above 'top', this is an error */ goto exit; } /* special case of new_top_bs->backing_hd already pointing to base - nothing * to do, no intermediate images */ if (new_top_bs->backing_hd == base) { ret = 0; goto exit; } intermediate = top; /* now we will go down through the list, and add each BDS we find * into our deletion queue, until we hit the 'base' */ while (intermediate) { intermediate_state = g_new0(BlkIntermediateStates, 1); intermediate_state->bs = intermediate; QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry); if (intermediate->backing_hd == base) { base_bs = intermediate->backing_hd; break; } intermediate = intermediate->backing_hd; } if (base_bs == NULL) { /* something went wrong, we did not end at the base. safely * unravel everything, and exit with error */ goto exit; } /* success - we can delete the intermediate states, and link top->base */ backing_file_str = backing_file_str ? backing_file_str : base_bs->filename; ret = bdrv_change_backing_file(new_top_bs, backing_file_str, base_bs->drv ? base_bs->drv->format_name : ""); if (ret) { goto exit; } bdrv_set_backing_hd(new_top_bs, base_bs); QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) { /* so that bdrv_close() does not recursively close the chain */ bdrv_set_backing_hd(intermediate_state->bs, NULL); bdrv_unref(intermediate_state->bs); } ret = 0; exit: QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) { g_free(intermediate_state); } return ret; } static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset, size_t size) { int64_t len; if (size > INT_MAX) { return -EIO; } if (!bdrv_is_inserted(bs)) return -ENOMEDIUM; if (bs->growable) return 0; len = bdrv_getlength(bs); if (offset < 0) return -EIO; if ((offset > len) || (len - offset < size)) return -EIO; return 0; } static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num, int nb_sectors) { if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) { return -EIO; } return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE, nb_sectors * BDRV_SECTOR_SIZE); } typedef struct RwCo { BlockDriverState *bs; int64_t offset; QEMUIOVector *qiov; bool is_write; int ret; BdrvRequestFlags flags; } RwCo; static void coroutine_fn bdrv_rw_co_entry(void *opaque) { RwCo *rwco = opaque; if (!rwco->is_write) { rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset, rwco->qiov->size, rwco->qiov, rwco->flags); } else { rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset, rwco->qiov->size, rwco->qiov, rwco->flags); } } /* * Process a vectored synchronous request using coroutines */ static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov, bool is_write, BdrvRequestFlags flags) { Coroutine *co; RwCo rwco = { .bs = bs, .offset = offset, .qiov = qiov, .is_write = is_write, .ret = NOT_DONE, .flags = flags, }; /** * In sync call context, when the vcpu is blocked, 
this throttling timer * will not fire; so the I/O throttling function has to be disabled here * if it has been enabled. */ if (bs->io_limits_enabled) { fprintf(stderr, "Disabling I/O throttling on '%s' due " "to synchronous I/O.\n", bdrv_get_device_name(bs)); bdrv_io_limits_disable(bs); } if (qemu_in_coroutine()) { /* Fast-path if already in coroutine context */ bdrv_rw_co_entry(&rwco); } else { AioContext *aio_context = bdrv_get_aio_context(bs); co = qemu_coroutine_create(bdrv_rw_co_entry); qemu_coroutine_enter(co, &rwco); while (rwco.ret == NOT_DONE) { aio_poll(aio_context, true); } } return rwco.ret; } /* * Process a synchronous request using coroutines */ static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf, int nb_sectors, bool is_write, BdrvRequestFlags flags) { QEMUIOVector qiov; struct iovec iov = { .iov_base = (void *)buf, .iov_len = nb_sectors * BDRV_SECTOR_SIZE, }; if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) { return -EINVAL; } qemu_iovec_init_external(&qiov, &iov, 1); return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS, &qiov, is_write, flags); } /* return < 0 if error. See bdrv_write() for the return codes */ int bdrv_read(BlockDriverState *bs, int64_t sector_num, uint8_t *buf, int nb_sectors) { return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0); } /* Just like bdrv_read(), but with I/O throttling temporarily disabled */ int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num, uint8_t *buf, int nb_sectors) { bool enabled; int ret; enabled = bs->io_limits_enabled; bs->io_limits_enabled = false; ret = bdrv_read(bs, sector_num, buf, nb_sectors); bs->io_limits_enabled = enabled; return ret; } /* Return < 0 if error. Important errors are: -EIO generic I/O error (may happen for all errors) -ENOMEDIUM No media inserted. -EINVAL Invalid sector number or nb_sectors -EACCES Trying to write a read-only device */ int bdrv_write(BlockDriverState *bs, int64_t sector_num, const uint8_t *buf, int nb_sectors) { return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0); } int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num, int nb_sectors, BdrvRequestFlags flags) { return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true, BDRV_REQ_ZERO_WRITE | flags); } /* * Completely zero out a block device with the help of bdrv_write_zeroes. * The operation is sped up by checking the block status and only writing * zeroes to the device if they currently do not return zeroes. Optional * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP). * * Returns < 0 on error, 0 on success. For error codes see bdrv_write(). 
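 *
 * Illustrative usage sketch (not part of the original code; assumes an
 * already-open BlockDriverState *bs):
 *
 *     int ret = bdrv_make_zero(bs, BDRV_REQ_MAY_UNMAP);
 *     if (ret < 0) {
 *         error_report("zeroing failed: %s", strerror(-ret));
 *     }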
*/ int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags) { int64_t target_sectors, ret, nb_sectors, sector_num = 0; int n; target_sectors = bdrv_nb_sectors(bs); if (target_sectors < 0) { return target_sectors; } for (;;) { nb_sectors = target_sectors - sector_num; if (nb_sectors <= 0) { return 0; } if (nb_sectors > INT_MAX) { nb_sectors = INT_MAX; } ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n); if (ret < 0) { error_report("error getting block status at sector %" PRId64 ": %s", sector_num, strerror(-ret)); return ret; } if (ret & BDRV_BLOCK_ZERO) { sector_num += n; continue; } ret = bdrv_write_zeroes(bs, sector_num, n, flags); if (ret < 0) { error_report("error writing zeroes at sector %" PRId64 ": %s", sector_num, strerror(-ret)); return ret; } sector_num += n; } } int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes) { QEMUIOVector qiov; struct iovec iov = { .iov_base = (void *)buf, .iov_len = bytes, }; int ret; if (bytes < 0) { return -EINVAL; } qemu_iovec_init_external(&qiov, &iov, 1); ret = bdrv_prwv_co(bs, offset, &qiov, false, 0); if (ret < 0) { return ret; } return bytes; } int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov) { int ret; ret = bdrv_prwv_co(bs, offset, qiov, true, 0); if (ret < 0) { return ret; } return qiov->size; } int bdrv_pwrite(BlockDriverState *bs, int64_t offset, const void *buf, int bytes) { QEMUIOVector qiov; struct iovec iov = { .iov_base = (void *) buf, .iov_len = bytes, }; if (bytes < 0) { return -EINVAL; } qemu_iovec_init_external(&qiov, &iov, 1); return bdrv_pwritev(bs, offset, &qiov); } /* * Writes to the file and ensures that no writes are reordered across this * request (acts as a barrier) * * Returns 0 on success, -errno in error cases. */ int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset, const void *buf, int count) { int ret; ret = bdrv_pwrite(bs, offset, buf, count); if (ret < 0) { return ret; } /* No flush needed for cache modes that already do it */ if (bs->enable_write_cache) { bdrv_flush(bs); } return 0; } static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov) { /* Perform I/O through a temporary buffer so that users who scribble over * their read buffer while the operation is in progress do not end up * modifying the image file. This is critical for zero-copy guest I/O * where anything might happen inside guest memory. */ void *bounce_buffer; BlockDriver *drv = bs->drv; struct iovec iov; QEMUIOVector bounce_qiov; int64_t cluster_sector_num; int cluster_nb_sectors; size_t skip_bytes; int ret; /* Cover entire cluster so no additional backing file I/O is required when * allocating cluster in the image file. 
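 * For example (illustrative numbers): with a 64 KiB cluster size, a 4 KiB
 * read at byte offset 6 KiB is rounded out to cover the whole [0, 64 KiB)
 * cluster before it is read and written back below.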
*/ bdrv_round_to_clusters(bs, sector_num, nb_sectors, &cluster_sector_num, &cluster_nb_sectors); trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, cluster_sector_num, cluster_nb_sectors); iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE; iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len); if (bounce_buffer == NULL) { ret = -ENOMEM; goto err; } qemu_iovec_init_external(&bounce_qiov, &iov, 1); ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors, &bounce_qiov); if (ret < 0) { goto err; } if (drv->bdrv_co_write_zeroes && buffer_is_zero(bounce_buffer, iov.iov_len)) { ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num, cluster_nb_sectors, 0); } else { /* This does not change the data on the disk, it is not necessary * to flush even in cache=writethrough mode. */ ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors, &bounce_qiov); } if (ret < 0) { /* It might be okay to ignore write errors for guest requests. If this * is a deliberate copy-on-read then we don't want to ignore the error. * Simply report it in all cases. */ goto err; } skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE; qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes, nb_sectors * BDRV_SECTOR_SIZE); err: qemu_vfree(bounce_buffer); return ret; } /* * Forwards an already correctly aligned request to the BlockDriver. This * handles copy on read and zeroing after EOF; any other features must be * implemented by the caller. */ static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs, BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, int64_t align, QEMUIOVector *qiov, int flags) { BlockDriver *drv = bs->drv; int ret; int64_t sector_num = offset >> BDRV_SECTOR_BITS; unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS; assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0); assert(!qiov || bytes == qiov->size); /* Handle Copy on Read and associated serialisation */ if (flags & BDRV_REQ_COPY_ON_READ) { /* If we touch the same cluster it counts as an overlap. This * guarantees that allocating writes will be serialized and not race * with each other for the same cluster. For example, in copy-on-read * it ensures that the CoR read and write operations are atomic and * guest writes cannot interleave between them. 
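 * Put differently: a guest write that overlaps the cluster currently being
 * copied up blocks in its own wait_serialising_requests() call until the
 * bounce-buffer write below has completed.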
 */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    wait_serialising_requests(req);

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int pnum;

        ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver */
    if (!(bs->zero_beyond_eof && bs->growable)) {
        ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        /* Read zeros after EOF of growable BDSes */
        int64_t total_sectors, max_nb_sectors;

        total_sectors = bdrv_nb_sectors(bs);
        if (total_sectors < 0) {
            ret = total_sectors;
            goto out;
        }

        max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
                                  align >> BDRV_SECTOR_BITS);
        if (max_nb_sectors > 0) {
            QEMUIOVector local_qiov;
            size_t local_sectors;

            /* Cap the sector count so that local_sectors * BDRV_SECTOR_SIZE
             * below cannot overflow size_t. */
            max_nb_sectors = MIN(max_nb_sectors, SIZE_MAX / BDRV_SECTOR_SIZE);
            local_sectors = MIN(max_nb_sectors, nb_sectors);

            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, 0,
                              local_sectors * BDRV_SECTOR_SIZE);

            ret = drv->bdrv_co_readv(bs, sector_num, local_sectors,
                                     &local_qiov);

            qemu_iovec_destroy(&local_qiov);
        } else {
            ret = 0;
        }

        /* Reading beyond end of file is supposed to produce zeroes */
        if (ret == 0 && total_sectors < sector_num + nb_sectors) {
            uint64_t offset = MAX(0, total_sectors - sector_num);
            uint64_t bytes = (sector_num + nb_sectors - offset) *
                              BDRV_SECTOR_SIZE;
            qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
        }
    }

out:
    return ret;
}

/*
 * Handle a read request in coroutine context
 */
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (bdrv_check_byte_request(bs, offset, bytes)) {
        return -EIO;
    }

    if (bs->copy_on_read) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, bytes, false);
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));
        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, false);
    ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
                              use_local_qiov ?
                              &local_qiov : qiov, flags);
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}

static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > (UINT_MAX >> BDRV_SECTOR_BITS)) {
        return -EINVAL;
    }

    return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
                             nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                            BDRV_REQ_COPY_ON_READ);
}

/* if no limit is specified in the BlockLimits use a default
 * of 32768 512-byte sectors (16 MiB) per request.
 */
#define MAX_WRITE_ZEROES_DEFAULT 32768

static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov = {0};
    int ret = 0;

    int max_write_zeroes = bs->bl.max_write_zeroes ?
                           bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT;

    while (nb_sectors > 0 && !ret) {
        int num = nb_sectors;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned.
         */
        if (bs->bl.write_zeroes_alignment
            && num > bs->bl.write_zeroes_alignment) {
            if (sector_num % bs->bl.write_zeroes_alignment != 0) {
                /* Make a small request up to the first aligned sector.  */
                num = bs->bl.write_zeroes_alignment;
                num -= sector_num % bs->bl.write_zeroes_alignment;
            } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
                /* Shorten the request to the last aligned sector.  num cannot
                 * underflow because num > bs->bl.write_zeroes_alignment.
                 */
                num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
            }
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_write_zeroes) {
            ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            iov.iov_len = num * BDRV_SECTOR_SIZE;
            if (iov.iov_base == NULL) {
                iov.iov_base = qemu_try_blockalign(bs, num * BDRV_SECTOR_SIZE);
                if (iov.iov_base == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
                memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);

            /* Keep the bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_write_zeroes) {
                qemu_vfree(iov.iov_base);
                iov.iov_base = NULL;
            }
        }

        sector_num += num;
        nb_sectors -= num;
    }

fail:
    qemu_vfree(iov.iov_base);
    return ret;
}

/*
 * Forwards an already correctly aligned write request to the BlockDriver.
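 * E.g. (illustrative): a 512-byte write at byte offset 512 arrives here as
 * sector_num = 1, nb_sectors = 1, i.e. offset >> BDRV_SECTOR_BITS and
 * bytes >> BDRV_SECTOR_BITS.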
*/ static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs, BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, int flags) { BlockDriver *drv = bs->drv; bool waited; int ret; int64_t sector_num = offset >> BDRV_SECTOR_BITS; unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS; assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0); assert(!qiov || bytes == qiov->size); waited = wait_serialising_requests(req); assert(!waited || !req->serialising); assert(req->overlap_offset <= offset); assert(offset + bytes <= req->overlap_offset + req->overlap_bytes); ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req); if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF && !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes && qemu_iovec_is_zero(qiov)) { flags |= BDRV_REQ_ZERO_WRITE; if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) { flags |= BDRV_REQ_MAY_UNMAP; } } if (ret < 0) { /* Do nothing, write notifier decided to fail this request */ } else if (flags & BDRV_REQ_ZERO_WRITE) { BLKDBG_EVENT(bs, BLKDBG_PWRITEV_ZERO); ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags); } else { BLKDBG_EVENT(bs, BLKDBG_PWRITEV); ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov); } BLKDBG_EVENT(bs, BLKDBG_PWRITEV_DONE); if (ret == 0 && !bs->enable_write_cache) { ret = bdrv_co_flush(bs); } bdrv_set_dirty(bs, sector_num, nb_sectors); block_acct_highest_sector(&bs->stats, sector_num, nb_sectors); if (bs->growable && ret >= 0) { bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors); } return ret; } /* * Handle a write request in coroutine context */ static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) { BdrvTrackedRequest req; /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */ uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment); uint8_t *head_buf = NULL; uint8_t *tail_buf = NULL; QEMUIOVector local_qiov; bool use_local_qiov = false; int ret; if (!bs->drv) { return -ENOMEDIUM; } if (bs->read_only) { return -EACCES; } if (bdrv_check_byte_request(bs, offset, bytes)) { return -EIO; } /* throttling disk I/O */ if (bs->io_limits_enabled) { bdrv_io_limits_intercept(bs, bytes, true); } /* * Align write if necessary by performing a read-modify-write cycle. * Pad qiov with the read parts and be sure to have a tracked request not * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle. 
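 * E.g. (illustrative numbers): with align = 4096, a 512-byte write at
 * offset 5120 first reads the aligned 4096-byte block at offset 4096,
 * splices the new data into it, and then writes the full block back.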
 */
    tracked_request_begin(&req, bs, offset, bytes, true);

    if (offset & (align - 1)) {
        QEMUIOVector head_qiov;
        struct iovec head_iov;

        mark_request_serialising(&req, align);
        wait_serialising_requests(&req);

        head_buf = qemu_blockalign(bs, align);
        head_iov = (struct iovec) {
            .iov_base   = head_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&head_qiov, &head_iov, 1);

        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
                                  align, &head_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        QEMUIOVector tail_qiov;
        struct iovec tail_iov;
        size_t tail_bytes;
        bool waited;

        mark_request_serialising(&req, align);
        waited = wait_serialising_requests(&req);
        assert(!waited || !use_local_qiov);

        tail_buf = qemu_blockalign(bs, align);
        tail_iov = (struct iovec) {
            .iov_base   = tail_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);

        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1),
                                  align, align, &tail_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }

        tail_bytes = (offset + bytes) & (align - 1);
        qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);

        bytes = ROUND_UP(bytes, align);
    }

    ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
                               use_local_qiov ? &local_qiov : qiov,
                               flags);

fail:
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }
    qemu_vfree(head_buf);
    qemu_vfree(tail_buf);

    return ret;
}

static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > (INT_MAX >> BDRV_SECTOR_BITS)) {
        return -EINVAL;
    }

    return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
                              nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
                                      int64_t sector_num, int nb_sectors,
                                      BdrvRequestFlags flags)
{
    trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);

    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
                             BDRV_REQ_ZERO_WRITE | flags);
}

/**
 * Truncate file to 'offset' bytes (needed only for file protocols)
 */
int bdrv_truncate(BlockDriverState *bs, int64_t offset)
{
    BlockDriver *drv = bs->drv;
    int ret;
    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_truncate)
        return -ENOTSUP;
    if (bs->read_only)
        return -EACCES;

    ret = drv->bdrv_truncate(bs, offset);
    if (ret == 0) {
        ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
        if (bs->blk) {
            blk_dev_resize_cb(bs->blk);
        }
    }
    return ret;
}

/**
 * Length of an allocated file in bytes.  Sparse files are counted by actual
 * allocated space.  Return < 0 if error or unknown.
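 *
 * Illustrative example (hypothetical numbers): for a sparse 10 GiB raw file
 * of which only 1 GiB has actually been written, bdrv_getlength() reports
 * 10 GiB while this function reports roughly 1 GiB.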
 */
int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (!drv) {
        return -ENOMEDIUM;
    }
    if (drv->bdrv_get_allocated_file_size) {
        return drv->bdrv_get_allocated_file_size(bs);
    }
    if (bs->file) {
        return bdrv_get_allocated_file_size(bs->file);
    }
    return -ENOTSUP;
}

/**
 * Return number of sectors on success, -errno on error.
 */
int64_t bdrv_nb_sectors(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (!drv)
        return -ENOMEDIUM;

    if (drv->has_variable_length) {
        int ret = refresh_total_sectors(bs, bs->total_sectors);
        if (ret < 0) {
            return ret;
        }
    }
    return bs->total_sectors;
}

/**
 * Return length in bytes on success, -errno on error.
 * The length is always a multiple of BDRV_SECTOR_SIZE.
 */
int64_t bdrv_getlength(BlockDriverState *bs)
{
    int64_t ret = bdrv_nb_sectors(bs);

    return ret < 0 ? ret : ret * BDRV_SECTOR_SIZE;
}

/* return 0 as number of sectors if no device present or error */
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
{
    int64_t nb_sectors = bdrv_nb_sectors(bs);

    *nb_sectors_ptr = nb_sectors < 0 ? 0 : nb_sectors;
}

void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
                       BlockdevOnError on_write_error)
{
    bs->on_read_error = on_read_error;
    bs->on_write_error = on_write_error;
}

BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read)
{
    return is_read ? bs->on_read_error : bs->on_write_error;
}

BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read,
                                       int error)
{
    BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ?
               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BLOCK_ERROR_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BLOCK_ERROR_ACTION_IGNORE;
    default:
        abort();
    }
}

static void send_qmp_error_event(BlockDriverState *bs,
                                 BlockErrorAction action,
                                 bool is_read, int error)
{
    IoOperationType optype;

    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
    qapi_event_send_block_io_error(bdrv_get_device_name(bs), optype, action,
                                   bdrv_iostatus_is_enabled(bs),
                                   error == ENOSPC, strerror(error),
                                   &error_abort);
}

/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
                       bool is_read, int error)
{
    assert(error >= 0);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* First set the iostatus, so that "info block" returns an iostatus
         * that matches the events raised so far (an additional error iostatus
         * is fine, but not a lost one).
         */
        bdrv_iostatus_set_err(bs, error);

        /* Then raise the request to stop the VM and the event.
         * qemu_system_vmstop_request_prepare has two effects.  First,
         * it ensures that the STOP event always comes after the
         * BLOCK_IO_ERROR event.  Second, it ensures that even if management
         * observes the BLOCK_IO_ERROR event and issues a "cont" before the
         * STOP event is delivered, the VM will not stop.  In this case,
         * vm_start() also ensures that the STOP/RESUME pair of events is
         * emitted.
         */
        qemu_system_vmstop_request_prepare();
        send_qmp_error_event(bs, action, is_read, error);
        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
    } else {
        send_qmp_error_event(bs, action, is_read, error);
    }
}

int bdrv_is_read_only(BlockDriverState *bs)
{
    return bs->read_only;
}

int bdrv_is_sg(BlockDriverState *bs)
{
    return bs->sg;
}

int bdrv_enable_write_cache(BlockDriverState *bs)
{
    return bs->enable_write_cache;
}

void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
{
    bs->enable_write_cache = wce;

    /* so a reopen() will preserve wce */
    if (wce) {
        bs->open_flags |= BDRV_O_CACHE_WB;
    } else {
        bs->open_flags &= ~BDRV_O_CACHE_WB;
    }
}

int bdrv_is_encrypted(BlockDriverState *bs)
{
    if (bs->backing_hd && bs->backing_hd->encrypted)
        return 1;
    return bs->encrypted;
}

int bdrv_key_required(BlockDriverState *bs)
{
    BlockDriverState *backing_hd = bs->backing_hd;

    if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
        return 1;
    return (bs->encrypted && !bs->valid_key);
}

int bdrv_set_key(BlockDriverState *bs, const char *key)
{
    int ret;
    if (bs->backing_hd && bs->backing_hd->encrypted) {
        ret = bdrv_set_key(bs->backing_hd, key);
        if (ret < 0)
            return ret;
        if (!bs->encrypted)
            return 0;
    }
    if (!bs->encrypted) {
        return -EINVAL;
    } else if (!bs->drv || !bs->drv->bdrv_set_key) {
        return -ENOMEDIUM;
    }
    ret = bs->drv->bdrv_set_key(bs, key);
    if (ret < 0) {
        bs->valid_key = 0;
    } else if (!bs->valid_key) {
        bs->valid_key = 1;
        if (bs->blk) {
            /* call the change callback now, we skipped it on open */
            blk_dev_change_media_cb(bs->blk, true);
        }
    }
    return ret;
}

const char *bdrv_get_format_name(BlockDriverState *bs)
{
    return bs->drv ? bs->drv->format_name : NULL;
}

static int qsort_strcmp(const void *a, const void *b)
{
    /* qsort() hands us pointers to the (const char *) array elements, so
     * dereference them before comparing the strings themselves. */
    return strcmp(*(char * const *)a, *(char * const *)b);
}

void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
                         void *opaque)
{
    BlockDriver *drv;
    int count = 0;
    int i;
    const char **formats = NULL;

    QLIST_FOREACH(drv, &bdrv_drivers, list) {
        if (drv->format_name) {
            bool found = false;
            int i = count;
            while (formats && i && !found) {
                found = !strcmp(formats[--i], drv->format_name);
            }

            if (!found) {
                formats = g_renew(const char *, formats, count + 1);
                formats[count++] = drv->format_name;
            }
        }
    }

    qsort(formats, count, sizeof(formats[0]), qsort_strcmp);

    for (i = 0; i < count; i++) {
        it(opaque, formats[i]);
    }

    g_free(formats);
}

/* Find the root BDS of the block backend named @name. */
/* TODO convert callers to blk_by_name(), then remove */
BlockDriverState *bdrv_find(const char *name)
{
    BlockBackend *blk = blk_by_name(name);

    return blk ? blk_bs(blk) : NULL;
}

/* Find a node in the graph of BDSes by its node name. */
BlockDriverState *bdrv_find_node(const char *node_name)
{
    BlockDriverState *bs;

    assert(node_name);

    QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
        if (!strcmp(node_name, bs->node_name)) {
            return bs;
        }
    }
    return NULL;
}

/* Put this QMP function here so it can access the static
 * graph_bdrv_states. */
*/ BlockDeviceInfoList *bdrv_named_nodes_list(void) { BlockDeviceInfoList *list, *entry; BlockDriverState *bs; list = NULL; QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) { entry = g_malloc0(sizeof(*entry)); entry->value = bdrv_block_device_info(bs); entry->next = list; list = entry; } return list; } BlockDriverState *bdrv_lookup_bs(const char *device, const char *node_name, Error **errp) { BlockBackend *blk; BlockDriverState *bs; if (device) { blk = blk_by_name(device); if (blk) { return blk_bs(blk); } } if (node_name) { bs = bdrv_find_node(node_name); if (bs) { return bs; } } error_setg(errp, "Cannot find device=%s nor node_name=%s", device ? device : "", node_name ? node_name : ""); return NULL; } /* If 'base' is in the same chain as 'top', return true. Otherwise, * return false. If either argument is NULL, return false. */ bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base) { while (top && top != base) { top = top->backing_hd; } return top != NULL; } BlockDriverState *bdrv_next(BlockDriverState *bs) { if (!bs) { return QTAILQ_FIRST(&bdrv_states); } return QTAILQ_NEXT(bs, device_list); } /* TODO check what callers really want: bs->node_name or blk_name() */ const char *bdrv_get_device_name(const BlockDriverState *bs) { return bs->blk ? blk_name(bs->blk) : ""; } int bdrv_get_flags(BlockDriverState *bs) { return bs->open_flags; } int bdrv_flush_all(void) { BlockDriverState *bs; int result = 0; QTAILQ_FOREACH(bs, &bdrv_states, device_list) { AioContext *aio_context = bdrv_get_aio_context(bs); int ret; aio_context_acquire(aio_context); ret = bdrv_flush(bs); if (ret < 0 && !result) { result = ret; } aio_context_release(aio_context); } return result; } int bdrv_has_zero_init_1(BlockDriverState *bs) { return 1; } int bdrv_has_zero_init(BlockDriverState *bs) { assert(bs->drv); /* If BS is a copy on write image, it is initialized to the contents of the base image, which may not be zeroes. */ if (bs->backing_hd) { return 0; } if (bs->drv->bdrv_has_zero_init) { return bs->drv->bdrv_has_zero_init(bs); } /* safe default */ return 0; } bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs) { BlockDriverInfo bdi; if (bs->backing_hd) { return false; } if (bdrv_get_info(bs, &bdi) == 0) { return bdi.unallocated_blocks_are_zero; } return false; } bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs) { BlockDriverInfo bdi; if (bs->backing_hd || !(bs->open_flags & BDRV_O_UNMAP)) { return false; } if (bdrv_get_info(bs, &bdi) == 0) { return bdi.can_write_zeroes_with_unmap; } return false; } typedef struct BdrvCoGetBlockStatusData { BlockDriverState *bs; BlockDriverState *base; int64_t sector_num; int nb_sectors; int *pnum; int64_t ret; bool done; } BdrvCoGetBlockStatusData; /* * Returns true iff the specified sector is present in the disk image. Drivers * not implementing the functionality are assumed to not support backing files, * hence all their sectors are reported as allocated. * * If 'sector_num' is beyond the end of the disk image the return value is 0 * and 'pnum' is set to 0. * * 'pnum' is set to the number of sectors (including and immediately following * the specified sector) that are known to be in the same * allocated/unallocated state. * * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes * beyond the end of the disk image it will be clamped. 
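 *
 * Illustrative example of the returned bit mask: a result of
 * BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | (x * BDRV_SECTOR_SIZE)
 * means the sectors are allocated and stored in bs->file starting at
 * byte offset x * BDRV_SECTOR_SIZE.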
*/ static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum) { int64_t total_sectors; int64_t n; int64_t ret, ret2; total_sectors = bdrv_nb_sectors(bs); if (total_sectors < 0) { return total_sectors; } if (sector_num >= total_sectors) { *pnum = 0; return 0; } n = total_sectors - sector_num; if (n < nb_sectors) { nb_sectors = n; } if (!bs->drv->bdrv_co_get_block_status) { *pnum = nb_sectors; ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED; if (bs->drv->protocol_name) { ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE); } return ret; } ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum); if (ret < 0) { *pnum = 0; return ret; } if (ret & BDRV_BLOCK_RAW) { assert(ret & BDRV_BLOCK_OFFSET_VALID); return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS, *pnum, pnum); } if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) { ret |= BDRV_BLOCK_ALLOCATED; } if (!(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO)) { if (bdrv_unallocated_blocks_are_zero(bs)) { ret |= BDRV_BLOCK_ZERO; } else if (bs->backing_hd) { BlockDriverState *bs2 = bs->backing_hd; int64_t nb_sectors2 = bdrv_nb_sectors(bs2); if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) { ret |= BDRV_BLOCK_ZERO; } } } if (bs->file && (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) && (ret & BDRV_BLOCK_OFFSET_VALID)) { int file_pnum; ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS, *pnum, &file_pnum); if (ret2 >= 0) { /* Ignore errors. This is just providing extra information, it * is useful but not necessary. */ if (!file_pnum) { /* !file_pnum indicates an offset at or beyond the EOF; it is * perfectly valid for the format block driver to point to such * offsets, so catch it and mark everything as zero */ ret |= BDRV_BLOCK_ZERO; } else { /* Limit request to the range reported by the protocol driver */ *pnum = file_pnum; ret |= (ret2 & BDRV_BLOCK_ZERO); } } } return ret; } /* Coroutine wrapper for bdrv_get_block_status() */ static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque) { BdrvCoGetBlockStatusData *data = opaque; BlockDriverState *bs = data->bs; data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors, data->pnum); data->done = true; } /* * Synchronous wrapper around bdrv_co_get_block_status(). * * See bdrv_co_get_block_status() for details. */ int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum) { Coroutine *co; BdrvCoGetBlockStatusData data = { .bs = bs, .sector_num = sector_num, .nb_sectors = nb_sectors, .pnum = pnum, .done = false, }; if (qemu_in_coroutine()) { /* Fast-path if already in coroutine context */ bdrv_get_block_status_co_entry(&data); } else { AioContext *aio_context = bdrv_get_aio_context(bs); co = qemu_coroutine_create(bdrv_get_block_status_co_entry); qemu_coroutine_enter(co, &data); while (!data.done) { aio_poll(aio_context, true); } } return data.ret; } int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum) { int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum); if (ret < 0) { return ret; } return !!(ret & BDRV_BLOCK_ALLOCATED); } /* * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP] * * Return true if the given sector is allocated in any image between * BASE and TOP (inclusive). BASE can be NULL to check if the given * sector is allocated in any image of the chain. Return false otherwise. 
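 * Illustrative example: for the chain base <- inter <- top, a sector
 * written only in inter is reported as allocated by
 * bdrv_is_allocated_above(top, base, ...), but not by
 * bdrv_is_allocated_above(top, inter, ...), because the search stops at
 * (and excludes) 'base' itself.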
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            int64_t sector_num,
                            int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n = nb_sectors;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int pnum_inter;
        ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
                                &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        /*
         * [sector_num, nb_sectors] is unallocated on top but intermediate
         * might have
         *
         * [sector_num+x, nb_sectors] allocated.
         */
        if (n > pnum_inter &&
            (intermediate == top ||
             sector_num + pnum_inter < intermediate->total_sectors)) {
            n = pnum_inter;
        }

        intermediate = intermediate->backing_hd;
    }

    *pnum = n;
    return 0;
}

const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
{
    if (bs->backing_hd && bs->backing_hd->encrypted)
        return bs->backing_file;
    else if (bs->encrypted)
        return bs->filename;
    else
        return NULL;
}

void bdrv_get_backing_filename(BlockDriverState *bs,
                               char *filename, int filename_size)
{
    pstrcpy(filename, filename_size, bs->backing_file);
}

int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
                          const uint8_t *buf, int nb_sectors)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_write_compressed)
        return -ENOTSUP;
    if (bdrv_check_request(bs, sector_num, nb_sectors))
        return -EIO;

    assert(QLIST_EMPTY(&bs->dirty_bitmaps));

    return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
}

int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_get_info)
        return -ENOTSUP;
    memset(bdi, 0, sizeof(*bdi));
    return drv->bdrv_get_info(bs, bdi);
}

ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (drv && drv->bdrv_get_specific_info) {
        return drv->bdrv_get_specific_info(bs);
    }
    return NULL;
}

int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = size,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_writev_vmstate(bs, &qiov, pos);
}

int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    } else if (drv->bdrv_save_vmstate) {
        return drv->bdrv_save_vmstate(bs, qiov, pos);
    } else if (bs->file) {
        return bdrv_writev_vmstate(bs->file, qiov, pos);
    }

    return -ENOTSUP;
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (drv->bdrv_load_vmstate)
        return drv->bdrv_load_vmstate(bs, buf, pos, size);
    if (bs->file)
        return bdrv_load_vmstate(bs->file, buf, pos, size);
    return -ENOTSUP;
}

void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
{
    if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) {
        return;
    }

    bs->drv->bdrv_debug_event(bs, event);
}

int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
                          const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) {
        return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
    }

    return -ENOTSUP;
}

int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_remove_breakpoint) {
        bs
= bs->file; } if (bs && bs->drv && bs->drv->bdrv_debug_remove_breakpoint) { return bs->drv->bdrv_debug_remove_breakpoint(bs, tag); } return -ENOTSUP; } int bdrv_debug_resume(BlockDriverState *bs, const char *tag) { while (bs && (!bs->drv || !bs->drv->bdrv_debug_resume)) { bs = bs->file; } if (bs && bs->drv && bs->drv->bdrv_debug_resume) { return bs->drv->bdrv_debug_resume(bs, tag); } return -ENOTSUP; } bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag) { while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) { bs = bs->file; } if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) { return bs->drv->bdrv_debug_is_suspended(bs, tag); } return false; } int bdrv_is_snapshot(BlockDriverState *bs) { return !!(bs->open_flags & BDRV_O_SNAPSHOT); } /* backing_file can either be relative, or absolute, or a protocol. If it is * relative, it must be relative to the chain. So, passing in bs->filename * from a BDS as backing_file should not be done, as that may be relative to * the CWD rather than the chain. */ BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs, const char *backing_file) { char *filename_full = NULL; char *backing_file_full = NULL; char *filename_tmp = NULL; int is_protocol = 0; BlockDriverState *curr_bs = NULL; BlockDriverState *retval = NULL; if (!bs || !bs->drv || !backing_file) { return NULL; } filename_full = g_malloc(PATH_MAX); backing_file_full = g_malloc(PATH_MAX); filename_tmp = g_malloc(PATH_MAX); is_protocol = path_has_protocol(backing_file); for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) { /* If either of the filename paths is actually a protocol, then * compare unmodified paths; otherwise make paths relative */ if (is_protocol || path_has_protocol(curr_bs->backing_file)) { if (strcmp(backing_file, curr_bs->backing_file) == 0) { retval = curr_bs->backing_hd; break; } } else { /* If not an absolute filename path, make it relative to the current * image's filename path */ path_combine(filename_tmp, PATH_MAX, curr_bs->filename, backing_file); /* We are going to compare absolute pathnames */ if (!realpath(filename_tmp, filename_full)) { continue; } /* We need to make sure the backing filename we are comparing against * is relative to the current image filename (or absolute) */ path_combine(filename_tmp, PATH_MAX, curr_bs->filename, curr_bs->backing_file); if (!realpath(filename_tmp, backing_file_full)) { continue; } if (strcmp(backing_file_full, filename_full) == 0) { retval = curr_bs->backing_hd; break; } } } g_free(filename_full); g_free(backing_file_full); g_free(filename_tmp); return retval; } int bdrv_get_backing_file_depth(BlockDriverState *bs) { if (!bs->drv) { return 0; } if (!bs->backing_hd) { return 0; } return 1 + bdrv_get_backing_file_depth(bs->backing_hd); } /**************************************************************/ /* async I/Os */ BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BlockCompletionFunc *cb, void *opaque) { trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque); return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0, cb, opaque, false); } BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BlockCompletionFunc *cb, void *opaque) { trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque); return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0, cb, opaque, true); } BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs, int64_t sector_num, int nb_sectors, 
                                  BdrvRequestFlags flags,
                                  BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
                                 BDRV_REQ_ZERO_WRITE | flags,
                                 cb, opaque, true);
}

typedef struct MultiwriteCB {
    int error;
    int num_requests;
    int num_callbacks;
    struct {
        BlockCompletionFunc *cb;
        void *opaque;
        QEMUIOVector *free_qiov;
    } callbacks[];
} MultiwriteCB;

static void multiwrite_user_cb(MultiwriteCB *mcb)
{
    int i;

    for (i = 0; i < mcb->num_callbacks; i++) {
        mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
        if (mcb->callbacks[i].free_qiov) {
            qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
        }
        g_free(mcb->callbacks[i].free_qiov);
    }
}

static void multiwrite_cb(void *opaque, int ret)
{
    MultiwriteCB *mcb = opaque;

    trace_multiwrite_cb(mcb, ret);

    if (ret < 0 && !mcb->error) {
        mcb->error = ret;
    }

    mcb->num_requests--;
    if (mcb->num_requests == 0) {
        multiwrite_user_cb(mcb);
        g_free(mcb);
    }
}

static int multiwrite_req_compare(const void *a, const void *b)
{
    const BlockRequest *req1 = a, *req2 = b;

    /*
     * Note that we can't simply subtract req2->sector from req1->sector
     * here as that could overflow the return value.
     */
    if (req1->sector > req2->sector) {
        return 1;
    } else if (req1->sector < req2->sector) {
        return -1;
    } else {
        return 0;
    }
}

/*
 * Takes a bunch of requests and tries to merge them. Returns the number of
 * requests that remain after merging.
 */
static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
    int num_reqs, MultiwriteCB *mcb)
{
    int i, outidx;

    // Sort requests by start sector
    qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);

    // Check if adjacent requests touch the same clusters. If so, combine them,
    // filling up gaps with zero sectors.
    outidx = 0;
    for (i = 1; i < num_reqs; i++) {
        int merge = 0;
        int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;

        // Handle exactly sequential writes and overlapping writes.
        if (reqs[i].sector <= oldreq_last) {
            merge = 1;
        }

        if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
            merge = 0;
        }

        if (merge) {
            size_t size;
            QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
            qemu_iovec_init(qiov,
                reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);

            // Add the first request to the merged one. If the requests are
            // overlapping, drop the last sectors of the first request.
            size = (reqs[i].sector - reqs[outidx].sector) << 9;
            qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);

            // We shouldn't need to add any zeros between the two requests
            assert(reqs[i].sector <= oldreq_last);

            // Add the second request
            qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);

            // Add tail of first request, if necessary
            if (qiov->size < reqs[outidx].qiov->size) {
                qemu_iovec_concat(qiov, reqs[outidx].qiov, qiov->size,
                    reqs[outidx].qiov->size - qiov->size);
            }

            reqs[outidx].nb_sectors = qiov->size >> 9;
            reqs[outidx].qiov = qiov;

            mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
        } else {
            outidx++;
            reqs[outidx].sector     = reqs[i].sector;
            reqs[outidx].nb_sectors = reqs[i].nb_sectors;
            reqs[outidx].qiov       = reqs[i].qiov;
        }
    }

    return outidx + 1;
}

/*
 * Submit multiple AIO write requests at once.
 *
 * On success, the function returns 0 and all requests in the reqs array have
 * been submitted. In the error case this function returns -1, and any of the
 * requests may or may not be submitted yet. In particular, this means that the
 * callback will be called for some of the requests, for others it won't.
 * The caller must check the error field of the BlockRequest to wait for the
 * right callbacks (if error != 0, no callback will be called).
 *
 * The implementation may modify the contents of the reqs array, e.g. to merge
 * requests. However, the fields opaque and error are left unmodified as they
 * are used to signal failure for a single request to the caller.
 */
int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
{
    MultiwriteCB *mcb;
    int i;

    /* don't submit writes if we don't have a medium */
    if (bs->drv == NULL) {
        for (i = 0; i < num_reqs; i++) {
            reqs[i].error = -ENOMEDIUM;
        }
        return -1;
    }

    if (num_reqs == 0) {
        return 0;
    }

    // Create MultiwriteCB structure
    mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
    mcb->num_requests = 0;
    mcb->num_callbacks = num_reqs;

    for (i = 0; i < num_reqs; i++) {
        mcb->callbacks[i].cb = reqs[i].cb;
        mcb->callbacks[i].opaque = reqs[i].opaque;
    }

    // Check for mergeable requests
    num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);

    trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);

    /* Run the aio requests. */
    mcb->num_requests = num_reqs;
    for (i = 0; i < num_reqs; i++) {
        bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
                              reqs[i].nb_sectors, reqs[i].flags,
                              multiwrite_cb, mcb,
                              true);
    }

    return 0;
}

void bdrv_aio_cancel(BlockAIOCB *acb)
{
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}

/* Async version of aio cancel. The caller is not blocked if the acb implements
 * cancel_async, otherwise we do nothing and let the request complete normally.
 * In either case the completion callback must be called.
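 * Callers that must not proceed until the request is gone should use
 * bdrv_aio_cancel() above, which polls the AioContext until the ACB's
 * reference count drops.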
*/ void bdrv_aio_cancel_async(BlockAIOCB *acb) { if (acb->aiocb_info->cancel_async) { acb->aiocb_info->cancel_async(acb); } } /**************************************************************/ /* async block device emulation */ typedef struct BlockAIOCBSync { BlockAIOCB common; QEMUBH *bh; int ret; /* vector translation state */ QEMUIOVector *qiov; uint8_t *bounce; int is_write; } BlockAIOCBSync; static const AIOCBInfo bdrv_em_aiocb_info = { .aiocb_size = sizeof(BlockAIOCBSync), }; static void bdrv_aio_bh_cb(void *opaque) { BlockAIOCBSync *acb = opaque; if (!acb->is_write && acb->ret >= 0) { qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size); } qemu_vfree(acb->bounce); acb->common.cb(acb->common.opaque, acb->ret); qemu_bh_delete(acb->bh); acb->bh = NULL; qemu_aio_unref(acb); } static BlockAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BlockCompletionFunc *cb, void *opaque, int is_write) { BlockAIOCBSync *acb; acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque); acb->is_write = is_write; acb->qiov = qiov; acb->bounce = qemu_try_blockalign(bs, qiov->size); acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb); if (acb->bounce == NULL) { acb->ret = -ENOMEM; } else if (is_write) { qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size); acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors); } else { acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors); } qemu_bh_schedule(acb->bh); return &acb->common; } static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BlockCompletionFunc *cb, void *opaque) { return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0); } static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BlockCompletionFunc *cb, void *opaque) { return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1); } typedef struct BlockAIOCBCoroutine { BlockAIOCB common; BlockRequest req; bool is_write; bool *done; QEMUBH* bh; } BlockAIOCBCoroutine; static const AIOCBInfo bdrv_em_co_aiocb_info = { .aiocb_size = sizeof(BlockAIOCBCoroutine), }; static void bdrv_co_em_bh(void *opaque) { BlockAIOCBCoroutine *acb = opaque; acb->common.cb(acb->common.opaque, acb->req.error); qemu_bh_delete(acb->bh); qemu_aio_unref(acb); } /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */ static void coroutine_fn bdrv_co_do_rw(void *opaque) { BlockAIOCBCoroutine *acb = opaque; BlockDriverState *bs = acb->common.bs; if (!acb->is_write) { acb->req.error = bdrv_co_do_readv(bs, acb->req.sector, acb->req.nb_sectors, acb->req.qiov, acb->req.flags); } else { acb->req.error = bdrv_co_do_writev(bs, acb->req.sector, acb->req.nb_sectors, acb->req.qiov, acb->req.flags); } acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb); qemu_bh_schedule(acb->bh); } static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque, bool is_write) { Coroutine *co; BlockAIOCBCoroutine *acb; acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); acb->req.sector = sector_num; acb->req.nb_sectors = nb_sectors; acb->req.qiov = qiov; acb->req.flags = flags; acb->is_write = is_write; co = qemu_coroutine_create(bdrv_co_do_rw); qemu_coroutine_enter(co, acb); return &acb->common; } static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque) { 
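    /* Coroutine entry point for bdrv_aio_flush(): perform the flush, then
     * defer the completion callback to a bottom half so that it runs
     * outside coroutine context. */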
BlockAIOCBCoroutine *acb = opaque; BlockDriverState *bs = acb->common.bs; acb->req.error = bdrv_co_flush(bs); acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb); qemu_bh_schedule(acb->bh); } BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs, BlockCompletionFunc *cb, void *opaque) { trace_bdrv_aio_flush(bs, opaque); Coroutine *co; BlockAIOCBCoroutine *acb; acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); co = qemu_coroutine_create(bdrv_aio_flush_co_entry); qemu_coroutine_enter(co, acb); return &acb->common; } static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque) { BlockAIOCBCoroutine *acb = opaque; BlockDriverState *bs = acb->common.bs; acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors); acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb); qemu_bh_schedule(acb->bh); } BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors, BlockCompletionFunc *cb, void *opaque) { Coroutine *co; BlockAIOCBCoroutine *acb; trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque); acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); acb->req.sector = sector_num; acb->req.nb_sectors = nb_sectors; co = qemu_coroutine_create(bdrv_aio_discard_co_entry); qemu_coroutine_enter(co, acb); return &acb->common; } void bdrv_init(void) { module_call_init(MODULE_INIT_BLOCK); } void bdrv_init_with_whitelist(void) { use_bdrv_whitelist = 1; bdrv_init(); } void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs, BlockCompletionFunc *cb, void *opaque) { BlockAIOCB *acb; acb = g_slice_alloc(aiocb_info->aiocb_size); acb->aiocb_info = aiocb_info; acb->bs = bs; acb->cb = cb; acb->opaque = opaque; acb->refcnt = 1; return acb; } void qemu_aio_ref(void *p) { BlockAIOCB *acb = p; acb->refcnt++; } void qemu_aio_unref(void *p) { BlockAIOCB *acb = p; assert(acb->refcnt > 0); if (--acb->refcnt == 0) { g_slice_free1(acb->aiocb_info->aiocb_size, acb); } } /**************************************************************/ /* Coroutine block device emulation */ typedef struct CoroutineIOCompletion { Coroutine *coroutine; int ret; } CoroutineIOCompletion; static void bdrv_co_io_em_complete(void *opaque, int ret) { CoroutineIOCompletion *co = opaque; co->ret = ret; qemu_coroutine_enter(co->coroutine, NULL); } static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *iov, bool is_write) { CoroutineIOCompletion co = { .coroutine = qemu_coroutine_self(), }; BlockAIOCB *acb; if (is_write) { acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors, bdrv_co_io_em_complete, &co); } else { acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors, bdrv_co_io_em_complete, &co); } trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb); if (!acb) { return -EIO; } qemu_coroutine_yield(); return co.ret; } static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *iov) { return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false); } static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *iov) { return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true); } static void coroutine_fn bdrv_flush_co_entry(void *opaque) { RwCo *rwco = opaque; rwco->ret = bdrv_co_flush(rwco->bs); } int coroutine_fn bdrv_co_flush(BlockDriverState *bs) { int ret; if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) { return 0; } /* Write back cached data to the OS even with 
cache=unsafe */ BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS); if (bs->drv->bdrv_co_flush_to_os) { ret = bs->drv->bdrv_co_flush_to_os(bs); if (ret < 0) { return ret; } } /* But don't actually force it to the disk with cache=unsafe */ if (bs->open_flags & BDRV_O_NO_FLUSH) { goto flush_parent; } BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK); if (bs->drv->bdrv_co_flush_to_disk) { ret = bs->drv->bdrv_co_flush_to_disk(bs); } else if (bs->drv->bdrv_aio_flush) { BlockAIOCB *acb; CoroutineIOCompletion co = { .coroutine = qemu_coroutine_self(), }; acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co); if (acb == NULL) { ret = -EIO; } else { qemu_coroutine_yield(); ret = co.ret; } } else { /* * Some block drivers always operate in either writethrough or unsafe * mode and don't support bdrv_flush therefore. Usually qemu doesn't * know how the server works (because the behaviour is hardcoded or * depends on server-side configuration), so we can't ensure that * everything is safe on disk. Returning an error doesn't work because * that would break guests even if the server operates in writethrough * mode. * * Let's hope the user knows what he's doing. */ ret = 0; } if (ret < 0) { return ret; } /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH * in the case of cache=unsafe, so there are no useless flushes. */ flush_parent: return bdrv_co_flush(bs->file); } void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp) { Error *local_err = NULL; int ret; if (!bs->drv) { return; } if (!(bs->open_flags & BDRV_O_INCOMING)) { return; } bs->open_flags &= ~BDRV_O_INCOMING; if (bs->drv->bdrv_invalidate_cache) { bs->drv->bdrv_invalidate_cache(bs, &local_err); } else if (bs->file) { bdrv_invalidate_cache(bs->file, &local_err); } if (local_err) { error_propagate(errp, local_err); return; } ret = refresh_total_sectors(bs, bs->total_sectors); if (ret < 0) { error_setg_errno(errp, -ret, "Could not refresh total sector count"); return; } } void bdrv_invalidate_cache_all(Error **errp) { BlockDriverState *bs; Error *local_err = NULL; QTAILQ_FOREACH(bs, &bdrv_states, device_list) { AioContext *aio_context = bdrv_get_aio_context(bs); aio_context_acquire(aio_context); bdrv_invalidate_cache(bs, &local_err); aio_context_release(aio_context); if (local_err) { error_propagate(errp, local_err); return; } } } int bdrv_flush(BlockDriverState *bs) { Coroutine *co; RwCo rwco = { .bs = bs, .ret = NOT_DONE, }; if (qemu_in_coroutine()) { /* Fast-path if already in coroutine context */ bdrv_flush_co_entry(&rwco); } else { AioContext *aio_context = bdrv_get_aio_context(bs); co = qemu_coroutine_create(bdrv_flush_co_entry); qemu_coroutine_enter(co, &rwco); while (rwco.ret == NOT_DONE) { aio_poll(aio_context, true); } } return rwco.ret; } typedef struct DiscardCo { BlockDriverState *bs; int64_t sector_num; int nb_sectors; int ret; } DiscardCo; static void coroutine_fn bdrv_discard_co_entry(void *opaque) { DiscardCo *rwco = opaque; rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors); } /* if no limit is specified in the BlockLimits use a default * of 32768 512-byte sectors (16 MiB) per request. */ #define MAX_DISCARD_DEFAULT 32768 int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors) { int max_discard; if (!bs->drv) { return -ENOMEDIUM; } else if (bdrv_check_request(bs, sector_num, nb_sectors)) { return -EIO; } else if (bs->read_only) { return -EROFS; } bdrv_reset_dirty(bs, sector_num, nb_sectors); /* Do nothing if disabled. 
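 * Discard is advisory: skipping it never affects correctness, only the
 * amount of space the underlying storage can reclaim.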
 */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
        return 0;
    }

    max_discard = bs->bl.max_discard ? bs->bl.max_discard : MAX_DISCARD_DEFAULT;
    while (nb_sectors > 0) {
        int ret;
        int num = nb_sectors;

        /* align request */
        if (bs->bl.discard_alignment &&
            num >= bs->bl.discard_alignment &&
            sector_num % bs->bl.discard_alignment) {
            if (num > bs->bl.discard_alignment) {
                num = bs->bl.discard_alignment;
            }
            num -= sector_num % bs->bl.discard_alignment;
        }

        /* limit request size */
        if (num > max_discard) {
            num = max_discard;
        }

        if (bs->drv->bdrv_co_discard) {
            ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            /* Discard only the current fragment (num); the loop above may
             * have shortened it for alignment and max_discard. */
            acb = bs->drv->bdrv_aio_discard(bs, sector_num, num,
                                            bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                return -EIO;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            return ret;
        }

        sector_num += num;
        nb_sectors -= num;
    }
    return 0;
}

int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
    Coroutine *co;
    DiscardCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_discard_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_discard_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}

/**************************************************************/
/* removable device support */

/**
 * Return TRUE if the media is present
 */
int bdrv_is_inserted(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (!drv)
        return 0;
    if (!drv->bdrv_is_inserted)
        return 1;
    return drv->bdrv_is_inserted(bs);
}

/**
 * Return whether the media changed since the last call to this
 * function, or -ENOTSUP if we don't know.  Most drivers don't know.
 */
int bdrv_media_changed(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_media_changed) {
        return drv->bdrv_media_changed(bs);
    }
    return -ENOTSUP;
}

/**
 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
 */
void bdrv_eject(BlockDriverState *bs, bool eject_flag)
{
    BlockDriver *drv = bs->drv;
    const char *device_name;

    if (drv && drv->bdrv_eject) {
        drv->bdrv_eject(bs, eject_flag);
    }

    device_name = bdrv_get_device_name(bs);
    if (device_name[0] != '\0') {
        qapi_event_send_device_tray_moved(device_name,
                                          eject_flag, &error_abort);
    }
}

/**
 * Lock or unlock the media (if it is locked, the user won't be able
 * to eject it manually).
*/ void bdrv_lock_medium(BlockDriverState *bs, bool locked) { BlockDriver *drv = bs->drv; trace_bdrv_lock_medium(bs, locked); if (drv && drv->bdrv_lock_medium) { drv->bdrv_lock_medium(bs, locked); } } /* needed for generic scsi interface */ int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf) { BlockDriver *drv = bs->drv; if (drv && drv->bdrv_ioctl) return drv->bdrv_ioctl(bs, req, buf); return -ENOTSUP; } BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs, unsigned long int req, void *buf, BlockCompletionFunc *cb, void *opaque) { BlockDriver *drv = bs->drv; if (drv && drv->bdrv_aio_ioctl) return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque); return NULL; } void bdrv_set_guest_block_size(BlockDriverState *bs, int align) { bs->guest_block_size = align; } void *qemu_blockalign(BlockDriverState *bs, size_t size) { return qemu_memalign(bdrv_opt_mem_align(bs), size); } void *qemu_blockalign0(BlockDriverState *bs, size_t size) { return memset(qemu_blockalign(bs, size), 0, size); } void *qemu_try_blockalign(BlockDriverState *bs, size_t size) { size_t align = bdrv_opt_mem_align(bs); /* Ensure that NULL is never returned on success */ assert(align > 0); if (size == 0) { size = align; } return qemu_try_memalign(align, size); } void *qemu_try_blockalign0(BlockDriverState *bs, size_t size) { void *mem = qemu_try_blockalign(bs, size); if (mem) { memset(mem, 0, size); } return mem; } /* * Check if all memory in this vector is sector aligned. */ bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov) { int i; size_t alignment = bdrv_opt_mem_align(bs); for (i = 0; i < qiov->niov; i++) { if ((uintptr_t) qiov->iov[i].iov_base % alignment) { return false; } if (qiov->iov[i].iov_len % alignment) { return false; } } return true; } BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity, Error **errp) { int64_t bitmap_size; BdrvDirtyBitmap *bitmap; assert((granularity & (granularity - 1)) == 0); granularity >>= BDRV_SECTOR_BITS; assert(granularity); bitmap_size = bdrv_nb_sectors(bs); if (bitmap_size < 0) { error_setg_errno(errp, -bitmap_size, "could not get length of device"); errno = -bitmap_size; return NULL; } bitmap = g_new0(BdrvDirtyBitmap, 1); bitmap->bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1); QLIST_INSERT_HEAD(&bs->dirty_bitmaps, bitmap, list); return bitmap; } void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap) { BdrvDirtyBitmap *bm, *next; QLIST_FOREACH_SAFE(bm, &bs->dirty_bitmaps, list, next) { if (bm == bitmap) { QLIST_REMOVE(bitmap, list); hbitmap_free(bitmap->bitmap); g_free(bitmap); return; } } } BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs) { BdrvDirtyBitmap *bm; BlockDirtyInfoList *list = NULL; BlockDirtyInfoList **plist = &list; QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) { BlockDirtyInfo *info = g_new0(BlockDirtyInfo, 1); BlockDirtyInfoList *entry = g_new0(BlockDirtyInfoList, 1); info->count = bdrv_get_dirty_count(bs, bm); info->granularity = ((int64_t) BDRV_SECTOR_SIZE << hbitmap_granularity(bm->bitmap)); entry->value = info; *plist = entry; plist = &entry->next; } return list; } int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector) { if (bitmap) { return hbitmap_get(bitmap->bitmap, sector); } else { return 0; } } void bdrv_dirty_iter_init(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, HBitmapIter *hbi) { hbitmap_iter_init(hbi, bitmap->bitmap, 0); } void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors) { BdrvDirtyBitmap *bitmap; 
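    /* Mark the range dirty in every dirty bitmap attached to this BDS. */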
QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) { hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors); } } void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors) { BdrvDirtyBitmap *bitmap; QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) { hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors); } } int64_t bdrv_get_dirty_count(BlockDriverState *bs, BdrvDirtyBitmap *bitmap) { return hbitmap_count(bitmap->bitmap); } /* Get a reference to bs */ void bdrv_ref(BlockDriverState *bs) { bs->refcnt++; } /* Release a previously grabbed reference to bs. * If after releasing, reference count is zero, the BlockDriverState is * deleted. */ void bdrv_unref(BlockDriverState *bs) { if (!bs) { return; } assert(bs->refcnt > 0); if (--bs->refcnt == 0) { bdrv_delete(bs); } } struct BdrvOpBlocker { Error *reason; QLIST_ENTRY(BdrvOpBlocker) list; }; bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp) { BdrvOpBlocker *blocker; assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX); if (!QLIST_EMPTY(&bs->op_blockers[op])) { blocker = QLIST_FIRST(&bs->op_blockers[op]); if (errp) { error_setg(errp, "Device '%s' is busy: %s", bdrv_get_device_name(bs), error_get_pretty(blocker->reason)); } return true; } return false; } void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason) { BdrvOpBlocker *blocker; assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX); blocker = g_new0(BdrvOpBlocker, 1); blocker->reason = reason; QLIST_INSERT_HEAD(&bs->op_blockers[op], blocker, list); } void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason) { BdrvOpBlocker *blocker, *next; assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX); QLIST_FOREACH_SAFE(blocker, &bs->op_blockers[op], list, next) { if (blocker->reason == reason) { QLIST_REMOVE(blocker, list); g_free(blocker); } } } void bdrv_op_block_all(BlockDriverState *bs, Error *reason) { int i; for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) { bdrv_op_block(bs, i, reason); } } void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason) { int i; for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) { bdrv_op_unblock(bs, i, reason); } } bool bdrv_op_blocker_is_empty(BlockDriverState *bs) { int i; for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) { if (!QLIST_EMPTY(&bs->op_blockers[i])) { return false; } } return true; } void bdrv_iostatus_enable(BlockDriverState *bs) { bs->iostatus_enabled = true; bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK; } /* The I/O status is only enabled if the drive explicitly * enables it _and_ the VM is configured to stop on errors */ bool bdrv_iostatus_is_enabled(const BlockDriverState *bs) { return (bs->iostatus_enabled && (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC || bs->on_write_error == BLOCKDEV_ON_ERROR_STOP || bs->on_read_error == BLOCKDEV_ON_ERROR_STOP)); } void bdrv_iostatus_disable(BlockDriverState *bs) { bs->iostatus_enabled = false; } void bdrv_iostatus_reset(BlockDriverState *bs) { if (bdrv_iostatus_is_enabled(bs)) { bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK; if (bs->job) { block_job_iostatus_reset(bs->job); } } } void bdrv_iostatus_set_err(BlockDriverState *bs, int error) { assert(bdrv_iostatus_is_enabled(bs)); if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) { bs->iostatus = error == ENOSPC ? 
BLOCK_DEVICE_IO_STATUS_NOSPACE : BLOCK_DEVICE_IO_STATUS_FAILED; } } void bdrv_img_create(const char *filename, const char *fmt, const char *base_filename, const char *base_fmt, char *options, uint64_t img_size, int flags, Error **errp, bool quiet) { QemuOptsList *create_opts = NULL; QemuOpts *opts = NULL; const char *backing_fmt, *backing_file; int64_t size; BlockDriver *drv, *proto_drv; BlockDriver *backing_drv = NULL; Error *local_err = NULL; int ret = 0; /* Find driver and parse its options */ drv = bdrv_find_format(fmt); if (!drv) { error_setg(errp, "Unknown file format '%s'", fmt); return; } proto_drv = bdrv_find_protocol(filename, true); if (!proto_drv) { error_setg(errp, "Unknown protocol '%s'", filename); return; } create_opts = qemu_opts_append(create_opts, drv->create_opts); create_opts = qemu_opts_append(create_opts, proto_drv->create_opts); /* Create parameter list with default values */ opts = qemu_opts_create(create_opts, NULL, 0, &error_abort); qemu_opt_set_number(opts, BLOCK_OPT_SIZE, img_size); /* Parse -o options */ if (options) { if (qemu_opts_do_parse(opts, options, NULL) != 0) { error_setg(errp, "Invalid options for file format '%s'", fmt); goto out; } } if (base_filename) { if (qemu_opt_set(opts, BLOCK_OPT_BACKING_FILE, base_filename)) { error_setg(errp, "Backing file not supported for file format '%s'", fmt); goto out; } } if (base_fmt) { if (qemu_opt_set(opts, BLOCK_OPT_BACKING_FMT, base_fmt)) { error_setg(errp, "Backing file format not supported for file " "format '%s'", fmt); goto out; } } backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE); if (backing_file) { if (!strcmp(filename, backing_file)) { error_setg(errp, "Error: Trying to create an image with the " "same filename as the backing file"); goto out; } } backing_fmt = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT); if (backing_fmt) { backing_drv = bdrv_find_format(backing_fmt); if (!backing_drv) { error_setg(errp, "Unknown backing file format '%s'", backing_fmt); goto out; } } // The size for the image must always be specified, with one exception: // If we are using a backing file, we can obtain the size from there size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0); if (size == -1) { if (backing_file) { BlockDriverState *bs; int64_t size; int back_flags; /* backing files always opened read-only */ back_flags = flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING); bs = NULL; ret = bdrv_open(&bs, backing_file, NULL, NULL, back_flags, backing_drv, &local_err); if (ret < 0) { error_setg_errno(errp, -ret, "Could not open '%s': %s", backing_file, error_get_pretty(local_err)); error_free(local_err); local_err = NULL; goto out; } size = bdrv_getlength(bs); if (size < 0) { error_setg_errno(errp, -size, "Could not get size of '%s'", backing_file); bdrv_unref(bs); goto out; } qemu_opt_set_number(opts, BLOCK_OPT_SIZE, size); bdrv_unref(bs); } else { error_setg(errp, "Image creation needs a size parameter"); goto out; } } if (!quiet) { printf("Formatting '%s', fmt=%s ", filename, fmt); qemu_opts_print(opts); puts(""); } ret = bdrv_create(drv, filename, opts, &local_err); if (ret == -EFBIG) { /* This is generally a better message than whatever the driver would * deliver (especially because of the cluster_size_hint), since that * is most probably not much different from "image too large". 
*/ const char *cluster_size_hint = ""; if (qemu_opt_get_size(opts, BLOCK_OPT_CLUSTER_SIZE, 0)) { cluster_size_hint = " (try using a larger cluster size)"; } error_setg(errp, "The image size is too large for file format '%s'" "%s", fmt, cluster_size_hint); error_free(local_err); local_err = NULL; } out: qemu_opts_del(opts); qemu_opts_free(create_opts); if (local_err) { error_propagate(errp, local_err); } } AioContext *bdrv_get_aio_context(BlockDriverState *bs) { return bs->aio_context; } void bdrv_detach_aio_context(BlockDriverState *bs) { BdrvAioNotifier *baf; if (!bs->drv) { return; } QLIST_FOREACH(baf, &bs->aio_notifiers, list) { baf->detach_aio_context(baf->opaque); } if (bs->io_limits_enabled) { throttle_detach_aio_context(&bs->throttle_state); } if (bs->drv->bdrv_detach_aio_context) { bs->drv->bdrv_detach_aio_context(bs); } if (bs->file) { bdrv_detach_aio_context(bs->file); } if (bs->backing_hd) { bdrv_detach_aio_context(bs->backing_hd); } bs->aio_context = NULL; } void bdrv_attach_aio_context(BlockDriverState *bs, AioContext *new_context) { BdrvAioNotifier *ban; if (!bs->drv) { return; } bs->aio_context = new_context; if (bs->backing_hd) { bdrv_attach_aio_context(bs->backing_hd, new_context); } if (bs->file) { bdrv_attach_aio_context(bs->file, new_context); } if (bs->drv->bdrv_attach_aio_context) { bs->drv->bdrv_attach_aio_context(bs, new_context); } if (bs->io_limits_enabled) { throttle_attach_aio_context(&bs->throttle_state, new_context); } QLIST_FOREACH(ban, &bs->aio_notifiers, list) { ban->attached_aio_context(new_context, ban->opaque); } } void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context) { bdrv_drain_all(); /* ensure there are no in-flight requests */ bdrv_detach_aio_context(bs); /* This function executes in the old AioContext so acquire the new one in * case it runs in a different thread. */ aio_context_acquire(new_context); bdrv_attach_aio_context(bs, new_context); aio_context_release(new_context); } void bdrv_add_aio_context_notifier(BlockDriverState *bs, void (*attached_aio_context)(AioContext *new_context, void *opaque), void (*detach_aio_context)(void *opaque), void *opaque) { BdrvAioNotifier *ban = g_new(BdrvAioNotifier, 1); *ban = (BdrvAioNotifier){ .attached_aio_context = attached_aio_context, .detach_aio_context = detach_aio_context, .opaque = opaque }; QLIST_INSERT_HEAD(&bs->aio_notifiers, ban, list); } void bdrv_remove_aio_context_notifier(BlockDriverState *bs, void (*attached_aio_context)(AioContext *, void *), void (*detach_aio_context)(void *), void *opaque) { BdrvAioNotifier *ban, *ban_next; QLIST_FOREACH_SAFE(ban, &bs->aio_notifiers, list, ban_next) { if (ban->attached_aio_context == attached_aio_context && ban->detach_aio_context == detach_aio_context && ban->opaque == opaque) { QLIST_REMOVE(ban, list); g_free(ban); return; } } abort(); } void bdrv_add_before_write_notifier(BlockDriverState *bs, NotifierWithReturn *notifier) { notifier_with_return_list_add(&bs->before_write_notifiers, notifier); } int bdrv_amend_options(BlockDriverState *bs, QemuOpts *opts) { if (!bs->drv->bdrv_amend_options) { return -ENOTSUP; } return bs->drv->bdrv_amend_options(bs, opts); } /* This function will be called by the bdrv_recurse_is_first_non_filter method * of block filter and by bdrv_is_first_non_filter. * It is used to test if the given bs is the candidate or recurse more in the * node graph. 
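 */

/*
 * Editor's sketch (not part of the original file): the paired notifier
 * pattern used by bdrv_add/remove_aio_context_notifier() above, reduced
 * to a standalone singly-linked list. All names here are hypothetical.
 */
#include <stdlib.h>

typedef struct FakeNotifier {
    void (*attached)(void *ctx, void *opaque);
    void (*detach)(void *opaque);
    void *opaque;
    struct FakeNotifier *next;
} FakeNotifier;

static FakeNotifier *fake_notifiers;

static void fake_notifier_add(void (*attached)(void *, void *),
                              void (*detach)(void *), void *opaque)
{
    FakeNotifier *n = malloc(sizeof(*n));

    if (!n) {
        abort();
    }
    n->attached = attached;
    n->detach = detach;
    n->opaque = opaque;
    n->next = fake_notifiers;
    fake_notifiers = n;
}

/* Removal matches all three fields, as the QEMU helper above does, so the
 * same callbacks can be registered repeatedly with different opaque
 * pointers; removing an entry that was never added is a caller bug. */
static void fake_notifier_remove(void (*attached)(void *, void *),
                                 void (*detach)(void *), void *opaque)
{
    for (FakeNotifier **p = &fake_notifiers; *p; p = &(*p)->next) {
        FakeNotifier *n = *p;

        if (n->attached == attached && n->detach == detach &&
            n->opaque == opaque) {
            *p = n->next;
            free(n);
            return;
        }
    }
    abort();
}

/*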
*/ bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs, BlockDriverState *candidate) { /* return false if basic checks fails */ if (!bs || !bs->drv) { return false; } /* the code reached a non block filter driver -> check if the bs is * the same as the candidate. It's the recursion termination condition. */ if (!bs->drv->is_filter) { return bs == candidate; } /* Down this path the driver is a block filter driver */ /* If the block filter recursion method is defined use it to recurse down * the node graph. */ if (bs->drv->bdrv_recurse_is_first_non_filter) { return bs->drv->bdrv_recurse_is_first_non_filter(bs, candidate); } /* the driver is a block filter but don't allow to recurse -> return false */ return false; } /* This function checks if the candidate is the first non filter bs down it's * bs chain. Since we don't have pointers to parents it explore all bs chains * from the top. Some filters can choose not to pass down the recursion. */ bool bdrv_is_first_non_filter(BlockDriverState *candidate) { BlockDriverState *bs; /* walk down the bs forest recursively */ QTAILQ_FOREACH(bs, &bdrv_states, device_list) { bool perm; /* try to recurse in this top level bs */ perm = bdrv_recurse_is_first_non_filter(bs, candidate); /* candidate is the first non filter */ if (perm) { return true; } } return false; } BlockDriverState *check_to_replace_node(const char *node_name, Error **errp) { BlockDriverState *to_replace_bs = bdrv_find_node(node_name); if (!to_replace_bs) { error_setg(errp, "Node name '%s' not found", node_name); return NULL; } if (bdrv_op_is_blocked(to_replace_bs, BLOCK_OP_TYPE_REPLACE, errp)) { return NULL; } /* We don't want arbitrary node of the BDS chain to be replaced only the top * most non filter in order to prevent data corruption. * Another benefit is that this tests exclude backing files which are * blocked by the backing blockers. */ if (!bdrv_is_first_non_filter(to_replace_bs)) { error_setg(errp, "Only top most non filter can be replaced"); return NULL; } return to_replace_bs; } void bdrv_io_plug(BlockDriverState *bs) { BlockDriver *drv = bs->drv; if (drv && drv->bdrv_io_plug) { drv->bdrv_io_plug(bs); } else if (bs->file) { bdrv_io_plug(bs->file); } } void bdrv_io_unplug(BlockDriverState *bs) { BlockDriver *drv = bs->drv; if (drv && drv->bdrv_io_unplug) { drv->bdrv_io_unplug(bs); } else if (bs->file) { bdrv_io_unplug(bs->file); } } void bdrv_flush_io_queue(BlockDriverState *bs) { BlockDriver *drv = bs->drv; if (drv && drv->bdrv_flush_io_queue) { drv->bdrv_flush_io_queue(bs); } else if (bs->file) { bdrv_flush_io_queue(bs->file); } } static bool append_open_options(QDict *d, BlockDriverState *bs) { const QDictEntry *entry; bool found_any = false; for (entry = qdict_first(bs->options); entry; entry = qdict_next(bs->options, entry)) { /* Only take options for this level and exclude all non-driver-specific * options */ if (!strchr(qdict_entry_key(entry), '.') && strcmp(qdict_entry_key(entry), "node-name")) { qobject_incref(qdict_entry_value(entry)); qdict_put_obj(d, qdict_entry_key(entry), qdict_entry_value(entry)); found_any = true; } } return found_any; } /* Updates the following BDS fields: * - exact_filename: A filename which may be used for opening a block device * which (mostly) equals the given BDS (even without any * other options; so reading and writing must return the same * results, but caching etc. 
may be different) * - full_open_options: Options which, when given when opening a block device * (without a filename), result in a BDS (mostly) * equalling the given one * - filename: If exact_filename is set, it is copied here. Otherwise, * full_open_options is converted to a JSON object, prefixed with * "json:" (for use through the JSON pseudo protocol) and put here. */ void bdrv_refresh_filename(BlockDriverState *bs) { BlockDriver *drv = bs->drv; QDict *opts; if (!drv) { return; } /* This BDS's file name will most probably depend on its file's name, so * refresh that first */ if (bs->file) { bdrv_refresh_filename(bs->file); } if (drv->bdrv_refresh_filename) { /* Obsolete information is of no use here, so drop the old file name * information before refreshing it */ bs->exact_filename[0] = '\0'; if (bs->full_open_options) { QDECREF(bs->full_open_options); bs->full_open_options = NULL; } drv->bdrv_refresh_filename(bs); } else if (bs->file) { /* Try to reconstruct valid information from the underlying file */ bool has_open_options; bs->exact_filename[0] = '\0'; if (bs->full_open_options) { QDECREF(bs->full_open_options); bs->full_open_options = NULL; } opts = qdict_new(); has_open_options = append_open_options(opts, bs); /* If no specific options have been given for this BDS, the filename of * the underlying file should suffice for this one as well */ if (bs->file->exact_filename[0] && !has_open_options) { strcpy(bs->exact_filename, bs->file->exact_filename); } /* Reconstructing the full options QDict is simple for most format block * drivers, as long as the full options are known for the underlying * file BDS. The full options QDict of that file BDS should somehow * contain a representation of the filename, therefore the following * suffices without querying the (exact_)filename of this BDS. */ if (bs->file->full_open_options) { qdict_put_obj(opts, "driver", QOBJECT(qstring_from_str(drv->format_name))); QINCREF(bs->file->full_open_options); qdict_put_obj(opts, "file", QOBJECT(bs->file->full_open_options)); bs->full_open_options = opts; } else { QDECREF(opts); } } else if (!bs->full_open_options && qdict_size(bs->options)) { /* There is no underlying file BDS (at least referenced by BDS.file), * so the full options QDict should be equal to the options given * specifically for this block device when it was opened (plus the * driver specification). * Because those options don't change, there is no need to update * full_open_options when it's already set. */ opts = qdict_new(); append_open_options(opts, bs); qdict_put_obj(opts, "driver", QOBJECT(qstring_from_str(drv->format_name))); if (bs->exact_filename[0]) { /* This may not work for all block protocol drivers (some may * require this filename to be parsed), but we have to find some * default solution here, so just include it. If some block driver * does not support pure options without any filename at all or * needs some special format of the options QDict, it needs to * implement the driver-specific bdrv_refresh_filename() function. 
*/ qdict_put_obj(opts, "filename", QOBJECT(qstring_from_str(bs->exact_filename))); } bs->full_open_options = opts; } if (bs->exact_filename[0]) { pstrcpy(bs->filename, sizeof(bs->filename), bs->exact_filename); } else if (bs->full_open_options) { QString *json = qobject_to_json(QOBJECT(bs->full_open_options)); snprintf(bs->filename, sizeof(bs->filename), "json:%s", qstring_get_str(json)); QDECREF(json); } } /* This accessor function purpose is to allow the device models to access the * BlockAcctStats structure embedded inside a BlockDriverState without being * aware of the BlockDriverState structure layout. * It will go away when the BlockAcctStats structure will be moved inside * the device models. */ BlockAcctStats *bdrv_get_stats(BlockDriverState *bs) { return &bs->stats; }
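/*
 * Editor's sketch (not part of the original file): the shape of the
 * "json:" pseudo-filename that bdrv_refresh_filename() above falls back
 * to when no plain filename can represent the node. The real code
 * serializes the full_open_options QDict with qobject_to_json(); this
 * standalone stand-in only shows the resulting format for a hypothetical
 * two-level driver/file chain.
 */
#include <stdio.h>

static void print_json_filename(const char *driver, const char *child_file)
{
    char buf[256];

    snprintf(buf, sizeof(buf),
             "json:{\"driver\": \"%s\", \"file\": {\"filename\": \"%s\"}}",
             driver, child_file);
    /* e.g. json:{"driver": "qcow2", "file": {"filename": "disk.img"}} */
    puts(buf);
}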
repo_name: AnttiLukats/orp
path: third-party/qemu-orp/block.c
language: C
license: apache-2.0
size: 173,814
/* * linux/mm/vmscan.c * * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * Swap reorganised 29.12.95, Stephen Tweedie. * kswapd added: 7.1.96 sct * Removed kswapd_ctl limits, and swap out as many pages as needed * to bring the system back to freepages.high: 2.4.97, Rik van Riel. * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com). * Multiqueue VM started 5.8.00, Rik van Riel. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/mm.h> #include <linux/module.h> #include <linux/gfp.h> #include <linux/kernel_stat.h> #include <linux/swap.h> #include <linux/pagemap.h> #include <linux/init.h> #include <linux/highmem.h> #include <linux/vmpressure.h> #include <linux/vmstat.h> #include <linux/file.h> #include <linux/writeback.h> #include <linux/blkdev.h> #include <linux/buffer_head.h> /* for try_to_release_page(), buffer_heads_over_limit */ #include <linux/mm_inline.h> #include <linux/backing-dev.h> #include <linux/rmap.h> #include <linux/topology.h> #include <linux/cpu.h> #include <linux/cpuset.h> #include <linux/compaction.h> #include <linux/notifier.h> #include <linux/rwsem.h> #include <linux/delay.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/memcontrol.h> #include <linux/delayacct.h> #include <linux/sysctl.h> #include <linux/oom.h> #include <linux/prefetch.h> #include <linux/printk.h> #include <asm/tlbflush.h> #include <asm/div64.h> #include <linux/swapops.h> #include <linux/balloon_compaction.h> #include "internal.h" #define CREATE_TRACE_POINTS #include <trace/events/vmscan.h> struct scan_control { /* How many pages shrink_list() should reclaim */ unsigned long nr_to_reclaim; /* This context's GFP mask */ gfp_t gfp_mask; /* Allocation order */ int order; /* * Nodemask of nodes allowed by the caller. If NULL, all nodes * are scanned. */ nodemask_t *nodemask; /* * The memory cgroup that hit its limit and as a result is the * primary target of this reclaim invocation. */ struct mem_cgroup *target_mem_cgroup; /* Scan (total_size >> priority) pages at once */ int priority; unsigned int may_writepage:1; /* Can mapped pages be reclaimed? */ unsigned int may_unmap:1; /* Can pages be swapped as part of reclaim? */ unsigned int may_swap:1; /* Can cgroups be reclaimed below their normal consumption range? */ unsigned int may_thrash:1; unsigned int hibernation_mode:1; /* One of the zones is ready for compaction */ unsigned int compaction_ready:1; /* Incremented by the number of inactive pages that were scanned */ unsigned long nr_scanned; /* Number of pages freed so far during a call to shrink_zones() */ unsigned long nr_reclaimed; }; #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru)) #ifdef ARCH_HAS_PREFETCH #define prefetch_prev_lru_page(_page, _base, _field) \ do { \ if ((_page)->lru.prev != _base) { \ struct page *prev; \ \ prev = lru_to_page(&(_page->lru)); \ prefetch(&prev->_field); \ } \ } while (0) #else #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0) #endif #ifdef ARCH_HAS_PREFETCHW #define prefetchw_prev_lru_page(_page, _base, _field) \ do { \ if ((_page)->lru.prev != _base) { \ struct page *prev; \ \ prev = lru_to_page(&(_page->lru)); \ prefetchw(&prev->_field); \ } \ } while (0) #else #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0) #endif /* * From 0 .. 100. Higher means more swappy. */ int vm_swappiness = 60; /* * The total number of pages which are beyond the high watermark within all * zones. 
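 */

/*
 * Editor's sketch (not part of the original file): the pointer arithmetic
 * behind lru_to_page()/list_entry() above, spelled out with offsetof() so
 * it compiles standalone. The two-field fake_page struct is hypothetical.
 */
#include <stddef.h>

struct fake_list_head { struct fake_list_head *next, *prev; };

struct fake_page {
    unsigned long flags;
    struct fake_list_head lru;
};

/* list_entry(ptr, type, member): step back from the embedded list_head
 * to the enclosing structure. lru_to_page(head) applies this to
 * head->prev, i.e. the tail of the LRU list. */
static struct fake_page *lru_to_fake_page(struct fake_list_head *head)
{
    char *member = (char *)head->prev;

    return (struct fake_page *)(member - offsetof(struct fake_page, lru));
}

/*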
*/ unsigned long vm_total_pages; static LIST_HEAD(shrinker_list); static DECLARE_RWSEM(shrinker_rwsem); #ifdef CONFIG_MEMCG static bool global_reclaim(struct scan_control *sc) { return !sc->target_mem_cgroup; } /** * sane_reclaim - is the usual dirty throttling mechanism operational? * @sc: scan_control in question * * The normal page dirty throttling mechanism in balance_dirty_pages() is * completely broken with the legacy memcg and direct stalling in * shrink_page_list() is used for throttling instead, which lacks all the * niceties such as fairness, adaptive pausing, bandwidth proportional * allocation and configurability. * * This function tests whether the vmscan currently in progress can assume * that the normal dirty throttling mechanism is operational. */ static bool sane_reclaim(struct scan_control *sc) { struct mem_cgroup *memcg = sc->target_mem_cgroup; if (!memcg) return true; #ifdef CONFIG_CGROUP_WRITEBACK if (cgroup_on_dfl(mem_cgroup_css(memcg)->cgroup)) return true; #endif return false; } #else static bool global_reclaim(struct scan_control *sc) { return true; } static bool sane_reclaim(struct scan_control *sc) { return true; } #endif static unsigned long zone_reclaimable_pages(struct zone *zone) { int nr; nr = zone_page_state(zone, NR_ACTIVE_FILE) + zone_page_state(zone, NR_INACTIVE_FILE); if (get_nr_swap_pages() > 0) nr += zone_page_state(zone, NR_ACTIVE_ANON) + zone_page_state(zone, NR_INACTIVE_ANON); return nr; } bool zone_reclaimable(struct zone *zone) { return zone_page_state(zone, NR_PAGES_SCANNED) < zone_reclaimable_pages(zone) * 6; } static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru) { if (!mem_cgroup_disabled()) return mem_cgroup_get_lru_size(lruvec, lru); return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru); } /* * Add a shrinker callback to be called from the vm. */ int register_shrinker(struct shrinker *shrinker) { size_t size = sizeof(*shrinker->nr_deferred); /* * If we only have one possible node in the system anyway, save * ourselves the trouble and disable NUMA aware behavior. This way we * will save memory and some small loop time later. */ if (nr_node_ids == 1) shrinker->flags &= ~SHRINKER_NUMA_AWARE; if (shrinker->flags & SHRINKER_NUMA_AWARE) size *= nr_node_ids; shrinker->nr_deferred = kzalloc(size, GFP_KERNEL); if (!shrinker->nr_deferred) return -ENOMEM; down_write(&shrinker_rwsem); list_add_tail(&shrinker->list, &shrinker_list); up_write(&shrinker_rwsem); return 0; } EXPORT_SYMBOL(register_shrinker); /* * Remove one */ void unregister_shrinker(struct shrinker *shrinker) { down_write(&shrinker_rwsem); list_del(&shrinker->list); up_write(&shrinker_rwsem); kfree(shrinker->nr_deferred); } EXPORT_SYMBOL(unregister_shrinker); #define SHRINK_BATCH 128 static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, struct shrinker *shrinker, unsigned long nr_scanned, unsigned long nr_eligible) { unsigned long freed = 0; unsigned long long delta; long total_scan; long freeable; long nr; long new_nr; int nid = shrinkctl->nid; long batch_size = shrinker->batch ? shrinker->batch : SHRINK_BATCH; freeable = shrinker->count_objects(shrinker, shrinkctl); if (freeable == 0) return 0; /* * copy the current shrinker scan count into a local variable * and zero it so that other concurrent shrinker invocations * don't also do this scanning work. 
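	 *
	 * (Editor's worked example, not in the original: for the pressure
	 * math just below, nr_scanned = 1000, nr_eligible = 10000, seeks = 2
	 * and freeable = 5000 give delta = (4 * 1000 / 2) * 5000 / 10001,
	 * roughly 999 objects, i.e. about a fifth of the cache: the
	 * 1000/10000 reclaim ratio, doubled by the 4/seeks factor.)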
*/ nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0); total_scan = nr; delta = (4 * nr_scanned) / shrinker->seeks; delta *= freeable; do_div(delta, nr_eligible + 1); total_scan += delta; if (total_scan < 0) { pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n", shrinker->scan_objects, total_scan); total_scan = freeable; } /* * We need to avoid excessive windup on filesystem shrinkers * due to large numbers of GFP_NOFS allocations causing the * shrinkers to return -1 all the time. This results in a large * nr being built up so when a shrink that can do some work * comes along it empties the entire cache due to nr >>> * freeable. This is bad for sustaining a working set in * memory. * * Hence only allow the shrinker to scan the entire cache when * a large delta change is calculated directly. */ if (delta < freeable / 4) total_scan = min(total_scan, freeable / 2); /* * Avoid risking looping forever due to too large nr value: * never try to free more than twice the estimate number of * freeable entries. */ if (total_scan > freeable * 2) total_scan = freeable * 2; trace_mm_shrink_slab_start(shrinker, shrinkctl, nr, nr_scanned, nr_eligible, freeable, delta, total_scan); /* * Normally, we should not scan less than batch_size objects in one * pass to avoid too frequent shrinker calls, but if the slab has less * than batch_size objects in total and we are really tight on memory, * we will try to reclaim all available objects, otherwise we can end * up failing allocations although there are plenty of reclaimable * objects spread over several slabs with usage less than the * batch_size. * * We detect the "tight on memory" situations by looking at the total * number of objects we want to scan (total_scan). If it is greater * than the total number of objects on slab (freeable), we must be * scanning at high prio and therefore should try to reclaim as much as * possible. */ while (total_scan >= batch_size || total_scan >= freeable) { unsigned long ret; unsigned long nr_to_scan = min(batch_size, total_scan); shrinkctl->nr_to_scan = nr_to_scan; ret = shrinker->scan_objects(shrinker, shrinkctl); if (ret == SHRINK_STOP) break; freed += ret; count_vm_events(SLABS_SCANNED, nr_to_scan); total_scan -= nr_to_scan; cond_resched(); } /* * move the unused scan count back into the shrinker in a * manner that handles concurrent updates. If we exhausted the * scan, there is no need to do an update. */ if (total_scan > 0) new_nr = atomic_long_add_return(total_scan, &shrinker->nr_deferred[nid]); else new_nr = atomic_long_read(&shrinker->nr_deferred[nid]); trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan); return freed; } /** * shrink_slab - shrink slab caches * @gfp_mask: allocation context * @nid: node whose slab caches to target * @memcg: memory cgroup whose slab caches to target * @nr_scanned: pressure numerator * @nr_eligible: pressure denominator * * Call the shrink functions to age shrinkable caches. * * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set, * unaware shrinkers will receive a node id of 0 instead. * * @memcg specifies the memory cgroup to target. If it is not NULL, * only shrinkers with SHRINKER_MEMCG_AWARE set will be called to scan * objects from the memory cgroup specified. Otherwise all shrinkers * are called, and memcg aware shrinkers are supposed to scan the * global list then. * * @nr_scanned and @nr_eligible form a ratio that indicate how much of * the available objects should be scanned. 
Page reclaim for example * passes the number of pages scanned and the number of pages on the * LRU lists that it considered on @nid, plus a bias in @nr_scanned * when it encountered mapped pages. The ratio is further biased by * the ->seeks setting of the shrink function, which indicates the * cost to recreate an object relative to that of an LRU page. * * Returns the number of reclaimed slab objects. */ static unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg, unsigned long nr_scanned, unsigned long nr_eligible) { struct shrinker *shrinker; unsigned long freed = 0; if (memcg && !memcg_kmem_is_active(memcg)) return 0; if (nr_scanned == 0) nr_scanned = SWAP_CLUSTER_MAX; if (!down_read_trylock(&shrinker_rwsem)) { /* * If we would return 0, our callers would understand that we * have nothing else to shrink and give up trying. By returning * 1 we keep it going and assume we'll be able to shrink next * time. */ freed = 1; goto out; } list_for_each_entry(shrinker, &shrinker_list, list) { struct shrink_control sc = { .gfp_mask = gfp_mask, .nid = nid, .memcg = memcg, }; if (memcg && !(shrinker->flags & SHRINKER_MEMCG_AWARE)) continue; if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) sc.nid = 0; freed += do_shrink_slab(&sc, shrinker, nr_scanned, nr_eligible); } up_read(&shrinker_rwsem); out: cond_resched(); return freed; } void drop_slab_node(int nid) { unsigned long freed; do { struct mem_cgroup *memcg = NULL; freed = 0; do { freed += shrink_slab(GFP_KERNEL, nid, memcg, 1000, 1000); } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL); } while (freed > 10); } void drop_slab(void) { int nid; for_each_online_node(nid) drop_slab_node(nid); } static inline int is_page_cache_freeable(struct page *page) { /* * A freeable page cache page is referenced only by the caller * that isolated the page, the page cache radix tree and * optional buffer heads at page->private. */ return page_count(page) - page_has_private(page) == 2; } static int may_write_to_inode(struct inode *inode, struct scan_control *sc) { if (current->flags & PF_SWAPWRITE) return 1; if (!inode_write_congested(inode)) return 1; if (inode_to_bdi(inode) == current->backing_dev_info) return 1; return 0; } /* * We detected a synchronous write error writing a page out. Probably * -ENOSPC. We need to propagate that into the address_space for a subsequent * fsync(), msync() or close(). * * The tricky part is that after writepage we cannot touch the mapping: nothing * prevents it from being freed up. But we have a ref on the page and once * that page is locked, the mapping is pinned. * * We're allowed to run sleeping lock_page() here because we know the caller has * __GFP_FS. */ static void handle_write_error(struct address_space *mapping, struct page *page, int error) { lock_page(page); if (page_mapping(page) == mapping) mapping_set_error(mapping, error); unlock_page(page); } /* possible outcome of pageout() */ typedef enum { /* failed to write page out, page is locked */ PAGE_KEEP, /* move page to the active list, page is locked */ PAGE_ACTIVATE, /* page has been sent to the disk successfully, page is unlocked */ PAGE_SUCCESS, /* page is clean and locked */ PAGE_CLEAN, } pageout_t; /* * pageout is called by shrink_page_list() for each dirty page. * Calls ->writepage(). */ static pageout_t pageout(struct page *page, struct address_space *mapping, struct scan_control *sc) { /* * If the page is dirty, only perform writeback if that write * will be non-blocking. 
To prevent this allocation from being * stalled by pagecache activity. But note that there may be * stalls if we need to run get_block(). We could test * PagePrivate for that. * * If this process is currently in __generic_file_write_iter() against * this page's queue, we can perform writeback even if that * will block. * * If the page is swapcache, write it back even if that would * block, for some throttling. This happens by accident, because * swap_backing_dev_info is bust: it doesn't reflect the * congestion state of the swapdevs. Easy to fix, if needed. */ if (!is_page_cache_freeable(page)) return PAGE_KEEP; if (!mapping) { /* * Some data journaling orphaned pages can have * page->mapping == NULL while being dirty with clean buffers. */ if (page_has_private(page)) { if (try_to_free_buffers(page)) { ClearPageDirty(page); pr_info("%s: orphaned page\n", __func__); return PAGE_CLEAN; } } return PAGE_KEEP; } if (mapping->a_ops->writepage == NULL) return PAGE_ACTIVATE; if (!may_write_to_inode(mapping->host, sc)) return PAGE_KEEP; if (clear_page_dirty_for_io(page)) { int res; struct writeback_control wbc = { .sync_mode = WB_SYNC_NONE, .nr_to_write = SWAP_CLUSTER_MAX, .range_start = 0, .range_end = LLONG_MAX, .for_reclaim = 1, }; SetPageReclaim(page); res = mapping->a_ops->writepage(page, &wbc); if (res < 0) handle_write_error(mapping, page, res); if (res == AOP_WRITEPAGE_ACTIVATE) { ClearPageReclaim(page); return PAGE_ACTIVATE; } if (!PageWriteback(page)) { /* synchronous write or broken a_ops? */ ClearPageReclaim(page); } trace_mm_vmscan_writepage(page, trace_reclaim_flags(page)); inc_zone_page_state(page, NR_VMSCAN_WRITE); return PAGE_SUCCESS; } return PAGE_CLEAN; } /* * Same as remove_mapping, but if the page is removed from the mapping, it * gets returned with a refcount of 0. */ static int __remove_mapping(struct address_space *mapping, struct page *page, bool reclaimed) { unsigned long flags; struct mem_cgroup *memcg; BUG_ON(!PageLocked(page)); BUG_ON(mapping != page_mapping(page)); memcg = mem_cgroup_begin_page_stat(page); spin_lock_irqsave(&mapping->tree_lock, flags); /* * The non racy check for a busy page. * * Must be careful with the order of the tests. When someone has * a ref to the page, it may be possible that they dirty it then * drop the reference. So if PageDirty is tested before page_count * here, then the following race may occur: * * get_user_pages(&page); * [user mapping goes away] * write_to(page); * !PageDirty(page) [good] * SetPageDirty(page); * put_page(page); * !page_count(page) [good, discard it] * * [oops, our write_to data is lost] * * Reversing the order of the tests ensures such a situation cannot * escape unnoticed. The smp_rmb is needed to ensure the page->flags * load is not satisfied before that of page->_count. * * Note that if SetPageDirty is always performed via set_page_dirty, * and thus under tree_lock, then this ordering is not required. 
*/ if (!page_freeze_refs(page, 2)) goto cannot_free; /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */ if (unlikely(PageDirty(page))) { page_unfreeze_refs(page, 2); goto cannot_free; } if (PageSwapCache(page)) { swp_entry_t swap = { .val = page_private(page) }; mem_cgroup_swapout(page, swap); __delete_from_swap_cache(page); spin_unlock_irqrestore(&mapping->tree_lock, flags); mem_cgroup_end_page_stat(memcg); swapcache_free(swap); } else { void (*freepage)(struct page *); void *shadow = NULL; freepage = mapping->a_ops->freepage; /* * Remember a shadow entry for reclaimed file cache in * order to detect refaults, thus thrashing, later on. * * But don't store shadows in an address space that is * already exiting. This is not just an optizimation, * inode reclaim needs to empty out the radix tree or * the nodes are lost. Don't plant shadows behind its * back. */ if (reclaimed && page_is_file_cache(page) && !mapping_exiting(mapping)) shadow = workingset_eviction(mapping, page); __delete_from_page_cache(page, shadow, memcg); spin_unlock_irqrestore(&mapping->tree_lock, flags); mem_cgroup_end_page_stat(memcg); if (freepage != NULL) freepage(page); } return 1; cannot_free: spin_unlock_irqrestore(&mapping->tree_lock, flags); mem_cgroup_end_page_stat(memcg); return 0; } /* * Attempt to detach a locked page from its ->mapping. If it is dirty or if * someone else has a ref on the page, abort and return 0. If it was * successfully detached, return 1. Assumes the caller has a single ref on * this page. */ int remove_mapping(struct address_space *mapping, struct page *page) { if (__remove_mapping(mapping, page, false)) { /* * Unfreezing the refcount with 1 rather than 2 effectively * drops the pagecache ref for us without requiring another * atomic operation. */ page_unfreeze_refs(page, 1); return 1; } return 0; } /** * putback_lru_page - put previously isolated page onto appropriate LRU list * @page: page to be put back to appropriate lru list * * Add previously isolated @page to appropriate LRU list. * Page may still be unevictable for other reasons. * * lru_lock must not be held, interrupts must be enabled. */ void putback_lru_page(struct page *page) { bool is_unevictable; int was_unevictable = PageUnevictable(page); VM_BUG_ON_PAGE(PageLRU(page), page); redo: ClearPageUnevictable(page); if (page_evictable(page)) { /* * For evictable pages, we can use the cache. * In event of a race, worst case is we end up with an * unevictable page on [in]active list. * We know how to handle that. */ is_unevictable = false; lru_cache_add(page); } else { /* * Put unevictable pages directly on zone's unevictable * list. */ is_unevictable = true; add_page_to_unevictable_list(page); /* * When racing with an mlock or AS_UNEVICTABLE clearing * (page is unlocked) make sure that if the other thread * does not observe our setting of PG_lru and fails * isolation/check_move_unevictable_pages, * we see PG_mlocked/AS_UNEVICTABLE cleared below and move * the page back to the evictable list. * * The other side is TestClearPageMlocked() or shmem_lock(). */ smp_mb(); } /* * page's status can change while we move it among lru. If an evictable * page is on unevictable list, it never be freed. To avoid that, * check after we added it to the list, again. */ if (is_unevictable && page_evictable(page)) { if (!isolate_lru_page(page)) { put_page(page); goto redo; } /* This means someone else dropped this page from LRU * So, it will be freed or putback to LRU again. There is * nothing to do here. 
*/ } if (was_unevictable && !is_unevictable) count_vm_event(UNEVICTABLE_PGRESCUED); else if (!was_unevictable && is_unevictable) count_vm_event(UNEVICTABLE_PGCULLED); put_page(page); /* drop ref from isolate */ } enum page_references { PAGEREF_RECLAIM, PAGEREF_RECLAIM_CLEAN, PAGEREF_KEEP, PAGEREF_ACTIVATE, }; static enum page_references page_check_references(struct page *page, struct scan_control *sc) { int referenced_ptes, referenced_page; unsigned long vm_flags; referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup, &vm_flags); referenced_page = TestClearPageReferenced(page); /* * Mlock lost the isolation race with us. Let try_to_unmap() * move the page to the unevictable list. */ if (vm_flags & VM_LOCKED) return PAGEREF_RECLAIM; if (referenced_ptes) { if (PageSwapBacked(page)) return PAGEREF_ACTIVATE; /* * All mapped pages start out with page table * references from the instantiating fault, so we need * to look twice if a mapped file page is used more * than once. * * Mark it and spare it for another trip around the * inactive list. Another page table reference will * lead to its activation. * * Note: the mark is set for activated pages as well * so that recently deactivated but used pages are * quickly recovered. */ SetPageReferenced(page); if (referenced_page || referenced_ptes > 1) return PAGEREF_ACTIVATE; /* * Activate file-backed executable pages after first usage. */ if (vm_flags & VM_EXEC) return PAGEREF_ACTIVATE; return PAGEREF_KEEP; } /* Reclaim if clean, defer dirty pages to writeback */ if (referenced_page && !PageSwapBacked(page)) return PAGEREF_RECLAIM_CLEAN; return PAGEREF_RECLAIM; } /* Check if a page is dirty or under writeback */ static void page_check_dirty_writeback(struct page *page, bool *dirty, bool *writeback) { struct address_space *mapping; /* * Anonymous pages are not handled by flushers and must be written * from reclaim context. 
Do not stall reclaim based on them */ if (!page_is_file_cache(page)) { *dirty = false; *writeback = false; return; } /* By default assume that the page flags are accurate */ *dirty = PageDirty(page); *writeback = PageWriteback(page); /* Verify dirty/writeback state if the filesystem supports it */ if (!page_has_private(page)) return; mapping = page_mapping(page); if (mapping && mapping->a_ops->is_dirty_writeback) mapping->a_ops->is_dirty_writeback(page, dirty, writeback); } /* * shrink_page_list() returns the number of reclaimed pages */ static unsigned long shrink_page_list(struct list_head *page_list, struct zone *zone, struct scan_control *sc, enum ttu_flags ttu_flags, unsigned long *ret_nr_dirty, unsigned long *ret_nr_unqueued_dirty, unsigned long *ret_nr_congested, unsigned long *ret_nr_writeback, unsigned long *ret_nr_immediate, bool force_reclaim) { LIST_HEAD(ret_pages); LIST_HEAD(free_pages); int pgactivate = 0; unsigned long nr_unqueued_dirty = 0; unsigned long nr_dirty = 0; unsigned long nr_congested = 0; unsigned long nr_reclaimed = 0; unsigned long nr_writeback = 0; unsigned long nr_immediate = 0; cond_resched(); while (!list_empty(page_list)) { struct address_space *mapping; struct page *page; int may_enter_fs; enum page_references references = PAGEREF_RECLAIM_CLEAN; bool dirty, writeback; cond_resched(); page = lru_to_page(page_list); list_del(&page->lru); if (!trylock_page(page)) goto keep; VM_BUG_ON_PAGE(PageActive(page), page); VM_BUG_ON_PAGE(page_zone(page) != zone, page); sc->nr_scanned++; if (unlikely(!page_evictable(page))) goto cull_mlocked; if (!sc->may_unmap && page_mapped(page)) goto keep_locked; /* Double the slab pressure for mapped and swapcache pages */ if (page_mapped(page) || PageSwapCache(page)) sc->nr_scanned++; may_enter_fs = (sc->gfp_mask & __GFP_FS) || (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); /* * The number of dirty pages determines if a zone is marked * reclaim_congested which affects wait_iff_congested. kswapd * will stall and start writing pages if the tail of the LRU * is all dirty unqueued pages. */ page_check_dirty_writeback(page, &dirty, &writeback); if (dirty || writeback) nr_dirty++; if (dirty && !writeback) nr_unqueued_dirty++; /* * Treat this page as congested if the underlying BDI is or if * pages are cycling through the LRU so quickly that the * pages marked for immediate reclaim are making it to the * end of the LRU a second time. */ mapping = page_mapping(page); if (((dirty || writeback) && mapping && inode_write_congested(mapping->host)) || (writeback && PageReclaim(page))) nr_congested++; /* * If a page at the tail of the LRU is under writeback, there * are three cases to consider. * * 1) If reclaim is encountering an excessive number of pages * under writeback and this page is both under writeback and * PageReclaim then it indicates that pages are being queued * for IO but are being recycled through the LRU before the * IO can complete. Waiting on the page itself risks an * indefinite stall if it is impossible to writeback the * page due to IO error or disconnected storage so instead * note that the LRU is being scanned too quickly and the * caller can stall after page list has been processed. * * 2) Global or new memcg reclaim encounters a page that is * not marked for immediate reclaim, or the caller does not * have __GFP_FS (or __GFP_IO if it's simply going to swap, * not to fs). In this case mark the page for immediate * reclaim and continue scanning. 
* * Require may_enter_fs because we would wait on fs, which * may not have submitted IO yet. And the loop driver might * enter reclaim, and deadlock if it waits on a page for * which it is needed to do the write (loop masks off * __GFP_IO|__GFP_FS for this reason); but more thought * would probably show more reasons. * * 3) Legacy memcg encounters a page that is not already marked * PageReclaim. memcg does not have any dirty pages * throttling so we could easily OOM just because too many * pages are in writeback and there is nothing else to * reclaim. Wait for the writeback to complete. */ if (PageWriteback(page)) { /* Case 1 above */ if (current_is_kswapd() && PageReclaim(page) && test_bit(ZONE_WRITEBACK, &zone->flags)) { nr_immediate++; goto keep_locked; /* Case 2 above */ } else if (sane_reclaim(sc) || !PageReclaim(page) || !may_enter_fs) { /* * This is slightly racy - end_page_writeback() * might have just cleared PageReclaim, then * setting PageReclaim here end up interpreted * as PageReadahead - but that does not matter * enough to care. What we do want is for this * page to have PageReclaim set next time memcg * reclaim reaches the tests above, so it will * then wait_on_page_writeback() to avoid OOM; * and it's also appropriate in global reclaim. */ SetPageReclaim(page); nr_writeback++; goto keep_locked; /* Case 3 above */ } else { wait_on_page_writeback(page); } } if (!force_reclaim) references = page_check_references(page, sc); switch (references) { case PAGEREF_ACTIVATE: goto activate_locked; case PAGEREF_KEEP: goto keep_locked; case PAGEREF_RECLAIM: case PAGEREF_RECLAIM_CLEAN: ; /* try to reclaim the page below */ } /* * Anonymous process memory has backing store? * Try to allocate it some swap space here. */ if (PageAnon(page) && !PageSwapCache(page)) { if (!(sc->gfp_mask & __GFP_IO)) goto keep_locked; if (!add_to_swap(page, page_list)) goto activate_locked; may_enter_fs = 1; /* Adding to swap updated mapping */ mapping = page_mapping(page); } /* * The page is mapped into the page tables of one or more * processes. Try to unmap it here. */ if (page_mapped(page) && mapping) { switch (try_to_unmap(page, ttu_flags)) { case SWAP_FAIL: goto activate_locked; case SWAP_AGAIN: goto keep_locked; case SWAP_MLOCK: goto cull_mlocked; case SWAP_SUCCESS: ; /* try to free the page below */ } } if (PageDirty(page)) { /* * Only kswapd can writeback filesystem pages to * avoid risk of stack overflow but only writeback * if many dirty pages have been encountered. */ if (page_is_file_cache(page) && (!current_is_kswapd() || !test_bit(ZONE_DIRTY, &zone->flags))) { /* * Immediately reclaim when written back. * Similar in principal to deactivate_page() * except we already have the page isolated * and know it's dirty */ inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE); SetPageReclaim(page); goto keep_locked; } if (references == PAGEREF_RECLAIM_CLEAN) goto keep_locked; if (!may_enter_fs) goto keep_locked; if (!sc->may_writepage) goto keep_locked; /* Page is dirty, try to write it out here */ switch (pageout(page, mapping, sc)) { case PAGE_KEEP: goto keep_locked; case PAGE_ACTIVATE: goto activate_locked; case PAGE_SUCCESS: if (PageWriteback(page)) goto keep; if (PageDirty(page)) goto keep; /* * A synchronous write - probably a ramdisk. Go * ahead and try to reclaim the page. 
*/ if (!trylock_page(page)) goto keep; if (PageDirty(page) || PageWriteback(page)) goto keep_locked; mapping = page_mapping(page); case PAGE_CLEAN: ; /* try to free the page below */ } } /* * If the page has buffers, try to free the buffer mappings * associated with this page. If we succeed we try to free * the page as well. * * We do this even if the page is PageDirty(). * try_to_release_page() does not perform I/O, but it is * possible for a page to have PageDirty set, but it is actually * clean (all its buffers are clean). This happens if the * buffers were written out directly, with submit_bh(). ext3 * will do this, as well as the blockdev mapping. * try_to_release_page() will discover that cleanness and will * drop the buffers and mark the page clean - it can be freed. * * Rarely, pages can have buffers and no ->mapping. These are * the pages which were not successfully invalidated in * truncate_complete_page(). We try to drop those buffers here * and if that worked, and the page is no longer mapped into * process address space (page_count == 1) it can be freed. * Otherwise, leave the page on the LRU so it is swappable. */ if (page_has_private(page)) { if (!try_to_release_page(page, sc->gfp_mask)) goto activate_locked; if (!mapping && page_count(page) == 1) { unlock_page(page); if (put_page_testzero(page)) goto free_it; else { /* * rare race with speculative reference. * the speculative reference will free * this page shortly, so we may * increment nr_reclaimed here (and * leave it off the LRU). */ nr_reclaimed++; continue; } } } if (!mapping || !__remove_mapping(mapping, page, true)) goto keep_locked; /* * At this point, we have no other references and there is * no way to pick any more up (removed from LRU, removed * from pagecache). Can use non-atomic bitops now (and * we obviously don't have to worry about waking up a process * waiting on the page lock, because there are no references. */ __clear_page_locked(page); free_it: nr_reclaimed++; /* * Is there need to periodically free_page_list? It would * appear not as the counts should be low */ list_add(&page->lru, &free_pages); continue; cull_mlocked: if (PageSwapCache(page)) try_to_free_swap(page); unlock_page(page); list_add(&page->lru, &ret_pages); continue; activate_locked: /* Not a candidate for swapping, so reclaim swap space. 
*/ if (PageSwapCache(page) && vm_swap_full()) try_to_free_swap(page); VM_BUG_ON_PAGE(PageActive(page), page); SetPageActive(page); pgactivate++; keep_locked: unlock_page(page); keep: list_add(&page->lru, &ret_pages); VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page); } mem_cgroup_uncharge_list(&free_pages); free_hot_cold_page_list(&free_pages, true); list_splice(&ret_pages, page_list); count_vm_events(PGACTIVATE, pgactivate); *ret_nr_dirty += nr_dirty; *ret_nr_congested += nr_congested; *ret_nr_unqueued_dirty += nr_unqueued_dirty; *ret_nr_writeback += nr_writeback; *ret_nr_immediate += nr_immediate; return nr_reclaimed; } unsigned long reclaim_clean_pages_from_list(struct zone *zone, struct list_head *page_list) { struct scan_control sc = { .gfp_mask = GFP_KERNEL, .priority = DEF_PRIORITY, .may_unmap = 1, }; unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5; struct page *page, *next; LIST_HEAD(clean_pages); list_for_each_entry_safe(page, next, page_list, lru) { if (page_is_file_cache(page) && !PageDirty(page) && !isolated_balloon_page(page)) { ClearPageActive(page); list_move(&page->lru, &clean_pages); } } ret = shrink_page_list(&clean_pages, zone, &sc, TTU_UNMAP|TTU_IGNORE_ACCESS, &dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true); list_splice(&clean_pages, page_list); mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret); return ret; } /* * Attempt to remove the specified page from its LRU. Only take this page * if it is of the appropriate PageActive status. Pages which are being * freed elsewhere are also ignored. * * page: page to consider * mode: one of the LRU isolation modes defined above * * returns 0 on success, -ve errno on failure. */ int __isolate_lru_page(struct page *page, isolate_mode_t mode) { int ret = -EINVAL; /* Only take pages on the LRU. */ if (!PageLRU(page)) return ret; /* Compaction should not handle unevictable pages but CMA can do so */ if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE)) return ret; ret = -EBUSY; /* * To minimise LRU disruption, the caller can indicate that it only * wants to isolate pages it will be able to operate on without * blocking - clean pages for the most part. * * ISOLATE_CLEAN means that only clean pages should be isolated. This * is used by reclaim when it is cannot write to backing storage * * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants to pages * that it is possible to migrate without blocking */ if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) { /* All the caller can do on PageWriteback is block */ if (PageWriteback(page)) return ret; if (PageDirty(page)) { struct address_space *mapping; /* ISOLATE_CLEAN means only clean pages */ if (mode & ISOLATE_CLEAN) return ret; /* * Only pages without mappings or that have a * ->migratepage callback are possible to migrate * without blocking */ mapping = page_mapping(page); if (mapping && !mapping->a_ops->migratepage) return ret; } } if ((mode & ISOLATE_UNMAPPED) && page_mapped(page)) return ret; if (likely(get_page_unless_zero(page))) { /* * Be careful not to clear PageLRU until after we're * sure the page is not being freed elsewhere -- the * page release code relies on it. */ ClearPageLRU(page); ret = 0; } return ret; } /* * zone->lru_lock is heavily contended. Some of the functions that * shrink the lists perform better by taking out a batch of pages * and working on them outside the LRU lock. * * For pagecache intensive workloads, this function is the hottest * spot in the kernel (apart from copy_*_user functions). 
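 */

/*
 * Editor's sketch (not part of the original file): the batching idea the
 * comment above describes - detach a bounded chunk from a shared list
 * under the lock, then work on the private batch with the lock dropped.
 * pthreads and the bnode type are stand-ins for the zone LRU machinery.
 */
#include <pthread.h>
#include <stddef.h>

struct bnode { struct bnode *next; };

static struct bnode *shared_head;
static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;

/* Detach up to max entries into *batch. The lock is held only for the
 * O(max) unlink walk, not for the per-entry work, which is the point of
 * isolate_lru_pages() below. Returns how many entries were taken. */
static size_t take_batch(struct bnode **batch, size_t max)
{
    size_t n = 0;

    *batch = NULL;
    if (max == 0) {
        return 0;
    }

    pthread_mutex_lock(&shared_lock);
    struct bnode **tail = &shared_head;

    while (n < max && *tail) {
        tail = &(*tail)->next;
        n++;
    }
    *batch = shared_head;   /* head of the private batch (may be NULL) */
    shared_head = *tail;    /* remainder stays on the shared list */
    *tail = NULL;           /* sever the batch from the remainder */
    pthread_mutex_unlock(&shared_lock);
    return n;
}

/*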
* * Appropriate locks must be held before calling this function. * * @nr_to_scan: The number of pages to look through on the list. * @lruvec: The LRU vector to pull pages from. * @dst: The temp list to put pages on to. * @nr_scanned: The number of pages that were scanned. * @sc: The scan_control struct for this reclaim session * @mode: One of the LRU isolation modes * @lru: LRU list id for isolating * * returns how many pages were moved onto *@dst. */ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, struct lruvec *lruvec, struct list_head *dst, unsigned long *nr_scanned, struct scan_control *sc, isolate_mode_t mode, enum lru_list lru) { struct list_head *src = &lruvec->lists[lru]; unsigned long nr_taken = 0; unsigned long scan; for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) { struct page *page; int nr_pages; page = lru_to_page(src); prefetchw_prev_lru_page(page, src, flags); VM_BUG_ON_PAGE(!PageLRU(page), page); switch (__isolate_lru_page(page, mode)) { case 0: nr_pages = hpage_nr_pages(page); mem_cgroup_update_lru_size(lruvec, lru, -nr_pages); list_move(&page->lru, dst); nr_taken += nr_pages; break; case -EBUSY: /* else it is being freed elsewhere */ list_move(&page->lru, src); continue; default: BUG(); } } *nr_scanned = scan; trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan, nr_taken, mode, is_file_lru(lru)); return nr_taken; } /** * isolate_lru_page - tries to isolate a page from its LRU list * @page: page to isolate from its LRU list * * Isolates a @page from an LRU list, clears PageLRU and adjusts the * vmstat statistic corresponding to whatever LRU list the page was on. * * Returns 0 if the page was removed from an LRU list. * Returns -EBUSY if the page was not on an LRU list. * * The returned page will have PageLRU() cleared. If it was found on * the active list, it will have PageActive set. If it was found on * the unevictable list, it will have the PageUnevictable bit set. That flag * may need to be cleared by the caller before letting the page go. * * The vmstat statistic corresponding to the list on which the page was * found will be decremented. * * Restrictions: * (1) Must be called with an elevated refcount on the page. This is a * fundamentnal difference from isolate_lru_pages (which is called * without a stable reference). * (2) the lru_lock must not be held. * (3) interrupts must be enabled. */ int isolate_lru_page(struct page *page) { int ret = -EBUSY; VM_BUG_ON_PAGE(!page_count(page), page); if (PageLRU(page)) { struct zone *zone = page_zone(page); struct lruvec *lruvec; spin_lock_irq(&zone->lru_lock); lruvec = mem_cgroup_page_lruvec(page, zone); if (PageLRU(page)) { int lru = page_lru(page); get_page(page); ClearPageLRU(page); del_page_from_lru_list(page, lruvec, lru); ret = 0; } spin_unlock_irq(&zone->lru_lock); } return ret; } /* * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and * then get resheduled. When there are massive number of tasks doing page * allocation, such sleeping direct reclaimers may keep piling up on each CPU, * the LRU list will go small and be scanned faster than necessary, leading to * unnecessary swapping, thrashing and OOM. 
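 */

/*
 * Editor's sketch (not part of the original file): the throttling test
 * that too_many_isolated() below applies, as a pure function over the
 * counters. The FAKE_GFP_* constants are stand-ins for the real GFP bits.
 */
#include <stdbool.h>

#define FAKE_GFP_IO  0x1u
#define FAKE_GFP_FS  0x2u

static bool too_many_isolated_example(unsigned long inactive,
                                      unsigned long isolated,
                                      unsigned int gfp_mask)
{
    /* Callers that can do both IO and FS (ordinary direct reclaim) get a
     * stricter threshold of inactive/8; GFP_NOIO/GFP_NOFS callers keep
     * the full inactive count, so they may isolate more pages and are not
     * blocked behind ordinary reclaimers (avoiding the circular deadlock
     * noted below). */
    if ((gfp_mask & (FAKE_GFP_IO | FAKE_GFP_FS)) ==
        (FAKE_GFP_IO | FAKE_GFP_FS)) {
        inactive >>= 3;
    }

    return isolated > inactive;
}

/*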
*/ static int too_many_isolated(struct zone *zone, int file, struct scan_control *sc) { unsigned long inactive, isolated; if (current_is_kswapd()) return 0; if (!sane_reclaim(sc)) return 0; if (file) { inactive = zone_page_state(zone, NR_INACTIVE_FILE); isolated = zone_page_state(zone, NR_ISOLATED_FILE); } else { inactive = zone_page_state(zone, NR_INACTIVE_ANON); isolated = zone_page_state(zone, NR_ISOLATED_ANON); } /* * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they * won't get blocked by normal direct-reclaimers, forming a circular * deadlock. */ if ((sc->gfp_mask & GFP_IOFS) == GFP_IOFS) inactive >>= 3; return isolated > inactive; } static noinline_for_stack void putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list) { struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; struct zone *zone = lruvec_zone(lruvec); LIST_HEAD(pages_to_free); /* * Put back any unfreeable pages. */ while (!list_empty(page_list)) { struct page *page = lru_to_page(page_list); int lru; VM_BUG_ON_PAGE(PageLRU(page), page); list_del(&page->lru); if (unlikely(!page_evictable(page))) { spin_unlock_irq(&zone->lru_lock); putback_lru_page(page); spin_lock_irq(&zone->lru_lock); continue; } lruvec = mem_cgroup_page_lruvec(page, zone); SetPageLRU(page); lru = page_lru(page); add_page_to_lru_list(page, lruvec, lru); if (is_active_lru(lru)) { int file = is_file_lru(lru); int numpages = hpage_nr_pages(page); reclaim_stat->recent_rotated[file] += numpages; } if (put_page_testzero(page)) { __ClearPageLRU(page); __ClearPageActive(page); del_page_from_lru_list(page, lruvec, lru); if (unlikely(PageCompound(page))) { spin_unlock_irq(&zone->lru_lock); mem_cgroup_uncharge(page); (*get_compound_page_dtor(page))(page); spin_lock_irq(&zone->lru_lock); } else list_add(&page->lru, &pages_to_free); } } /* * To save our caller's stack, now use input list for pages to free. */ list_splice(&pages_to_free, page_list); } /* * If a kernel thread (such as nfsd for loop-back mounts) services * a backing device by writing to the page cache it sets PF_LESS_THROTTLE. * In that case we should only throttle if the backing device it is * writing to is congested. In other cases it is safe to throttle. */ static int current_may_throttle(void) { return !(current->flags & PF_LESS_THROTTLE) || current->backing_dev_info == NULL || bdi_write_congested(current->backing_dev_info); } /* * shrink_inactive_list() is a helper for shrink_zone(). It returns the number * of reclaimed pages */ static noinline_for_stack unsigned long shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, struct scan_control *sc, enum lru_list lru) { LIST_HEAD(page_list); unsigned long nr_scanned; unsigned long nr_reclaimed = 0; unsigned long nr_taken; unsigned long nr_dirty = 0; unsigned long nr_congested = 0; unsigned long nr_unqueued_dirty = 0; unsigned long nr_writeback = 0; unsigned long nr_immediate = 0; isolate_mode_t isolate_mode = 0; int file = is_file_lru(lru); struct zone *zone = lruvec_zone(lruvec); struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; while (unlikely(too_many_isolated(zone, file, sc))) { congestion_wait(BLK_RW_ASYNC, HZ/10); /* We are about to die and free our memory. Return now. 
*/ if (fatal_signal_pending(current)) return SWAP_CLUSTER_MAX; } lru_add_drain(); if (!sc->may_unmap) isolate_mode |= ISOLATE_UNMAPPED; if (!sc->may_writepage) isolate_mode |= ISOLATE_CLEAN; spin_lock_irq(&zone->lru_lock); nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list, &nr_scanned, sc, isolate_mode, lru); __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken); __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken); if (global_reclaim(sc)) { __mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned); if (current_is_kswapd()) __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned); else __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned); } spin_unlock_irq(&zone->lru_lock); if (nr_taken == 0) return 0; nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP, &nr_dirty, &nr_unqueued_dirty, &nr_congested, &nr_writeback, &nr_immediate, false); spin_lock_irq(&zone->lru_lock); reclaim_stat->recent_scanned[file] += nr_taken; if (global_reclaim(sc)) { if (current_is_kswapd()) __count_zone_vm_events(PGSTEAL_KSWAPD, zone, nr_reclaimed); else __count_zone_vm_events(PGSTEAL_DIRECT, zone, nr_reclaimed); } putback_inactive_pages(lruvec, &page_list); __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken); spin_unlock_irq(&zone->lru_lock); mem_cgroup_uncharge_list(&page_list); free_hot_cold_page_list(&page_list, true); /* * If reclaim is isolating dirty pages under writeback, it implies * that the long-lived page allocation rate is exceeding the page * laundering rate. Either the global limits are not being effective * at throttling processes due to the page distribution throughout * zones or there is heavy usage of a slow backing device. The * only option is to throttle from reclaim context which is not ideal * as there is no guarantee the dirtying process is throttled in the * same way balance_dirty_pages() manages. * * Once a zone is flagged ZONE_WRITEBACK, kswapd will count the number * of pages under writeback that are flagged for immediate reclaim and * stall if any are encountered in the nr_immediate check below. */ if (nr_writeback && nr_writeback == nr_taken) set_bit(ZONE_WRITEBACK, &zone->flags); /* * Legacy memcg will stall in page writeback so avoid forcibly * stalling here. */ if (sane_reclaim(sc)) { /* * Tag a zone as congested if all the dirty pages scanned were * backed by a congested BDI and wait_iff_congested will stall. */ if (nr_dirty && nr_dirty == nr_congested) set_bit(ZONE_CONGESTED, &zone->flags); /* * If dirty pages are scanned that are not queued for IO, it * implies that flushers are not keeping up. In this case, flag * the zone ZONE_DIRTY and kswapd will start writing pages from * reclaim context. */ if (nr_unqueued_dirty == nr_taken) set_bit(ZONE_DIRTY, &zone->flags); /* * If kswapd scans pages marked for immediate * reclaim and under writeback (nr_immediate), it implies * that pages are cycling through the LRU faster than * they are written so also forcibly stall. */ if (nr_immediate && current_may_throttle()) congestion_wait(BLK_RW_ASYNC, HZ/10); } /* * Stall direct reclaim for IO completions if underlying BDIs or zone * is congested. Allow kswapd to continue until it starts encountering * unqueued dirty pages or cycling through the LRU too quickly.
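 *
 * To make the flag logic above concrete (illustrative only): if
 * nr_taken = 32 and all 32 pages were under writeback (nr_writeback ==
 * nr_taken), the zone is tagged ZONE_WRITEBACK; if every dirty page
 * scanned sat on a congested BDI (nr_dirty == nr_congested), it is
 * tagged ZONE_CONGESTED; and if none of the taken pages had been
 * queued for IO yet (nr_unqueued_dirty == nr_taken), it is tagged
 * ZONE_DIRTY so that kswapd starts writeback from reclaim context.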
*/ if (!sc->hibernation_mode && !current_is_kswapd() && current_may_throttle()) wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10); trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id, zone_idx(zone), nr_scanned, nr_reclaimed, sc->priority, trace_shrink_flags(file)); return nr_reclaimed; } /* * This moves pages from the active list to the inactive list. * * We move them the other way if the page is referenced by one or more * processes, from rmap. * * If the pages are mostly unmapped, the processing is fast and it is * appropriate to hold zone->lru_lock across the whole operation. But if * the pages are mapped, the processing is slow (page_referenced()) so we * should drop zone->lru_lock around each page. It's impossible to balance * this, so instead we remove the pages from the LRU while processing them. * It is safe to rely on PG_active against the non-LRU pages in here because * nobody will play with that bit on a non-LRU page. * * The downside is that we have to touch page->_count against each page. * But we had to alter page->flags anyway. */ static void move_active_pages_to_lru(struct lruvec *lruvec, struct list_head *list, struct list_head *pages_to_free, enum lru_list lru) { struct zone *zone = lruvec_zone(lruvec); unsigned long pgmoved = 0; struct page *page; int nr_pages; while (!list_empty(list)) { page = lru_to_page(list); lruvec = mem_cgroup_page_lruvec(page, zone); VM_BUG_ON_PAGE(PageLRU(page), page); SetPageLRU(page); nr_pages = hpage_nr_pages(page); mem_cgroup_update_lru_size(lruvec, lru, nr_pages); list_move(&page->lru, &lruvec->lists[lru]); pgmoved += nr_pages; if (put_page_testzero(page)) { __ClearPageLRU(page); __ClearPageActive(page); del_page_from_lru_list(page, lruvec, lru); if (unlikely(PageCompound(page))) { spin_unlock_irq(&zone->lru_lock); mem_cgroup_uncharge(page); (*get_compound_page_dtor(page))(page); spin_lock_irq(&zone->lru_lock); } else list_add(&page->lru, pages_to_free); } } __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved); if (!is_active_lru(lru)) __count_vm_events(PGDEACTIVATE, pgmoved); } static void shrink_active_list(unsigned long nr_to_scan, struct lruvec *lruvec, struct scan_control *sc, enum lru_list lru) { unsigned long nr_taken; unsigned long nr_scanned; unsigned long vm_flags; LIST_HEAD(l_hold); /* The pages which were snipped off */ LIST_HEAD(l_active); LIST_HEAD(l_inactive); struct page *page; struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; unsigned long nr_rotated = 0; isolate_mode_t isolate_mode = 0; int file = is_file_lru(lru); struct zone *zone = lruvec_zone(lruvec); lru_add_drain(); if (!sc->may_unmap) isolate_mode |= ISOLATE_UNMAPPED; if (!sc->may_writepage) isolate_mode |= ISOLATE_CLEAN; spin_lock_irq(&zone->lru_lock); nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold, &nr_scanned, sc, isolate_mode, lru); if (global_reclaim(sc)) __mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned); reclaim_stat->recent_scanned[file] += nr_taken; __count_zone_vm_events(PGREFILL, zone, nr_scanned); __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken); __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken); spin_unlock_irq(&zone->lru_lock); while (!list_empty(&l_hold)) { cond_resched(); page = lru_to_page(&l_hold); list_del(&page->lru); if (unlikely(!page_evictable(page))) { putback_lru_page(page); continue; } if (unlikely(buffer_heads_over_limit)) { if (page_has_private(page) && trylock_page(page)) { if (page_has_private(page)) try_to_release_page(page, 0); unlock_page(page); } } if 
(page_referenced(page, 0, sc->target_mem_cgroup, &vm_flags)) { nr_rotated += hpage_nr_pages(page); /* * Identify referenced, file-backed active pages and * give them one more trip around the active list, so * that executable code gets a better chance to stay in * memory under moderate memory pressure. Anon pages * are not likely to be evicted by use-once streaming * IO, plus JVM can create lots of anon VM_EXEC pages, * so we ignore them here. */ if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) { list_add(&page->lru, &l_active); continue; } } ClearPageActive(page); /* we are de-activating */ list_add(&page->lru, &l_inactive); } /* * Move pages back to the lru list. */ spin_lock_irq(&zone->lru_lock); /* * Count referenced pages from currently used mappings as rotated, * even though only some of them are actually re-activated. This * helps balance scan pressure between file and anonymous pages in * get_scan_count. */ reclaim_stat->recent_rotated[file] += nr_rotated; move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru); move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE); __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken); spin_unlock_irq(&zone->lru_lock); mem_cgroup_uncharge_list(&l_hold); free_hot_cold_page_list(&l_hold, true); } #ifdef CONFIG_SWAP static int inactive_anon_is_low_global(struct zone *zone) { unsigned long active, inactive; active = zone_page_state(zone, NR_ACTIVE_ANON); inactive = zone_page_state(zone, NR_INACTIVE_ANON); if (inactive * zone->inactive_ratio < active) return 1; return 0; } /** * inactive_anon_is_low - check if anonymous pages need to be deactivated * @lruvec: LRU vector to check * * Returns true if the zone does not have enough inactive anon pages, * meaning some active anon pages need to be deactivated. */ static int inactive_anon_is_low(struct lruvec *lruvec) { /* * If we don't have swap space, anonymous page deactivation * is pointless. */ if (!total_swap_pages) return 0; if (!mem_cgroup_disabled()) return mem_cgroup_inactive_anon_is_low(lruvec); return inactive_anon_is_low_global(lruvec_zone(lruvec)); } #else static inline int inactive_anon_is_low(struct lruvec *lruvec) { return 0; } #endif /** * inactive_file_is_low - check if file pages need to be deactivated * @lruvec: LRU vector to check * * When the system is doing streaming IO, memory pressure here * ensures that active file pages get deactivated, until more * than half of the file pages are on the inactive list. * * Once we get to that situation, protect the system's working * set from being evicted by disabling active file page aging. * * This uses a different ratio than the anonymous pages, because * the page cache uses a use-once replacement algorithm.
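 *
 * For example (hypothetical numbers): with 300 active and 200
 * inactive file pages, active > inactive below returns true, the
 * inactive list is considered low, and shrink_active_list() keeps
 * deactivating pages until at least half of the file pages sit on
 * the inactive list.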
*/ static int inactive_file_is_low(struct lruvec *lruvec) { unsigned long inactive; unsigned long active; inactive = get_lru_size(lruvec, LRU_INACTIVE_FILE); active = get_lru_size(lruvec, LRU_ACTIVE_FILE); return active > inactive; } static int inactive_list_is_low(struct lruvec *lruvec, enum lru_list lru) { if (is_file_lru(lru)) return inactive_file_is_low(lruvec); else return inactive_anon_is_low(lruvec); } static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, struct lruvec *lruvec, struct scan_control *sc) { if (is_active_lru(lru)) { if (inactive_list_is_low(lruvec, lru)) shrink_active_list(nr_to_scan, lruvec, sc, lru); return 0; } return shrink_inactive_list(nr_to_scan, lruvec, sc, lru); } enum scan_balance { SCAN_EQUAL, SCAN_FRACT, SCAN_ANON, SCAN_FILE, }; /* * Determine how aggressively the anon and file LRU lists should be * scanned. The relative value of each set of LRU lists is determined * by looking at the fraction of the scanned pages that we rotated back * onto the active list instead of evicting. * * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan */ static void get_scan_count(struct lruvec *lruvec, int swappiness, struct scan_control *sc, unsigned long *nr, unsigned long *lru_pages) { struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; u64 fraction[2]; u64 denominator = 0; /* gcc */ struct zone *zone = lruvec_zone(lruvec); unsigned long anon_prio, file_prio; enum scan_balance scan_balance; unsigned long anon, file; bool force_scan = false; unsigned long ap, fp; enum lru_list lru; bool some_scanned; int pass; /* * If the zone or memcg is small, nr[l] can be 0. This * results in no scanning on this priority and a potential * priority drop. Global direct reclaim can go to the next * zone and tends to have no problems. Global kswapd is for * zone balancing and it needs to scan a minimum amount. When * reclaiming for a memcg, a priority drop can cause high * latencies, so it's better to scan a minimum amount there as * well. */ if (current_is_kswapd()) { if (!zone_reclaimable(zone)) force_scan = true; if (!mem_cgroup_lruvec_online(lruvec)) force_scan = true; } if (!global_reclaim(sc)) force_scan = true; /* If we have no swap space, do not bother scanning anon pages. */ if (!sc->may_swap || (get_nr_swap_pages() <= 0)) { scan_balance = SCAN_FILE; goto out; } /* * Global reclaim will swap to prevent OOM even with no * swappiness, but memcg users want to use this knob to * disable swapping for individual groups completely when * using the memory controller's swap limit feature would be * too expensive. */ if (!global_reclaim(sc) && !swappiness) { scan_balance = SCAN_FILE; goto out; } /* * Do not apply any pressure balancing cleverness when the * system is close to OOM, scan both anon and file equally * (unless the swappiness setting disagrees with swapping). */ if (!sc->priority && swappiness) { scan_balance = SCAN_EQUAL; goto out; } /* * Prevent the reclaimer from falling into the cache trap: as * cache pages start out inactive, every cache fault will tip * the scan balance towards the file LRU. And as the file LRU * shrinks, so does the window for rotation from references. * This means we have a runaway feedback loop where a tiny * thrashing file LRU becomes infinitely more attractive than * anon pages. Try to detect this based on file LRU size.
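 *
 * A hypothetical illustration of the check below: with a high
 * watermark of 1,000 pages, 300 free pages and a 500-page file LRU,
 * zonefile + zonefree is 800 and does not clear the watermark, so
 * the file list is considered too small to be trusted and pressure
 * is forced onto the anon lists (SCAN_ANON).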
*/ if (global_reclaim(sc)) { unsigned long zonefile; unsigned long zonefree; zonefree = zone_page_state(zone, NR_FREE_PAGES); zonefile = zone_page_state(zone, NR_ACTIVE_FILE) + zone_page_state(zone, NR_INACTIVE_FILE); if (unlikely(zonefile + zonefree <= high_wmark_pages(zone))) { scan_balance = SCAN_ANON; goto out; } } /* * There is enough inactive page cache, do not reclaim * anything from the anonymous working set right now. */ if (!inactive_file_is_low(lruvec)) { scan_balance = SCAN_FILE; goto out; } scan_balance = SCAN_FRACT; /* * With swappiness at 100, anonymous and file have the same priority. * This scanning priority is essentially the inverse of IO cost. */ anon_prio = swappiness; file_prio = 200 - anon_prio; /* * OK, so we have swap space and a fair amount of page cache * pages. We use the recently rotated / recently scanned * ratios to determine how valuable each cache is. * * Because workloads change over time (and to avoid overflow) * we keep these statistics as a floating average, which ends * up weighing recent references more than old ones. * * anon in [0], file in [1] */ anon = get_lru_size(lruvec, LRU_ACTIVE_ANON) + get_lru_size(lruvec, LRU_INACTIVE_ANON); file = get_lru_size(lruvec, LRU_ACTIVE_FILE) + get_lru_size(lruvec, LRU_INACTIVE_FILE); spin_lock_irq(&zone->lru_lock); if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) { reclaim_stat->recent_scanned[0] /= 2; reclaim_stat->recent_rotated[0] /= 2; } if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) { reclaim_stat->recent_scanned[1] /= 2; reclaim_stat->recent_rotated[1] /= 2; } /* * The amount of pressure on anon vs file pages is inversely * proportional to the fraction of recently scanned pages on * each list that were recently referenced and in active use. */ ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1); ap /= reclaim_stat->recent_rotated[0] + 1; fp = file_prio * (reclaim_stat->recent_scanned[1] + 1); fp /= reclaim_stat->recent_rotated[1] + 1; spin_unlock_irq(&zone->lru_lock); fraction[0] = ap; fraction[1] = fp; denominator = ap + fp + 1; out: some_scanned = false; /* Only use force_scan on second pass. */ for (pass = 0; !some_scanned && pass < 2; pass++) { *lru_pages = 0; for_each_evictable_lru(lru) { int file = is_file_lru(lru); unsigned long size; unsigned long scan; size = get_lru_size(lruvec, lru); scan = size >> sc->priority; if (!scan && pass && force_scan) scan = min(size, SWAP_CLUSTER_MAX); switch (scan_balance) { case SCAN_EQUAL: /* Scan lists relative to size */ break; case SCAN_FRACT: /* * Scan types proportional to swappiness and * their relative recent reclaim efficiency. */ scan = div64_u64(scan * fraction[file], denominator); break; case SCAN_FILE: case SCAN_ANON: /* Scan one type exclusively */ if ((scan_balance == SCAN_FILE) != file) { size = 0; scan = 0; } break; default: /* Look ma, no brain */ BUG(); } *lru_pages += size; nr[lru] = scan; /* * Skip the second pass and don't force_scan, * if we found something to scan. */ some_scanned |= !!scan; } } } /* * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. 
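 *
 * A sketch of how the fractions computed by get_scan_count() above
 * turn into scan pressure (hypothetical numbers): with swappiness 60,
 * anon_prio = 60 and file_prio = 140. If recent_scanned/recent_rotated
 * is 1000/100 for anon and 1000/500 for file, then ap = 60 * 1001 / 101
 * = 594 and fp = 140 * 1001 / 501 = 279, so roughly two thirds of the
 * scan pressure (594 out of 594 + 279 + 1) lands on the anon lists.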
*/ static void shrink_lruvec(struct lruvec *lruvec, int swappiness, struct scan_control *sc, unsigned long *lru_pages) { unsigned long nr[NR_LRU_LISTS]; unsigned long targets[NR_LRU_LISTS]; unsigned long nr_to_scan; enum lru_list lru; unsigned long nr_reclaimed = 0; unsigned long nr_to_reclaim = sc->nr_to_reclaim; struct blk_plug plug; bool scan_adjusted; get_scan_count(lruvec, swappiness, sc, nr, lru_pages); /* Record the original scan target for proportional adjustments later */ memcpy(targets, nr, sizeof(nr)); /* * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal * event that can occur when there is little memory pressure e.g. * multiple streaming readers/writers. Hence, we do not abort scanning * when the requested number of pages are reclaimed when scanning at * DEF_PRIORITY on the assumption that the fact we are direct * reclaiming implies that kswapd is not keeping up and it is best to * do a batch of work at once. For memcg reclaim one check is made to * abort proportional reclaim if either the file or anon lru has already * dropped to zero at the first pass. */ scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() && sc->priority == DEF_PRIORITY); blk_start_plug(&plug); while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || nr[LRU_INACTIVE_FILE]) { unsigned long nr_anon, nr_file, percentage; unsigned long nr_scanned; for_each_evictable_lru(lru) { if (nr[lru]) { nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX); nr[lru] -= nr_to_scan; nr_reclaimed += shrink_list(lru, nr_to_scan, lruvec, sc); } } if (nr_reclaimed < nr_to_reclaim || scan_adjusted) continue; /* * For kswapd and memcg, reclaim at least the number of pages * requested. Ensure that the anon and file LRUs are scanned * proportionally to what was requested by get_scan_count(). We * stop reclaiming one LRU and reduce the amount of scanning * proportional to the original scan target. */ nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE]; nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON]; /* * It's just vindictive to attack the larger once the smaller * has gone to zero. And given the way we stop scanning the * smaller below, this makes sure that we only make one nudge * towards proportionality once we've got nr_to_reclaim. */ if (!nr_file || !nr_anon) break; if (nr_file > nr_anon) { unsigned long scan_target = targets[LRU_INACTIVE_ANON] + targets[LRU_ACTIVE_ANON] + 1; lru = LRU_BASE; percentage = nr_anon * 100 / scan_target; } else { unsigned long scan_target = targets[LRU_INACTIVE_FILE] + targets[LRU_ACTIVE_FILE] + 1; lru = LRU_FILE; percentage = nr_file * 100 / scan_target; } /* Stop scanning the smaller of the LRU */ nr[lru] = 0; nr[lru + LRU_ACTIVE] = 0; /* * Recalculate the other LRU scan count based on its original * scan target and the percentage scanning already complete */ lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE; nr_scanned = targets[lru] - nr[lru]; nr[lru] = targets[lru] * (100 - percentage) / 100; nr[lru] -= min(nr[lru], nr_scanned); lru += LRU_ACTIVE; nr_scanned = targets[lru] - nr[lru]; nr[lru] = targets[lru] * (100 - percentage) / 100; nr[lru] -= min(nr[lru], nr_scanned); scan_adjusted = true; } blk_finish_plug(&plug); sc->nr_reclaimed += nr_reclaimed; /* * Even if we did not try to evict anon pages at all, we want to * rebalance the anon lru active/inactive ratio.
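 *
 * For instance (assuming a hypothetical zone->inactive_ratio of 3):
 * 900 inactive anon pages balance up to 2,700 active anon pages;
 * once the active list grows beyond that, inactive_anon_is_low()
 * below returns true and some active anon pages are deactivated.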
*/ if (inactive_anon_is_low(lruvec)) shrink_active_list(SWAP_CLUSTER_MAX, lruvec, sc, LRU_ACTIVE_ANON); throttle_vm_writeout(sc->gfp_mask); } /* Use reclaim/compaction for costly allocs or under memory pressure */ static bool in_reclaim_compaction(struct scan_control *sc) { if (IS_ENABLED(CONFIG_COMPACTION) && sc->order && (sc->order > PAGE_ALLOC_COSTLY_ORDER || sc->priority < DEF_PRIORITY - 2)) return true; return false; } /* * Reclaim/compaction is used for high-order allocation requests. It reclaims * order-0 pages before compacting the zone. should_continue_reclaim() returns * true if more pages should be reclaimed such that when the page allocator * calls try_to_compact_zone() it will have enough free pages to succeed. * It will give up earlier than that if there is difficulty reclaiming pages. */ static inline bool should_continue_reclaim(struct zone *zone, unsigned long nr_reclaimed, unsigned long nr_scanned, struct scan_control *sc) { unsigned long pages_for_compaction; unsigned long inactive_lru_pages; /* If not in reclaim/compaction mode, stop */ if (!in_reclaim_compaction(sc)) return false; /* Consider stopping depending on scan and reclaim activity */ if (sc->gfp_mask & __GFP_REPEAT) { /* * For __GFP_REPEAT allocations, stop reclaiming if the * full LRU list has been scanned and we are still failing * to reclaim pages. This full LRU scan is potentially * expensive but a __GFP_REPEAT caller really wants to succeed. */ if (!nr_reclaimed && !nr_scanned) return false; } else { /* * For non-__GFP_REPEAT allocations which can presumably * fail without consequence, stop if we failed to reclaim * any pages from the last SWAP_CLUSTER_MAX number of * pages that were scanned. This will return to the * caller faster at the risk that reclaim/compaction and * the resulting allocation attempt fail. */ if (!nr_reclaimed) return false; } /* * If we have not reclaimed enough pages for compaction and the * inactive lists are large enough, continue reclaiming */ pages_for_compaction = (2UL << sc->order); inactive_lru_pages = zone_page_state(zone, NR_INACTIVE_FILE); if (get_nr_swap_pages() > 0) inactive_lru_pages += zone_page_state(zone, NR_INACTIVE_ANON); if (sc->nr_reclaimed < pages_for_compaction && inactive_lru_pages > pages_for_compaction) return true; /* If compaction would go ahead or the allocation would succeed, stop */ switch (compaction_suitable(zone, sc->order, 0, 0)) { case COMPACT_PARTIAL: case COMPACT_CONTINUE: return false; default: return true; } } static bool shrink_zone(struct zone *zone, struct scan_control *sc, bool is_classzone) { struct reclaim_state *reclaim_state = current->reclaim_state; unsigned long nr_reclaimed, nr_scanned; bool reclaimable = false; do { struct mem_cgroup *root = sc->target_mem_cgroup; struct mem_cgroup_reclaim_cookie reclaim = { .zone = zone, .priority = sc->priority, }; unsigned long zone_lru_pages = 0; struct mem_cgroup *memcg; nr_reclaimed = sc->nr_reclaimed; nr_scanned = sc->nr_scanned; memcg = mem_cgroup_iter(root, NULL, &reclaim); do { unsigned long lru_pages; unsigned long scanned; struct lruvec *lruvec; int swappiness; if (mem_cgroup_low(root, memcg)) { if (!sc->may_thrash) continue; mem_cgroup_events(memcg, MEMCG_LOW, 1); } lruvec = mem_cgroup_zone_lruvec(zone, memcg); swappiness = mem_cgroup_swappiness(memcg); scanned = sc->nr_scanned; shrink_lruvec(lruvec, swappiness, sc, &lru_pages); zone_lru_pages += lru_pages; if (memcg && is_classzone) shrink_slab(sc->gfp_mask, zone_to_nid(zone), memcg, sc->nr_scanned - scanned, lru_pages); /* * Direct
reclaim and kswapd have to scan all memory * cgroups to fulfill the overall scan target for the * zone. * * Limit reclaim, on the other hand, only cares about * nr_to_reclaim pages to be reclaimed and it will * retry with decreasing priority if one round over the * whole hierarchy is not sufficient. */ if (!global_reclaim(sc) && sc->nr_reclaimed >= sc->nr_to_reclaim) { mem_cgroup_iter_break(root, memcg); break; } } while ((memcg = mem_cgroup_iter(root, memcg, &reclaim))); /* * Shrink the slab caches in the same proportion that * the eligible LRU pages were scanned. */ if (global_reclaim(sc) && is_classzone) shrink_slab(sc->gfp_mask, zone_to_nid(zone), NULL, sc->nr_scanned - nr_scanned, zone_lru_pages); if (reclaim_state) { sc->nr_reclaimed += reclaim_state->reclaimed_slab; reclaim_state->reclaimed_slab = 0; } vmpressure(sc->gfp_mask, sc->target_mem_cgroup, sc->nr_scanned - nr_scanned, sc->nr_reclaimed - nr_reclaimed); if (sc->nr_reclaimed - nr_reclaimed) reclaimable = true; } while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed, sc->nr_scanned - nr_scanned, sc)); return reclaimable; } /* * Returns true if compaction should go ahead for a high-order request, or * the high-order allocation would succeed without compaction. */ static inline bool compaction_ready(struct zone *zone, int order) { unsigned long balance_gap, watermark; bool watermark_ok; /* * Compaction takes time to run and there are potentially other * callers using the pages just freed. Continue reclaiming until * there is a buffer of free pages available to give compaction * a reasonable chance of completing and allocating the page */ balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP( zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO)); watermark = high_wmark_pages(zone) + balance_gap + (2UL << order); watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0); /* * If compaction is deferred, reclaim up to a point where * compaction will have a chance of success when re-enabled */ if (compaction_deferred(zone, order)) return watermark_ok; /* * If compaction is not ready to start and allocation is not likely * to succeed without it, then keep reclaiming. */ if (compaction_suitable(zone, order, 0, 0) == COMPACT_SKIPPED) return false; return watermark_ok; } /* * This is the direct reclaim path, for page-allocating processes. We only * try to reclaim pages from zones which will satisfy the caller's allocation * request. * * We reclaim from a zone even if that zone is over high_wmark_pages(zone). * Because: * a) The caller may be trying to free *extra* pages to satisfy a higher-order * allocation or * b) The target zone may be at high_wmark_pages(zone) but the lower zones * must go *over* high_wmark_pages(zone) to satisfy the `incremental min' * zone defense algorithm. * * If a zone is deemed to be full of pinned pages then just give it a light * scan then give up on it. * * Returns true if a zone was reclaimable. 
*/ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc) { struct zoneref *z; struct zone *zone; unsigned long nr_soft_reclaimed; unsigned long nr_soft_scanned; gfp_t orig_mask; enum zone_type requested_highidx = gfp_zone(sc->gfp_mask); bool reclaimable = false; /* * If the number of buffer_heads in the machine exceeds the maximum * allowed level, force direct reclaim to scan the highmem zone as * highmem pages could be pinning lowmem pages storing buffer_heads */ orig_mask = sc->gfp_mask; if (buffer_heads_over_limit) sc->gfp_mask |= __GFP_HIGHMEM; for_each_zone_zonelist_nodemask(zone, z, zonelist, requested_highidx, sc->nodemask) { enum zone_type classzone_idx; if (!populated_zone(zone)) continue; classzone_idx = requested_highidx; while (!populated_zone(zone->zone_pgdat->node_zones + classzone_idx)) classzone_idx--; /* * Take care that memory controller reclaim has only a small * influence on the global LRU. */ if (global_reclaim(sc)) { if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL)) continue; if (sc->priority != DEF_PRIORITY && !zone_reclaimable(zone)) continue; /* Let kswapd poll it */ /* * If we already have plenty of memory free for * compaction in this zone, don't free any more. * Even though compaction is invoked for any * non-zero order, only frequent costly order * reclamation is disruptive enough to become a * noticeable problem, like transparent huge * page allocations. */ if (IS_ENABLED(CONFIG_COMPACTION) && sc->order > PAGE_ALLOC_COSTLY_ORDER && zonelist_zone_idx(z) <= requested_highidx && compaction_ready(zone, sc->order)) { sc->compaction_ready = true; continue; } /* * This steals pages from memory cgroups over softlimit * and returns the number of reclaimed pages and * scanned pages. This works for global memory pressure * and balancing, not for a memcg's limit. */ nr_soft_scanned = 0; nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, sc->order, sc->gfp_mask, &nr_soft_scanned); sc->nr_reclaimed += nr_soft_reclaimed; sc->nr_scanned += nr_soft_scanned; if (nr_soft_reclaimed) reclaimable = true; /* need some check to avoid more shrink_zone() calls */ } if (shrink_zone(zone, sc, zone_idx(zone) == classzone_idx)) reclaimable = true; if (global_reclaim(sc) && !reclaimable && zone_reclaimable(zone)) reclaimable = true; } /* * Restore to original mask to avoid the impact on the caller if we * promoted it to __GFP_HIGHMEM. */ sc->gfp_mask = orig_mask; return reclaimable; } /* * This is the main entry point to direct page reclaim. * * If a full scan of the inactive list fails to free enough memory then we * are "out of memory" and something needs to be killed. * * If the caller is !__GFP_FS then the probability of a failure is reasonably * high - the zone may be full of dirty or under-writeback pages, which this * caller can't do much about. We kick the writeback threads and take explicit * naps in the hope that some of these pages can be written. But if the * allocating task holds filesystem locks which prevent writeout this might not * work, and the allocation attempt will fail.
 * * returns: 0, if no pages reclaimed * else, the number of pages reclaimed */ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, struct scan_control *sc) { int initial_priority = sc->priority; unsigned long total_scanned = 0; unsigned long writeback_threshold; bool zones_reclaimable; retry: delayacct_freepages_start(); if (global_reclaim(sc)) count_vm_event(ALLOCSTALL); do { vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup, sc->priority); sc->nr_scanned = 0; zones_reclaimable = shrink_zones(zonelist, sc); total_scanned += sc->nr_scanned; if (sc->nr_reclaimed >= sc->nr_to_reclaim) break; if (sc->compaction_ready) break; /* * If we're having trouble reclaiming, start doing * writepage even in laptop mode. */ if (sc->priority < DEF_PRIORITY - 2) sc->may_writepage = 1; /* * Try to write back as many pages as we just scanned. This * tends to cause slow streaming writers to write data to the * disk smoothly, at the dirtying rate, which is nice. But * that's undesirable in laptop mode, where we *want* lumpy * writeout. So in laptop mode, write out the whole world. */ writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2; if (total_scanned > writeback_threshold) { wakeup_flusher_threads(laptop_mode ? 0 : total_scanned, WB_REASON_TRY_TO_FREE_PAGES); sc->may_writepage = 1; } } while (--sc->priority >= 0); delayacct_freepages_end(); if (sc->nr_reclaimed) return sc->nr_reclaimed; /* Aborted reclaim to try compaction? Don't OOM, then */ if (sc->compaction_ready) return 1; /* Untapped cgroup reserves? Don't OOM, retry. */ if (!sc->may_thrash) { sc->priority = initial_priority; sc->may_thrash = 1; goto retry; } /* Any of the zones still reclaimable? Don't OOM. */ if (zones_reclaimable) return 1; return 0; } static bool pfmemalloc_watermark_ok(pg_data_t *pgdat) { struct zone *zone; unsigned long pfmemalloc_reserve = 0; unsigned long free_pages = 0; int i; bool wmark_ok; for (i = 0; i <= ZONE_NORMAL; i++) { zone = &pgdat->node_zones[i]; if (!populated_zone(zone) || zone_reclaimable_pages(zone) == 0) continue; pfmemalloc_reserve += min_wmark_pages(zone); free_pages += zone_page_state(zone, NR_FREE_PAGES); } /* If there are no reserves (unexpected config) then do not throttle */ if (!pfmemalloc_reserve) return true; wmark_ok = free_pages > pfmemalloc_reserve / 2; /* kswapd must be awake if processes are being throttled */ if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { pgdat->classzone_idx = min(pgdat->classzone_idx, (enum zone_type)ZONE_NORMAL); wake_up_interruptible(&pgdat->kswapd_wait); } return wmark_ok; } /* * Throttle direct reclaimers if backing storage is backed by the network * and the PFMEMALLOC reserve for the preferred node is getting dangerously * depleted. kswapd will continue to make progress and wake the processes * when the low watermark is reached. * * Returns true if a fatal signal was delivered during throttling. If this * happens, the page allocator should not consider triggering the OOM killer. */ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist, nodemask_t *nodemask) { struct zoneref *z; struct zone *zone; pg_data_t *pgdat = NULL; /* * Kernel threads should not be throttled as they may be indirectly * responsible for cleaning pages necessary for reclaim to make forward * progress. kjournald for example may enter direct reclaim while * committing a transaction where throttling it could force other * processes to block on log_wait_commit().
*/ if (current->flags & PF_KTHREAD) goto out; /* * If a fatal signal is pending, this process should not throttle. * It should return quickly so it can exit and free its memory */ if (fatal_signal_pending(current)) goto out; /* * Check if the pfmemalloc reserves are ok by finding the first node * with a usable ZONE_NORMAL or lower zone. The expectation is that * GFP_KERNEL will be required for allocating network buffers when * swapping over the network so ZONE_HIGHMEM is unusable. * * Throttling is based on the first usable node and throttled processes * wait on a queue until kswapd makes progress and wakes them. There * is an affinity then between processes waking up and where reclaim * progress has been made assuming the process wakes on the same node. * More importantly, processes running on remote nodes will not compete * for remote pfmemalloc reserves and processes on different nodes * should make reasonable progress. */ for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) { if (zone_idx(zone) > ZONE_NORMAL) continue; /* Throttle based on the first usable node */ pgdat = zone->zone_pgdat; if (pfmemalloc_watermark_ok(pgdat)) goto out; break; } /* If no zone was usable by the allocation flags then do not throttle */ if (!pgdat) goto out; /* Account for the throttling */ count_vm_event(PGSCAN_DIRECT_THROTTLE); /* * If the caller cannot enter the filesystem, it's possible that it * is due to the caller holding an FS lock or performing a journal * transaction in the case of a filesystem like ext[3|4]. In this case, * it is not safe to block on pfmemalloc_wait as kswapd could be * blocked waiting on the same lock. Instead, throttle for up to a * second before continuing. */ if (!(gfp_mask & __GFP_FS)) { wait_event_interruptible_timeout(pgdat->pfmemalloc_wait, pfmemalloc_watermark_ok(pgdat), HZ); goto check_pending; } /* Throttle until kswapd wakes the process */ wait_event_killable(zone->zone_pgdat->pfmemalloc_wait, pfmemalloc_watermark_ok(pgdat)); check_pending: if (fatal_signal_pending(current)) return true; out: return false; } unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask) { unsigned long nr_reclaimed; struct scan_control sc = { .nr_to_reclaim = SWAP_CLUSTER_MAX, .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)), .order = order, .nodemask = nodemask, .priority = DEF_PRIORITY, .may_writepage = !laptop_mode, .may_unmap = 1, .may_swap = 1, }; /* * Do not enter reclaim if fatal signal was delivered while throttled. * 1 is returned so that the page allocator does not OOM kill at this * point. 
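 *
 * For context, a minimal sketch of how the page allocator slow path
 * is expected to call this function (simplified pseudocode, not the
 * exact caller):
 *
 *	progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
 *	if (!progress)
 *		...retry at a lower watermark or consider the OOM killer...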
*/ if (throttle_direct_reclaim(gfp_mask, zonelist, nodemask)) return 1; trace_mm_vmscan_direct_reclaim_begin(order, sc.may_writepage, gfp_mask); nr_reclaimed = do_try_to_free_pages(zonelist, &sc); trace_mm_vmscan_direct_reclaim_end(nr_reclaimed); return nr_reclaimed; } #ifdef CONFIG_MEMCG unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg, gfp_t gfp_mask, bool noswap, struct zone *zone, unsigned long *nr_scanned) { struct scan_control sc = { .nr_to_reclaim = SWAP_CLUSTER_MAX, .target_mem_cgroup = memcg, .may_writepage = !laptop_mode, .may_unmap = 1, .may_swap = !noswap, }; struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg); int swappiness = mem_cgroup_swappiness(memcg); unsigned long lru_pages; sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order, sc.may_writepage, sc.gfp_mask); /* * NOTE: Although we can get the priority field, using it * here is not a good idea, since it limits the pages we can scan. * if we don't reclaim here, the shrink_zone from balance_pgdat * will pick up pages from other mem cgroup's as well. We hack * the priority and make it zero. */ shrink_lruvec(lruvec, swappiness, &sc, &lru_pages); trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); *nr_scanned = sc.nr_scanned; return sc.nr_reclaimed; } unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, unsigned long nr_pages, gfp_t gfp_mask, bool may_swap) { struct zonelist *zonelist; unsigned long nr_reclaimed; int nid; struct scan_control sc = { .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), .target_mem_cgroup = memcg, .priority = DEF_PRIORITY, .may_writepage = !laptop_mode, .may_unmap = 1, .may_swap = may_swap, }; /* * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't * take care of from where we get pages. So the node where we start the * scan does not need to be the current node. */ nid = mem_cgroup_select_victim_node(memcg); zonelist = NODE_DATA(nid)->node_zonelists; trace_mm_vmscan_memcg_reclaim_begin(0, sc.may_writepage, sc.gfp_mask); nr_reclaimed = do_try_to_free_pages(zonelist, &sc); trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); return nr_reclaimed; } #endif static void age_active_anon(struct zone *zone, struct scan_control *sc) { struct mem_cgroup *memcg; if (!total_swap_pages) return; memcg = mem_cgroup_iter(NULL, NULL, NULL); do { struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg); if (inactive_anon_is_low(lruvec)) shrink_active_list(SWAP_CLUSTER_MAX, lruvec, sc, LRU_ACTIVE_ANON); memcg = mem_cgroup_iter(NULL, memcg, NULL); } while (memcg); } static bool zone_balanced(struct zone *zone, int order, unsigned long balance_gap, int classzone_idx) { if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) + balance_gap, classzone_idx, 0)) return false; if (IS_ENABLED(CONFIG_COMPACTION) && order && compaction_suitable(zone, order, 0, classzone_idx) == COMPACT_SKIPPED) return false; return true; } /* * pgdat_balanced() is used when checking if a node is balanced. * * For order-0, all zones must be balanced! * * For high-order allocations only zones that meet watermarks and are in a * zone allowed by the callers classzone_idx are added to balanced_pages. The * total of balanced pages must be at least 25% of the zones allowed by * classzone_idx for the node to be considered balanced. 
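 * For example (hypothetical node): with 4,000,000 managed pages
 * across the allowed zones, at least 1,000,000 of them (25%) must
 * belong to balanced zones before the node as a whole is considered
 * balanced for a high-order allocation.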
Forcing all zones to * be balanced for high orders can cause excessive reclaim when there are * imbalanced zones. * The choice of 25% is due to * o a 16M DMA zone that is balanced will not balance a zone on any * reasonably sized machine * o On all other machines, the top zone must be at least a reasonable * percentage of the middle zones. For example, on 32-bit x86, highmem * would need to be at least 256M for it to balance a whole node. * Similarly, on x86-64 the Normal zone would need to be at least 1G * to balance a node on its own. These seemed like reasonable ratios. */ static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx) { unsigned long managed_pages = 0; unsigned long balanced_pages = 0; int i; /* Check the watermark levels */ for (i = 0; i <= classzone_idx; i++) { struct zone *zone = pgdat->node_zones + i; if (!populated_zone(zone)) continue; managed_pages += zone->managed_pages; /* * A special case here: * * balance_pgdat() skips over all_unreclaimable after * DEF_PRIORITY. Effectively, it considers them balanced so * they must be considered balanced here as well! */ if (!zone_reclaimable(zone)) { balanced_pages += zone->managed_pages; continue; } if (zone_balanced(zone, order, 0, i)) balanced_pages += zone->managed_pages; else if (!order) return false; } if (order) return balanced_pages >= (managed_pages >> 2); else return true; } /* * Prepare kswapd for sleeping. This verifies that there are no processes * waiting in throttle_direct_reclaim() and that watermarks have been met. * * Returns true if kswapd is ready to sleep */ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining, int classzone_idx) { /* If a direct reclaimer woke kswapd within HZ/10, it's premature */ if (remaining) return false; /* * The throttled processes are normally woken up in balance_pgdat() as * soon as pfmemalloc_watermark_ok() is true. But there is a potential * race between when kswapd checks the watermarks and a process gets * throttled. There is also a potential race if processes get * throttled, kswapd wakes, a large process exits thereby balancing the * zones, which causes kswapd to exit balance_pgdat() before reaching * the wake up checks. If kswapd is going to sleep, no process should * be sleeping on pfmemalloc_wait, so wake them now if necessary. If * the wake up is premature, processes will wake kswapd and get * throttled again. The difference from wake ups in balance_pgdat() is * that here we are under prepare_to_wait(). */ if (waitqueue_active(&pgdat->pfmemalloc_wait)) wake_up_all(&pgdat->pfmemalloc_wait); return pgdat_balanced(pgdat, order, classzone_idx); } /* * kswapd shrinks the zone by the number of pages required to reach * the high watermark. * * Returns true if kswapd scanned at least the requested number of pages to * reclaim or if the lack of progress was due to pages under writeback. * This is used to determine if the scanning priority needs to be raised. */ static bool kswapd_shrink_zone(struct zone *zone, int classzone_idx, struct scan_control *sc, unsigned long *nr_attempted) { int testorder = sc->order; unsigned long balance_gap; bool lowmem_pressure; /* Reclaim above the high watermark. */ sc->nr_to_reclaim = max(SWAP_CLUSTER_MAX, high_wmark_pages(zone)); /* * Kswapd reclaims only single pages with compaction enabled. Trying * too hard to reclaim until contiguous free pages have become * available can hurt performance by evicting too much useful data * from memory. Do not reclaim more than needed for compaction.
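 *
 * A worked example for the balance_gap computed below (hypothetical
 * zone, and assuming KSWAPD_ZONE_BALANCE_GAP_RATIO is 100): with
 * 1,048,576 managed pages, managed_pages / 100 rounds up to 10,486
 * pages; if the low watermark is smaller than that, the watermark is
 * used instead, so the gap never exceeds roughly 1% of the zone.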
*/ if (IS_ENABLED(CONFIG_COMPACTION) && sc->order && compaction_suitable(zone, sc->order, 0, classzone_idx) != COMPACT_SKIPPED) testorder = 0; /* * We put equal pressure on every zone, unless one zone has way too * many pages free already. The "too many pages" is defined as the * high wmark plus a "gap" where the gap is either the low * watermark or 1% of the zone, whichever is smaller. */ balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP( zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO)); /* * If there is no low memory pressure or the zone is balanced then no * reclaim is necessary */ lowmem_pressure = (buffer_heads_over_limit && is_highmem(zone)); if (!lowmem_pressure && zone_balanced(zone, testorder, balance_gap, classzone_idx)) return true; shrink_zone(zone, sc, zone_idx(zone) == classzone_idx); /* Account for the number of pages attempted to reclaim */ *nr_attempted += sc->nr_to_reclaim; clear_bit(ZONE_WRITEBACK, &zone->flags); /* * If a zone reaches its high watermark, consider it to be no longer * congested. It's possible there are dirty pages backed by congested * BDIs but as pressure is relieved, speculatively avoid congestion * waits. */ if (zone_reclaimable(zone) && zone_balanced(zone, testorder, 0, classzone_idx)) { clear_bit(ZONE_CONGESTED, &zone->flags); clear_bit(ZONE_DIRTY, &zone->flags); } return sc->nr_scanned >= sc->nr_to_reclaim; } /* * For kswapd, balance_pgdat() will work across all this node's zones until * they are all at high_wmark_pages(zone). * * Returns the final order kswapd was reclaiming at * * There is special handling here for zones which are full of pinned pages. * This can happen if the pages are all mlocked, or if they are all used by * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb. * What we do is to detect the case where all pages in the zone have been * scanned twice and there has been zero successful reclaim. Mark the zone as * dead and from now on, only perform a short scan. Basically we're polling * the zone for when the problem goes away. * * kswapd scans the zones in the highmem->normal->dma direction. It skips * zones which have free_pages > high_wmark_pages(zone), but once a zone is * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the * lower zones regardless of the number of free pages in the lower zones. This * interoperates with the page allocator fallback scheme to ensure that aging * of pages is balanced across the zones. */ static unsigned long balance_pgdat(pg_data_t *pgdat, int order, int *classzone_idx) { int i; int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ unsigned long nr_soft_reclaimed; unsigned long nr_soft_scanned; struct scan_control sc = { .gfp_mask = GFP_KERNEL, .order = order, .priority = DEF_PRIORITY, .may_writepage = !laptop_mode, .may_unmap = 1, .may_swap = 1, }; count_vm_event(PAGEOUTRUN); do { unsigned long nr_attempted = 0; bool raise_priority = true; bool pgdat_needs_compaction = (order > 0); sc.nr_reclaimed = 0; /* * Scan in the highmem->dma direction for the highest * zone which needs scanning */ for (i = pgdat->nr_zones - 1; i >= 0; i--) { struct zone *zone = pgdat->node_zones + i; if (!populated_zone(zone)) continue; if (sc.priority != DEF_PRIORITY && !zone_reclaimable(zone)) continue; /* * Do some background aging of the anon list, to give * pages a chance to be referenced before reclaiming. 
*/ age_active_anon(zone, &sc); /* * If the number of buffer_heads in the machine * exceeds the maximum allowed level and this node * has a highmem zone, force kswapd to reclaim from * it to relieve lowmem pressure. */ if (buffer_heads_over_limit && is_highmem_idx(i)) { end_zone = i; break; } if (!zone_balanced(zone, order, 0, 0)) { end_zone = i; break; } else { /* * If balanced, clear the dirty and congested * flags */ clear_bit(ZONE_CONGESTED, &zone->flags); clear_bit(ZONE_DIRTY, &zone->flags); } } if (i < 0) goto out; for (i = 0; i <= end_zone; i++) { struct zone *zone = pgdat->node_zones + i; if (!populated_zone(zone)) continue; /* * If any zone is currently balanced then kswapd will * not call compaction as it is expected that the * necessary pages are already available. */ if (pgdat_needs_compaction && zone_watermark_ok(zone, order, low_wmark_pages(zone), *classzone_idx, 0)) pgdat_needs_compaction = false; } /* * If we're having trouble reclaiming, start doing writepage * even in laptop mode. */ if (sc.priority < DEF_PRIORITY - 2) sc.may_writepage = 1; /* * Now scan the zone in the dma->highmem direction, stopping * at the last zone which needs scanning. * * We do this because the page allocator works in the opposite * direction. This prevents the page allocator from allocating * pages behind kswapd's direction of progress, which would * cause too much scanning of the lower zones. */ for (i = 0; i <= end_zone; i++) { struct zone *zone = pgdat->node_zones + i; if (!populated_zone(zone)) continue; if (sc.priority != DEF_PRIORITY && !zone_reclaimable(zone)) continue; sc.nr_scanned = 0; nr_soft_scanned = 0; /* * Call soft limit reclaim before calling shrink_zone. */ nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask, &nr_soft_scanned); sc.nr_reclaimed += nr_soft_reclaimed; /* * There should be no need to raise the scanning * priority if enough pages are already being scanned * that the high watermark would be met at 100% * efficiency. */ if (kswapd_shrink_zone(zone, end_zone, &sc, &nr_attempted)) raise_priority = false; } /* * If the low watermark is met there is no need for processes * to be throttled on pfmemalloc_wait as they should be * able to safely make forward progress. Wake them. */ if (waitqueue_active(&pgdat->pfmemalloc_wait) && pfmemalloc_watermark_ok(pgdat)) wake_up_all(&pgdat->pfmemalloc_wait); /* * Fragmentation may mean that the system cannot be rebalanced * for high-order allocations in all zones. If twice the * allocation size has been reclaimed and the zones are still * not balanced then recheck the watermarks at order-0 to * prevent kswapd reclaiming excessively. Assume that a * process that requested a high-order allocation can direct * reclaim/compact. */ if (order && sc.nr_reclaimed >= 2UL << order) order = sc.order = 0; /* Check if kswapd should be suspending */ if (try_to_freeze() || kthread_should_stop()) break; /* * Compact if necessary and kswapd is reclaiming at least the * high watermark number of pages as requested */ if (pgdat_needs_compaction && sc.nr_reclaimed > nr_attempted) compact_pgdat(pgdat, order); /* * Raise priority if scanning rate is too low or there was no * progress in reclaiming pages */ if (raise_priority || !sc.nr_reclaimed) sc.priority--; } while (sc.priority >= 1 && !pgdat_balanced(pgdat, order, *classzone_idx)); out: /* * Return the order we were reclaiming at so prepare_kswapd_sleep() * makes a decision on the order we were last reclaiming at.
However, * if another caller entered the allocator slow path while kswapd * was awake, order will remain at the higher level */ *classzone_idx = end_zone; return order; } static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx) { long remaining = 0; DEFINE_WAIT(wait); if (freezing(current) || kthread_should_stop()) return; prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); /* Try to sleep for a short interval */ if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) { remaining = schedule_timeout(HZ/10); finish_wait(&pgdat->kswapd_wait, &wait); prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); } /* * After a short sleep, check if it was a premature sleep. If not, then * go fully to sleep until explicitly woken up. */ if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) { trace_mm_vmscan_kswapd_sleep(pgdat->node_id); /* * vmstat counters are not perfectly accurate and the estimated * value for counters such as NR_FREE_PAGES can deviate from the * true value by nr_online_cpus * threshold. To avoid the zone * watermarks being breached while under pressure, we reduce the * per-cpu vmstat threshold while kswapd is awake and restore * them before going back to sleep. */ set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); /* * Compaction records what page blocks it recently failed to * isolate pages from and skips them in the future scanning. * When kswapd is going to sleep, it is reasonable to assume * that pages and compaction may succeed so reset the cache. */ reset_isolation_suitable(pgdat); if (!kthread_should_stop()) schedule(); set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold); } else { if (remaining) count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY); else count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY); } finish_wait(&pgdat->kswapd_wait, &wait); } /* * The background pageout daemon, started as a kernel thread * from the init process. * * This basically trickles out pages so that we have _some_ * free memory available even if there is no other activity * that frees anything up. This is needed for things like routing * etc, where we otherwise might have all activity going on in * asynchronous contexts that cannot page things out. * * If there are applications that are active memory-allocators * (most normal use), this basically shouldn't matter. */ static int kswapd(void *p) { unsigned long order, new_order; unsigned balanced_order; int classzone_idx, new_classzone_idx; int balanced_classzone_idx; pg_data_t *pgdat = (pg_data_t*)p; struct task_struct *tsk = current; struct reclaim_state reclaim_state = { .reclaimed_slab = 0, }; const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); lockdep_set_current_reclaim_state(GFP_KERNEL); if (!cpumask_empty(cpumask)) set_cpus_allowed_ptr(tsk, cpumask); current->reclaim_state = &reclaim_state; /* * Tell the memory management that we're a "memory allocator", * and that if we need more memory we should get access to it * regardless (see "__alloc_pages()"). "kswapd" should * never get caught in the normal page freeing logic. * * (Kswapd normally doesn't need memory anyway, but sometimes * you need a small amount of memory in order to be able to * page out something else, and this flag essentially protects * us from recursively trying to free more memory as we're * trying to free the first piece of memory in the first place). 
*/ tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; set_freezable(); order = new_order = 0; balanced_order = 0; classzone_idx = new_classzone_idx = pgdat->nr_zones - 1; balanced_classzone_idx = classzone_idx; for ( ; ; ) { bool ret; /* * If the last balance_pgdat was unsuccessful it's unlikely a * new request of a similar or harder type will succeed soon * so consider going to sleep on the basis we reclaimed at */ if (balanced_classzone_idx >= new_classzone_idx && balanced_order == new_order) { new_order = pgdat->kswapd_max_order; new_classzone_idx = pgdat->classzone_idx; pgdat->kswapd_max_order = 0; pgdat->classzone_idx = pgdat->nr_zones - 1; } if (order < new_order || classzone_idx > new_classzone_idx) { /* * Don't sleep if someone wants a larger 'order' * allocation or has tigher zone constraints */ order = new_order; classzone_idx = new_classzone_idx; } else { kswapd_try_to_sleep(pgdat, balanced_order, balanced_classzone_idx); order = pgdat->kswapd_max_order; classzone_idx = pgdat->classzone_idx; new_order = order; new_classzone_idx = classzone_idx; pgdat->kswapd_max_order = 0; pgdat->classzone_idx = pgdat->nr_zones - 1; } ret = try_to_freeze(); if (kthread_should_stop()) break; /* * We can speed up thawing tasks if we don't call balance_pgdat * after returning from the refrigerator */ if (!ret) { trace_mm_vmscan_kswapd_wake(pgdat->node_id, order); balanced_classzone_idx = classzone_idx; balanced_order = balance_pgdat(pgdat, order, &balanced_classzone_idx); } } tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD); current->reclaim_state = NULL; lockdep_clear_current_reclaim_state(); return 0; } /* * A zone is low on free memory, so wake its kswapd task to service it. */ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx) { pg_data_t *pgdat; if (!populated_zone(zone)) return; if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL)) return; pgdat = zone->zone_pgdat; if (pgdat->kswapd_max_order < order) { pgdat->kswapd_max_order = order; pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx); } if (!waitqueue_active(&pgdat->kswapd_wait)) return; if (zone_balanced(zone, order, 0, 0)) return; trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order); wake_up_interruptible(&pgdat->kswapd_wait); } #ifdef CONFIG_HIBERNATION /* * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of * freed pages. * * Rather than trying to age LRUs the aim is to preserve the overall * LRU order by reclaiming preferentially * inactive > active > active referenced > active mapped */ unsigned long shrink_all_memory(unsigned long nr_to_reclaim) { struct reclaim_state reclaim_state; struct scan_control sc = { .nr_to_reclaim = nr_to_reclaim, .gfp_mask = GFP_HIGHUSER_MOVABLE, .priority = DEF_PRIORITY, .may_writepage = 1, .may_unmap = 1, .may_swap = 1, .hibernation_mode = 1, }; struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); struct task_struct *p = current; unsigned long nr_reclaimed; p->flags |= PF_MEMALLOC; lockdep_set_current_reclaim_state(sc.gfp_mask); reclaim_state.reclaimed_slab = 0; p->reclaim_state = &reclaim_state; nr_reclaimed = do_try_to_free_pages(zonelist, &sc); p->reclaim_state = NULL; lockdep_clear_current_reclaim_state(); p->flags &= ~PF_MEMALLOC; return nr_reclaimed; } #endif /* CONFIG_HIBERNATION */ /* It's optimal to keep kswapds on the same CPUs as their memory, but not required for correctness. 
So if the last cpu in a node goes away, we get changed to run anywhere: as the first one comes back, restore their cpu bindings. */ static int cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { int nid; if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) { for_each_node_state(nid, N_MEMORY) { pg_data_t *pgdat = NODE_DATA(nid); const struct cpumask *mask; mask = cpumask_of_node(pgdat->node_id); if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) /* One of our CPUs online: restore mask */ set_cpus_allowed_ptr(pgdat->kswapd, mask); } } return NOTIFY_OK; } /* * This kswapd start function will be called by init and node-hot-add. * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added. */ int kswapd_run(int nid) { pg_data_t *pgdat = NODE_DATA(nid); int ret = 0; if (pgdat->kswapd) return 0; pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); if (IS_ERR(pgdat->kswapd)) { /* failure at boot is fatal */ BUG_ON(system_state == SYSTEM_BOOTING); pr_err("Failed to start kswapd on node %d\n", nid); ret = PTR_ERR(pgdat->kswapd); pgdat->kswapd = NULL; } return ret; } /* * Called by memory hotplug when all memory in a node is offlined. Caller must * hold mem_hotplug_begin/end(). */ void kswapd_stop(int nid) { struct task_struct *kswapd = NODE_DATA(nid)->kswapd; if (kswapd) { kthread_stop(kswapd); NODE_DATA(nid)->kswapd = NULL; } } static int __init kswapd_init(void) { int nid; swap_setup(); for_each_node_state(nid, N_MEMORY) kswapd_run(nid); hotcpu_notifier(cpu_callback, 0); return 0; } module_init(kswapd_init) #ifdef CONFIG_NUMA /* * Zone reclaim mode * * If non-zero call zone_reclaim when the number of free pages falls below * the watermarks. */ int zone_reclaim_mode __read_mostly; #define RECLAIM_OFF 0 #define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */ #define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */ #define RECLAIM_UNMAP (1<<2) /* Unmap pages during reclaim */ /* * Priority for ZONE_RECLAIM. This determines the fraction of pages * of a node considered for each zone_reclaim. 4 scans 1/16th of * a zone. */ #define ZONE_RECLAIM_PRIORITY 4 /* * Percentage of pages in a zone that must be unmapped for zone_reclaim to * occur. */ int sysctl_min_unmapped_ratio = 1; /* * If the number of slab pages in a zone grows beyond this percentage then * slab reclaim needs to occur. */ int sysctl_min_slab_ratio = 5; static inline unsigned long zone_unmapped_file_pages(struct zone *zone) { unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED); unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) + zone_page_state(zone, NR_ACTIVE_FILE); /* * It's possible for there to be more file mapped pages than * accounted for by the pages on the file LRU lists because * tmpfs pages accounted for as ANON can also be FILE_MAPPED */ return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0; } /* Work out how many page cache pages we can reclaim in this reclaim_mode */ static long zone_pagecache_reclaimable(struct zone *zone) { long nr_pagecache_reclaimable; long delta = 0; /* * If RECLAIM_UNMAP is set, then all file pages are considered * potentially reclaimable.
Otherwise, we have to worry about * pages like swapcache and zone_unmapped_file_pages() provides * a better estimate */ if (zone_reclaim_mode & RECLAIM_UNMAP) nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES); else nr_pagecache_reclaimable = zone_unmapped_file_pages(zone); /* If we can't clean pages, remove dirty pages from consideration */ if (!(zone_reclaim_mode & RECLAIM_WRITE)) delta += zone_page_state(zone, NR_FILE_DIRTY); /* Watch for any possible underflows due to delta */ if (unlikely(delta > nr_pagecache_reclaimable)) delta = nr_pagecache_reclaimable; return nr_pagecache_reclaimable - delta; } /* * Try to free up some pages from this zone through reclaim. */ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) { /* Minimum pages needed in order to stay on node */ const unsigned long nr_pages = 1 << order; struct task_struct *p = current; struct reclaim_state reclaim_state; struct scan_control sc = { .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)), .order = order, .priority = ZONE_RECLAIM_PRIORITY, .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE), .may_unmap = !!(zone_reclaim_mode & RECLAIM_UNMAP), .may_swap = 1, }; cond_resched(); /* * We need to be able to allocate from the reserves for RECLAIM_UNMAP * and we also need to be able to write out pages for RECLAIM_WRITE * and RECLAIM_UNMAP. */ p->flags |= PF_MEMALLOC | PF_SWAPWRITE; lockdep_set_current_reclaim_state(gfp_mask); reclaim_state.reclaimed_slab = 0; p->reclaim_state = &reclaim_state; if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) { /* * Free memory by calling shrink zone with increasing * priorities until we have enough memory freed. */ do { shrink_zone(zone, &sc, true); } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0); } p->reclaim_state = NULL; current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE); lockdep_clear_current_reclaim_state(); return sc.nr_reclaimed >= nr_pages; } int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) { int node_id; int ret; /* * Zone reclaim reclaims unmapped file backed pages and * slab pages if we are over the defined limits. * * A small portion of unmapped file backed pages is needed for * file I/O otherwise pages read by file I/O will be immediately * thrown out if the zone is overallocated. So we do not reclaim * if less than a specified percentage of the zone is used by * unmapped file backed pages. */ if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages && zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages) return ZONE_RECLAIM_FULL; if (!zone_reclaimable(zone)) return ZONE_RECLAIM_FULL; /* * Do not scan if the allocation should not be delayed. */ if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC)) return ZONE_RECLAIM_NOSCAN; /* * Only run zone reclaim on the local zone or on zones that do not * have associated processors. This will favor the local processor * over remote processors and spread off node memory allocations * as wide as possible. 
*/ node_id = zone_to_nid(zone); if (node_state(node_id, N_CPU) && node_id != numa_node_id()) return ZONE_RECLAIM_NOSCAN; if (test_and_set_bit(ZONE_RECLAIM_LOCKED, &zone->flags)) return ZONE_RECLAIM_NOSCAN; ret = __zone_reclaim(zone, gfp_mask, order); clear_bit(ZONE_RECLAIM_LOCKED, &zone->flags); if (!ret) count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED); return ret; } #endif /* * page_evictable - test whether a page is evictable * @page: the page to test * * Test whether page is evictable--i.e., should be placed on active/inactive * lists vs unevictable list. * * Reasons page might not be evictable: * (1) page's mapping marked unevictable * (2) page is part of an mlocked VMA * */ int page_evictable(struct page *page) { return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page); } #ifdef CONFIG_SHMEM /** * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list * @pages: array of pages to check * @nr_pages: number of pages to check * * Checks pages for evictability and moves them to the appropriate lru list. * * This function is only used for SysV IPC SHM_UNLOCK. */ void check_move_unevictable_pages(struct page **pages, int nr_pages) { struct lruvec *lruvec; struct zone *zone = NULL; int pgscanned = 0; int pgrescued = 0; int i; for (i = 0; i < nr_pages; i++) { struct page *page = pages[i]; struct zone *pagezone; pgscanned++; pagezone = page_zone(page); if (pagezone != zone) { if (zone) spin_unlock_irq(&zone->lru_lock); zone = pagezone; spin_lock_irq(&zone->lru_lock); } lruvec = mem_cgroup_page_lruvec(page, zone); if (!PageLRU(page) || !PageUnevictable(page)) continue; if (page_evictable(page)) { enum lru_list lru = page_lru_base_type(page); VM_BUG_ON_PAGE(PageActive(page), page); ClearPageUnevictable(page); del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE); add_page_to_lru_list(page, lruvec, lru); pgrescued++; } } if (zone) { __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued); __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned); spin_unlock_irq(&zone->lru_lock); } } #endif /* CONFIG_SHMEM */
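Aside (not part of the file above): zone_reclaim_mode is a bitmask, so the RECLAIM_* behaviours compose, and __zone_reclaim() derives two scan_control fields directly from the bits. A minimal standalone sketch of that derivation; the macro values are copied from the file, while the demo main()/printf are illustrative only:

#include <stdio.h>

#define RECLAIM_ZONE	(1<<0)	/* run reclaim on the zone */
#define RECLAIM_WRITE	(1<<1)	/* allow writing out dirty pages */
#define RECLAIM_UNMAP	(1<<2)	/* allow unmapping mapped pages */

int main(void)
{
	/* e.g. after "echo 3 > /proc/sys/vm/zone_reclaim_mode" */
	unsigned int zone_reclaim_mode = RECLAIM_ZONE | RECLAIM_WRITE;

	/* the same !!(mode & bit) tests __zone_reclaim() uses to fill
	 * scan_control.may_writepage and scan_control.may_unmap */
	int may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE);
	int may_unmap = !!(zone_reclaim_mode & RECLAIM_UNMAP);

	printf("may_writepage=%d may_unmap=%d\n", may_writepage, may_unmap);
	return 0;
}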
Basler/linux-usb-zerocopy
mm/vmscan.c
C
gpl-2.0
112,737
/* Copyright (C) 2000 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <my_global.h> #include <m_string.h> #include <stdarg.h> #include <m_ctype.h> #define MAX_ARGS 32 /* max positional args count*/ #define MAX_PRINT_INFO 32 /* max print position count */ #define LENGTH_ARG 1 #define WIDTH_ARG 2 #define PREZERO_ARG 4 #define ESCAPED_ARG 8 typedef struct pos_arg_info ARGS_INFO; typedef struct print_info PRINT_INFO; struct pos_arg_info { char arg_type; /* argument type */ uint have_longlong; /* used from integer values */ char *str_arg; /* string value of the arg */ longlong longlong_arg; /* integer value of the arg */ double double_arg; /* double value of the arg */ }; struct print_info { char arg_type; /* argument type */ size_t arg_idx; /* index of the positional arg */ size_t length; /* print width or arg index */ size_t width; /* print width or arg index */ uint flags; const char *begin; /**/ const char *end; /**/ }; /** Calculates print length or index of positional argument @param fmt processed string @param length print length or index of positional argument @param pre_zero returns flags with PREZERO_ARG set if necessary @retval string position right after length digits */ static const char *get_length(const char *fmt, size_t *length, uint *pre_zero) { for (; my_isdigit(&my_charset_latin1, *fmt); fmt++) { *length= *length * 10 + (uint)(*fmt - '0'); if (!*length) *pre_zero|= PREZERO_ARG; /* first digit was 0 */ } return fmt; } /** Calculates print width or index of positional argument @param fmt processed string @param width print width or index of positional argument @retval string position right after width digits */ static const char *get_width(const char *fmt, size_t *width) { for (; my_isdigit(&my_charset_latin1, *fmt); fmt++) { *width= *width * 10 + (uint)(*fmt - '0'); } return fmt; } /** Calculates print width or index of positional argument @param fmt processed string @param have_longlong TRUE if longlong is required @retval string position right after modifier symbol */ static const char *check_longlong(const char *fmt, uint *have_longlong) { *have_longlong= 0; if (*fmt == 'l') { fmt++; if (*fmt != 'l') *have_longlong= (sizeof(long) == sizeof(longlong)); else { fmt++; *have_longlong= 1; } } else if (*fmt == 'z') { fmt++; *have_longlong= (sizeof(size_t) == sizeof(longlong)); } return fmt; } /** Returns escaped string @param cs string charset @param to buffer where escaped string will be placed @param end end of buffer @param par string to escape @param par_len string length @param quote_char character for quoting @retval position in buffer which points on the end of escaped string */ static char *backtick_string(CHARSET_INFO *cs, char *to, char *end, char *par, size_t par_len, char quote_char) { uint char_len; char *start= to; char *par_end= par + par_len; size_t buff_length= (size_t) (end - to); if (buff_length <= par_len) goto err; *start++= quote_char; for ( ; par < par_end; par+= 
char_len) { uchar c= *(uchar *) par; if (!(char_len= my_mbcharlen(cs, c))) char_len= 1; if (char_len == 1 && c == (uchar) quote_char ) { if (start + 1 >= end) goto err; *start++= quote_char; } if (start + char_len >= end) goto err; start= strnmov(start, par, char_len); } if (start + 1 >= end) goto err; *start++= quote_char; return start; err: *to='\0'; return to; } /** Prints string argument */ static char *process_str_arg(CHARSET_INFO *cs, char *to, char *end, size_t width, char *par, uint print_type) { int well_formed_error; size_t plen, left_len= (size_t) (end - to) + 1; if (!par) par = (char*) "(null)"; plen= strnlen(par, width); if (left_len <= plen) plen = left_len - 1; plen= cs->cset->well_formed_len(cs, par, par + plen, width, &well_formed_error); if (print_type & ESCAPED_ARG) to= backtick_string(cs, to, end, par, plen, '`'); else to= strnmov(to,par,plen); return to; } /** Prints binary argument */ static char *process_bin_arg(char *to, char *end, size_t width, char *par) { DBUG_ASSERT(to <= end); if (to + width + 1 > end) width= end - to - 1; /* sign doesn't matter */ memmove(to, par, width); to+= width; return to; } /** Prints double or float argument */ static char *process_dbl_arg(char *to, char *end, size_t width, double par, char arg_type) { if (width == SIZE_T_MAX) width= FLT_DIG; /* width not set, use default */ else if (width >= NOT_FIXED_DEC) width= NOT_FIXED_DEC - 1; /* max.precision for my_fcvt() */ width= min(width, (size_t)(end-to) - 1); if (arg_type == 'f') to+= my_fcvt(par, (int)width , to, NULL); else to+= my_gcvt(par, MY_GCVT_ARG_DOUBLE, (int) width , to, NULL); return to; } /** Prints integer argument */ static char *process_int_arg(char *to, char *end, size_t length, longlong par, char arg_type, uint print_type) { size_t res_length, to_length; char *store_start= to, *store_end; char buff[32]; if ((to_length= (size_t) (end-to)) < 16 || length) store_start= buff; if (arg_type == 'd' || arg_type == 'i') store_end= longlong10_to_str(par, store_start, -10); else if (arg_type == 'u') store_end= longlong10_to_str(par, store_start, 10); else if (arg_type == 'p') { store_start[0]= '0'; store_start[1]= 'x'; store_end= ll2str(par, store_start + 2, 16, 0); } else if (arg_type == 'o') { store_end= ll2str(par, store_start, 8, 0); } else { DBUG_ASSERT(arg_type == 'X' || arg_type =='x'); store_end= ll2str(par, store_start, 16, (arg_type == 'X')); } if ((res_length= (size_t) (store_end - store_start)) > to_length) return to; /* num doesn't fit in output */ /* If %#d syntax was used, we have to pre-zero/pre-space the string */ if (store_start == buff) { length= min(length, to_length); if (res_length < length) { size_t diff= (length- res_length); bfill(to, diff, (print_type & PREZERO_ARG) ? '0' : ' '); if (arg_type == 'p' && print_type & PREZERO_ARG) { if (diff > 1) to[1]= 'x'; else store_start[0]= 'x'; store_start[1]= '0'; } to+= diff; } bmove(to, store_start, res_length); } to+= res_length; return to; } /** Procesed positional arguments. 
@param cs string charset @param to buffer where processed string will be place @param end end of buffer @param par format string @param arg_index arg index of the first occurrence of positional arg @param ap list of parameters @retval end of buffer where processed string is placed */ static char *process_args(CHARSET_INFO *cs, char *to, char *end, const char* fmt, size_t arg_index, va_list ap) { ARGS_INFO args_arr[MAX_ARGS]; PRINT_INFO print_arr[MAX_PRINT_INFO]; uint idx= 0, arg_count= arg_index; start: /* Here we are at the beginning of positional argument, right after $ */ arg_index--; print_arr[idx].flags= 0; if (*fmt == '`') { print_arr[idx].flags|= ESCAPED_ARG; fmt++; } if (*fmt == '-') fmt++; print_arr[idx].length= print_arr[idx].width= 0; /* Get print length */ if (*fmt == '*') { fmt++; fmt= get_length(fmt, &print_arr[idx].length, &print_arr[idx].flags); print_arr[idx].length--; DBUG_ASSERT(*fmt == '$' && print_arr[idx].length < MAX_ARGS); args_arr[print_arr[idx].length].arg_type= 'd'; print_arr[idx].flags|= LENGTH_ARG; arg_count= max(arg_count, print_arr[idx].length + 1); fmt++; } else fmt= get_length(fmt, &print_arr[idx].length, &print_arr[idx].flags); if (*fmt == '.') { fmt++; /* Get print width */ if (*fmt == '*') { fmt++; fmt= get_width(fmt, &print_arr[idx].width); print_arr[idx].width--; DBUG_ASSERT(*fmt == '$' && print_arr[idx].width < MAX_ARGS); args_arr[print_arr[idx].width].arg_type= 'd'; print_arr[idx].flags|= WIDTH_ARG; arg_count= max(arg_count, print_arr[idx].width + 1); fmt++; } else fmt= get_width(fmt, &print_arr[idx].width); } else print_arr[idx].width= SIZE_T_MAX; fmt= check_longlong(fmt, &args_arr[arg_index].have_longlong); if (*fmt == 'p') args_arr[arg_index].have_longlong= (sizeof(void *) == sizeof(longlong)); args_arr[arg_index].arg_type= print_arr[idx].arg_type= *fmt; print_arr[idx].arg_idx= arg_index; print_arr[idx].begin= ++fmt; while (*fmt && *fmt != '%') fmt++; if (!*fmt) /* End of format string */ { uint i; print_arr[idx].end= fmt; /* Obtain parameters from the list */ for (i= 0 ; i < arg_count; i++) { switch (args_arr[i].arg_type) { case 's': case 'b': args_arr[i].str_arg= va_arg(ap, char *); break; case 'f': case 'g': args_arr[i].double_arg= va_arg(ap, double); break; case 'd': case 'i': case 'u': case 'x': case 'X': case 'o': case 'p': if (args_arr[i].have_longlong) args_arr[i].longlong_arg= va_arg(ap,longlong); else if (args_arr[i].arg_type == 'd' || args_arr[i].arg_type == 'i') args_arr[i].longlong_arg= va_arg(ap, int); else args_arr[i].longlong_arg= va_arg(ap, uint); break; case 'c': args_arr[i].longlong_arg= va_arg(ap, int); break; default: DBUG_ASSERT(0); } } /* Print result string */ for (i= 0; i <= idx; i++) { size_t width= 0, length= 0; switch (print_arr[i].arg_type) { case 's': { char *par= args_arr[print_arr[i].arg_idx].str_arg; width= (print_arr[i].flags & WIDTH_ARG) ? (size_t)args_arr[print_arr[i].width].longlong_arg : print_arr[i].width; to= process_str_arg(cs, to, end, width, par, print_arr[i].flags); break; } case 'b': { char *par = args_arr[print_arr[i].arg_idx].str_arg; width= (print_arr[i].flags & WIDTH_ARG) ? (size_t)args_arr[print_arr[i].width].longlong_arg : print_arr[i].width; to= process_bin_arg(to, end, width, par); break; } case 'c': { if (to == end) break; *to++= (char) args_arr[print_arr[i].arg_idx].longlong_arg; break; } case 'f': case 'g': { double d= args_arr[print_arr[i].arg_idx].double_arg; width= (print_arr[i].flags & WIDTH_ARG) ? 
(uint)args_arr[print_arr[i].width].longlong_arg : print_arr[i].width; to= process_dbl_arg(to, end, width, d, print_arr[i].arg_type); break; } case 'd': case 'i': case 'u': case 'x': case 'X': case 'o': case 'p': { /* Integer parameter */ longlong larg; length= (print_arr[i].flags & LENGTH_ARG) ? (size_t)args_arr[print_arr[i].length].longlong_arg : print_arr[i].length; if (args_arr[print_arr[i].arg_idx].have_longlong) larg = args_arr[print_arr[i].arg_idx].longlong_arg; else if (print_arr[i].arg_type == 'd' || print_arr[i].arg_type == 'i' ) larg = (int) args_arr[print_arr[i].arg_idx].longlong_arg; else larg= (uint) args_arr[print_arr[i].arg_idx].longlong_arg; to= process_int_arg(to, end, length, larg, print_arr[i].arg_type, print_arr[i].flags); break; } default: break; } if (to == end) break; length= min(end - to , print_arr[i].end - print_arr[i].begin); if (to + length < end) length++; to= strnmov(to, print_arr[i].begin, length); } DBUG_ASSERT(to <= end); *to='\0'; /* End of errmessage */ return to; } else { /* Process next positional argument*/ DBUG_ASSERT(*fmt == '%'); print_arr[idx].end= fmt - 1; idx++; fmt++; arg_index= 0; fmt= get_width(fmt, &arg_index); DBUG_ASSERT(*fmt == '$'); fmt++; arg_count= max(arg_count, arg_index); goto start; } return 0; } /** Produces output string according to a format string See the detailed documentation around my_snprintf_service_st @param cs string charset @param to buffer where processed string will be place @param n size of buffer @param par format string @param ap list of parameters @retval length of result string */ size_t my_vsnprintf_ex(CHARSET_INFO *cs, char *to, size_t n, const char* fmt, va_list ap) { char *start=to, *end=to+n-1; size_t length, width; uint print_type, have_longlong; for (; *fmt ; fmt++) { if (*fmt != '%') { if (to == end) /* End of buffer */ break; *to++= *fmt; /* Copy ordinary char */ continue; } fmt++; /* skip '%' */ length= width= 0; print_type= 0; /* Read max fill size (only used with %d and %u) */ if (my_isdigit(&my_charset_latin1, *fmt)) { fmt= get_length(fmt, &length, &print_type); if (*fmt == '$') { to= process_args(cs, to, end, (fmt+1), length, ap); return (size_t) (to - start); } } else { if (*fmt == '`') { print_type|= ESCAPED_ARG; fmt++; } if (*fmt == '-') fmt++; if (*fmt == '*') { fmt++; length= va_arg(ap, int); } else fmt= get_length(fmt, &length, &print_type); } if (*fmt == '.') { fmt++; if (*fmt == '*') { fmt++; width= va_arg(ap, int); } else fmt= get_width(fmt, &width); } else width= SIZE_T_MAX; fmt= check_longlong(fmt, &have_longlong); if (*fmt == 's') /* String parameter */ { reg2 char *par= va_arg(ap, char *); to= process_str_arg(cs, to, end, width, par, print_type); continue; } else if (*fmt == 'b') /* Buffer parameter */ { char *par = va_arg(ap, char *); to= process_bin_arg(to, end, width, par); continue; } else if (*fmt == 'f' || *fmt == 'g') { double d= va_arg(ap, double); to= process_dbl_arg(to, end, width, d, *fmt); continue; } else if (*fmt == 'd' || *fmt == 'i' || *fmt == 'u' || *fmt == 'x' || *fmt == 'X' || *fmt == 'p' || *fmt == 'o') { /* Integer parameter */ longlong larg; if (*fmt == 'p') have_longlong= (sizeof(void *) == sizeof(longlong)); if (have_longlong) larg = va_arg(ap,longlong); else if (*fmt == 'd' || *fmt == 'i') larg = va_arg(ap, int); else larg= va_arg(ap, uint); to= process_int_arg(to, end, length, larg, *fmt, print_type); continue; } else if (*fmt == 'c') /* Character parameter */ { register int larg; if (to == end) break; larg = va_arg(ap, int); *to++= (char) larg; continue; } /* 
We come here on '%%', unknown code or too long parameter */ if (to == end) break; *to++='%'; /* % used as % or unknown code */ } DBUG_ASSERT(to <= end); *to='\0'; /* End of errmessage */ return (size_t) (to - start); } /* Limited snprintf() implementations exported to plugins as a service, see the detailed documentation around my_snprintf_service_st */ size_t my_vsnprintf(char *to, size_t n, const char* fmt, va_list ap) { return my_vsnprintf_ex(&my_charset_latin1, to, n, fmt, ap); } size_t my_snprintf(char* to, size_t n, const char* fmt, ...) { size_t result; va_list args; va_start(args,fmt); result= my_vsnprintf(to, n, fmt, args); va_end(args); return result; }
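Aside (not part of the file above): the format extensions implemented here are easiest to see from the caller's side. A minimal usage sketch, assuming the surrounding MySQL build environment (my_global.h/m_string.h) for the my_snprintf() declaration; the expected buffer contents in the comments follow from the code above:

#include <my_global.h>
#include <m_string.h>

static void demo(void)
{
	char buf[64];

	/* %`s quotes the argument in backticks, doubling any embedded
	 * backtick (see backtick_string() above) */
	my_snprintf(buf, sizeof(buf), "drop table %`s", "t1");
	/* buf == "drop table `t1`" */

	/* %b copies exactly <precision> raw bytes, so pair it with ".*" */
	my_snprintf(buf, sizeof(buf), "%.*b", 3, "abcdef");
	/* buf == "abc" */

	/* %<n>$ selects positional arguments */
	my_snprintf(buf, sizeof(buf), "%2$s.%1$s", "t1", "db1");
	/* buf == "db1.t1" */
}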
drakeos/Dracore
dep/mysqllite/strings/my_vsnprintf.c
C
gpl-2.0
18,158
/* * linux/drivers/video/ti81xx/vpss/dctrl.c * * VPSS display controller driver for TI 81XX * * Copyright (C) 2009 TI * Author: Yihe Hu <yihehu@ti.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #define VPSS_SUBMODULE_NAME "DCTRL" #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/err.h> #include <linux/sysfs.h> #include <linux/kobject.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <mach/board-ti816x.h> #include "core.h" #include "system.h" #include "dc.h" static struct vps_dispctrl *disp_ctrl; static void *dc_handle; static struct vps_payload_info *dc_payload_info; /*store the current VENC setting*/ static struct vps_dcvencinfo venc_info = { { {VPS_DC_VENC_HDMI, {FVID2_STD_1080P_60, 1920, 1080, FVID2_SF_PROGRESSIVE,\ 148500, 60, 88, 148, 44, 4, 36, 5}, 3, 0, 0}, {VPS_DC_VENC_DVO2, {FVID2_STD_1080P_60, 1920, 1080, FVID2_SF_PROGRESSIVE,\ 148500, 60, 88, 148, 44, 4, 36, 5}, 3, 0, 0}, {VPS_DC_VENC_SD, {FVID2_STD_NTSC, 720, 480, FVID2_SF_INTERLACED,\ 216000, 60, 12, 68, 64, 5, 41, 5}, 0, 0, 0}, {VPS_DC_VENC_HDCOMP, {FVID2_STD_1080I_60, 1920, 1080, FVID2_SF_PROGRESSIVE,\ 74250, 60, 88, 148, 44, 2, 15, 5}, 2, 0, 0} }, 0, VPS_DC_MAX_VENC, }; /********************************************************** The names in the following arrays are the values used in the sysfs.
**********************************************************/ /*store the current mode info*/ static const struct dc_vencmode_info vmode_info[] = { {"ntsc", FVID2_STD_NTSC, {FVID2_STD_NTSC, 720, 480, FVID2_SF_INTERLACED, 216000, 60, 12, 68, 64, 5, 41, 5} }, {"pal", FVID2_STD_PAL, {FVID2_STD_PAL, 720, 576, FVID2_SF_INTERLACED, 216000, 50, 16, 58, 64, 6, 31, 6} }, {"1080p-60", FVID2_STD_1080P_60, {FVID2_STD_1080P_60, 1920, 1080, FVID2_SF_PROGRESSIVE, 148500, 60, 88, 148, 44, 4, 36, 5} }, {"1920x1080@60", FVID2_STD_1080P_60, {FVID2_STD_CUSTOM, 1920, 1080, FVID2_SF_PROGRESSIVE, 148500, 60, 88, 148, 44, 4, 36, 5} }, {"1080p-50", FVID2_STD_1080P_50, {FVID2_STD_1080P_50, 1920, 1080, FVID2_SF_PROGRESSIVE, 148500, 50, 528, 148, 44, 4, 36, 5} }, {"1920x1080@50", FVID2_STD_1080P_50, {FVID2_STD_CUSTOM, 1920, 1080, FVID2_SF_PROGRESSIVE, 148500, 50, 528, 148, 44, 4, 36, 5} }, {"1080p-30", FVID2_STD_1080P_30, {FVID2_STD_1080P_30, 1920, 1080, FVID2_SF_PROGRESSIVE, 74250, 30, 88, 148, 44, 4, 36, 5} }, {"1920x1080@30", FVID2_STD_1080P_30, {FVID2_STD_CUSTOM, 1920, 1080, FVID2_SF_PROGRESSIVE, 74250, 30, 88, 148, 44, 4, 36, 5} }, {"720p-60", FVID2_STD_720P_60, {FVID2_STD_720P_60, 1280, 720, FVID2_SF_PROGRESSIVE, 74250, 60, 110, 220, 40, 5, 20, 5} }, {"1280x720@60", FVID2_STD_720P_60, {FVID2_STD_CUSTOM, 1280, 720, FVID2_SF_PROGRESSIVE, 74250, 60, 110, 220, 40, 5, 20, 5} }, {"720p-50", FVID2_STD_720P_50, {FVID2_STD_720P_50, 1280, 720, FVID2_SF_PROGRESSIVE, 74250, 50, 440, 220, 40, 5, 20, 5} }, {"1280x720@50", FVID2_STD_720P_50, { FVID2_STD_CUSTOM, 1280, 720, FVID2_SF_PROGRESSIVE, 74250, 50, 440, 220, 40, 5, 20, 5} }, {"1080i-60", FVID2_STD_1080I_60, {FVID2_STD_1080I_60, 1920, 1080, FVID2_SF_INTERLACED, 74250, 60, 88, 148, 44, 2, 15, 5} }, {"1920x1080@60i", FVID2_STD_1080I_60, {FVID2_STD_CUSTOM, 1920, 1080, FVID2_SF_INTERLACED, 74250, 60, 88, 148, 44, 2, 15, 5} }, {"1080i-50", FVID2_STD_1080I_50, {FVID2_STD_1080I_50, 1920, 1080, FVID2_SF_INTERLACED, 742500, 50, 528, 148, 44, 2, 15, 5} }, {"1920x1080@50i", FVID2_STD_1080I_50, {FVID2_STD_CUSTOM, 1920, 1080, FVID2_SF_INTERLACED, 742500, 50, 528, 148, 44, 2, 15, 5} }, /*VGA*/ {"640x480@60", FVID2_STD_VGA_60, {FVID2_STD_CUSTOM, 640, 480, FVID2_SF_PROGRESSIVE, 25088, 60, 16, 48, 96, 10, 33, 2} }, {"640x480@72", FVID2_STD_VGA_72, {FVID2_STD_CUSTOM, 640, 480, FVID2_SF_PROGRESSIVE, 31488, 72, 24, 128, 40, 9, 29, 2} }, {"640x480@75", FVID2_STD_VGA_75, {FVID2_STD_CUSTOM, 640, 480, FVID2_SF_PROGRESSIVE, 31488, 75, 16, 120, 64, 1, 16, 3} }, {"640x480@85", FVID2_STD_VGA_85, {FVID2_STD_CUSTOM, 640, 480, FVID2_SF_PROGRESSIVE, 35840, 85, 56, 80, 56, 1, 25, 3} }, /*SVGA*/ {"800x600@60", FVID2_STD_SVGA_60, {FVID2_STD_CUSTOM, 800, 600, FVID2_SF_PROGRESSIVE, 39936, 60, 40, 88, 128, 1, 23, 4} }, {"800x600@72", FVID2_STD_SVGA_72, {FVID2_STD_CUSTOM, 800, 600, FVID2_SF_PROGRESSIVE, 49920, 72, 56, 64, 120, 37, 23, 6} }, {"800x600@75", FVID2_STD_SVGA_75, {FVID2_STD_CUSTOM, 800, 600, FVID2_SF_PROGRESSIVE, 49400, 75, 16, 160, 80, 1, 21, 3} }, {"800x600@85", FVID2_STD_SVGA_85, {FVID2_STD_CUSTOM, 800, 600, FVID2_SF_PROGRESSIVE, 56000, 85, 32, 152, 64, 1, 27, 3} }, /*XGA*/ {"1024x768@60", FVID2_STD_XGA_60, {FVID2_STD_XGA_60, 1024, 768, FVID2_SF_PROGRESSIVE, 65000, 60, 24, 160, 136, 3, 29, 6} }, {"1024x768@70", FVID2_STD_XGA_70, {FVID2_STD_CUSTOM, 1024, 768, FVID2_SF_PROGRESSIVE, 74752, 70, 24, 144, 136, 3, 29, 6} }, {"1024x768@75", FVID2_STD_XGA_75, {FVID2_STD_XGA_75, 1024, 768, FVID2_SF_PROGRESSIVE, 78720, 75, 16, 176, 96, 1, 28, 3} }, {"1024x768@85", FVID2_STD_XGA_85, {FVID2_STD_CUSTOM, 1024, 
768, FVID2_SF_PROGRESSIVE, 94464, 85, 48, 208, 96, 1, 36, 3} }, /*SXGA*/ {"1280x1024@60", FVID2_STD_SXGA_60, {FVID2_STD_SXGA_60, 1280, 1024, FVID2_SF_PROGRESSIVE, 108000, 60, 48, 248, 112, 1, 38, 3} }, {"1280x1024@75", FVID2_STD_SXGA_75, {FVID2_STD_SXGA_75, 1280, 1024, FVID2_SF_PROGRESSIVE, 135000, 75, 16, 248, 144, 1, 38, 3} }, {"1280x1024@85", FVID2_STD_SXGA_85, {FVID2_STD_CUSTOM, 1280, 1024, FVID2_SF_PROGRESSIVE, 157440, 85, 64, 224, 160, 1, 44, 3} }, /*UXGA*/ {"1600x1200@60", FVID2_STD_UXGA_60, {FVID2_STD_UXGA_60, 1600, 1200, FVID2_SF_PROGRESSIVE, 162000, 60, 64, 304, 192, 1, 46, 3} } }; /*use for the venc name*/ static const struct dc_vencname_info venc_name[VPS_DC_MAX_VENC] = { {"hdmi", VPS_DC_VENC_HDMI, VPS_DC_HDMI_BLEND, HDMI}, {"dvo2", VPS_DC_VENC_DVO2, VPS_DC_DVO2_BLEND, DVO2}, {"sd", VPS_DC_VENC_SD, VPS_DC_SDVENC_BLEND, SDVENC}, {"hdcomp", VPS_DC_VENC_HDCOMP, VPS_DC_HDCOMP_BLEND, HDCOMP} }; /*use for pll sysfs*/ static const struct vps_sname_info pllvenc_name[] = { {"rfclk", VPS_SYSTEM_VPLL_OUTPUT_VENC_RF}, {"dclk", VPS_SYSTEM_VPLL_OUTPUT_VENC_D}, {"aclk", VPS_SYSTEM_VPLL_OUTPUT_VENC_A} }; /*used for clock source sysfs*/ static const struct vps_sname_info vclksrc_name[] = { {"dclk", VPS_DC_CLKSRC_VENCD}, {"dclkdiv2", VPS_DC_CLKSRC_VENCD_DIV2}, {"dclkdiff", VPS_DC_CLKSRC_VENCD_DIV2_DIFF}, {"aclk", VPS_DC_CLKSRC_VENCA}, {"aclkdiv2", VPS_DC_CLKSRC_VENCA_DIV2}, {"aclkdiff", VPS_DC_CLKSRC_VENCA_DIV2_DIFF} }; /*used for output sysfs*/ static const struct vps_sname_info dfmt_name[VPS_DC_DVOFMT_MAX] = { {"single", VPS_DC_DVOFMT_SINGLECHAN}, {"double", VPS_DC_DVOFMT_DOUBLECHAN}, {"triple", VPS_DC_DVOFMT_TRIPLECHAN_EMBSYNC}, {"triplediscrete", VPS_DC_DVOFMT_TRIPLECHAN_DISCSYNC}, {"doublediscrete", VPS_DC_DVOFMT_DOUBLECHAN_DISCSYNC}, }; /*used for output sysfs*/ static const struct vps_sname_info afmt_name[VPS_DC_A_OUTPUT_MAX] = { {"composite", VPS_DC_A_OUTPUT_COMPOSITE}, {"svideo", VPS_DC_A_OUTPUT_SVIDEO}, {"component", VPS_DC_A_OUTPUT_COMPONENT}, }; /*used for output sysfs*/ static const struct vps_sname_info datafmt_name[] = { {"rgb888", FVID2_DF_RGB24_888}, {"yuv444p", FVID2_DF_YUV444P}, {"yuv422spuv", FVID2_DF_YUV422SP_UV}, }; /*used for nodes sysfs*/ static const struct vps_sname_info dc_nodes[] = { {"main", VPS_DC_MAIN_INPUT_PATH}, /*0*/ {"vcompmux", VPS_DC_VCOMP_MUX}, /*1*/ {"hdcompmux", VPS_DC_HDCOMP_MUX}, /*2*/ {"sdmux", VPS_DC_SDVENC_MUX }, /*3*/ {"aux", VPS_DC_AUX_INPUT_PATH}, /*4*/ {"bp0", VPS_DC_BP0_INPUT_PATH}, /*5*/ {"bp1", VPS_DC_BP1_INPUT_PATH}, /*6*/ {"dummy", VPS_DC_MAX_NODE_NUM}, /*7*/ {"dummy", VPS_DC_MAX_NODE_NUM}, /*8*/ {"dummy", VPS_DC_MAX_NODE_NUM}, /*9*/ {"sd", VPS_DC_SEC1_INPUT_PATH}, /*10*/ {"dummy", VPS_DC_MAX_NODE_NUM}, /*11*/ {"dummy", VPS_DC_MAX_NODE_NUM}, /*12*/ {"dummy", VPS_DC_MAX_NODE_NUM}, /*13*/ {"vcomp", VPS_DC_VCOMP}, /*14*/ {"cigcons", VPS_DC_CIG_CONSTRAINED_OUTPUT}, /*15*/ {"cigin", VPS_DC_CIG_PIP_INPUT}, /*16*/ {"cigncons", VPS_DC_CIG_NON_CONSTRAINED_OUTPUT}, /*17*/ {"cigout", VPS_DC_CIG_PIP_OUTPUT}, /*18*/ {"grpx0", VPS_DC_GRPX0_INPUT_PATH}, /*19*/ {"grpx1", VPS_DC_GRPX1_INPUT_PATH}, /*20*/ {"grpx2", VPS_DC_GRPX2_INPUT_PATH}, /*21*/ {"hdmi", VPS_DC_HDMI_BLEND}, /*22*/ #ifdef CONFIG_ARCH_TI816X {"hdcomp", VPS_DC_HDCOMP_BLEND}, /*23*/ {"dvo2", VPS_DC_DVO2_BLEND}, /*24*/ {"sd", VPS_DC_SDVENC_BLEND}, /*25*/ #else {"dvo2", VPS_DC_DVO2_BLEND}, /*23*/ {"sd", VPS_DC_SDVENC_BLEND}, /*24*/ #endif }; /*S***************************private funtions*******************/ static inline void dc_lock(struct vps_dispctrl *dctrl) { mutex_lock(&dctrl->dcmutex); } 
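/*
 * Usage illustration (not part of the driver): the strings in the tables
 * above are the tokens user space exchanges with the blender sysfs
 * attributes defined later in this file (mode, timings, clksrc, output).
 * The paths are hypothetical -- the real location depends on where the
 * kobjects get registered:
 *
 *   echo 1080p-60 > /sys/.../mode        (names from vmode_info[])
 *   echo dclk > /sys/.../clksrc          (names from vclksrc_name[])
 *   echo single,rgb888 > /sys/.../output (dfmt_name[] + datafmt_name[])
 *   echo 148500,1920/88/148/44,1080/4/36/5,1 > /sys/.../timings
 *     (mirrors the 1080p-60 entry, assuming FVID2_SF_PROGRESSIVE == 1)
 */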
static inline void dc_unlock(struct vps_dispctrl *dctrl) { mutex_unlock(&dctrl->dcmutex); } static inline bool isdigitalvenc(int vid) { if ((vid == VPS_DC_VENC_HDMI) || (vid == VPS_DC_VENC_DVO2)) return true; return false; } static inline bool isdigitalclk(u32 clk) { if ((clk == VPS_DC_CLKSRC_VENCA) || (clk == VPS_DC_CLKSRC_VENCA_DIV2) || (clk == VPS_DC_CLKSRC_VENCA_DIV2_DIFF)) return false; return true; } static inline bool isvalidclksrc(int vid, enum vps_dcvencclksrcsel clk) { if ((vid == VPS_DC_VENC_HDMI) && (!isdigitalclk(clk))) return false; /*ti814x, DVO2 must be aclk*/ if (cpu_is_ti814x()) { if ((vid == VPS_DC_VENC_DVO2) && (isdigitalclk(clk))) return false; } return true; } static inline bool isvalidmode(int vid, int mid) { switch (vid) { case VPS_DC_VENC_HDMI: if (cpu_is_ti816x()) case VPS_DC_VENC_HDCOMP: case VPS_DC_VENC_DVO2: if ((mid == FVID2_STD_NTSC) || (mid == FVID2_STD_PAL)) return false; break; case VPS_DC_VENC_SD: if (!((mid == FVID2_STD_NTSC) || (mid == FVID2_STD_PAL))) return false; break; } return true; } /*get the clock venc*/ static inline u32 get_plloutputvenc(int bidx) { struct vps_dcvencclksrc clksrc = disp_ctrl->blenders[bidx].clksrc; if (bidx == SDVENC) return VPS_SYSTEM_VPLL_OUTPUT_VENC_RF; if (cpu_is_ti814x()) return VPS_SYSTEM_VPLL_OUTPUT_VENC_D; if (isdigitalclk(clksrc.clksrc)) return VPS_SYSTEM_VPLL_OUTPUT_VENC_D; else return VPS_SYSTEM_VPLL_OUTPUT_VENC_A; } /*get the pixel clock for the standard mode*/ static inline int get_pllclock(u32 mid, u32 *freq) { int i; for (i = 0; i < ARRAY_SIZE(vmode_info); i++) { if (vmode_info[i].standard == mid) { *freq = vmode_info[i].minfo.pixelclock; if ((mid == FVID2_STD_NTSC) || (mid == FVID2_STD_PAL)) { if (cpu_is_ti814x()) *freq = 54000; } return 0; } } return -EINVAL; } /*get the current format based on the mode id*/ static int dc_get_format_from_mid(int mid, u32 *width, u32 *height, u8 *scformat) { int i; for (i = 0; i < ARRAY_SIZE(vmode_info); i++) { if (mid == vmode_info[i].standard) { *width = vmode_info[i].minfo.width; *height = vmode_info[i].minfo.height; *scformat = vmode_info[i].minfo.scanformat; return 0; } } return -EINVAL; } static int dc_get_timing(int mid, struct fvid2_modeinfo *minfo) { int i; for (i = 0; i < ARRAY_SIZE(vmode_info); i++) { if (mid == vmode_info[i].standard) { memcpy(minfo, &vmode_info[i].minfo, sizeof(*minfo)); if ((mid == FVID2_STD_NTSC) || (mid == FVID2_STD_PAL)) { if (cpu_is_ti814x()) minfo->pixelclock = 54000; } return 0; } } return -EINVAL; } /*get the index of the desired venc id in the database*/ static int get_idx_from_vid(int vid, int *idx) { int i; for (i = 0; i < disp_ctrl->numvencs; i++) { if (vid == venc_name[i].vid) { *idx = venc_name[i].idx; return 0; } } return -EINVAL; } /*get the venc id based on the name*/ static int dc_get_vencid(char *vname, int *vid) { int i; for (i = 0; i < disp_ctrl->numvencs; i++) { const struct dc_vencname_info *vnid = &venc_name[i]; if (sysfs_streq(vname, vnid->name)) { *vid = vnid->vid; return 0; } } return -1; } /*get the blender id from the indx */ static int get_bid_from_idx(int idx, int *bid) { int i; for (i = 0; i < disp_ctrl->numvencs; i++) { const struct dc_vencname_info *vnid = &venc_name[i]; if (vnid->idx == idx) { *bid = vnid->bid; return 0; } } return -1; } /*get the mode id based on the mode name*/ static int dc_get_modeid(char *mname, int *mid) { int i; for (i = 0; i < ARRAY_SIZE(vmode_info); i++) { const struct dc_vencmode_info *vinfo = &vmode_info[i]; if (sysfs_streq(mname, vinfo->name)) { *mid = vinfo->standard; return 0; } } 
return -1; } /*get the node id based on the name*/ static int dc_get_nodeid(char *name, int *nid) { int i; if (sysfs_streq(name, "dummy")) return -EINVAL; for (i = 0; i < ARRAY_SIZE(dc_nodes); i++) { const struct vps_sname_info *ninfo = &dc_nodes[i]; if (sysfs_streq(name, ninfo->name)) { *nid = ninfo->value; return 0; } } return -EINVAL; } /*get the venc information from M3*/ static int dc_get_vencinfo(struct vps_dcvencinfo *vinfo) { int r = 0; if ((disp_ctrl == NULL) || (disp_ctrl->fvid2_handle == NULL)) return -EINVAL; memcpy(disp_ctrl->vinfo, vinfo, sizeof(struct vps_dcvencinfo)); r = vps_fvid2_control(disp_ctrl->fvid2_handle, IOCTL_VPS_DCTRL_GET_VENC_MODE, (void *)disp_ctrl->vinfo_phy, NULL); if (r) VPSSERR("failed to get venc info.\n"); else memcpy(vinfo, disp_ctrl->vinfo, sizeof(struct vps_dcvencinfo)); return r; } /*is venc running*/ static int dc_isvencrunning(int vid) { struct vps_dcvencinfo vinfo; int r = 0; vinfo.numvencs = 1; vinfo.modeinfo[0].vencid = vid; r = dc_get_vencinfo(&vinfo); if (!r) return vinfo.modeinfo[0].isvencrunning; return r; } /*Get the current VENC output info*/ static int dc_get_output(struct vps_dcoutputinfo *oinfo) { int r = 0; if ((disp_ctrl == NULL) || (disp_ctrl->fvid2_handle == NULL)) return -EINVAL; /*get the venc output info*/ disp_ctrl->opinfo->vencnodenum = oinfo->vencnodenum; r = vps_fvid2_control(disp_ctrl->fvid2_handle, IOCTL_VPS_DCTRL_GET_VENC_OUTPUT, (void *)disp_ctrl->opinfo_phy, NULL); if (r) VPSSERR("failed to get venc output info\n"); else memcpy(oinfo, disp_ctrl->opinfo, sizeof(struct vps_dcoutputinfo)); return r; } /*Set the VENC outputs*/ static int dc_set_output(struct vps_dcoutputinfo *oinfo) { int r; if ((disp_ctrl == NULL) || (disp_ctrl->fvid2_handle == NULL)) return -EINVAL; memcpy(disp_ctrl->opinfo, oinfo, sizeof(struct vps_dcoutputinfo)); r = vps_fvid2_control(disp_ctrl->fvid2_handle, IOCTL_VPS_DCTRL_SET_VENC_OUTPUT, (void *)disp_ctrl->opinfo_phy, NULL); if (r) VPSSERR("failed to set venc output\n"); return r; } /*set up the pll clock*/ static int dc_set_pllclock(int bidx, u32 clock) { struct vps_systemvpllclk pll; int r = 0; /*FIXME: call function of HDMI driver to set HDMI for ti814x*/ if (cpu_is_ti814x() && (bidx == HDMI)) return r; pll.outputvenc = (enum vps_vplloutputclk)get_plloutputvenc(bidx); pll.outputclk = clock; r = vps_system_setpll(&pll); return r; } static int dc_set_pll_by_mid(int bidx, int mid) { int r = 0; u32 clock; r = get_pllclock(mid, &clock); if (r) { VPSSERR("nonexit mode %d\n", mid); return r; } r = dc_set_pllclock(bidx, clock); return r; } /*get the clock source*/ static int dc_get_clksrc(struct vps_dcvencclksrc *clksrc) { int r = 0; if ((disp_ctrl == NULL) || (disp_ctrl->fvid2_handle == NULL)) return -EINVAL; disp_ctrl->clksrc->venc = clksrc->venc; r = vps_fvid2_control(disp_ctrl->fvid2_handle, IOCTL_VPS_DCTRL_GET_VENC_CLK_SRC, (void *)disp_ctrl->clksrc_phy, NULL); if (r) VPSSERR("get clock source failed\n"); else clksrc->clksrc = disp_ctrl->clksrc->clksrc; return r; } /*set the clock source*/ static int dc_set_clksrc(struct vps_dcvencclksrc *clksrc) { int r = 0; if ((disp_ctrl == NULL) || (disp_ctrl->fvid2_handle == NULL)) return -EINVAL; if (!isvalidclksrc(clksrc->venc, clksrc->clksrc)) { VPSSERR("invalid clock source\n"); return -EINVAL; } disp_ctrl->clksrc->venc = clksrc->venc; disp_ctrl->clksrc->clksrc = clksrc->clksrc; r = vps_fvid2_control(disp_ctrl->fvid2_handle, IOCTL_VPS_DCTRL_SET_VENC_CLK_SRC, (void *)disp_ctrl->clksrc_phy, NULL); if (r) VPSSERR("set clock source failed\n"); return r; } /*get 
the format based on the venc id*/ static int dc_get_format_from_vid(int vid, u32 *width, u32 *height, u8 *scformat) { int r = 0; struct vps_dcvencinfo vinfo; vinfo.numvencs = 1; vinfo.modeinfo[0].vencid = vid; r = dc_get_vencinfo(&vinfo); if (r) return -EINVAL; if (vinfo.modeinfo[0].minfo.standard == FVID2_STD_CUSTOM) { *width = vinfo.modeinfo[0].minfo.width; *height = vinfo.modeinfo[0].minfo.height; *scformat = vinfo.modeinfo[0].minfo.scanformat; } else { r = dc_get_format_from_mid(vinfo.modeinfo[0].minfo.standard, width, height, scformat); } return 0; } /*get the format based on the blender id*/ static int dc_get_format_from_bid(int bid, u32 *width, u32 *height, u8 *scformat) { int i; int r = -EINVAL; for (i = 0; i < disp_ctrl->numvencs; i++) { if (bid == venc_name[i].bid) { r = dc_get_format_from_vid(venc_name[i].vid, width, height, scformat); break; } } return r; } /*disable the desired vencs*/ static int dc_venc_disable(int vid) { int i = 0; int r = 0; struct vps_dcvencinfo vinfo; int venc_ids = vid; if ((disp_ctrl == NULL) || (disp_ctrl->fvid2_handle == NULL)) return -EINVAL; if (vid == 0) return 0; if (vid & (~disp_ctrl->vencmask)) { VPSSERR("wrong venc id.\n"); return -EINVAL; } VPSSDBG("enter venc disable\n"); vinfo.numvencs = 0; /*get the id of each venc to be disabled*/ while (venc_ids >> i) { if ((venc_ids >> i++) & 1) vinfo.modeinfo[vinfo.numvencs++].vencid = 1 << (i - 1); } r = dc_get_vencinfo(&vinfo); if (r) { VPSSERR("failed to get venc info.\n"); return r; } venc_ids = vid; for (i = 0; i < vinfo.numvencs; i++) { if (vinfo.modeinfo[i].isvencrunning == 0) { VPSSERR("venc %d already stopped\n", vinfo.modeinfo[i].vencid); venc_ids &= ~vinfo.modeinfo[i].vencid; } } if (venc_ids && !r) { *disp_ctrl->dis_vencs = venc_ids; r = vps_fvid2_control(disp_ctrl->fvid2_handle, IOCTL_VPS_DCTRL_DISABLE_VENC, (void *)disp_ctrl->dis_vencsphy, NULL); if (r == 0) { disp_ctrl->enabled_venc_ids &= ~venc_ids; if (disp_ctrl->tiedvenc) { disp_ctrl->tiedvenc &= ~venc_ids; venc_ids = 0; i = 0; /*calculate how many vencs are left in the tied list*/ while (disp_ctrl->tiedvenc >> i) { if ((disp_ctrl->tiedvenc >> i++) & 1) venc_ids++; } /*if one venc is left, set tiedvenc to zero*/ if (venc_ids == 1) disp_ctrl->tiedvenc = 0; } } else VPSSERR("failed to disable the venc.\n"); } return r; } /*set the mode for desired vencs*/ static int dc_set_vencmode(struct vps_dcvencinfo *vinfo) { int i, r = 0; int vencs = 0; struct vps_dcvencinfo vi; if ((disp_ctrl == NULL) || (disp_ctrl->fvid2_handle == NULL)) return -EINVAL; /*get the current setting based on the app inputs*/ for (i = 0; i < vinfo->numvencs; i++) vi.modeinfo[i].vencid = vinfo->modeinfo[i].vencid; vi.numvencs = vinfo->numvencs; r = dc_get_vencinfo(&vi); if (r) { VPSSERR("failed to get venc info.\n"); goto exit; } /*make sure current venc status is matching */ disp_ctrl->vinfo->numvencs = 0; disp_ctrl->vinfo->tiedvencs = 0; for (i = 0; i < vinfo->numvencs; i++) { if (vi.modeinfo[i].isvencrunning) { if (vi.modeinfo[i].minfo.standard != vinfo->modeinfo[i].minfo.standard) { r = -EINVAL; VPSSERR("venc %d already running with \ different mode\n", vi.modeinfo[i].vencid); goto exit; } else VPSSDBG("venc %d already running\n", vi.modeinfo[i].vencid); } else { memcpy(&disp_ctrl->vinfo->modeinfo \ [disp_ctrl->vinfo->numvencs++], &vinfo->modeinfo[i], sizeof(struct vps_dcmodeinfo)); vencs |= vinfo->modeinfo[i].vencid; } } if (vinfo->tiedvencs) { if ((vencs & vinfo->tiedvencs) != vinfo->tiedvencs) { r = -EINVAL; VPSSERR("cannot tie vencs\n"); goto exit; } else
disp_ctrl->vinfo->tiedvencs = vinfo->tiedvencs; } if (disp_ctrl->vinfo->numvencs) { /*set the VENC Mode*/ r = vps_fvid2_control(disp_ctrl->fvid2_handle, IOCTL_VPS_DCTRL_SET_VENC_MODE, (void *)disp_ctrl->vinfo_phy, NULL); if (r) { VPSSERR("failed to set venc mode.\n"); goto exit; } disp_ctrl->enabled_venc_ids |= vencs; } exit: return r; } static int dc_enum_node_input(struct vps_dispctrl *dctrl, struct vps_dcenumnodeinput *eninput) { int r = 0; if ((dctrl == NULL) || (dctrl->fvid2_handle == NULL)) return -EINVAL; *dctrl->dceninput = *eninput; r = vps_fvid2_control(dctrl->fvid2_handle, IOCTL_VPS_DCTRL_ENUM_NODE_INPUTS, (void *)dctrl->dceninput_phy, NULL); if (!r) *eninput = *dctrl->dceninput; return r; } static int dc_get_node_status(struct vps_dispctrl *dctrl, struct vps_dcnodeinput *ninput) { int r = 0; if ((dctrl == NULL) || (dctrl->fvid2_handle == NULL)) return -EINVAL; *dctrl->nodeinfo = *ninput; r = vps_fvid2_control(dctrl->fvid2_handle, IOCTL_VPS_DCTRL_GET_NODE_INPUT_STATUS, (void *)dctrl->ninfo_phy, NULL); if (r) VPSSERR("failed to get node status\n"); else *ninput = *dctrl->nodeinfo; return r; } static int dc_get_comp_rtconfig(struct vps_dispctrl *dctrl, struct vps_dccomprtconfig *compcfg) { int r = 0; if ((dctrl == NULL) || (dctrl->fvid2_handle == NULL)) return -EINVAL; dctrl->comprtcfg->nodeid = compcfg->nodeid; r = vps_fvid2_control(dctrl->fvid2_handle, IOCTL_VPS_DCTRL_GET_COMP_RTCONFIG, (void *)dctrl->comprtcfg_phy, NULL); if (r) VPSSERR("Get compositor RT config failed\n"); else *compcfg = *dctrl->comprtcfg; return r; } static int dc_set_comp_rtconfig(struct vps_dispctrl *dctrl, struct vps_dccomprtconfig *compcfg) { int r = 0; if ((dctrl == NULL) || (dctrl->fvid2_handle == NULL)) return -EINVAL; *dctrl->comprtcfg = *compcfg; r = vps_fvid2_control(dctrl->fvid2_handle, IOCTL_VPS_DCTRL_SET_COMP_RTCONFIG, (void *)dctrl->comprtcfg_phy, NULL); if (r) VPSSERR("Set compositor RT config failed\n"); return r; } /*E******************************** private functions *********************/ /*S******************************* public functions *********************/ /*get the id(venc,blender,mode) based on the name*/ int vps_dc_get_id(char *name, int *id, enum dc_idtype type) { int r = -EINVAL; switch (type) { case DC_BLEND_ID: case DC_NODE_ID: r = dc_get_nodeid(name, id); break; case DC_VENC_ID: r = dc_get_vencid(name, id); break; case DC_MODE_ID: r = dc_get_modeid(name, id); break; } return r; } /*get the tied venc information*/ int vps_dc_get_tiedvenc(u8 *tiedvenc) { *tiedvenc = disp_ctrl->tiedvenc; return 0; } /*set the streaming on the blender, not used*/ void vps_dc_set_actnodes(u8 setflag, u8 bidx) { struct dc_blender_info *binfo = &disp_ctrl->blenders[bidx]; if (setflag) binfo->actnodes++; else if (binfo->actnodes != 0) binfo->actnodes--; } /*get the venc info for the desired vencs*/ int vps_dc_get_vencinfo(struct vps_dcvencinfo *vinfo) { int r; dc_lock(disp_ctrl); r = dc_get_vencinfo(vinfo); dc_unlock(disp_ctrl); return r; } /*get the node name based on the id*/ int vps_dc_get_node_name(int id, char *name) { int i; for (i = 0; i < ARRAY_SIZE(dc_nodes); i++) { const struct vps_sname_info *ninfo = &dc_nodes[i]; if (id == ninfo->value) { strcpy(name, (char *)ninfo->name); return 0; } } return -EINVAL; } /*set dc config not used now*/ int vps_dc_set_config(struct vps_dcconfig *usercfg, int setflag) { int r = 0; if ((disp_ctrl == NULL) || (disp_ctrl->fvid2_handle == NULL)) return -EINVAL; if (usercfg->vencinfo.numvencs > disp_ctrl->numvencs) { VPSSERR("num vencs (%d) over max\n", usercfg->vencinfo.numvencs); return -EINVAL; } if (usercfg->vencinfo.tiedvencs & (~disp_ctrl->tiedvenc)) { VPSSERR("tied vencs do not match.\n"); return -EINVAL; } VPSSDBG("enter set config\n"); dc_lock(disp_ctrl); memcpy(disp_ctrl->dccfg, usercfg, sizeof(struct vps_dcconfig)); if (setflag) { r = vps_fvid2_control(disp_ctrl->fvid2_handle, IOCTL_VPS_DCTRL_SET_CONFIG, (void *)disp_ctrl->dccfg_phy, NULL); if (r) VPSSDBG("failed to set the DC config.\n"); } else { r = vps_fvid2_control(disp_ctrl->fvid2_handle, IOCTL_VPS_DCTRL_CLEAR_CONFIG, (void *)disp_ctrl->dccfg_phy, NULL); if (r) VPSSDBG("failed to clear the DC config.\n"); } dc_unlock(disp_ctrl); return r; } /*get current venc output format*/ int vps_dc_get_outpfmt(int id, u32 *width, u32 *height, u8 *scformat, enum dc_idtype type) { int r; if ((disp_ctrl == NULL) || (disp_ctrl->fvid2_handle == NULL)) return -EINVAL; VPSSDBG("enter get output format\n"); dc_lock(disp_ctrl); if (type == DC_VENC_ID) r = dc_get_format_from_vid(id, width, height, scformat); else if (type == DC_BLEND_ID) r = dc_get_format_from_bid(id, width, height, scformat); else if (type == DC_MODE_ID) r = dc_get_format_from_mid(id, width, height, scformat); else r = -EINVAL; dc_unlock(disp_ctrl); return r; } /* set/clear the node path/edge */ int vps_dc_set_node(u8 nodeid, u8 inputid, u8 enable) { int r = 0; if ((disp_ctrl == NULL) || (disp_ctrl->fvid2_handle == NULL)) return -EINVAL; VPSSDBG("enter set node\n"); dc_lock(disp_ctrl); disp_ctrl->nodeinfo->nodeid = nodeid; disp_ctrl->nodeinfo->inputid = inputid; r = vps_fvid2_control(disp_ctrl->fvid2_handle, IOCTL_VPS_DCTRL_GET_NODE_INPUT_STATUS, (void *)disp_ctrl->ninfo_phy, NULL); if (r) { VPSSERR("failed to get node input status\n"); goto exit; } if (disp_ctrl->nodeinfo->isenable == enable) { if (enable) VPSSDBG("node already connected\n"); else VPSSDBG("node already disconnected\n"); goto exit; } /*call ioctl to set/clear the node */ disp_ctrl->nodeinfo->isenable = enable; r = vps_fvid2_control(disp_ctrl->fvid2_handle, IOCTL_VPS_DCTRL_NODE_INPUT, (void *)disp_ctrl->ninfo_phy, NULL); if (r) VPSSERR("failed to enable node.\n"); exit: dc_unlock(disp_ctrl); return r; } int vps_dc_set_color(struct vps_dccigrtconfig *cigconfig) { int r; if ((disp_ctrl == NULL) || (disp_ctrl->fvid2_handle == NULL) || (cigconfig == NULL)) return -EINVAL; VPSSDBG("set color\n"); dc_lock(disp_ctrl); memcpy(disp_ctrl->cigcfg, cigconfig, sizeof(*cigconfig)); r = vps_fvid2_control(disp_ctrl->fvid2_handle, IOCTL_VPS_DCTRL_SET_CIG_RTCONFIG, (void *)disp_ctrl->cigcfg_phy, NULL); if (r) VPSSERR("failed to set CIG color\n"); dc_unlock(disp_ctrl); return r; } int vps_dc_get_color(struct vps_dccigrtconfig *cigconfig) { int r; if ((disp_ctrl == NULL) || (disp_ctrl->fvid2_handle == NULL) || (cigconfig == NULL)) return -EINVAL; VPSSDBG("get color\n"); dc_lock(disp_ctrl); disp_ctrl->cigcfg->nodeid = cigconfig->nodeid; r = vps_fvid2_control(disp_ctrl->fvid2_handle, IOCTL_VPS_DCTRL_GET_CIG_RTCONFIG, (void *)disp_ctrl->cigcfg_phy, NULL); if (!r) memcpy(cigconfig, disp_ctrl->cigcfg, sizeof(*cigconfig)); else VPSSERR("failed to get cig color\n"); dc_unlock(disp_ctrl); return r; } int vps_dc_enum_node_input(struct vps_dcenumnodeinput *eninput) { int r = 0; VPSSDBG("enum node input\n"); dc_lock(disp_ctrl); r = dc_enum_node_input(disp_ctrl, eninput); dc_unlock(disp_ctrl); return r; } int vps_dc_get_node_status(struct vps_dcnodeinput *ninput) { int r = 0; VPSSDBG("get node status\n"); dc_lock(disp_ctrl); r = dc_get_node_status(disp_ctrl, ninput); dc_unlock(disp_ctrl); return
r; } int vps_dc_get_timing(u32 bid, struct fvid2_modeinfo *tinfo) { int i; for (i = 0; i < disp_ctrl->numvencs; i++) { if (bid == venc_name[i].bid) { *tinfo = venc_info.modeinfo[i].minfo; if ((tinfo->standard == FVID2_STD_NTSC) || (tinfo->standard == FVID2_STD_PAL)) { if (cpu_is_ti814x()) tinfo->pixelclock = 54000; } return 0; } } return -EINVAL; } /*E********************************* public functions *****************/ /*sysfs function for blender starting from here*/ static ssize_t blender_mode_show(struct dc_blender_info *binfo, char *buf) { int i; u32 idx = binfo->idx; int l = 0; for (i = 0; i < ARRAY_SIZE(vmode_info); i++) { u32 standard = venc_info.modeinfo[idx].minfo.standard; if (standard == FVID2_STD_CUSTOM) { if (venc_info.modeinfo[idx].minfo.scanformat == FVID2_SF_INTERLACED) l = snprintf(buf, PAGE_SIZE, "%ux%u@%ui\n", venc_info.modeinfo[idx].minfo.width, venc_info.modeinfo[idx].minfo.height, venc_info.modeinfo[idx].minfo.fps); else l = snprintf(buf, PAGE_SIZE, "%ux%u@%u\n", venc_info.modeinfo[idx].minfo.width, venc_info.modeinfo[idx].minfo.height, venc_info.modeinfo[idx].minfo.fps); } else if (vmode_info[i].standard == standard) { l = snprintf(buf, PAGE_SIZE, "%s\n", vmode_info[i].name); break; } } return l; } static ssize_t blender_mode_store(struct dc_blender_info *binfo, const char *buf, size_t size) { int r = 0; u32 idx = binfo->idx; u32 mid; dc_lock(binfo->dctrl); /*venc should be stopped before changes*/ if (dc_isvencrunning(venc_info.modeinfo[idx].vencid)) { VPSSERR("stop venc before changing mode\n"); r = -EINVAL; goto exit; } if (dc_get_modeid((char *)buf, &mid)) { VPSSERR("failed to get the mode %s.", buf); r = -EINVAL; goto exit; } /*make sure the mode is supported by the venc*/ if (!isvalidmode(venc_info.modeinfo[idx].vencid, mid)) goto exit; /*only set the PLL if it is auto mode*/ if (binfo->dctrl->automode) { r = dc_set_pll_by_mid(binfo->idx, mid); if (r) goto exit; } venc_info.modeinfo[idx].minfo.standard = mid; dc_get_timing(mid, &venc_info.modeinfo[idx].minfo); #ifdef CONFIG_ARCH_TI816X if (cpu_is_ti816x()) { if ((binfo->idx == HDCOMP) && (binfo->isdeviceon == true)) { if ((mid == FVID2_STD_1080P_60) || (mid == FVID2_STD_1080P_50)) r = pcf8575_ths7360_hd_enable( TI816X_THS7360_SF_TRUE_HD_MODE); else r = pcf8575_ths7360_hd_enable( TI816X_THS7360_SF_HD_MODE); if (r < 0) { VPSSERR("failed to set THS filter\n"); goto exit; } } } #endif r = size; exit: dc_unlock(binfo->dctrl); return r; } static ssize_t blender_timings_show(struct dc_blender_info *binfo, char *buf) { int r; struct fvid2_modeinfo *t; t = &venc_info.modeinfo[binfo->idx].minfo; r = snprintf(buf, PAGE_SIZE, "%u,%u/%u/%u/%u,%u/%u/%u/%u,%u\n", t->pixelclock, t->width, t->hfrontporch, t->hbackporch, t->hsynclen, t->height, t->vfrontporch, t->vbackporch, t->vsynclen, t->scanformat); return r; } static ssize_t blender_timings_store(struct dc_blender_info *binfo, const char *buf, size_t size) { int r = 0; struct fvid2_modeinfo t; u32 num; u32 vmode; if (binfo->idx == SDVENC) return -EINVAL; dc_lock(binfo->dctrl); /*venc should be stopped before changes*/ if (dc_isvencrunning(venc_info.modeinfo[binfo->idx].vencid)) { VPSSERR("stop venc before changing timing\n"); r = -EINVAL; goto exit; } num = sscanf(buf, "%u,%u/%u/%u/%u,%u/%u/%u/%u,%u/%u", &t.pixelclock, &t.width, &t.hfrontporch, &t.hbackporch, &t.hsynclen, &t.height, &t.vfrontporch, &t.vbackporch, &t.vsynclen, &t.scanformat, &vmode); if (!((num == 11) || (num == 10))) { r = -EINVAL; VPSSERR("wrong timing input %d\n", num); goto exit; } /*if the user did not assign a mode, then we fix it to 1*/ if (num == 10) vmode = 1; memcpy(&venc_info.modeinfo[binfo->idx].minfo, &t, sizeof(t)); venc_info.modeinfo[binfo->idx].minfo.standard = FVID2_STD_CUSTOM; venc_info.modeinfo[binfo->idx].mode = vmode; /*calculate the refresh rate*/ venc_info.modeinfo[binfo->idx].minfo.fps = (t.pixelclock * 1000) / ((t.width + t.hfrontporch + t.hbackporch + t.hsynclen) * (t.height + t.vfrontporch + t.vbackporch + t.vsynclen)); if (t.scanformat == 0) venc_info.modeinfo[binfo->idx].minfo.fps *= 2; r = dc_set_pllclock(binfo->idx, t.pixelclock); if (r) { VPSSERR("failed to set %dKHz clock\n", t.pixelclock); r = -EINVAL; goto exit; } r = size; exit: dc_unlock(binfo->dctrl); return r; } static ssize_t blender_enabled_show(struct dc_blender_info *binfo, char *buf) { int r; struct vps_dcvencinfo vinfo; dc_lock(binfo->dctrl); vinfo.numvencs = 1; vinfo.modeinfo[0].vencid = venc_name[binfo->idx].vid; r = dc_get_vencinfo(&vinfo); if (r) { VPSSERR(" Failed to get venc info\n"); r = -EINVAL; goto exit; } r = snprintf(buf, PAGE_SIZE, "%d\n", vinfo.modeinfo[0].isvencrunning); exit: dc_unlock(binfo->dctrl); return r; } static ssize_t blender_enabled_store(struct dc_blender_info *binfo, const char *buf, size_t size) { int enabled; int vid; int r = 0; enabled = simple_strtoul(buf, NULL, 10); dc_lock(disp_ctrl); /*get vid id*/ vid = venc_name[binfo->idx].vid; if (enabled == 0) { r = dc_venc_disable(vid); if (r) { VPSSERR("failed to disable %s venc\n", binfo->name); r = -EINVAL; goto exit; } } else { int idx; struct vps_dcvencinfo vinfo; get_idx_from_vid(vid, &idx); memcpy(&vinfo.modeinfo[0], &venc_info.modeinfo[idx], sizeof(struct vps_dcvencinfo)); vinfo.numvencs = 1; vinfo.tiedvencs = 0; r = dc_set_vencmode(&vinfo); if (r) { VPSSERR("failed to enable venc %s\n", binfo->name); r = -EINVAL; goto exit; } } r = size; exit: dc_unlock(disp_ctrl); return r; } static ssize_t blender_clksrc_show(struct dc_blender_info *binfo, char *buf) { int r = 0; struct vps_dcvencclksrc *clksrc = &binfo->clksrc; if (binfo->idx == SDVENC) { VPSSERR("no clock source for SD VENC\n"); return -EINVAL; } dc_lock(binfo->dctrl); clksrc->venc = venc_name[binfo->idx].vid; r = dc_get_clksrc(clksrc); dc_unlock(binfo->dctrl); if (r) return r; else return snprintf(buf, PAGE_SIZE, "%s\n", vclksrc_name[clksrc->clksrc].name); } static ssize_t blender_clksrc_store(struct dc_blender_info *binfo, const char *buf, size_t size) { int r = 0, i; struct vps_dcvencclksrc clksrc; bool found = false; if (binfo->idx == SDVENC) { VPSSERR("no clock source for SD VENC\n"); return -EINVAL; } dc_lock(binfo->dctrl); clksrc.venc = venc_name[binfo->idx].vid; if (dc_isvencrunning(clksrc.venc)) { VPSSERR("please stop venc before changing clock source\n"); r = -EINVAL; goto exit; } /*find the matching clock source*/ for (i = 0; i < ARRAY_SIZE(vclksrc_name); i++) { if (sysfs_streq(buf, vclksrc_name[i].name)) { clksrc.clksrc = vclksrc_name[i].value; found = true; break; } } /*set the clock source*/ if (found == true) { r = dc_set_clksrc(&clksrc); if (!r) { r = size; /*store back*/ binfo->clksrc.clksrc = clksrc.clksrc; } } else { r = -EINVAL; VPSSERR("invalid clock source input\n"); } exit: dc_unlock(binfo->dctrl); return r; } static ssize_t blender_output_show(struct dc_blender_info *binfo, char *buf) { struct vps_dcoutputinfo oinfo; int r = 0; int l = 0, i; oinfo.vencnodenum = venc_name[binfo->idx].vid; dc_lock(binfo->dctrl); r = dc_get_output(&oinfo); dc_unlock(binfo->dctrl); if (r) return -EINVAL; if (isdigitalvenc(oinfo.vencnodenum)) l += snprintf(buf + l,
PAGE_SIZE - l, "%s", dfmt_name[oinfo.dvofmt].name); else l += snprintf(buf + l, PAGE_SIZE - l, "%s", afmt_name[oinfo.afmt].name); for (i = 0 ; i < ARRAY_SIZE(datafmt_name); i++) { if (datafmt_name[i].value == oinfo.dataformat) l += snprintf(buf + l, PAGE_SIZE - l, ",%s\n", datafmt_name[i].name); } return l; } static ssize_t blender_output_store(struct dc_blender_info *binfo, const char *buf, size_t size) { struct vps_dcoutputinfo oinfo; int r = 0; char *input = (char *)buf; char *ptr; enum vps_dcdigitalfmt dfmt = VPS_DC_DVOFMT_MAX; enum vps_dcanalogfmt afmt = VPS_DC_A_OUTPUT_MAX; enum fvid2_dataformat fmt = FVID2_DF_MAX; oinfo.vencnodenum = venc_name[binfo->idx].vid; dc_lock(binfo->dctrl); /*venc should be off before changed output*/ if (dc_isvencrunning(oinfo.vencnodenum)) { VPSSERR("please disable VENC before changing output\n"); r = -EINVAL; goto exit; } dc_get_output(&oinfo); /*process the input buf*/ while ((ptr = strsep(&input, ",")) != NULL) { int i; bool found; found = false; /*check data format first*/ for (i = 0; i < ARRAY_SIZE(datafmt_name); i++) { if (sysfs_streq(ptr, datafmt_name[i].name)) { fmt = datafmt_name[i].value; found = true; } } /*check digital format or analog format based on current venc*/ if (!found) { if (isdigitalvenc(oinfo.vencnodenum)) { for (i = 0; i < VPS_DC_DVOFMT_MAX; i++) if (sysfs_streq(ptr, dfmt_name[i].name)) { dfmt = dfmt_name[i].value; found = true; break; } } else { for (i = 0; i < VPS_DC_A_OUTPUT_MAX; i++) if (sysfs_streq(ptr, afmt_name[i].name)) { afmt = afmt_name[i].value; found = true; break; } } if (found == false) { VPSSERR("invalid output value %s\n", ptr); r = -EINVAL; goto exit; } } if (input == NULL) break; } /*make sure the input is right before send out to M3*/ if (isdigitalvenc(oinfo.vencnodenum)) { if ((dfmt == VPS_DC_DVOFMT_MAX) && (fmt == FVID2_DF_MAX)) { VPSSERR("no valid digital output settings\n"); r = -EINVAL; goto exit; } if (dfmt != VPS_DC_DVOFMT_MAX) oinfo.dvofmt = dfmt; } else { if ((afmt == VPS_DC_A_OUTPUT_MAX) && (fmt == FVID2_DF_MAX)) { VPSSERR("no valid analog output settings\n"); r = -EINVAL; goto exit; } if ((binfo->idx == SDVENC) && (afmt == VPS_DC_A_OUTPUT_COMPONENT)) { VPSSERR("component out not supported on sdvenc\n"); r = -EINVAL; goto exit; } if (afmt != VPS_DC_A_OUTPUT_MAX) oinfo.afmt = afmt; } if (fmt != FVID2_DF_MAX) oinfo.dataformat = fmt; r = dc_set_output(&oinfo); if (!r) r = size; exit: dc_unlock(binfo->dctrl); return r; } static ssize_t blender_order_show(struct dc_blender_info *binfo, char *buf) { int r; int l; struct vps_dccomprtconfig comprtcfg; r = get_bid_from_idx(binfo->idx, &comprtcfg.nodeid); if (r) return r; dc_lock(binfo->dctrl); r = dc_get_comp_rtconfig(binfo->dctrl, &comprtcfg); dc_unlock(binfo->dctrl); if (r) return r; if (comprtcfg.isglobalreorderenable) l = snprintf(buf, PAGE_SIZE, "%u,%u/%u/%u/%u\n", comprtcfg.isglobalreorderenable, comprtcfg.displayorder[0], comprtcfg.displayorder[1], comprtcfg.displayorder[2], comprtcfg.displayorder[3]); else l = snprintf(buf, PAGE_SIZE, "%u,%u\n", comprtcfg.isglobalreorderenable, comprtcfg.displayorder[0]); return l; } static ssize_t blender_order_store(struct dc_blender_info *binfo, const char *buf, size_t size) { int r; int num; struct vps_dccomprtconfig comprtcfg; r = get_bid_from_idx(binfo->idx, &comprtcfg.nodeid); if (r) return r; num = sscanf(buf, "%u,%u/%u/%u/%u", &comprtcfg.isglobalreorderenable, &comprtcfg.displayorder[0], &comprtcfg.displayorder[1], &comprtcfg.displayorder[2], &comprtcfg.displayorder[3]); /*error check*/ if 
(comprtcfg.isglobalreorderenable == 1) { if (num != 5) { VPSSERR("Wrong display re-order format\n"); return -EINVAL; } } else { if (num < 2) { VPSSERR("Wrong display re-order format\n"); return -EINVAL; } } dc_lock(binfo->dctrl); r = dc_set_comp_rtconfig(binfo->dctrl, &comprtcfg); if (!r) r = size; dc_unlock(binfo->dctrl); return r; } static ssize_t blender_name_show(struct dc_blender_info *binfo, char *buf) { return snprintf(buf, PAGE_SIZE, "%s\n", venc_name[binfo->idx].name); } struct blender_attribute { struct attribute attr; ssize_t (*show)(struct dc_blender_info *, char *); ssize_t (*store)(struct dc_blender_info *, const char *, size_t); }; #define BLENDER_ATTR(_name, _mode, _show, _store) \ struct blender_attribute blender_attr_##_name = \ __ATTR(_name, _mode, _show, _store) static BLENDER_ATTR(name, S_IRUGO, blender_name_show, NULL); static BLENDER_ATTR(mode, S_IRUGO | S_IWUSR, blender_mode_show, blender_mode_store); static BLENDER_ATTR(timings, S_IRUGO | S_IWUSR, blender_timings_show, blender_timings_store); static BLENDER_ATTR(enabled, S_IRUGO | S_IWUSR, blender_enabled_show, blender_enabled_store); static BLENDER_ATTR(output, S_IRUGO | S_IWUSR, blender_output_show, blender_output_store); static BLENDER_ATTR(clksrc, S_IRUGO | S_IWUSR, blender_clksrc_show, blender_clksrc_store); static BLENDER_ATTR(order, S_IRUGO | S_IWUSR, blender_order_show, blender_order_store); static struct attribute *blender_sysfs_attrs[] = { &blender_attr_mode.attr, &blender_attr_timings.attr, &blender_attr_enabled.attr, &blender_attr_output.attr, &blender_attr_clksrc.attr, &blender_attr_order.attr, &blender_attr_name.attr, NULL }; static ssize_t blender_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct dc_blender_info *binfo = NULL; struct blender_attribute *blend_attr = NULL; binfo = container_of(kobj, struct dc_blender_info, kobj); blend_attr = container_of(attr, struct blender_attribute, attr); if (!blend_attr->show) return -ENOENT; return blend_attr->show(binfo, buf); } static ssize_t blender_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t size) { struct dc_blender_info *blend; struct blender_attribute *blend_attr; blend = container_of(kobj, struct dc_blender_info, kobj); blend_attr = container_of(attr, struct blender_attribute, attr); if (!blend_attr->store) return -ENOENT; return blend_attr->store(blend, buf, size); } static const struct sysfs_ops blender_sysfs_ops = { .show = blender_attr_show, .store = blender_attr_store, }; static struct kobj_type blender_ktype = { .sysfs_ops = &blender_sysfs_ops, .default_attrs = blender_sysfs_attrs, }; /*sysfs for the display controller*/ static ssize_t dctrl_pllclks_show(struct vps_dispctrl *dctrl, char *buf) { int r = 0, l = 0, i; struct vps_systemvpllclk pllclk; for (i = 0; i < VPS_SYSTEM_VPLL_OUTPUT_MAX_VENC; i++) { pllclk.outputvenc = (enum vps_vplloutputclk)i; /*no need for APLL for TI814X*/ if ((pllclk.outputvenc == VPS_SYSTEM_VPLL_OUTPUT_VENC_A) && cpu_is_ti814x()) continue; r = vps_system_getpll(&pllclk); if (r) return -EINVAL; if (i == 0) l += snprintf(buf + l, PAGE_SIZE - l, "%s:%d", pllvenc_name[i].name, pllclk.outputclk); else l += snprintf(buf + l, PAGE_SIZE - l, ",%s:%d", pllvenc_name[i].name, pllclk.outputclk); } l += snprintf(buf + l, PAGE_SIZE - l, "\n"); return l; } static ssize_t dctrl_pllclks_store(struct vps_dispctrl *dctrl, const char *buf, size_t size) { struct vps_systemvpllclk pllclk; char *input = (char *)buf, *this_opt; int r = 0; if (dctrl->automode) { VPSSERR("please turn 
off automode first\n"); return -EINVAL; } dc_lock(dctrl); while (!r && (this_opt = strsep(&input, ",")) != NULL) { char *p, *venc_str, *clk_str; int i; p = strchr(this_opt, ':'); if (!p) break; *p = 0; venc_str = this_opt; clk_str = p + 1; pllclk.outputvenc = VPS_SYSTEM_VPLL_OUTPUT_MAX_VENC; pllclk.outputclk = 0xFFFFFFFF; /*get the output venc*/ for (i = 0; i < VPS_SYSTEM_VPLL_OUTPUT_MAX_VENC; i++) { if (sysfs_streq(venc_str, pllvenc_name[i].name)) { pllclk.outputvenc = pllvenc_name[i].value; break; } } if (i == VPS_SYSTEM_VPLL_OUTPUT_MAX_VENC) { VPSSERR("wrong venc %s\n", venc_str); r = -EINVAL; goto exit; } if ((pllclk.outputvenc == VPS_SYSTEM_VPLL_OUTPUT_VENC_A) && cpu_is_ti814x()) { VPSSERR("Invalid VENCA PLL\n"); r = -EINVAL; goto exit; } /*get the pll clk*/ pllclk.outputclk = simple_strtoul((const char *)clk_str, NULL, 10); r = vps_system_setpll(&pllclk); if (r) VPSSERR("set freq %s for %s failed\n", clk_str, venc_str); if (input == NULL) break; } if (!r) r = size; exit: dc_unlock(dctrl); return r; } static ssize_t dctrl_automode_show(struct vps_dispctrl *dctrl, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", dctrl->automode); } static ssize_t dctrl_automode_store(struct vps_dispctrl *dctrl, const char *buf, size_t size) { int enabled; enabled = simple_strtoul(buf, NULL, 10); dctrl->automode = (bool)enabled; return size; } static ssize_t dctrl_tiedvencs_show(struct vps_dispctrl *dctrl, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", disp_ctrl->tiedvenc); } static ssize_t dctrl_tiedvencs_store(struct vps_dispctrl *dctrl, const char *buf, size_t size) { int r = 0; int vencs = 0; int i = 0; struct vps_dcvencinfo vinfo; dc_lock(disp_ctrl); vencs = simple_strtoul(buf, NULL, 10); if (vencs & ~disp_ctrl->vencmask) { r = -EINVAL; VPSSERR("vencs %d over limit\n", vencs); goto exit; } if ((vencs == 0) || (disp_ctrl->tiedvenc == vencs)) { r = size; goto exit; } vinfo.numvencs = 0; vinfo.tiedvencs = vencs; /*assemble the structure based on the venc id*/ while (vencs >> i) { /*get id of each venc to be tied*/ if ((vencs >> i++) & 1) { int idx; int vid = 1 << (i - 1); get_idx_from_vid(vid, &idx); memcpy(&vinfo.modeinfo[vinfo.numvencs++], &venc_info.modeinfo[idx], sizeof(struct vps_dcmodeinfo)); } } if (vinfo.numvencs < 2) { VPSSERR("at least 2 vencs to tied.\n"); r = -EINVAL; goto exit; } /*set the tied venc mode*/ r = dc_set_vencmode(&vinfo); if (r) { VPSSERR("failed to set tied venc\n"); r = -EINVAL; goto exit; } disp_ctrl->tiedvenc = vinfo.tiedvencs; r = size; exit: dc_unlock(disp_ctrl); return r; } struct dctrl_attribute { struct attribute attr; ssize_t (*show)(struct vps_dispctrl *, char *); ssize_t (*store)(struct vps_dispctrl *, const char *, size_t); }; #define DCTRL_ATTR(_name, _mode, _show, _store) \ struct dctrl_attribute dctrl_attr_##_name = \ __ATTR(_name, _mode, _show, _store) static DCTRL_ATTR(tiedvencs, S_IRUGO | S_IWUSR, dctrl_tiedvencs_show, dctrl_tiedvencs_store); static DCTRL_ATTR(pllclks, S_IRUGO | S_IWUSR, dctrl_pllclks_show, dctrl_pllclks_store); static DCTRL_ATTR(automode, S_IRUGO | S_IWUSR, dctrl_automode_show, dctrl_automode_store); static struct attribute *dctrl_sysfs_attrs[] = { &dctrl_attr_tiedvencs.attr, &dctrl_attr_pllclks.attr, &dctrl_attr_automode.attr, NULL }; static ssize_t dctrl_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct vps_dispctrl *dctrl = NULL; struct dctrl_attribute *dctrl_attr = NULL; dctrl = container_of(kobj, struct vps_dispctrl, kobj); dctrl_attr = container_of(attr, struct dctrl_attribute, attr); if 
(!dctrl_attr->show) return -ENOENT; return dctrl_attr->show(dctrl, buf); } static ssize_t dctrl_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t size) { struct vps_dispctrl *dctrl; struct dctrl_attribute *dctrl_attr; dctrl = container_of(kobj, struct vps_dispctrl, kobj); dctrl_attr = container_of(attr, struct dctrl_attribute, attr); if (!dctrl_attr->store) return -ENOENT; return dctrl_attr->store(dctrl, buf, size); } static const struct sysfs_ops dctrl_sysfs_ops = { .show = dctrl_attr_show, .store = dctrl_attr_store, }; static struct kobj_type dctrl_ktype = { .sysfs_ops = &dctrl_sysfs_ops, .default_attrs = dctrl_sysfs_attrs, }; /*end of sysfs function for display controller*/ static int parse_def_clksrc(const char *clksrc) { int r = 0, i; char *str, *options, *this_opt; if (clksrc == NULL) return 0; str = kmalloc(strlen(clksrc) + 1, GFP_KERNEL); strcpy(str, clksrc); options = str; VPSSDBG("clksrc %s\n", clksrc); while (!r && (this_opt = strsep(&options, ",")) != NULL) { char *p, *venc, *csrc; int vid, idx; struct dc_blender_info *binfo; p = strchr(this_opt, ':'); if (!p) break; *p = 0; venc = this_opt; csrc = p + 1; /*parse the clock source for each possible venc input*/ for (i = 0; i < ARRAY_SIZE(vclksrc_name); i++) { if (sysfs_streq(csrc, vclksrc_name[i].name)) { if (dc_get_vencid(venc, &vid)) { VPSSERR("wrong venc\n"); break; } /* no clock for SD VENC*/ if (vid == VPS_DC_VENC_SD) break; get_idx_from_vid(vid, &idx); binfo = &disp_ctrl->blenders[idx]; /*is valid clock source*/ if (isvalidclksrc(vid, vclksrc_name[i].value)) { binfo->clksrc.clksrc = vclksrc_name[i].value; } else VPSSERR("wrong clock source\n"); break; } } if (i == ARRAY_SIZE(vclksrc_name)) VPSSERR("wrong clock source\n"); if (options == NULL) break; } kfree(str); return r; } static int parse_def_modes(const char *mode) { char *str, *options, *this_opt; int r = 0; struct vps_dcvencinfo *vinfo = &venc_info; if (mode == NULL) return 0; str = kmalloc(strlen(mode) + 1, GFP_KERNEL); strcpy(str, mode); options = str; VPSSDBG("mode %s\n", mode); while (!r && (this_opt = strsep(&options, ",")) != NULL) { char *p, *display_str, *mode_str; int vid, mid; int idx; p = strchr(this_opt, ':'); if (!p) { r = -EINVAL; break; } *p = 0; display_str = this_opt; mode_str = p + 1; if (dc_get_vencid(display_str, &vid)) { VPSSERR("venc name(%s) not existing.\n", display_str); continue; } if (dc_get_modeid(mode_str, &mid)) { VPSSERR("venc mode(%s) is not supported.\n", mode_str); continue; } if (!isvalidmode(vid, mid)) continue; get_idx_from_vid(vid, &idx); vinfo->modeinfo[idx].vencid = vid; vinfo->modeinfo[idx].minfo.standard = mid; dc_get_timing(mid, &vinfo->modeinfo[idx].minfo); if (options == NULL) break; } kfree(str); return r; } void __init vps_dc_ctrl_init(struct vps_dispctrl *dctrl) { struct vps_dcedeconfig *edecfg = dctrl->dcedecfg; dctrl->dccreatecfg->edeconfig = (struct vps_dcedeconfig *)dctrl->dcede_phy; /*setup default ede values*/ edecfg->ltienable = 0; edecfg->horzpeaking = 0; edecfg->ctienable = 0; edecfg->transadjustenable = 0; edecfg->lumapeaking = 0; edecfg->chromapeaking = 0; edecfg->minclipluma = 0; edecfg->maxclipluma = 1023; edecfg->minclipchroma = 0; edecfg->maxclipchroma = 1023; edecfg->bypass = 0; } static inline int get_payload_size(void) { int size = 0; size = sizeof(struct vps_dccreateconfig); size += sizeof(struct vps_dcedeconfig); size += sizeof(u32); size += sizeof(struct vps_dcconfig); size += sizeof(struct vps_dcvencinfo); size += sizeof(struct vps_dcnodeinput); size += sizeof(struct 
vps_dcmodeinfo); size += sizeof(struct vps_dcoutputinfo); size += sizeof(struct vps_dcvencclksrc); size += sizeof(struct vps_dccigrtconfig); size += sizeof(struct vps_dcenumnodeinput); size += sizeof(struct vps_dccomprtconfig); size += sizeof(u32); /*this is for the disable venc command*/ /*FIXME add more here*/ return size; } static inline void assign_payload_addr(struct vps_dispctrl *dctrl, struct vps_payload_info *pinfo, u32 *buf_offset) { int offset = *buf_offset; /*dc create config*/ dctrl->dccreatecfg = (struct vps_dccreateconfig *)setaddr(pinfo, &offset, &dctrl->dccreate_phy, sizeof(struct vps_dccreateconfig)); /*ede config*/ dctrl->dcedecfg = (struct vps_dcedeconfig *)setaddr(pinfo, &offset, &dctrl->dcede_phy, sizeof(struct vps_dcedeconfig)); /*return status*/ dctrl->dcrtstatus = (u32 *)setaddr(pinfo, &offset, &dctrl->dcrtst_phy, sizeof(u32)); /*dc config */ dctrl->dccfg = (struct vps_dcconfig *)setaddr(pinfo, &offset, &dctrl->dccfg_phy, sizeof(struct vps_dcconfig)); /* venc info*/ dctrl->vinfo = (struct vps_dcvencinfo *)setaddr(pinfo, &offset, &dctrl->vinfo_phy, sizeof(struct vps_dcvencinfo)); /*node input*/ dctrl->nodeinfo = (struct vps_dcnodeinput *)setaddr( pinfo, &offset, &dctrl->ninfo_phy, sizeof(struct vps_dcnodeinput)); /*venc disable*/ dctrl->dis_vencs = (u32 *)setaddr(pinfo, &offset, &dctrl->dis_vencsphy, sizeof(u32)); /*venc output infor*/ dctrl->opinfo = (struct vps_dcoutputinfo *)setaddr( pinfo, &offset, &dctrl->opinfo_phy, sizeof(struct vps_dcoutputinfo)); /*venc clock source*/ dctrl->clksrc = (struct vps_dcvencclksrc *)setaddr( pinfo, &offset, &dctrl->clksrc_phy, sizeof(struct vps_dcvencclksrc)); /*CIG runtime configuration*/ dctrl->cigcfg = (struct vps_dccigrtconfig *)setaddr( pinfo, &offset, &dctrl->cigcfg_phy, sizeof(struct vps_dccigrtconfig)); /*DC enum node input*/ dctrl->dceninput = (struct vps_dcenumnodeinput *)setaddr( pinfo, &offset, &dctrl->dceninput_phy, sizeof(struct vps_dcenumnodeinput)); /*COMP RT Config*/ dctrl->comprtcfg = (struct vps_dccomprtconfig *) setaddr( pinfo, &offset, &dctrl->comprtcfg_phy, sizeof(struct vps_dccomprtconfig)); *buf_offset = offset; } int __init vps_dc_init(struct platform_device *pdev, const char *mode, int tied_vencs, const char *clksrc) { int r = 0; int i; int size = 0, offset = 0; VPSSDBG("dctrl init\n"); dc_payload_info = kzalloc(sizeof(struct vps_payload_info), GFP_KERNEL); if (!dc_payload_info) { VPSSERR("allocated payload info failed.\n"); return -ENOMEM; } /*allocate non-cacheable memory*/ size = get_payload_size(); dc_payload_info->vaddr = vps_sbuf_alloc(size, &dc_payload_info->paddr); if (dc_payload_info->vaddr == NULL) { VPSSERR("alloc dctrl dma buffer failed\n"); dc_payload_info->paddr = 0u; r = -ENOMEM; goto cleanup; } dc_payload_info->size = PAGE_ALIGN(size); memset(dc_payload_info->vaddr, 0, dc_payload_info->size); /*allocate display_control memory*/ disp_ctrl = kzalloc(sizeof(struct vps_dispctrl), GFP_KERNEL); if (disp_ctrl == NULL) { r = -ENOMEM; goto cleanup; } disp_ctrl->automode = true; disp_ctrl->numvencs = vps_get_numvencs(); venc_info.numvencs = disp_ctrl->numvencs; disp_ctrl->vencmask = (1 << VPS_DC_MAX_VENC) - 1; if (cpu_is_ti814x()) disp_ctrl->vencmask -= VPS_DC_VENC_HDCOMP; assign_payload_addr(disp_ctrl, dc_payload_info, &offset); vps_dc_ctrl_init(disp_ctrl); /*get dc handle*/ dc_handle = vps_fvid2_create(FVID2_VPS_DCTRL_DRV, VPS_DCTRL_INST_0, (void *)disp_ctrl->dccreate_phy, (void *)dc_payload_info->paddr, NULL); if (dc_handle == NULL) { VPSSDBG("Create FVID2 DC handle status 0x%08x.\n", *(u32 
*)dc_payload_info->vaddr); r = -EINVAL; goto cleanup; } disp_ctrl->fvid2_handle = dc_handle; mutex_init(&disp_ctrl->dcmutex); r = kobject_init_and_add( &disp_ctrl->kobj, &dctrl_ktype, &pdev->dev.kobj, "system"); if (r) VPSSERR("failed to create dctrl sysfs file.\n"); /*create sysfs*/ for (i = 0; i < disp_ctrl->numvencs; i++) { struct dc_blender_info *blend = &disp_ctrl->blenders[i];; blend->idx = i; blend->actnodes = 0; blend->name = (char *)venc_name[i].name; blend->dctrl = disp_ctrl; blend->isdeviceon = true; r = kobject_init_and_add( &blend->kobj, &blender_ktype, &pdev->dev.kobj, "display%d", i); if (r) { VPSSERR("failed to create blender \ %d sysfs file.\n", i); continue; } } disp_ctrl->tiedvenc = tied_vencs; venc_info.tiedvencs = disp_ctrl->tiedvenc; /*parse the mode*/ r = parse_def_modes(mode); if (r) { VPSSERR("failed to parse mode.\n"); goto cleanup; } /*set up the default clksrc and output format*/ for (i = 0; i < disp_ctrl->numvencs; i++) { struct vps_dcvencclksrc *clksrcp = &disp_ctrl->blenders[i].clksrc; struct vps_dcoutputinfo opinfo; clksrcp->venc = venc_name[i].vid; /*set the venc output*/ opinfo.dvofidpolarity = VPS_DC_POLARITY_ACT_HIGH; opinfo.dvohspolarity = VPS_DC_POLARITY_ACT_HIGH; opinfo.dvovspolarity = VPS_DC_POLARITY_ACT_HIGH; opinfo.dvoactvidpolarity = VPS_DC_POLARITY_ACT_HIGH; switch (i) { case HDMI: opinfo.vencnodenum = VPS_DC_VENC_HDMI; opinfo.dvofmt = VPS_DC_DVOFMT_TRIPLECHAN_DISCSYNC; opinfo.dataformat = FVID2_DF_RGB24_888; if (cpu_is_ti816x() && (VPS_PLATFORM_CPU_REV_1_0 == vps_system_getcpurev())) clksrcp->clksrc = VPS_DC_CLKSRC_VENCD_DIV2; else clksrcp->clksrc = VPS_DC_CLKSRC_VENCD; break; case DVO2: opinfo.vencnodenum = VPS_DC_VENC_DVO2; opinfo.dvofmt = VPS_DC_DVOFMT_DOUBLECHAN; opinfo.dataformat = FVID2_DF_YUV422SP_UV; if (cpu_is_ti816x()) { if (VPS_PLATFORM_CPU_REV_1_0 == vps_system_getcpurev()) clksrcp->clksrc = VPS_DC_CLKSRC_VENCD_DIV2; else clksrcp->clksrc = VPS_DC_CLKSRC_VENCD; } else clksrcp->clksrc = VPS_DC_CLKSRC_VENCA; break; case SDVENC: opinfo.vencnodenum = VPS_DC_VENC_SD; if (cpu_is_ti816x()) opinfo.afmt = VPS_DC_A_OUTPUT_COMPOSITE; else opinfo.afmt = VPS_DC_A_OUTPUT_SVIDEO; opinfo.dataformat = FVID2_DF_RGB24_888; break; if (cpu_is_ti816x()) { case HDCOMP: opinfo.vencnodenum = VPS_DC_VENC_HDCOMP; opinfo.afmt = VPS_DC_A_OUTPUT_COMPONENT; opinfo.dataformat = FVID2_DF_YUV422SP_UV; clksrcp->clksrc = VPS_DC_CLKSRC_VENCA; break; } } r = dc_set_output(&opinfo); if (r) { VPSSERR("failed to set venc output\n"); goto cleanup; } } /*parse command line clksrc*/ r = parse_def_clksrc(clksrc); if (r) { VPSSERR("failed to parse clock source\n"); goto cleanup; } /*set the clock source*/ for (i = 0; i < venc_info.numvencs; i++) { if (disp_ctrl->blenders[i].idx != SDVENC) { r = dc_set_clksrc( &disp_ctrl->blenders[i].clksrc); if (r) { VPSSERR("failed to set clock resource"); goto cleanup; } } } /*config the PLL*/ for (i = 0; i < venc_info.numvencs; i++) { r = dc_set_pll_by_mid(i, venc_info.modeinfo[i].minfo.standard); if (r) { VPSSERR("failed to set pll"); goto cleanup; } } /*set the venc mode*/ r = dc_set_vencmode(&venc_info); if (r) { VPSSERR("Failed to set venc mode.\n"); goto cleanup; } /*set the the THS filter, device is still registered even if setup is failed*/ #ifdef CONFIG_ARCH_TI816X if (cpu_is_ti816x()) { r = pcf8575_ths7375_enable(TI816X_THSFILTER_ENABLE_MODULE); if ((venc_info.modeinfo[HDCOMP].minfo.standard == FVID2_STD_1080P_60) || (venc_info.modeinfo[HDCOMP].minfo.standard == FVID2_STD_1080P_50)) r |= pcf8575_ths7360_hd_enable( 
TI816X_THS7360_SF_TRUE_HD_MODE); else r |= pcf8575_ths7360_hd_enable( TI816X_THS7360_SF_HD_MODE); if (r < 0) { VPSSERR("setup 7375 filter failed\n"); disp_ctrl->blenders[HDCOMP].isdeviceon = false; } r = pcf8575_ths7360_sd_enable(TI816X_THSFILTER_ENABLE_MODULE); if (r < 0) { VPSSERR("setup 7360 filter failed.\n"); disp_ctrl->blenders[SDVENC].isdeviceon = false; } } #endif return 0; cleanup: vps_dc_deinit(pdev); return r; } int __exit vps_dc_deinit(struct platform_device *pdev) { int r = 0; int i; VPSSDBG("dctrl deinit\n"); if (disp_ctrl) { /*disable vencs*/ if (disp_ctrl->enabled_venc_ids != 0) { r = dc_venc_disable(disp_ctrl->vencmask); if (r) { VPSSERR("Failed to disable vencs.\n"); return r; } } kobject_del(&disp_ctrl->kobj); kobject_put(&disp_ctrl->kobj); for (i = 0; i < disp_ctrl->numvencs; i++) { kobject_del(&disp_ctrl->blenders[i].kobj); kobject_put(&disp_ctrl->blenders[i].kobj); } kfree(disp_ctrl); disp_ctrl = NULL; } if (dc_payload_info) { /*free memory*/ if (dc_payload_info->vaddr) vps_sbuf_free(dc_payload_info->paddr, dc_payload_info->vaddr, dc_payload_info->size); kfree(dc_payload_info); dc_payload_info = NULL; } if (dc_handle) { r = vps_fvid2_delete(dc_handle, NULL); if (r) { VPSSERR("failed to delete DC fvid2 handle.\n"); return r; } dc_handle = NULL; } return r; }
nazgee/igep-kernel
drivers/video/ti81xx/vpss/dctrl.c
C
gpl-2.0
61,473
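The refresh-rate computation in blender_timings_store() above turns a kHz pixel clock into frames per second by dividing by the total pixel count of one frame. A minimal standalone sketch with a worked 1080p60 check; the helper name and the sample timing numbers are illustrative, not taken from the driver:

#include <assert.h>

/* Sketch of the fps formula from blender_timings_store(): pixelclock is
 * in kHz, so scale by 1000 and divide by the total pixels per frame
 * (active size plus front porch, back porch and sync length). */
static unsigned int venc_refresh_rate(unsigned int pixelclock_khz,
				      unsigned int htotal,
				      unsigned int vtotal,
				      int interlaced)
{
	unsigned int fps = (pixelclock_khz * 1000u) / (htotal * vtotal);

	/* the driver doubles fps when scanformat == 0, i.e. for
	 * interlaced modes the result is the field rate */
	return interlaced ? 2 * fps : fps;
}

int main(void)
{
	/* 1080p60: 148500 kHz clock, 2200 x 1125 total pixels ->
	 * 148500000 / 2475000 = 60 */
	assert(venc_refresh_rate(148500, 2200, 1125, 0) == 60);
	return 0;
}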
/*------------------------------------------------------------------------- _fsreturnval.c - Floating point library in optimized assembly for 8051 Copyright (c) 2004, Paul Stoffregen, paul@pjrc.com This library is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this library; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. As a special exception, if you link this library with other files, some of which are compiled with SDCC, to produce an executable, this library does not by itself cause the resulting executable to be covered by the GNU General Public License. This exception does not however invalidate any other reasons why the executable file might be covered by the GNU General Public License. -------------------------------------------------------------------------*/ #define __SDCC_FLOAT_LIB #include <float.h> #ifdef FLOAT_ASM_MCS51 static void dummy(void) __naked { __asm .globl fs_round_and_return fs_round_and_return: #ifdef FLOAT_FULL_ACCURACY // discard the extra 8 bits of precision we kept around in r1 cjne r1, #128, 00001$ mov a, r2 rrc a cpl c 00001$: jc fs_zerocheck_return mov a, r2 add a, #1 mov r2, a clr a addc a, r3 mov r3, a clr a addc a, r4 mov r4, a jnc fs_zerocheck_return mov r4, #0x80 inc exp_a #endif .globl fs_zerocheck_return fs_zerocheck_return: // zero output is a special case cjne r4, #0, fs_direct_return cjne r3, #0, fs_direct_return cjne r2, #0, fs_direct_return .globl fs_return_zero fs_return_zero: clr a mov b, a mov dph, a mov dpl, a ret .globl fs_direct_return fs_direct_return: // collect all pieces and return mov c, sign_a mov a, exp_a rrc a mov b, r4 mov b.7, c mov dph, r3 mov dpl, r2 ret .globl fs_return_inf fs_return_inf: clr a mov dph, a mov dpl, a mov b, #0x80 cpl a mov c, sign_a rrc a ret .globl fs_return_nan fs_return_nan: clr a mov dph, a mov dpl, a mov b, #0xC0 mov a, #0x7F ret __endasm; } #endif
PinguinoIDE/pinguino-compilers
windows64/p8/share/sdcc/lib/src/_fsreturnval.c
C
gpl-2.0
2,557
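The fs_round_and_return entry above implements round-to-nearest, ties-to-even on the extra 8 bits of precision kept in r1. The following C sketch restates that decision under the register layout described in the assembly (24-bit mantissa in r4:r3:r2, guard byte in r1); the function is illustrative, not part of the library:

#include <assert.h>
#include <stdint.h>

/* Sketch of the rounding rule in fs_round_and_return: exactly-half
 * values (guard == 0x80) round toward an even mantissa; on mantissa
 * overflow the value is renormalized and the exponent bumped, as the
 * assembly does with "mov r4, #0x80" and "inc exp_a". */
static uint32_t round_mantissa(uint32_t mant24, uint8_t guard, uint8_t *exp)
{
	int round_up;

	if (guard > 0x80)
		round_up = 1;		/* above half: round up */
	else if (guard < 0x80)
		round_up = 0;		/* below half: truncate */
	else
		round_up = mant24 & 1;	/* tie: round to even */

	if (round_up && ++mant24 == 0x01000000) {
		mant24 = 0x00800000;	/* keep the leading 1 in bit 23 */
		(*exp)++;
	}
	return mant24;
}

int main(void)
{
	uint8_t exp = 100;

	/* tie on an odd mantissa rounds up to even */
	assert(round_mantissa(0x800001, 0x80, &exp) == 0x800002);
	/* tie on an even mantissa truncates */
	assert(round_mantissa(0x800002, 0x80, &exp) == 0x800002);
	/* all-ones mantissa overflows and bumps the exponent */
	assert(round_mantissa(0xFFFFFF, 0xFF, &exp) == 0x800000 && exp == 101);
	return 0;
}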
// SPDX-License-Identifier: GPL-2.0-only /* * e750-wm9705.c -- SoC audio for e750 * * Copyright 2007 (c) Ian Molton <spyro@f2s.com> */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/gpio.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <mach/audio.h> #include <mach/eseries-gpio.h> #include <asm/mach-types.h> static int e750_spk_amp_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { if (event & SND_SOC_DAPM_PRE_PMU) gpio_set_value(GPIO_E750_SPK_AMP_OFF, 0); else if (event & SND_SOC_DAPM_POST_PMD) gpio_set_value(GPIO_E750_SPK_AMP_OFF, 1); return 0; } static int e750_hp_amp_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { if (event & SND_SOC_DAPM_PRE_PMU) gpio_set_value(GPIO_E750_HP_AMP_OFF, 0); else if (event & SND_SOC_DAPM_POST_PMD) gpio_set_value(GPIO_E750_HP_AMP_OFF, 1); return 0; } static const struct snd_soc_dapm_widget e750_dapm_widgets[] = { SND_SOC_DAPM_HP("Headphone Jack", NULL), SND_SOC_DAPM_SPK("Speaker", NULL), SND_SOC_DAPM_MIC("Mic (Internal)", NULL), SND_SOC_DAPM_PGA_E("Headphone Amp", SND_SOC_NOPM, 0, 0, NULL, 0, e750_hp_amp_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_PGA_E("Speaker Amp", SND_SOC_NOPM, 0, 0, NULL, 0, e750_spk_amp_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), }; static const struct snd_soc_dapm_route audio_map[] = { {"Headphone Amp", NULL, "HPOUTL"}, {"Headphone Amp", NULL, "HPOUTR"}, {"Headphone Jack", NULL, "Headphone Amp"}, {"Speaker Amp", NULL, "MONOOUT"}, {"Speaker", NULL, "Speaker Amp"}, {"MIC1", NULL, "Mic (Internal)"}, }; static struct snd_soc_dai_link e750_dai[] = { { .name = "AC97", .stream_name = "AC97 HiFi", .cpu_dai_name = "pxa2xx-ac97", .codec_dai_name = "wm9705-hifi", .platform_name = "pxa-pcm-audio", .codec_name = "wm9705-codec", /* use ops to check startup state */ }, { .name = "AC97 Aux", .stream_name = "AC97 Aux", .cpu_dai_name = "pxa2xx-ac97-aux", .codec_dai_name = "wm9705-aux", .platform_name = "pxa-pcm-audio", .codec_name = "wm9705-codec", }, }; static struct snd_soc_card e750 = { .name = "Toshiba e750", .owner = THIS_MODULE, .dai_link = e750_dai, .num_links = ARRAY_SIZE(e750_dai), .dapm_widgets = e750_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(e750_dapm_widgets), .dapm_routes = audio_map, .num_dapm_routes = ARRAY_SIZE(audio_map), .fully_routed = true, }; static struct gpio e750_audio_gpios[] = { { GPIO_E750_HP_AMP_OFF, GPIOF_OUT_INIT_HIGH, "Headphone amp" }, { GPIO_E750_SPK_AMP_OFF, GPIOF_OUT_INIT_HIGH, "Speaker amp" }, }; static int e750_probe(struct platform_device *pdev) { struct snd_soc_card *card = &e750; int ret; ret = gpio_request_array(e750_audio_gpios, ARRAY_SIZE(e750_audio_gpios)); if (ret) return ret; card->dev = &pdev->dev; ret = devm_snd_soc_register_card(&pdev->dev, card); if (ret) { dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret); gpio_free_array(e750_audio_gpios, ARRAY_SIZE(e750_audio_gpios)); } return ret; } static int e750_remove(struct platform_device *pdev) { gpio_free_array(e750_audio_gpios, ARRAY_SIZE(e750_audio_gpios)); return 0; } static struct platform_driver e750_driver = { .driver = { .name = "e750-audio", .pm = &snd_soc_pm_ops, }, .probe = e750_probe, .remove = e750_remove, }; module_platform_driver(e750_driver); /* Module information */ MODULE_AUTHOR("Ian Molton <spyro@f2s.com>"); MODULE_DESCRIPTION("ALSA SoC driver for e750"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:e750-audio");
koct9i/linux
sound/soc/pxa/e750_wm9705.c
C
gpl-2.0
3,657
/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ /* * Toshiba MIPI-DSI-to-LVDS Bridge driver. * Device Model TC358764XBG/65XBG. * Reference document: TC358764XBG_65XBG_V119.pdf * * The Host sends a DSI Generic Long Write packet (Data ID = 0x29) over the * DSI link for each write access transaction to the chip configuration * registers. * Payload of this packet is 16-bit register address and 32-bit data. * Multiple data values are allowed for sequential addresses. * * The Host sends a DSI Generic Read packet (Data ID = 0x24) over the DSI * link for each read request transaction to the chip configuration * registers. Payload of this packet is further defined as follows: * 16-bit address followed by a 32-bit value (Generic Long Read Response * packet). * * The bridge supports 5 GPIO lines controlled via the GPC register. * * The bridge support I2C Master/Slave. * The I2C slave can be used for read/write to the bridge register instead of * using the DSI interface. * I2C slave address is 0x0F (read/write 0x1F/0x1E). * The I2C Master can be used for communication with the panel if * it has an I2C slave. * * NOTE: The I2C interface is not used in this driver. * Only the DSI interface is used for read/write the bridge registers. * * Pixel data can be transmitted in non-burst or burst fashion. * Non-burst refers to pixel data packet transmission time on DSI link * being roughly the same (to account for packet overhead time) * as active video line time on LVDS output (i.e. DE = 1). * And burst refers to pixel data packet transmission time on DSI link * being less than the active video line time on LVDS output. * Video mode transmission is further differentiated by the types of * timing events being transmitted. * Video pulse mode refers to the case where both sync start and sync end * events (for frame and line) are transmitted. * Video event mode refers to the case where only sync start events * are transmitted. * This is configured via register bit VPCTRL.EVTMODE. * */ /* #define DEBUG 1 */ /** * Use the I2C master to control the panel. 
*/ /* #define TC358764_USE_I2C_MASTER */ #define DRV_NAME "mipi_tc358764" #include <linux/i2c.h> #include <linux/delay.h> #include <linux/pwm.h> #include <linux/interrupt.h> #include <linux/workqueue.h> #include "msm_fb.h" #include "mipi_dsi.h" #include "mipi_tc358764_dsi2lvds.h" /* Registers definition */ /* DSI D-PHY Layer Registers */ #define D0W_DPHYCONTTX 0x0004 /* Data Lane 0 DPHY Tx Control */ #define CLW_DPHYCONTRX 0x0020 /* Clock Lane DPHY Rx Control */ #define D0W_DPHYCONTRX 0x0024 /* Data Lane 0 DPHY Rx Control */ #define D1W_DPHYCONTRX 0x0028 /* Data Lane 1 DPHY Rx Control */ #define D2W_DPHYCONTRX 0x002C /* Data Lane 2 DPHY Rx Control */ #define D3W_DPHYCONTRX 0x0030 /* Data Lane 3 DPHY Rx Control */ #define COM_DPHYCONTRX 0x0038 /* DPHY Rx Common Control */ #define CLW_CNTRL 0x0040 /* Clock Lane Control */ #define D0W_CNTRL 0x0044 /* Data Lane 0 Control */ #define D1W_CNTRL 0x0048 /* Data Lane 1 Control */ #define D2W_CNTRL 0x004C /* Data Lane 2 Control */ #define D3W_CNTRL 0x0050 /* Data Lane 3 Control */ #define DFTMODE_CNTRL 0x0054 /* DFT Mode Control */ /* DSI PPI Layer Registers */ #define PPI_STARTPPI 0x0104 /* START control bit of PPI-TX function. */ #define PPI_BUSYPPI 0x0108 #define PPI_LINEINITCNT 0x0110 /* Line Initialization Wait Counter */ #define PPI_LPTXTIMECNT 0x0114 #define PPI_LANEENABLE 0x0134 /* Enables each lane at the PPI layer. */ #define PPI_TX_RX_TA 0x013C /* DSI Bus Turn Around timing parameters */ /* Analog timer function enable */ #define PPI_CLS_ATMR 0x0140 /* Delay for Clock Lane in LPRX */ #define PPI_D0S_ATMR 0x0144 /* Delay for Data Lane 0 in LPRX */ #define PPI_D1S_ATMR 0x0148 /* Delay for Data Lane 1 in LPRX */ #define PPI_D2S_ATMR 0x014C /* Delay for Data Lane 2 in LPRX */ #define PPI_D3S_ATMR 0x0150 /* Delay for Data Lane 3 in LPRX */ #define PPI_D0S_CLRSIPOCOUNT 0x0164 #define PPI_D1S_CLRSIPOCOUNT 0x0168 /* For lane 1 */ #define PPI_D2S_CLRSIPOCOUNT 0x016C /* For lane 2 */ #define PPI_D3S_CLRSIPOCOUNT 0x0170 /* For lane 3 */ #define CLS_PRE 0x0180 /* Digital Counter inside of PHY IO */ #define D0S_PRE 0x0184 /* Digital Counter inside of PHY IO */ #define D1S_PRE 0x0188 /* Digital Counter inside of PHY IO */ #define D2S_PRE 0x018C /* Digital Counter inside of PHY IO */ #define D3S_PRE 0x0190 /* Digital Counter inside of PHY IO */ #define CLS_PREP 0x01A0 /* Digital Counter inside of PHY IO */ #define D0S_PREP 0x01A4 /* Digital Counter inside of PHY IO */ #define D1S_PREP 0x01A8 /* Digital Counter inside of PHY IO */ #define D2S_PREP 0x01AC /* Digital Counter inside of PHY IO */ #define D3S_PREP 0x01B0 /* Digital Counter inside of PHY IO */ #define CLS_ZERO 0x01C0 /* Digital Counter inside of PHY IO */ #define D0S_ZERO 0x01C4 /* Digital Counter inside of PHY IO */ #define D1S_ZERO 0x01C8 /* Digital Counter inside of PHY IO */ #define D2S_ZERO 0x01CC /* Digital Counter inside of PHY IO */ #define D3S_ZERO 0x01D0 /* Digital Counter inside of PHY IO */ #define PPI_CLRFLG 0x01E0 /* PRE Counters has reached set values */ #define PPI_CLRSIPO 0x01E4 /* Clear SIPO values, Slave mode use only. */ #define HSTIMEOUT 0x01F0 /* HS Rx Time Out Counter */ #define HSTIMEOUTENABLE 0x01F4 /* Enable HS Rx Time Out Counter */ #define DSI_STARTDSI 0x0204 /* START control bit of DSI-TX function */ #define DSI_BUSYDSI 0x0208 #define DSI_LANEENABLE 0x0210 /* Enables each lane at the Protocol layer. */ #define DSI_LANESTATUS0 0x0214 /* Displays lane is in HS RX mode. 
*/ #define DSI_LANESTATUS1 0x0218 /* Displays lane is in ULPS or STOP state */ #define DSI_INTSTATUS 0x0220 /* Interrupt Status */ #define DSI_INTMASK 0x0224 /* Interrupt Mask */ #define DSI_INTCLR 0x0228 /* Interrupt Clear */ #define DSI_LPTXTO 0x0230 /* Low Power Tx Time Out Counter */ #define DSIERRCNT 0x0300 /* DSI Error Count */ #define APLCTRL 0x0400 /* Application Layer Control */ #define RDPKTLN 0x0404 /* Command Read Packet Length */ #define VPCTRL 0x0450 /* Video Path Control */ #define HTIM1 0x0454 /* Horizontal Timing Control 1 */ #define HTIM2 0x0458 /* Horizontal Timing Control 2 */ #define VTIM1 0x045C /* Vertical Timing Control 1 */ #define VTIM2 0x0460 /* Vertical Timing Control 2 */ #define VFUEN 0x0464 /* Video Frame Timing Update Enable */ /* Mux Input Select for LVDS LINK Input */ #define LVMX0003 0x0480 /* Bit 0 to 3 */ #define LVMX0407 0x0484 /* Bit 4 to 7 */ #define LVMX0811 0x0488 /* Bit 8 to 11 */ #define LVMX1215 0x048C /* Bit 12 to 15 */ #define LVMX1619 0x0490 /* Bit 16 to 19 */ #define LVMX2023 0x0494 /* Bit 20 to 23 */ #define LVMX2427 0x0498 /* Bit 24 to 27 */ #define LVCFG 0x049C /* LVDS Configuration */ #define LVPHY0 0x04A0 /* LVDS PHY 0 */ #define LVPHY1 0x04A4 /* LVDS PHY 1 */ #define SYSSTAT 0x0500 /* System Status */ #define SYSRST 0x0504 /* System Reset */ /* GPIO Registers */ #define GPIOC 0x0520 /* GPIO Control */ #define GPIOO 0x0524 /* GPIO Output */ #define GPIOI 0x0528 /* GPIO Input */ /* I2C Registers */ #define I2CTIMCTRL 0x0540 /* I2C IF Timing and Enable Control */ #define I2CMADDR 0x0544 /* I2C Master Addressing */ #define WDATAQ 0x0548 /* Write Data Queue */ #define RDATAQ 0x054C /* Read Data Queue */ /* Chip ID and Revision ID Register */ #define IDREG 0x0580 #define TC358764XBG_ID 0x00006500 /* Debug Registers */ #define DEBUG00 0x05A0 /* Debug */ #define DEBUG01 0x05A4 /* LVDS Data */ /* PWM */ #if defined(CONFIG_FB_MSM_MIPI_BOEOT_TFT_VIDEO_WSVGA_PT_PANEL) \ || defined(CONFIG_FB_MSM_MIPI_SAMSUNG_TFT_VIDEO_WXGA_PT_PANEL) #define PWM_FREQ_HZ (5*1000) /* 33 KHZ */ #define PWM_LEVEL 200 #else #define PWM_FREQ_HZ (66*1000) /* 66 KHZ */ #define PWM_LEVEL 15 #endif #define PWM_PERIOD_USEC (USEC_PER_SEC / PWM_FREQ_HZ) #define PWM_DUTY_LEVEL (PWM_PERIOD_USEC / PWM_LEVEL) #if defined(CONFIG_FB_MSM_MIPI_BOEOT_TFT_VIDEO_WSVGA_PT_PANEL) \ || defined(CONFIG_FB_MSM_MIPI_SAMSUNG_TFT_VIDEO_WXGA_PT_PANEL) #define PWM_DUTY_MAX PWM_DUTY_LEVEL #define DUTY_DIM 5 #define DUTY_MIN 8 #define DUTY_25 20 #define DUTY_DEFAULT 70 #define DUTY_MAX 189 /* Backlight levels */ #define BRIGHTNESS_OFF 0 #define BRIGHTNESS_DIM 20 #define BRIGHTNESS_MIN 30 #define BRIGHTNESS_25 86 #define BRIGHTNESS_DEFAULT 140 #define BRIGHTNESS_MAX 255 #endif #define CMD_DELAY 100 #define DSI_MAX_LANES 4 #define KHZ 1000 #define MHZ (1000*1000) /** * Command payload for DTYPE_GEN_LWRITE (0x29) / DTYPE_GEN_READ2 (0x24). */ struct wr_cmd_payload { u16 addr; u32 data; } __packed; /* * Driver state. 
*/ static struct msm_panel_common_pdata *d2l_common_pdata; struct msm_fb_data_type *d2l_mfd; static struct dsi_buf d2l_tx_buf; static struct dsi_buf d2l_rx_buf; static int led_pwm; static struct pwm_device *bl_pwm; static int initial_powerseq; static int bl_level; #if defined(CONFIG_FB_MSM_MIPI_BOEOT_TFT_VIDEO_WSVGA_PT) \ || defined(CONFIG_FB_MSM_MIPI_SAMSUNG_TFT_VIDEO_WXGA_PT) static struct delayed_work det_work; #endif static u32 d2l_gpio_out_mask; static u32 d2l_gpio_out_val; static int mipi_d2l_init(void); /** * Read a bridge register * * @param mfd * * @return register data value */ static u32 mipi_d2l_read_reg(struct msm_fb_data_type *mfd, u16 reg) { u32 data; int len = 4; struct dsi_cmd_desc cmd_read_reg = { DTYPE_GEN_READ2, 1, 0, 1, 0, /* cmd 0x24 */ sizeof(reg), (char *) &reg}; mipi_dsi_buf_init(&d2l_tx_buf); mipi_dsi_buf_init(&d2l_rx_buf); /* mutex had been acquried at dsi_on */ len = mipi_dsi_cmds_rx(mfd, &d2l_tx_buf, &d2l_rx_buf, &cmd_read_reg, len); data = *(u32 *)d2l_rx_buf.data; if (len != 4) pr_err("%s: invalid rlen=%d, expecting 4.\n", __func__, len); pr_debug("%s: reg=0x%x.data=0x%08x.\n", __func__, reg, data); return data; } /** * Write a bridge register * * @param mfd * * @return register data value */ static u32 mipi_d2l_write_reg(struct msm_fb_data_type *mfd, u16 reg, u32 data) { struct wr_cmd_payload payload; struct dsi_cmd_desc cmd_write_reg = { DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(payload), (char *)&payload}; payload.addr = reg; payload.data = data; /* mutex had been acquried at dsi_on */ mipi_dsi_cmds_tx(mfd, &d2l_tx_buf, &cmd_write_reg, 1); pr_debug("%s: reg=0x%x. data=0x%x.\n", __func__, reg, data); return data; } /* * Init the D2L bridge via the DSI interface for Video. * * Register Addr Value * =================================================== * PPI_TX_RX_TA 0x013C 0x00040004 * PPI_LPTXTIMECNT 0x0114 0x00000004 * PPI_D0S_CLRSIPOCOUNT 0x0164 0x00000003 * PPI_D1S_CLRSIPOCOUNT 0x0168 0x00000003 * PPI_D2S_CLRSIPOCOUNT 0x016C 0x00000003 * PPI_D3S_CLRSIPOCOUNT 0x0170 0x00000003 * PPI_LANEENABLE 0x0134 0x0000001F * DSI_LANEENABLE 0x0210 0x0000001F * PPI_STARTPPI 0x0104 0x00000001 * DSI_STARTDSI 0x0204 0x00000001 * VPCTRL 0x0450 0x01000120 * HTIM1 0x0454 0x002C0028 * VTIM1 0x045C 0x001E0008 * VFUEN 0x0464 0x00000001 * LVCFG 0x049C 0x00000001 * * VPCTRL.EVTMODE (0x20) configuration bit is needed to determine whether * video timing information is delivered in pulse mode or event mode. * In pulse mode, both Sync Start and End packets are required. * In event mode, only Sync Start packets are required. 
* * @param mfd * * @return register data value */ static int mipi_d2l_dsi_init_sequence(struct msm_fb_data_type *mfd) { struct mipi_panel_info *mipi = &mfd->panel_info.mipi; u32 lanes_enable; u32 vpctrl; u32 htime1 = 0x002C0028; u32 vtime1 = 0x001E0008; lanes_enable = 0x01; /* clock-lane enable */ lanes_enable |= (mipi->data_lane0 << 1); lanes_enable |= (mipi->data_lane1 << 2); lanes_enable |= (mipi->data_lane2 << 3); lanes_enable |= (mipi->data_lane3 << 4); if (mipi->traffic_mode == DSI_NON_BURST_SYNCH_EVENT) vpctrl = 0x01000120; else if (mipi->traffic_mode == DSI_NON_BURST_SYNCH_PULSE) vpctrl = 0x01000100; else { pr_err("%s.unsupported traffic_mode %d.\n", __func__, mipi->traffic_mode); return -EINVAL; } pr_debug("%s.htime1=0x%x.\n", __func__, htime1); pr_debug("%s.vtime1=0x%x.\n", __func__, vtime1); pr_debug("%s.vpctrl=0x%x.\n", __func__, vpctrl); pr_debug("%s.lanes_enable=0x%x.\n", __func__, lanes_enable); #if defined(CONFIG_FB_MSM_MIPI_BOEOT_TFT_VIDEO_WSVGA_PT) \ || defined(CONFIG_FB_MSM_MIPI_SAMSUNG_TFT_VIDEO_WXGA_PT) /* VESA format instead of JEIDA format for RGB888 */ mipi_d2l_write_reg(mfd, LVMX0003, 0x03020100); mipi_d2l_write_reg(mfd, LVMX0407, 0x08050704); mipi_d2l_write_reg(mfd, LVMX0811, 0x0F0E0A09); mipi_d2l_write_reg(mfd, LVMX1215, 0x100D0C0B); mipi_d2l_write_reg(mfd, LVMX1619, 0x12111716); mipi_d2l_write_reg(mfd, LVMX2023, 0x1B151413); mipi_d2l_write_reg(mfd, LVMX2427, 0x061A1918); mipi_d2l_write_reg(mfd, PPI_TX_RX_TA, 0x00030005); /* BTA */ mipi_d2l_write_reg(mfd, PPI_LPTXTIMECNT, 0x00000003); mipi_d2l_write_reg(mfd, PPI_D0S_CLRSIPOCOUNT, 0x00000002); mipi_d2l_write_reg(mfd, PPI_D1S_CLRSIPOCOUNT, 0x00000002); mipi_d2l_write_reg(mfd, PPI_D2S_CLRSIPOCOUNT, 0x00000002); mipi_d2l_write_reg(mfd, PPI_D3S_CLRSIPOCOUNT, 0x00000002); mipi_d2l_write_reg(mfd, PPI_LANEENABLE, 0x0000001F); mipi_d2l_write_reg(mfd, DSI_LANEENABLE, 0x0000001F); mipi_d2l_write_reg(mfd, PPI_STARTPPI, 0x00000001); mipi_d2l_write_reg(mfd, DSI_STARTDSI, 0x00000001); mipi_d2l_write_reg(mfd, VPCTRL, 0x03F00121); /* RGB888 + Event mode */ mipi_d2l_write_reg(mfd, HTIM1, 0x00140114); mipi_d2l_write_reg(mfd, HTIM2, 0x00340400); mipi_d2l_write_reg(mfd, VTIM1, 0x0022000A); mipi_d2l_write_reg(mfd, VTIM1, 0x00150258); mipi_d2l_write_reg(mfd, VFUEN, 0x00000001); mipi_d2l_write_reg(mfd, LVPHY0, 0x0044802D); udelay(20); mipi_d2l_write_reg(mfd, LVPHY0, 0x0004802D); mipi_d2l_write_reg(mfd, LVCFG, 0x00000101); #else mipi_d2l_write_reg(mfd, SYSRST, 0xFF); msleep(30); /* VESA format instead of JEIDA format for RGB888 */ mipi_d2l_write_reg(mfd, LVMX0003, 0x03020100); mipi_d2l_write_reg(mfd, LVMX0407, 0x08050704); mipi_d2l_write_reg(mfd, LVMX0811, 0x0F0E0A09); mipi_d2l_write_reg(mfd, LVMX1215, 0x100D0C0B); mipi_d2l_write_reg(mfd, LVMX1619, 0x12111716); mipi_d2l_write_reg(mfd, LVMX2023, 0x1B151413); mipi_d2l_write_reg(mfd, LVMX2427, 0x061A1918); mipi_d2l_write_reg(mfd, PPI_TX_RX_TA, 0x00040004); /* BTA */ mipi_d2l_write_reg(mfd, PPI_LPTXTIMECNT, 0x00000004); mipi_d2l_write_reg(mfd, PPI_D0S_CLRSIPOCOUNT, 0x00000003); mipi_d2l_write_reg(mfd, PPI_D1S_CLRSIPOCOUNT, 0x00000003); mipi_d2l_write_reg(mfd, PPI_D2S_CLRSIPOCOUNT, 0x00000003); mipi_d2l_write_reg(mfd, PPI_D3S_CLRSIPOCOUNT, 0x00000003); mipi_d2l_write_reg(mfd, PPI_LANEENABLE, lanes_enable); mipi_d2l_write_reg(mfd, DSI_LANEENABLE, lanes_enable); mipi_d2l_write_reg(mfd, PPI_STARTPPI, 0x00000001); mipi_d2l_write_reg(mfd, DSI_STARTDSI, 0x00000001); mipi_d2l_write_reg(mfd, VPCTRL, vpctrl); /* RGB888 + Event mode */ mipi_d2l_write_reg(mfd, HTIM1, htime1); mipi_d2l_write_reg(mfd, 
VTIM1, vtime1); mipi_d2l_write_reg(mfd, VFUEN, 0x00000001); mipi_d2l_write_reg(mfd, LVCFG, 0x00000001); /* Enables LVDS tx */ #endif /* CONFIG_FB_MSM_MIPI_BOEOT_TFT_VIDEO_WSVGA_PT */ return 0; } #if defined(CONFIG_FB_MSM_MIPI_BOEOT_TFT_VIDEO_WSVGA_PT_PANEL) \ || defined(CONFIG_FB_MSM_MIPI_SAMSUNG_TFT_VIDEO_WXGA_PT_PANEL) static int scale_pwm_dutycycle(int level) { int scaled_level = 0; if (level == BRIGHTNESS_OFF) scaled_level = BRIGHTNESS_OFF; else if (level <= BRIGHTNESS_DIM) scaled_level = PWM_DUTY_MAX*DUTY_DIM; else if (level <= BRIGHTNESS_MIN) scaled_level = (level - BRIGHTNESS_DIM) * (PWM_DUTY_MAX * DUTY_MIN - PWM_DUTY_MAX * DUTY_DIM) / (BRIGHTNESS_MIN - BRIGHTNESS_DIM) + PWM_DUTY_MAX * DUTY_DIM; else if (level <= BRIGHTNESS_25) scaled_level = (level - BRIGHTNESS_MIN) * (PWM_DUTY_MAX * DUTY_25 - PWM_DUTY_MAX * DUTY_MIN) / (BRIGHTNESS_25 - BRIGHTNESS_MIN) + PWM_DUTY_MAX * DUTY_MIN; else if (level <= BRIGHTNESS_DEFAULT) scaled_level = (level - BRIGHTNESS_25) * (PWM_DUTY_MAX * DUTY_DEFAULT - PWM_DUTY_MAX * DUTY_25) / (BRIGHTNESS_DEFAULT - BRIGHTNESS_25) + PWM_DUTY_MAX * DUTY_25; else if (level <= BRIGHTNESS_MAX) scaled_level = (level - BRIGHTNESS_DEFAULT) * (PWM_DUTY_MAX * DUTY_MAX - PWM_DUTY_MAX * DUTY_DEFAULT) / (BRIGHTNESS_MAX - BRIGHTNESS_DEFAULT) + PWM_DUTY_MAX * DUTY_DEFAULT; return scaled_level; } #endif /** * Set Backlight level. * * @param pwm * @param level * * @return int */ static int mipi_d2l_set_backlight_level(struct pwm_device *pwm, int level) { int ret = 0; pr_debug("%s: level=%d.\n", __func__, level); #if defined(CONFIG_FB_MSM_MIPI_BOEOT_TFT_VIDEO_WSVGA_PT_PANEL) \ || defined(CONFIG_FB_MSM_MIPI_SAMSUNG_TFT_VIDEO_WXGA_PT_PANEL) if ((pwm == NULL) || (level > BRIGHTNESS_MAX) || (level < 0)) { pr_err("%s.pwm=NULL.\n", __func__); return -EINVAL; } level = scale_pwm_dutycycle(level); #else if ((pwm == NULL) || (level > PWM_LEVEL) || (level < 0)) { pr_err("%s.pwm=NULL.\n", __func__); return -EINVAL; } #endif ret = pwm_config(pwm, PWM_DUTY_LEVEL * level, PWM_PERIOD_USEC); if (ret) { pr_err("%s: pwm_config() failed err=%d.\n", __func__, ret); return ret; } ret = pwm_enable(pwm); if (ret) { pr_err("%s: pwm_enable() failed err=%d\n", __func__, ret); return ret; } return 0; } #if defined(CONFIG_FB_MSM_MIPI_BOEOT_TFT_VIDEO_WSVGA_PT) \ || defined(CONFIG_FB_MSM_MIPI_SAMSUNG_TFT_VIDEO_WXGA_PT) static void blenable_work_func(struct work_struct *work) { int ret = 0; /* Set backlight via PWM */ if (bl_pwm) { ret = mipi_d2l_set_backlight_level(bl_pwm, bl_level); if (ret) pr_err("%s.mipi_d2l_set_backlight_level.ret=%d", __func__, ret); } } #endif /** * LCD ON. * * Set LCD On via MIPI interface or I2C-Slave interface. * Set Backlight on. * * @param pdev * * @return int */ static int mipi_d2l_lcd_on(struct platform_device *pdev) { int ret = 0; u32 chip_id; struct msm_fb_data_type *mfd; pr_info("%s.\n", __func__); /* wait for valid clock before sending data over DSI or I2C. */ msleep(30); mfd = platform_get_drvdata(pdev); d2l_mfd = mfd; if (!mfd) return -ENODEV; if (mfd->key != MFD_KEY) return -EINVAL; chip_id = mipi_d2l_read_reg(mfd, IDREG); if (chip_id != TC358764XBG_ID) { pr_err("%s: invalid chip_id=0x%x", __func__, chip_id); return -ENODEV; } ret = mipi_d2l_dsi_init_sequence(mfd); if (ret) return ret; mipi_d2l_write_reg(mfd, GPIOC, d2l_gpio_out_mask); /* Set GPIOs: gpio#4=U/D=0 , gpio#3=L/R=1 , gpio#2,1=CABC=0. 
*/ mipi_d2l_write_reg(mfd, GPIOO, d2l_gpio_out_val); #if defined(CONFIG_FB_MSM_MIPI_BOEOT_TFT_VIDEO_WSVGA_PT_PANEL) \ || defined(CONFIG_FB_MSM_MIPI_SAMSUNG_TFT_VIDEO_WXGA_PT_PANEL) if ((bl_level == 0) && (!initial_powerseq)) { bl_level = BRIGHTNESS_DEFAULT ; /* Default ON value */ INIT_DELAYED_WORK(&det_work, blenable_work_func); schedule_delayed_work(&det_work, msecs_to_jiffies(250)); } #else if ((bl_level == 0) && (!initial_powerseq)) bl_level = PWM_LEVEL * 2 / 3 ; /* Default ON value */ /* Set backlight via PWM */ if (bl_pwm) { ret = mipi_d2l_set_backlight_level(bl_pwm, bl_level); if (ret) pr_err("%s.mipi_d2l_set_backlight_level.ret=%d", __func__, ret); } #endif pr_info("%s.ret=%d.\n", __func__, ret); /* Set power on flag */ initial_powerseq = 1; return ret; } /** * LCD OFF. * * @param pdev * * @return int */ static int mipi_d2l_lcd_off(struct platform_device *pdev) { int ret; struct msm_fb_data_type *mfd; pr_info("%s.\n", __func__); mfd = platform_get_drvdata(pdev); if (!mfd) return -ENODEV; if (mfd->key != MFD_KEY) return -EINVAL; ret = mipi_d2l_set_backlight_level(bl_pwm, 0); pr_info("%s.ret=%d.\n", __func__, ret); return ret; } static void mipi_d2l_set_backlight(struct msm_fb_data_type *mfd) { int level = mfd->bl_level; pr_debug("%s.lvl=%d.\n", __func__, level); mipi_d2l_set_backlight_level(bl_pwm, level); bl_level = level; } static struct msm_fb_panel_data d2l_panel_data = { .on = mipi_d2l_lcd_on, .off = mipi_d2l_lcd_off, .set_backlight = mipi_d2l_set_backlight, }; /** * Probe for device. * * Both the "target" and "panel" device use the same probe function. * "Target" device has id=0, "Panel" devic has non-zero id. * Target device should register first, passing msm_panel_common_pdata. * Panel device passing msm_panel_info. * * @param pdev * * @return int */ static int __devinit mipi_d2l_probe(struct platform_device *pdev) { int ret = 0; struct msm_panel_info *pinfo = NULL; pr_debug("%s.id=%d.\n", __func__, pdev->id); if (pdev->id == 0) { /* d2l_common_pdata = platform_get_drvdata(pdev); */ d2l_common_pdata = pdev->dev.platform_data; if (d2l_common_pdata == NULL) { pr_err("%s: no PWM gpio specified.\n", __func__); return 0; } led_pwm = d2l_common_pdata->gpio_num[0]; d2l_gpio_out_mask = d2l_common_pdata->gpio_num[1] >> 8; d2l_gpio_out_val = d2l_common_pdata->gpio_num[1] & 0xFF; mipi_dsi_buf_alloc(&d2l_tx_buf, DSI_BUF_SIZE); mipi_dsi_buf_alloc(&d2l_rx_buf, DSI_BUF_SIZE); return 0; } if (d2l_common_pdata == NULL) { pr_err("%s: d2l_common_pdata is NULL.\n", __func__); return -ENODEV; } bl_pwm = NULL; if (led_pwm >= 0) { bl_pwm = pwm_request(led_pwm, "lcd-backlight"); if (bl_pwm == NULL || IS_ERR(bl_pwm)) { pr_err("%s pwm_request() failed.id=%d.bl_pwm=%d.\n", __func__, led_pwm, (int) bl_pwm); bl_pwm = NULL; return -EIO; } else { pr_debug("%s.pwm_request() ok.pwm-id=%d.\n", __func__, led_pwm); } } else { pr_info("%s. led_pwm is invalid.\n", __func__); } /* pinfo = platform_get_drvdata(pdev); */ pinfo = pdev->dev.platform_data; if (pinfo == NULL) { pr_err("%s: pinfo is NULL.\n", __func__); return -ENODEV; } d2l_panel_data.panel_info = *pinfo; pdev->dev.platform_data = &d2l_panel_data; msm_fb_add_device(pdev); return ret; } /** * Device removal notification handler. * * @param pdev * * @return int */ static int __devexit mipi_d2l_remove(struct platform_device *pdev) { /* Note: There are no APIs to remove fb device and free DSI buf. */ pr_debug("%s.\n", __func__); if (bl_pwm) { pwm_free(bl_pwm); bl_pwm = NULL; } return 0; } /** * Register the panel device. 
* * @param pinfo * @param channel_id * @param panel_id * * @return int */ int mipi_tc358764_dsi2lvds_register(struct msm_panel_info *pinfo, u32 channel_id, u32 panel_id) { struct platform_device *pdev = NULL; int ret; /* Use DSI-to-LVDS bridge */ const char driver_name[] = "mipi_tc358764"; pr_debug("%s.\n", __func__); ret = mipi_d2l_init(); if (ret) { pr_err("mipi_d2l_init() failed with ret %u\n", ret); return ret; } /* Note: the device id should be non-zero */ pdev = platform_device_alloc(driver_name, (panel_id << 8)|channel_id); if (pdev == NULL) return -ENOMEM; pdev->dev.platform_data = pinfo; ret = platform_device_add(pdev); if (ret) { pr_err("%s: platform_device_register failed!\n", __func__); goto err_device_put; } return 0; err_device_put: platform_device_put(pdev); return ret; } static struct platform_driver d2l_driver = { .probe = mipi_d2l_probe, .remove = __devexit_p(mipi_d2l_remove), .driver = { .name = DRV_NAME, }, }; /** * Module Init * * @return int */ static int mipi_d2l_init(void) { pr_debug("%s.\n", __func__); return platform_driver_register(&d2l_driver); } MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Toshiba MIPI-DSI-to-LVDS bridge driver"); MODULE_AUTHOR("Amir Samuelov <amirs@codeaurora.org>");
sgs3/SGH-T999V_Kernel
drivers/video/msm/mipi_tc358764_dsi2lvds.c
C
gpl-2.0
24,068
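Per the bridge driver's header comment, every register write above travels as a DSI Generic Long Write (Data ID 0x29) carrying a 16-bit address followed by 32-bit data, which is exactly the layout of struct wr_cmd_payload. A freestanding sketch of that packing; the helper is hypothetical, and the byte-order note assumes a little-endian host, as on the ARM targets this driver serves:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Same layout as the driver's wr_cmd_payload: 6 packed bytes per write. */
struct wr_cmd_payload {
	uint16_t addr;
	uint32_t data;
} __attribute__((packed));

/* Hypothetical helper: serialize one register write, e.g. the init
 * sequence entry VPCTRL (0x0450) = 0x01000120. */
static void pack_reg_write(uint8_t out[6], uint16_t reg, uint32_t val)
{
	struct wr_cmd_payload p = { .addr = reg, .data = val };

	memcpy(out, &p, sizeof(p));
}

int main(void)
{
	uint8_t buf[6];

	pack_reg_write(buf, 0x0450, 0x01000120);
	/* little-endian host: address low byte first, then the 32-bit data */
	assert(buf[0] == 0x50 && buf[1] == 0x04 && buf[5] == 0x01);
	return 0;
}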
/* GStreamer base utils library missing plugins support * Copyright (C) 2006 Tim-Philipp Müller <tim centricular net> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. */ /** * SECTION:gstpbutilsmissingplugins * @short_description: Create, recognise and parse missing-plugins messages * * <refsect2> * <para> * Functions to create, recognise and parse missing-plugins messages for * applications and elements. * </para> * <para> * Missing-plugin messages are posted on the bus by elements like decodebin * or playbin if they can't find an appropriate source element or decoder * element. The application can use these messages for two things: * <itemizedlist> * <listitem><para> * concise error/problem reporting to the user mentioning what exactly * is missing, see gst_missing_plugin_message_get_description() * </para></listitem> * <listitem><para> * initiate installation of missing plugins, see * gst_missing_plugin_message_get_installer_detail() and * gst_install_plugins_async() * </para></listitem> * </itemizedlist> * </para> * <para> * Applications may also create missing-plugin messages themselves to install * required elements that are missing, using the install mechanism mentioned * above. * </para> * </refsect2> */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #ifdef HAVE_SYS_TYPES_H # include <sys/types.h> #endif #ifdef HAVE_UNISTD_H # include <unistd.h> /* getpid on UNIX */ #endif #ifdef HAVE_PROCESS_H # include <process.h> /* getpid on win32 */ #endif #include "gst/gst-i18n-plugin.h" #include "pbutils.h" #include "pbutils-private.h" #include <string.h> #define GST_DETAIL_STRING_MARKER "gstreamer" typedef enum { GST_MISSING_TYPE_UNKNOWN = 0, GST_MISSING_TYPE_URISOURCE, GST_MISSING_TYPE_URISINK, GST_MISSING_TYPE_ELEMENT, GST_MISSING_TYPE_DECODER, GST_MISSING_TYPE_ENCODER } GstMissingType; static const struct { GstMissingType type; const gchar type_string[12]; } missing_type_mapping[] = { { GST_MISSING_TYPE_URISOURCE, "urisource"}, { GST_MISSING_TYPE_URISINK, "urisink"}, { GST_MISSING_TYPE_ELEMENT, "element"}, { GST_MISSING_TYPE_DECODER, "decoder"}, { GST_MISSING_TYPE_ENCODER, "encoder"} }; static GstMissingType missing_structure_get_type (const GstStructure * s) { const gchar *type; guint i; type = gst_structure_get_string (s, "type"); g_return_val_if_fail (type != NULL, GST_MISSING_TYPE_UNKNOWN); for (i = 0; i < G_N_ELEMENTS (missing_type_mapping); ++i) { if (strcmp (missing_type_mapping[i].type_string, type) == 0) return missing_type_mapping[i].type; } return GST_MISSING_TYPE_UNKNOWN; } GstCaps * copy_and_clean_caps (const GstCaps * caps) { GstStructure *s; GstCaps *ret; ret = gst_caps_copy (caps); /* make caps easier to interpret, remove common fields that are likely * to be irrelevant for determining the right plugin (ie. 
mostly fields * where template caps usually have the standard MIN - MAX range as value) */ s = gst_caps_get_structure (ret, 0); gst_structure_remove_field (s, "codec_data"); gst_structure_remove_field (s, "palette_data"); gst_structure_remove_field (s, "pixel-aspect-ratio"); gst_structure_remove_field (s, "framerate"); gst_structure_remove_field (s, "leaf_size"); gst_structure_remove_field (s, "packet_size"); gst_structure_remove_field (s, "block_align"); gst_structure_remove_field (s, "metadata-interval"); /* icy caps */ /* decoders/encoders almost always handle the usual width/height/channel/rate * range (and if we don't remove this then the app will have a much harder * time blacklisting formats it has unsuccessfully tried to install before) */ gst_structure_remove_field (s, "width"); gst_structure_remove_field (s, "depth"); gst_structure_remove_field (s, "height"); gst_structure_remove_field (s, "channels"); gst_structure_remove_field (s, "rate"); /* rtp fields */ gst_structure_remove_field (s, "config"); gst_structure_remove_field (s, "clock-rate"); gst_structure_remove_field (s, "clock-base"); gst_structure_remove_field (s, "maxps"); gst_structure_remove_field (s, "seqnum-base"); gst_structure_remove_field (s, "npt-start"); gst_structure_remove_field (s, "npt-stop"); gst_structure_remove_field (s, "play-speed"); gst_structure_remove_field (s, "play-scale"); gst_structure_remove_field (s, "dynamic_range"); return ret; } /** * gst_missing_uri_source_message_new: * @element: the #GstElement posting the message * @protocol: the URI protocol the missing source needs to implement, * e.g. "http" or "mms" * * Creates a missing-plugin message for @element to notify the application * that a source element for a particular URI protocol is missing. This * function is mainly for use in plugins. * * Returns: (transfer full): a new #GstMessage, or NULL on error */ GstMessage * gst_missing_uri_source_message_new (GstElement * element, const gchar * protocol) { GstStructure *s; gchar *description; g_return_val_if_fail (element != NULL, NULL); g_return_val_if_fail (GST_IS_ELEMENT (element), NULL); g_return_val_if_fail (protocol != NULL, NULL); description = gst_pb_utils_get_source_description (protocol); s = gst_structure_new ("missing-plugin", "type", G_TYPE_STRING, "urisource", "detail", G_TYPE_STRING, protocol, "name", G_TYPE_STRING, description, NULL); g_free (description); return gst_message_new_element (GST_OBJECT_CAST (element), s); } /** * gst_missing_uri_sink_message_new: * @element: the #GstElement posting the message * @protocol: the URI protocol the missing sink needs to implement, * e.g. "http" or "smb" * * Creates a missing-plugin message for @element to notify the application * that a sink element for a particular URI protocol is missing. This * function is mainly for use in plugins. 
* * Returns: (transfer full): a new #GstMessage, or NULL on error */ GstMessage * gst_missing_uri_sink_message_new (GstElement * element, const gchar * protocol) { GstStructure *s; gchar *description; g_return_val_if_fail (element != NULL, NULL); g_return_val_if_fail (GST_IS_ELEMENT (element), NULL); g_return_val_if_fail (protocol != NULL, NULL); description = gst_pb_utils_get_sink_description (protocol); s = gst_structure_new ("missing-plugin", "type", G_TYPE_STRING, "urisink", "detail", G_TYPE_STRING, protocol, "name", G_TYPE_STRING, description, NULL); g_free (description); return gst_message_new_element (GST_OBJECT_CAST (element), s); } /** * gst_missing_element_message_new: * @element: the #GstElement posting the message * @factory_name: the name of the missing element (element factory), * e.g. "videoscale" or "cdparanoiasrc" * * Creates a missing-plugin message for @element to notify the application * that a certain required element is missing. This function is mainly for * use in plugins. * * Returns: (transfer full): a new #GstMessage, or NULL on error */ GstMessage * gst_missing_element_message_new (GstElement * element, const gchar * factory_name) { GstStructure *s; gchar *description; g_return_val_if_fail (element != NULL, NULL); g_return_val_if_fail (GST_IS_ELEMENT (element), NULL); g_return_val_if_fail (factory_name != NULL, NULL); description = gst_pb_utils_get_element_description (factory_name); s = gst_structure_new ("missing-plugin", "type", G_TYPE_STRING, "element", "detail", G_TYPE_STRING, factory_name, "name", G_TYPE_STRING, description, NULL); g_free (description); return gst_message_new_element (GST_OBJECT_CAST (element), s); } /** * gst_missing_decoder_message_new: * @element: the #GstElement posting the message * @decode_caps: the (fixed) caps for which a decoder element is needed * * Creates a missing-plugin message for @element to notify the application * that a decoder element for a particular set of (fixed) caps is missing. * This function is mainly for use in plugins. * * Returns: (transfer full): a new #GstMessage, or NULL on error */ GstMessage * gst_missing_decoder_message_new (GstElement * element, const GstCaps * decode_caps) { GstStructure *s; GstCaps *caps; gchar *description; g_return_val_if_fail (element != NULL, NULL); g_return_val_if_fail (GST_IS_ELEMENT (element), NULL); g_return_val_if_fail (decode_caps != NULL, NULL); g_return_val_if_fail (GST_IS_CAPS (decode_caps), NULL); g_return_val_if_fail (!gst_caps_is_any (decode_caps), NULL); g_return_val_if_fail (!gst_caps_is_empty (decode_caps), NULL); g_return_val_if_fail (gst_caps_is_fixed (decode_caps), NULL); description = gst_pb_utils_get_decoder_description (decode_caps); caps = copy_and_clean_caps (decode_caps); s = gst_structure_new ("missing-plugin", "type", G_TYPE_STRING, "decoder", "detail", GST_TYPE_CAPS, caps, "name", G_TYPE_STRING, description, NULL); gst_caps_unref (caps); g_free (description); return gst_message_new_element (GST_OBJECT_CAST (element), s); } /** * gst_missing_encoder_message_new: * @element: the #GstElement posting the message * @encode_caps: the (fixed) caps for which an encoder element is needed * * Creates a missing-plugin message for @element to notify the application * that an encoder element for a particular set of (fixed) caps is missing. * This function is mainly for use in plugins. 
* * Returns: (transfer full): a new #GstMessage, or NULL on error */ GstMessage * gst_missing_encoder_message_new (GstElement * element, const GstCaps * encode_caps) { GstStructure *s; GstCaps *caps; gchar *description; g_return_val_if_fail (element != NULL, NULL); g_return_val_if_fail (GST_IS_ELEMENT (element), NULL); g_return_val_if_fail (encode_caps != NULL, NULL); g_return_val_if_fail (GST_IS_CAPS (encode_caps), NULL); g_return_val_if_fail (!gst_caps_is_any (encode_caps), NULL); g_return_val_if_fail (!gst_caps_is_empty (encode_caps), NULL); g_return_val_if_fail (gst_caps_is_fixed (encode_caps), NULL); description = gst_pb_utils_get_encoder_description (encode_caps); caps = copy_and_clean_caps (encode_caps); s = gst_structure_new ("missing-plugin", "type", G_TYPE_STRING, "encoder", "detail", GST_TYPE_CAPS, caps, "name", G_TYPE_STRING, description, NULL); gst_caps_unref (caps); g_free (description); return gst_message_new_element (GST_OBJECT_CAST (element), s); } static gboolean missing_structure_get_string_detail (const GstStructure * s, gchar ** p_detail) { const gchar *detail; GType detail_type; *p_detail = NULL; detail_type = gst_structure_get_field_type (s, "detail"); if (!g_type_is_a (detail_type, G_TYPE_STRING)) { GST_WARNING ("expected 'detail' field to be of G_TYPE_STRING"); return FALSE; } detail = gst_structure_get_string (s, "detail"); if (detail == NULL || *detail == '\0') { GST_WARNING ("empty 'detail' field"); return FALSE; } *p_detail = g_strdup (detail); return TRUE; } static gboolean missing_structure_get_caps_detail (const GstStructure * s, GstCaps ** p_caps) { const GstCaps *caps; const GValue *val; GType detail_type; *p_caps = NULL; detail_type = gst_structure_get_field_type (s, "detail"); if (!g_type_is_a (detail_type, GST_TYPE_CAPS)) { GST_WARNING ("expected 'detail' field to be of GST_TYPE_CAPS"); return FALSE; } val = gst_structure_get_value (s, "detail"); caps = gst_value_get_caps (val); if (gst_caps_is_empty (caps) || gst_caps_is_any (caps)) { GST_WARNING ("EMPTY or ANY caps not allowed"); return FALSE; } *p_caps = gst_caps_copy (caps); return TRUE; } /** * gst_missing_plugin_message_get_installer_detail: * @msg: a missing-plugin #GstMessage of type #GST_MESSAGE_ELEMENT * * Returns an opaque string containing all the details about the missing * element to be passed to an external installer called via * gst_install_plugins_async() or gst_install_plugins_sync(). * * This function is mainly for applications that call external plugin * installation mechanisms using one of the two above-mentioned functions. * * Returns: a newly-allocated detail string, or NULL on error. Free string * with g_free() when not needed any longer. 
*/ gchar * gst_missing_plugin_message_get_installer_detail (GstMessage * msg) { GstMissingType missing_type; const gchar *progname; const gchar *type; GString *str = NULL; gchar *detail = NULL; gchar *desc; g_return_val_if_fail (gst_is_missing_plugin_message (msg), NULL); GST_LOG ("Parsing missing-plugin message: %" GST_PTR_FORMAT, msg->structure); missing_type = missing_structure_get_type (msg->structure); if (missing_type == GST_MISSING_TYPE_UNKNOWN) { GST_WARNING ("couldn't parse 'type' field"); goto error; } type = gst_structure_get_string (msg->structure, "type"); g_assert (type != NULL); /* validity already checked above */ /* FIXME: use gst_installer_detail_new() here too */ str = g_string_new (GST_DETAIL_STRING_MARKER "|"); g_string_append_printf (str, "%u.%u|", GST_VERSION_MAJOR, GST_VERSION_MINOR); progname = (const gchar *) g_get_prgname (); if (progname) { g_string_append_printf (str, "%s|", progname); } else { g_string_append_printf (str, "pid/%lu|", (gulong) getpid ()); } desc = gst_missing_plugin_message_get_description (msg); if (desc) { g_strdelimit (desc, "|", '#'); g_string_append_printf (str, "%s|", desc); g_free (desc); } else { g_string_append (str, "|"); } switch (missing_type) { case GST_MISSING_TYPE_URISOURCE: case GST_MISSING_TYPE_URISINK: case GST_MISSING_TYPE_ELEMENT: if (!missing_structure_get_string_detail (msg->structure, &detail)) goto error; break; case GST_MISSING_TYPE_DECODER: case GST_MISSING_TYPE_ENCODER:{ GstCaps *caps = NULL; if (!missing_structure_get_caps_detail (msg->structure, &caps)) goto error; detail = gst_caps_to_string (caps); gst_caps_unref (caps); break; } default: g_return_val_if_reached (NULL); } g_string_append_printf (str, "%s-%s", type, detail); g_free (detail); return g_string_free (str, FALSE); /* ERRORS */ error: { GST_WARNING ("Failed to parse missing-plugin msg: %" GST_PTR_FORMAT, msg); if (str) g_string_free (str, TRUE); return NULL; } } /** * gst_missing_plugin_message_get_description: * @msg: a missing-plugin #GstMessage of type #GST_MESSAGE_ELEMENT * * Returns a localised string describing the missing feature, for use in * error dialogs and the like. Should never return NULL unless @msg is not * a valid missing-plugin message. * * This function is mainly for applications that need a human-readable string * describing a missing plugin, given a previously collected missing-plugin * message * * Returns: a newly-allocated description string, or NULL on error. Free * string with g_free() when not needed any longer. 
 */
gchar *
gst_missing_plugin_message_get_description (GstMessage * msg)
{
  GstMissingType missing_type;
  const gchar *desc;
  gchar *ret = NULL;

  g_return_val_if_fail (gst_is_missing_plugin_message (msg), NULL);

  GST_LOG ("Parsing missing-plugin message: %" GST_PTR_FORMAT, msg->structure);

  desc = gst_structure_get_string (msg->structure, "name");
  if (desc != NULL && *desc != '\0') {
    ret = g_strdup (desc);
    goto done;
  }

  /* fallback #1 */
  missing_type = missing_structure_get_type (msg->structure);

  switch (missing_type) {
    case GST_MISSING_TYPE_URISOURCE:
    case GST_MISSING_TYPE_URISINK:
    case GST_MISSING_TYPE_ELEMENT:{
      gchar *detail = NULL;

      if (missing_structure_get_string_detail (msg->structure, &detail)) {
        if (missing_type == GST_MISSING_TYPE_URISOURCE)
          ret = gst_pb_utils_get_source_description (detail);
        else if (missing_type == GST_MISSING_TYPE_URISINK)
          ret = gst_pb_utils_get_sink_description (detail);
        else
          ret = gst_pb_utils_get_element_description (detail);
        g_free (detail);
      }
      break;
    }
    case GST_MISSING_TYPE_DECODER:
    case GST_MISSING_TYPE_ENCODER:{
      GstCaps *caps = NULL;

      if (missing_structure_get_caps_detail (msg->structure, &caps)) {
        if (missing_type == GST_MISSING_TYPE_DECODER)
          ret = gst_pb_utils_get_decoder_description (caps);
        else
          ret = gst_pb_utils_get_encoder_description (caps);
        gst_caps_unref (caps);
      }
      break;
    }
    default:
      break;
  }

  if (ret)
    goto done;

  /* fallback #2 */
  switch (missing_type) {
    case GST_MISSING_TYPE_URISOURCE:
      desc = _("Unknown source element");
      break;
    case GST_MISSING_TYPE_URISINK:
      desc = _("Unknown sink element");
      break;
    case GST_MISSING_TYPE_ELEMENT:
      desc = _("Unknown element");
      break;
    case GST_MISSING_TYPE_DECODER:
      desc = _("Unknown decoder element");
      break;
    case GST_MISSING_TYPE_ENCODER:
      desc = _("Unknown encoder element");
      break;
    default:
      /* we should really never get here, but we better still return
       * something if we do */
      desc = _("Plugin or element of unknown type");
      break;
  }
  ret = g_strdup (desc);

done:

  GST_LOG ("returning '%s'", ret);
  return ret;
}

/**
 * gst_is_missing_plugin_message:
 * @msg: a #GstMessage
 *
 * Checks whether @msg is a missing plugins message.
 *
 * Returns: %TRUE if @msg is a missing-plugins message, otherwise %FALSE.
 */
gboolean
gst_is_missing_plugin_message (GstMessage * msg)
{
  g_return_val_if_fail (msg != NULL, FALSE);
  g_return_val_if_fail (GST_IS_MESSAGE (msg), FALSE);

  if (GST_MESSAGE_TYPE (msg) != GST_MESSAGE_ELEMENT || msg->structure == NULL)
    return FALSE;

  return gst_structure_has_name (msg->structure, "missing-plugin");
}

/* takes ownership of the description */
static gchar *
gst_installer_detail_new (gchar * description, const gchar * type,
    const gchar * detail)
{
  const gchar *progname;
  GString *s;

  s = g_string_new (GST_DETAIL_STRING_MARKER "|");
  g_string_append_printf (s, "%u.%u|", GST_VERSION_MAJOR, GST_VERSION_MINOR);

  progname = (const gchar *) g_get_prgname ();
  if (progname) {
    g_string_append_printf (s, "%s|", progname);
  } else {
    g_string_append_printf (s, "pid/%lu|", (gulong) getpid ());
  }

  if (description) {
    g_strdelimit (description, "|", '#');
    g_string_append_printf (s, "%s|", description);
    g_free (description);
  } else {
    g_string_append (s, "|");
  }

  g_string_append_printf (s, "%s-%s", type, detail);

  return g_string_free (s, FALSE);
}

/**
 * gst_missing_uri_source_installer_detail_new:
 * @protocol: the URI protocol the missing source needs to implement,
 *     e.g. "http" or "mms"
 *
 * Returns an opaque string containing all the details about the missing
 * element to be passed to an external installer called via
 * gst_install_plugins_async() or gst_install_plugins_sync().
 *
 * This function is mainly for applications that call external plugin
 * installation mechanisms using one of the two above-mentioned functions in
 * the case where the application knows exactly what kind of plugin it is
 * missing.
 *
 * Returns: a newly-allocated detail string, or NULL on error. Free string
 * with g_free() when not needed any longer.
 *
 * Since: 0.10.15
 */
gchar *
gst_missing_uri_source_installer_detail_new (const gchar * protocol)
{
  gchar *desc;

  g_return_val_if_fail (protocol != NULL, NULL);

  desc = gst_pb_utils_get_source_description (protocol);
  return gst_installer_detail_new (desc, "urisource", protocol);
}

/**
 * gst_missing_uri_sink_installer_detail_new:
 * @protocol: the URI protocol the missing sink needs to implement,
 *     e.g. "http" or "smb"
 *
 * Returns an opaque string containing all the details about the missing
 * element to be passed to an external installer called via
 * gst_install_plugins_async() or gst_install_plugins_sync().
 *
 * This function is mainly for applications that call external plugin
 * installation mechanisms using one of the two above-mentioned functions in
 * the case where the application knows exactly what kind of plugin it is
 * missing.
 *
 * Returns: a newly-allocated detail string, or NULL on error. Free string
 * with g_free() when not needed any longer.
 *
 * Since: 0.10.15
 */
gchar *
gst_missing_uri_sink_installer_detail_new (const gchar * protocol)
{
  gchar *desc;

  g_return_val_if_fail (protocol != NULL, NULL);

  desc = gst_pb_utils_get_sink_description (protocol);
  return gst_installer_detail_new (desc, "urisink", protocol);
}

/**
 * gst_missing_element_installer_detail_new:
 * @factory_name: the name of the missing element (element factory),
 *     e.g. "videoscale" or "cdparanoiasrc"
 *
 * Returns an opaque string containing all the details about the missing
 * element to be passed to an external installer called via
 * gst_install_plugins_async() or gst_install_plugins_sync().
 *
 * This function is mainly for applications that call external plugin
 * installation mechanisms using one of the two above-mentioned functions in
 * the case where the application knows exactly what kind of plugin it is
 * missing.
 *
 * Returns: a newly-allocated detail string, or NULL on error. Free string
 * with g_free() when not needed any longer.
 *
 * Since: 0.10.15
 */
gchar *
gst_missing_element_installer_detail_new (const gchar * factory_name)
{
  gchar *desc;

  g_return_val_if_fail (factory_name != NULL, NULL);

  desc = gst_pb_utils_get_element_description (factory_name);
  return gst_installer_detail_new (desc, "element", factory_name);
}

/**
 * gst_missing_decoder_installer_detail_new:
 * @decode_caps: the (fixed) caps for which a decoder element is needed
 *
 * Returns an opaque string containing all the details about the missing
 * element to be passed to an external installer called via
 * gst_install_plugins_async() or gst_install_plugins_sync().
 *
 * This function is mainly for applications that call external plugin
 * installation mechanisms using one of the two above-mentioned functions in
 * the case where the application knows exactly what kind of plugin it is
 * missing.
 *
 * Returns: a newly-allocated detail string, or NULL on error. Free string
 * with g_free() when not needed any longer.
* * Since: 0.10.15 */ gchar * gst_missing_decoder_installer_detail_new (const GstCaps * decode_caps) { GstCaps *caps; gchar *detail_str, *caps_str, *desc; g_return_val_if_fail (decode_caps != NULL, NULL); g_return_val_if_fail (GST_IS_CAPS (decode_caps), NULL); g_return_val_if_fail (!gst_caps_is_any (decode_caps), NULL); g_return_val_if_fail (!gst_caps_is_empty (decode_caps), NULL); g_return_val_if_fail (gst_caps_is_fixed (decode_caps), NULL); desc = gst_pb_utils_get_decoder_description (decode_caps); caps = copy_and_clean_caps (decode_caps); caps_str = gst_caps_to_string (caps); detail_str = gst_installer_detail_new (desc, "decoder", caps_str); g_free (caps_str); gst_caps_unref (caps); return detail_str; } /** * gst_missing_encoder_installer_detail_new: * @encode_caps: the (fixed) caps for which an encoder element is needed * * Returns an opaque string containing all the details about the missing * element to be passed to an external installer called via * gst_install_plugins_async() or gst_install_plugins_sync(). * * This function is mainly for applications that call external plugin * installation mechanisms using one of the two above-mentioned functions in * the case where the application knows exactly what kind of plugin it is * missing. * * Returns: a newly-allocated detail string, or NULL on error. Free string * with g_free() when not needed any longer. * * Since: 0.10.15 */ gchar * gst_missing_encoder_installer_detail_new (const GstCaps * encode_caps) { GstCaps *caps; gchar *detail_str, *caps_str, *desc; g_return_val_if_fail (encode_caps != NULL, NULL); g_return_val_if_fail (GST_IS_CAPS (encode_caps), NULL); g_return_val_if_fail (!gst_caps_is_any (encode_caps), NULL); g_return_val_if_fail (!gst_caps_is_empty (encode_caps), NULL); g_return_val_if_fail (gst_caps_is_fixed (encode_caps), NULL); desc = gst_pb_utils_get_encoder_description (encode_caps); caps = copy_and_clean_caps (encode_caps); caps_str = gst_caps_to_string (caps); detail_str = gst_installer_detail_new (desc, "encoder", caps_str); g_free (caps_str); gst_caps_unref (caps); return detail_str; }
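
The file above only builds and parses missing-plugin messages; the consumer lives on the application side. As a quick orientation, here is a hypothetical application-side sketch (the function name app_bus_watch and the GList accumulator are illustrative, not part of this file) showing how the public API above is typically driven from a GstBus watch: detect a missing-plugin message, log its human-readable description, and collect the opaque installer-detail string for a later gst_install_plugins_async()/gst_install_plugins_sync() call.

/* Hypothetical application-side consumer of the API above (not part of
 * this file): watches the pipeline bus, reports missing plugins and
 * accumulates installer-detail strings for a later install request. */
static gboolean
app_bus_watch (GstBus * bus, GstMessage * msg, gpointer user_data)
{
  GList **details = user_data;  /* accumulator owned by the application */

  if (gst_is_missing_plugin_message (msg)) {
    gchar *desc, *detail;

    desc = gst_missing_plugin_message_get_description (msg);
    detail = gst_missing_plugin_message_get_installer_detail (msg);

    g_printerr ("Missing plugin: %s\n", desc);
    if (detail != NULL)
      *details = g_list_append (*details, detail);  /* freed by the app */

    g_free (desc);
  }
  return TRUE;                  /* keep the watch installed */
}

The strings collected this way follow the layout assembled by gst_installer_detail_new() above: marker|major.minor|program|description|type-detail.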
166MMX/openjdk.java.net-openjfx-8u40-rt
modules/media/src/main/native/gstreamer/gstreamer-lite/gst-plugins-base/gst-libs/gst/pbutils/missing-plugins.c
C
gpl-2.0
25,421
/******************************************************************************/
/* Copyright (c) Crackerjack Project., 2007                                   */
/*                                                                            */
/* This program is free software;  you can redistribute it and/or modify     */
/* it under the terms of the GNU General Public License as published by      */
/* the Free Software Foundation; either version 2 of the License, or         */
/* (at your option) any later version.                                       */
/*                                                                            */
/* This program is distributed in the hope that it will be useful,           */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of            */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See                  */
/* the GNU General Public License for more details.                          */
/*                                                                            */
/* You should have received a copy of the GNU General Public License         */
/* along with this program; if not, write to the Free Software Foundation,   */
/* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA          */
/*                                                                            */
/* History: Porting from Crackerjack to LTP was done by                      */
/*          Manas Kumar Nayak <maknayak@in.ibm.com>                          */
/******************************************************************************/

/******************************************************************************/
/* Description: This tests the rt_sigaction() syscall                        */
/*              rt_sigaction Expected EFAULT error check                     */
/******************************************************************************/

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>
#include <sys/syscall.h>
#include <string.h>

#include "test.h"
#include "linux_syscall_numbers.h"
#include "lapi/rt_sigaction.h"

char *TCID = "rt_sigaction02";
static int testno;
int TST_TOTAL = 1;

void cleanup(void)
{
	tst_rmdir();
	tst_exit();
}

void setup(void)
{
	TEST_PAUSE;
	tst_tmpdir();
}

static int test_flags[] = {
	SA_RESETHAND | SA_SIGINFO,
	SA_RESETHAND,
	SA_RESETHAND | SA_SIGINFO,
	SA_RESETHAND | SA_SIGINFO,
	SA_NOMASK
};

char *test_flags_list[] = {
	"SA_RESETHAND|SA_SIGINFO",
	"SA_RESETHAND",
	"SA_RESETHAND|SA_SIGINFO",
	"SA_RESETHAND|SA_SIGINFO",
	"SA_NOMASK"
};

static struct test_case_t {
	int exp_errno;
	char *errdesc;
} test_cases[] = {
	{EFAULT, "EFAULT"}
};

int main(int ac, char **av)
{
	unsigned int flag;
	int signal;
	int lc;

	tst_parse_opts(ac, av, NULL, NULL);

	setup();

	for (lc = 0; TEST_LOOPING(lc); ++lc) {
		tst_count = 0;
		for (testno = 0; testno < TST_TOTAL; ++testno) {
			for (signal = SIGRTMIN; signal <= SIGRTMAX; signal++) {
				tst_resm(TINFO, "Signal %d", signal);
				for (flag = 0; flag < ARRAY_SIZE(test_flags); flag++) {
					/*
					 * long sys_rt_sigaction(int sig,
					 *	const struct sigaction *act,
					 *	struct sigaction *oact,
					 *	size_t sigsetsize);
					 *
					 * EFAULT: an invalid act or oact
					 * value was specified
					 */
					TEST(ltp_rt_sigaction(signal,
							INVAL_SA_PTR, NULL,
							SIGSETSIZE));

					if ((TEST_RETURN == -1) &&
					    (TEST_ERRNO ==
					     test_cases[0].exp_errno)) {
						tst_resm(TINFO,
							 "sa.sa_flags = %s",
							 test_flags_list[flag]);
						tst_resm(TPASS,
							 "%s failed with signal %d as expected, errno = %s : %s",
							 TCID, signal,
							 test_cases[0].errdesc,
							 strerror(TEST_ERRNO));
					} else {
						tst_resm(TFAIL,
							 "rt_sigaction call succeeded: result = %ld, got errno %d but expected %d",
							 TEST_RETURN,
							 TEST_ERRNO,
							 test_cases[0].exp_errno);
						tst_resm(TINFO,
							 "sa.sa_flags = %s",
							 test_flags_list[flag]);
					}
				}
			}
		}
	}

	cleanup();
	tst_exit();
}
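
For readers outside the LTP harness, a minimal standalone sketch of the same EFAULT check follows. Assumptions: Linux with glibc, glibc exposing the _NSIG macro, and _NSIG / 8 matching the kernel's expected sigsetsize (true on common architectures); this is purely illustrative, not an LTP test.

/* Standalone illustration of the EFAULT case exercised above: hand the
 * raw rt_sigaction syscall an invalid `act` pointer and expect -1/EFAULT.
 * Assumes Linux/glibc; not part of the LTP test above. */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* (void *)-1 is an obviously unmapped user pointer */
	long ret = syscall(SYS_rt_sigaction, SIGRTMIN, (void *)-1, NULL,
			   _NSIG / 8);

	if (ret == -1 && errno == EFAULT)
		printf("got expected EFAULT: %s\n", strerror(errno));
	else
		printf("unexpected: ret=%ld errno=%d\n", ret, errno);
	return 0;
}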
itnihao/ltp
testcases/kernel/syscalls/rt_sigaction/rt_sigaction02.c
C
gpl-2.0
4,240
/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011  Nitin Gupta
 * Copyright (C) 2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

/*
 * Following is how we use various fields and flags of underlying
 * struct page(s) to form a zspage.
 *
 * Usage of struct page fields:
 *	page->first_page: points to the first component (0-order) page
 *	page->index (union with page->freelist): offset of the first object
 *		starting in this page. For the first page, this is
 *		always 0, so we use this field (aka freelist) to point
 *		to the first free object in zspage.
 *	page->lru: links together all component pages (except the first page)
 *		of a zspage
 *
 *	For _first_ page only:
 *
 *	page->private (union with page->first_page): refers to the
 *		component page after the first page
 *		If the page is first_page for huge object, it stores handle.
 *		Look at size_class->huge.
 *	page->freelist: points to the first free object in zspage.
 *		Free objects are linked together using in-place
 *		metadata.
 *	page->objects: maximum number of objects we can store in this
 *		zspage (class->zspage_order * PAGE_SIZE / class->size)
 *	page->lru: links together first pages of various zspages.
 *		Basically forming list of zspages in a fullness group.
 *	page->mapping: class index and fullness group of the zspage
 *
 * Usage of struct page flags:
 *	PG_private: identifies the first component page
 *	PG_private2: identifies the last component page
 *
 */

#ifdef CONFIG_ZSMALLOC_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/zsmalloc.h>
#include <linux/zpool.h>

/*
 * This must be a power of 2 and greater than or equal to sizeof(link_free).
 * These two conditions ensure that any 'struct link_free' itself doesn't
 * span more than 1 page which avoids complex case of mapping 2 pages simply
 * to restore link_free pointer values.
 */
#define ZS_ALIGN		8

/*
 * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
 * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
 */
#define ZS_MAX_ZSPAGE_ORDER 2
#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)

#define ZS_HANDLE_SIZE (sizeof(unsigned long))

/*
 * Object location (<PFN>, <obj_idx>) is encoded as
 * a single (unsigned long) handle value.
 *
 * Note that object index <obj_idx> is relative to system
 * page <PFN> it is stored in, so for each sub-page belonging
 * to a zspage, obj_idx starts with 0.
 *
 * This is made more complicated by various memory models and PAE.
 */

#ifndef MAX_PHYSMEM_BITS
#ifdef CONFIG_HIGHMEM64G
#define MAX_PHYSMEM_BITS 36
#else /* !CONFIG_HIGHMEM64G */
/*
 * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
 * be PAGE_SHIFT
 */
#define MAX_PHYSMEM_BITS BITS_PER_LONG
#endif
#endif
#define _PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)

/*
 * The memory allocated for a handle keeps the object position by
 * encoding <page, obj_idx>, and the encoded value has room in its
 * least significant bit (ie, look at obj_to_location).
 * We use that bit to synchronize between object access by the
 * user and migration.
 */
#define HANDLE_PIN_BIT	0

/*
 * The head of an allocated object carries OBJ_ALLOCATED_TAG to
 * identify whether the object is allocated.
 * It's okay to put the status bit in the least significant bit because
 * the header keeps a handle, which is a 4-byte-aligned address, so we
 * have room for at least two bits.
 */
#define OBJ_ALLOCATED_TAG 1
#define OBJ_TAG_BITS 1
#define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)
#define OBJ_INDEX_MASK	((_AC(1, UL) << OBJ_INDEX_BITS) - 1)

#define MAX(a, b) ((a) >= (b) ? (a) : (b))
/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
#define ZS_MIN_ALLOC_SIZE \
	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
/* each chunk includes extra space to keep handle */
#define ZS_MAX_ALLOC_SIZE	PAGE_SIZE

/*
 * On systems with 4K page size, this gives 255 size classes! There is a
 * trade-off here:
 *  - Large number of size classes is potentially wasteful as free pages are
 *    spread across these classes
 *  - Small number of size classes causes large internal fragmentation
 *  - Probably it's better to use specific size classes (empirically
 *    determined). NOTE: all those class sizes must be set as multiple of
 *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
 *
 *  ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
 *  (reason above)
 */
#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> 8)

/*
 * We do not maintain any list for completely empty or full pages
 */
enum fullness_group {
	ZS_ALMOST_FULL,
	ZS_ALMOST_EMPTY,
	_ZS_NR_FULLNESS_GROUPS,

	ZS_EMPTY,
	ZS_FULL
};

enum zs_stat_type {
	OBJ_ALLOCATED,
	OBJ_USED,
	CLASS_ALMOST_FULL,
	CLASS_ALMOST_EMPTY,
	NR_ZS_STAT_TYPE,
};

#ifdef CONFIG_ZSMALLOC_STAT

static struct dentry *zs_stat_root;

struct zs_size_stat {
	unsigned long objs[NR_ZS_STAT_TYPE];
};

#endif

/*
 * number of size_classes
 */
static int zs_size_classes;

/*
 * We assign a page to ZS_ALMOST_EMPTY fullness group when:
 *	n <= N / f, where
 * n = number of allocated objects
 * N = total number of objects zspage can store
 * f = fullness_threshold_frac
 *
 * Similarly, we assign zspage to:
 *	ZS_ALMOST_FULL	when n > N / f
 *	ZS_EMPTY	when n == 0
 *	ZS_FULL		when n == N
 *
 * (see: fix_fullness_group())
 */
static const int fullness_threshold_frac = 4;

struct size_class {
	/*
	 * Size of objects stored in this class. Must be multiple
	 * of ZS_ALIGN.
	 */
	int size;
	unsigned int index;

	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
	int pages_per_zspage;
	/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
	bool huge;

#ifdef CONFIG_ZSMALLOC_STAT
	struct zs_size_stat stats;
#endif

	spinlock_t lock;

	struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
};

/*
 * Placed within free objects to form a singly linked list.
 * For every zspage, first_page->freelist gives head of this list.
* * This must be power of 2 and less than or equal to ZS_ALIGN */ struct link_free { union { /* * Position of next free chunk (encodes <PFN, obj_idx>) * It's valid for non-allocated object */ void *next; /* * Handle of allocated object. */ unsigned long handle; }; }; struct zs_pool { char *name; struct size_class **size_class; struct kmem_cache *handle_cachep; gfp_t flags; /* allocation flags used when growing pool */ atomic_long_t pages_allocated; #ifdef CONFIG_ZSMALLOC_STAT struct dentry *stat_dentry; #endif }; /* * A zspage's class index and fullness group * are encoded in its (first)page->mapping */ #define CLASS_IDX_BITS 28 #define FULLNESS_BITS 4 #define CLASS_IDX_MASK ((1 << CLASS_IDX_BITS) - 1) #define FULLNESS_MASK ((1 << FULLNESS_BITS) - 1) struct mapping_area { #ifdef CONFIG_PGTABLE_MAPPING struct vm_struct *vm; /* vm area for mapping object that span pages */ #else char *vm_buf; /* copy buffer for objects that span pages */ #endif char *vm_addr; /* address of kmap_atomic()'ed pages */ enum zs_mapmode vm_mm; /* mapping mode */ bool huge; }; static int create_handle_cache(struct zs_pool *pool) { pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE, 0, 0, NULL); return pool->handle_cachep ? 0 : 1; } static void destroy_handle_cache(struct zs_pool *pool) { if (pool->handle_cachep) kmem_cache_destroy(pool->handle_cachep); } static unsigned long alloc_handle(struct zs_pool *pool) { return (unsigned long)kmem_cache_alloc(pool->handle_cachep, pool->flags & ~__GFP_HIGHMEM); } static void free_handle(struct zs_pool *pool, unsigned long handle) { kmem_cache_free(pool->handle_cachep, (void *)handle); } static void record_obj(unsigned long handle, unsigned long obj) { /* * lsb of @obj represents handle lock while other bits * represent object value the handle is pointing so * updating shouldn't do store tearing. */ WRITE_ONCE(*(unsigned long *)handle, obj); } /* zpool driver */ #ifdef CONFIG_ZPOOL static void *zs_zpool_create(char *name, gfp_t gfp, struct zpool_ops *zpool_ops) { return zs_create_pool(name, gfp); } static void zs_zpool_destroy(void *pool) { zs_destroy_pool(pool); } static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp, unsigned long *handle) { *handle = zs_malloc(pool, size); return *handle ? 
0 : -1; } static void zs_zpool_free(void *pool, unsigned long handle) { zs_free(pool, handle); } static int zs_zpool_shrink(void *pool, unsigned int pages, unsigned int *reclaimed) { return -EINVAL; } static void *zs_zpool_map(void *pool, unsigned long handle, enum zpool_mapmode mm) { enum zs_mapmode zs_mm; switch (mm) { case ZPOOL_MM_RO: zs_mm = ZS_MM_RO; break; case ZPOOL_MM_WO: zs_mm = ZS_MM_WO; break; case ZPOOL_MM_RW: /* fallthru */ default: zs_mm = ZS_MM_RW; break; } return zs_map_object(pool, handle, zs_mm); } static void zs_zpool_unmap(void *pool, unsigned long handle) { zs_unmap_object(pool, handle); } static u64 zs_zpool_total_size(void *pool) { return zs_get_total_pages(pool) << PAGE_SHIFT; } static struct zpool_driver zs_zpool_driver = { .type = "zsmalloc", .owner = THIS_MODULE, .create = zs_zpool_create, .destroy = zs_zpool_destroy, .malloc = zs_zpool_malloc, .free = zs_zpool_free, .shrink = zs_zpool_shrink, .map = zs_zpool_map, .unmap = zs_zpool_unmap, .total_size = zs_zpool_total_size, }; MODULE_ALIAS("zpool-zsmalloc"); #endif /* CONFIG_ZPOOL */ static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage) { return pages_per_zspage * PAGE_SIZE / size; } /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */ static DEFINE_PER_CPU(struct mapping_area, zs_map_area); static int is_first_page(struct page *page) { return PagePrivate(page); } static int is_last_page(struct page *page) { return PagePrivate2(page); } static void get_zspage_mapping(struct page *page, unsigned int *class_idx, enum fullness_group *fullness) { unsigned long m; BUG_ON(!is_first_page(page)); m = (unsigned long)page->mapping; *fullness = m & FULLNESS_MASK; *class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK; } static void set_zspage_mapping(struct page *page, unsigned int class_idx, enum fullness_group fullness) { unsigned long m; BUG_ON(!is_first_page(page)); m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) | (fullness & FULLNESS_MASK); page->mapping = (struct address_space *)m; } /* * zsmalloc divides the pool into various size classes where each * class maintains a list of zspages where each zspage is divided * into equal sized chunks. Each allocation falls into one of these * classes depending on its size. This function returns index of the * size class which has chunk size big enough to hold the give size. 
*/ static int get_size_class_index(int size) { int idx = 0; if (likely(size > ZS_MIN_ALLOC_SIZE)) idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE, ZS_SIZE_CLASS_DELTA); return min(zs_size_classes - 1, idx); } #ifdef CONFIG_ZSMALLOC_STAT static inline void zs_stat_inc(struct size_class *class, enum zs_stat_type type, unsigned long cnt) { class->stats.objs[type] += cnt; } static inline void zs_stat_dec(struct size_class *class, enum zs_stat_type type, unsigned long cnt) { class->stats.objs[type] -= cnt; } static inline unsigned long zs_stat_get(struct size_class *class, enum zs_stat_type type) { return class->stats.objs[type]; } static int __init zs_stat_init(void) { if (!debugfs_initialized()) return -ENODEV; zs_stat_root = debugfs_create_dir("zsmalloc", NULL); if (!zs_stat_root) return -ENOMEM; return 0; } static void __exit zs_stat_exit(void) { debugfs_remove_recursive(zs_stat_root); } static int zs_stats_size_show(struct seq_file *s, void *v) { int i; struct zs_pool *pool = s->private; struct size_class *class; int objs_per_zspage; unsigned long class_almost_full, class_almost_empty; unsigned long obj_allocated, obj_used, pages_used; unsigned long total_class_almost_full = 0, total_class_almost_empty = 0; unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0; seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s\n", "class", "size", "almost_full", "almost_empty", "obj_allocated", "obj_used", "pages_used", "pages_per_zspage"); for (i = 0; i < zs_size_classes; i++) { class = pool->size_class[i]; if (class->index != i) continue; spin_lock(&class->lock); class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL); class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY); obj_allocated = zs_stat_get(class, OBJ_ALLOCATED); obj_used = zs_stat_get(class, OBJ_USED); spin_unlock(&class->lock); objs_per_zspage = get_maxobj_per_zspage(class->size, class->pages_per_zspage); pages_used = obj_allocated / objs_per_zspage * class->pages_per_zspage; seq_printf(s, " %5u %5u %11lu %12lu %13lu %10lu %10lu %16d\n", i, class->size, class_almost_full, class_almost_empty, obj_allocated, obj_used, pages_used, class->pages_per_zspage); total_class_almost_full += class_almost_full; total_class_almost_empty += class_almost_empty; total_objs += obj_allocated; total_used_objs += obj_used; total_pages += pages_used; } seq_puts(s, "\n"); seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu\n", "Total", "", total_class_almost_full, total_class_almost_empty, total_objs, total_used_objs, total_pages); return 0; } static int zs_stats_size_open(struct inode *inode, struct file *file) { return single_open(file, zs_stats_size_show, inode->i_private); } static const struct file_operations zs_stat_size_ops = { .open = zs_stats_size_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int zs_pool_stat_create(char *name, struct zs_pool *pool) { struct dentry *entry; if (!zs_stat_root) return -ENODEV; entry = debugfs_create_dir(name, zs_stat_root); if (!entry) { pr_warn("debugfs dir <%s> creation failed\n", name); return -ENOMEM; } pool->stat_dentry = entry; entry = debugfs_create_file("classes", S_IFREG | S_IRUGO, pool->stat_dentry, pool, &zs_stat_size_ops); if (!entry) { pr_warn("%s: debugfs file entry <%s> creation failed\n", name, "classes"); return -ENOMEM; } return 0; } static void zs_pool_stat_destroy(struct zs_pool *pool) { debugfs_remove_recursive(pool->stat_dentry); } #else /* CONFIG_ZSMALLOC_STAT */ static inline void zs_stat_inc(struct size_class *class, enum zs_stat_type type, 
unsigned long cnt) { } static inline void zs_stat_dec(struct size_class *class, enum zs_stat_type type, unsigned long cnt) { } static inline unsigned long zs_stat_get(struct size_class *class, enum zs_stat_type type) { return 0; } static int __init zs_stat_init(void) { return 0; } static void __exit zs_stat_exit(void) { } static inline int zs_pool_stat_create(char *name, struct zs_pool *pool) { return 0; } static inline void zs_pool_stat_destroy(struct zs_pool *pool) { } #endif /* * For each size class, zspages are divided into different groups * depending on how "full" they are. This was done so that we could * easily find empty or nearly empty zspages when we try to shrink * the pool (not yet implemented). This function returns fullness * status of the given page. */ static enum fullness_group get_fullness_group(struct page *page) { int inuse, max_objects; enum fullness_group fg; BUG_ON(!is_first_page(page)); inuse = page->inuse; max_objects = page->objects; if (inuse == 0) fg = ZS_EMPTY; else if (inuse == max_objects) fg = ZS_FULL; else if (inuse <= 3 * max_objects / fullness_threshold_frac) fg = ZS_ALMOST_EMPTY; else fg = ZS_ALMOST_FULL; return fg; } /* * Each size class maintains various freelists and zspages are assigned * to one of these freelists based on the number of live objects they * have. This functions inserts the given zspage into the freelist * identified by <class, fullness_group>. */ static void insert_zspage(struct page *page, struct size_class *class, enum fullness_group fullness) { struct page **head; BUG_ON(!is_first_page(page)); if (fullness >= _ZS_NR_FULLNESS_GROUPS) return; head = &class->fullness_list[fullness]; if (*head) list_add_tail(&page->lru, &(*head)->lru); *head = page; zs_stat_inc(class, fullness == ZS_ALMOST_EMPTY ? CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1); } /* * This function removes the given zspage from the freelist identified * by <class, fullness_group>. */ static void remove_zspage(struct page *page, struct size_class *class, enum fullness_group fullness) { struct page **head; BUG_ON(!is_first_page(page)); if (fullness >= _ZS_NR_FULLNESS_GROUPS) return; head = &class->fullness_list[fullness]; BUG_ON(!*head); if (list_empty(&(*head)->lru)) *head = NULL; else if (*head == page) *head = (struct page *)list_entry((*head)->lru.next, struct page, lru); list_del_init(&page->lru); zs_stat_dec(class, fullness == ZS_ALMOST_EMPTY ? CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1); } /* * Each size class maintains zspages in different fullness groups depending * on the number of live objects they contain. When allocating or freeing * objects, the fullness status of the page can change, say, from ALMOST_FULL * to ALMOST_EMPTY when freeing an object. This function checks if such * a status change has occurred for the given page and accordingly moves the * page from the freelist of the old fullness group to that of the new * fullness group. */ static enum fullness_group fix_fullness_group(struct size_class *class, struct page *page) { int class_idx; enum fullness_group currfg, newfg; BUG_ON(!is_first_page(page)); get_zspage_mapping(page, &class_idx, &currfg); newfg = get_fullness_group(page); if (newfg == currfg) goto out; remove_zspage(page, class, currfg); insert_zspage(page, class, newfg); set_zspage_mapping(page, class_idx, newfg); out: return newfg; } /* * We have to decide on how many pages to link together * to form a zspage for each size class. 
This is important * to reduce wastage due to unusable space left at end of * each zspage which is given as: * wastage = Zp % class_size * usage = Zp - wastage * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ... * * For example, for size class of 3/8 * PAGE_SIZE, we should * link together 3 PAGE_SIZE sized pages to form a zspage * since then we can perfectly fit in 8 such objects. */ static int get_pages_per_zspage(int class_size) { int i, max_usedpc = 0; /* zspage order which gives maximum used size per KB */ int max_usedpc_order = 1; for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) { int zspage_size; int waste, usedpc; zspage_size = i * PAGE_SIZE; waste = zspage_size % class_size; usedpc = (zspage_size - waste) * 100 / zspage_size; if (usedpc > max_usedpc) { max_usedpc = usedpc; max_usedpc_order = i; } } return max_usedpc_order; } /* * A single 'zspage' is composed of many system pages which are * linked together using fields in struct page. This function finds * the first/head page, given any component page of a zspage. */ static struct page *get_first_page(struct page *page) { if (is_first_page(page)) return page; else return page->first_page; } static struct page *get_next_page(struct page *page) { struct page *next; if (is_last_page(page)) next = NULL; else if (is_first_page(page)) next = (struct page *)page_private(page); else next = list_entry(page->lru.next, struct page, lru); return next; } /* * Encode <page, obj_idx> as a single handle value. * We use the least bit of handle for tagging. */ static void *location_to_obj(struct page *page, unsigned long obj_idx) { unsigned long obj; if (!page) { BUG_ON(obj_idx); return NULL; } obj = page_to_pfn(page) << OBJ_INDEX_BITS; obj |= ((obj_idx) & OBJ_INDEX_MASK); obj <<= OBJ_TAG_BITS; return (void *)obj; } /* * Decode <page, obj_idx> pair from the given object handle. We adjust the * decoded obj_idx back to its original value since it was adjusted in * location_to_obj(). 
*/ static void obj_to_location(unsigned long obj, struct page **page, unsigned long *obj_idx) { obj >>= OBJ_TAG_BITS; *page = pfn_to_page(obj >> OBJ_INDEX_BITS); *obj_idx = (obj & OBJ_INDEX_MASK); } static unsigned long handle_to_obj(unsigned long handle) { return *(unsigned long *)handle; } static unsigned long obj_to_head(struct size_class *class, struct page *page, void *obj) { if (class->huge) { VM_BUG_ON(!is_first_page(page)); return *(unsigned long *)page_private(page); } else return *(unsigned long *)obj; } static unsigned long obj_idx_to_offset(struct page *page, unsigned long obj_idx, int class_size) { unsigned long off = 0; if (!is_first_page(page)) off = page->index; return off + obj_idx * class_size; } static inline int trypin_tag(unsigned long handle) { unsigned long *ptr = (unsigned long *)handle; return !test_and_set_bit_lock(HANDLE_PIN_BIT, ptr); } static void pin_tag(unsigned long handle) { while (!trypin_tag(handle)); } static void unpin_tag(unsigned long handle) { unsigned long *ptr = (unsigned long *)handle; clear_bit_unlock(HANDLE_PIN_BIT, ptr); } static void reset_page(struct page *page) { clear_bit(PG_private, &page->flags); clear_bit(PG_private_2, &page->flags); set_page_private(page, 0); page->mapping = NULL; page->freelist = NULL; page_mapcount_reset(page); } static void free_zspage(struct page *first_page) { struct page *nextp, *tmp, *head_extra; BUG_ON(!is_first_page(first_page)); BUG_ON(first_page->inuse); head_extra = (struct page *)page_private(first_page); reset_page(first_page); __free_page(first_page); /* zspage with only 1 system page */ if (!head_extra) return; list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) { list_del(&nextp->lru); reset_page(nextp); __free_page(nextp); } reset_page(head_extra); __free_page(head_extra); } /* Initialize a newly allocated zspage */ static void init_zspage(struct page *first_page, struct size_class *class) { unsigned long off = 0; struct page *page = first_page; BUG_ON(!is_first_page(first_page)); while (page) { struct page *next_page; struct link_free *link; unsigned int i = 1; void *vaddr; /* * page->index stores offset of first object starting * in the page. For the first page, this is always 0, * so we use first_page->index (aka ->freelist) to store * head of corresponding zspage's freelist. */ if (page != first_page) page->index = off; vaddr = kmap_atomic(page); link = (struct link_free *)vaddr + off / sizeof(*link); while ((off += class->size) < PAGE_SIZE) { link->next = location_to_obj(page, i++); link += class->size / sizeof(*link); } /* * We now come to the last (full or partial) object on this * page, which must point to the first object on the next * page (if present) */ next_page = get_next_page(page); link->next = location_to_obj(next_page, 0); kunmap_atomic(vaddr); page = next_page; off %= PAGE_SIZE; } } /* * Allocate a zspage for the given size class */ static struct page *alloc_zspage(struct size_class *class, gfp_t flags) { int i, error; struct page *first_page = NULL, *uninitialized_var(prev_page); /* * Allocate individual pages and link them together as: * 1. first page->private = first sub-page * 2. all sub-pages are linked together using page->lru * 3. each sub-page is linked to the first page using page->first_page * * For each size class, First/Head pages are linked together using * page->lru. Also, we set PG_private to identify the first page * (i.e. no other sub-page has this flag set) and PG_private_2 to * identify the last page. 
*/ error = -ENOMEM; for (i = 0; i < class->pages_per_zspage; i++) { struct page *page; page = alloc_page(flags); if (!page) goto cleanup; INIT_LIST_HEAD(&page->lru); if (i == 0) { /* first page */ SetPagePrivate(page); set_page_private(page, 0); first_page = page; first_page->inuse = 0; } if (i == 1) set_page_private(first_page, (unsigned long)page); if (i >= 1) page->first_page = first_page; if (i >= 2) list_add(&page->lru, &prev_page->lru); if (i == class->pages_per_zspage - 1) /* last page */ SetPagePrivate2(page); prev_page = page; } init_zspage(first_page, class); first_page->freelist = location_to_obj(first_page, 0); /* Maximum number of objects we can store in this zspage */ first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size; error = 0; /* Success */ cleanup: if (unlikely(error) && first_page) { free_zspage(first_page); first_page = NULL; } return first_page; } static struct page *find_get_zspage(struct size_class *class) { int i; struct page *page; for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) { page = class->fullness_list[i]; if (page) break; } return page; } #ifdef CONFIG_PGTABLE_MAPPING static inline int __zs_cpu_up(struct mapping_area *area) { /* * Make sure we don't leak memory if a cpu UP notification * and zs_init() race and both call zs_cpu_up() on the same cpu */ if (area->vm) return 0; area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL); if (!area->vm) return -ENOMEM; return 0; } static inline void __zs_cpu_down(struct mapping_area *area) { if (area->vm) free_vm_area(area->vm); area->vm = NULL; } static inline void *__zs_map_object(struct mapping_area *area, struct page *pages[2], int off, int size) { BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages)); area->vm_addr = area->vm->addr; return area->vm_addr + off; } static inline void __zs_unmap_object(struct mapping_area *area, struct page *pages[2], int off, int size) { unsigned long addr = (unsigned long)area->vm_addr; unmap_kernel_range(addr, PAGE_SIZE * 2); } #else /* CONFIG_PGTABLE_MAPPING */ static inline int __zs_cpu_up(struct mapping_area *area) { /* * Make sure we don't leak memory if a cpu UP notification * and zs_init() race and both call zs_cpu_up() on the same cpu */ if (area->vm_buf) return 0; area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL); if (!area->vm_buf) return -ENOMEM; return 0; } static inline void __zs_cpu_down(struct mapping_area *area) { kfree(area->vm_buf); area->vm_buf = NULL; } static void *__zs_map_object(struct mapping_area *area, struct page *pages[2], int off, int size) { int sizes[2]; void *addr; char *buf = area->vm_buf; /* disable page faults to match kmap_atomic() return conditions */ pagefault_disable(); /* no read fastpath */ if (area->vm_mm == ZS_MM_WO) goto out; sizes[0] = PAGE_SIZE - off; sizes[1] = size - sizes[0]; /* copy object to per-cpu buffer */ addr = kmap_atomic(pages[0]); memcpy(buf, addr + off, sizes[0]); kunmap_atomic(addr); addr = kmap_atomic(pages[1]); memcpy(buf + sizes[0], addr, sizes[1]); kunmap_atomic(addr); out: return area->vm_buf; } static void __zs_unmap_object(struct mapping_area *area, struct page *pages[2], int off, int size) { int sizes[2]; void *addr; char *buf; /* no write fastpath */ if (area->vm_mm == ZS_MM_RO) goto out; buf = area->vm_buf; if (!area->huge) { buf = buf + ZS_HANDLE_SIZE; size -= ZS_HANDLE_SIZE; off += ZS_HANDLE_SIZE; } sizes[0] = PAGE_SIZE - off; sizes[1] = size - sizes[0]; /* copy per-cpu buffer to object */ addr = kmap_atomic(pages[0]); memcpy(addr + off, buf, sizes[0]); kunmap_atomic(addr); addr = 
kmap_atomic(pages[1]); memcpy(addr, buf + sizes[0], sizes[1]); kunmap_atomic(addr); out: /* enable page faults to match kunmap_atomic() return conditions */ pagefault_enable(); } #endif /* CONFIG_PGTABLE_MAPPING */ static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action, void *pcpu) { int ret, cpu = (long)pcpu; struct mapping_area *area; switch (action) { case CPU_UP_PREPARE: area = &per_cpu(zs_map_area, cpu); ret = __zs_cpu_up(area); if (ret) return notifier_from_errno(ret); break; case CPU_DEAD: case CPU_UP_CANCELED: area = &per_cpu(zs_map_area, cpu); __zs_cpu_down(area); break; } return NOTIFY_OK; } static struct notifier_block zs_cpu_nb = { .notifier_call = zs_cpu_notifier }; static int zs_register_cpu_notifier(void) { int cpu, uninitialized_var(ret); cpu_notifier_register_begin(); __register_cpu_notifier(&zs_cpu_nb); for_each_online_cpu(cpu) { ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu); if (notifier_to_errno(ret)) break; } cpu_notifier_register_done(); return notifier_to_errno(ret); } static void zs_unregister_cpu_notifier(void) { int cpu; cpu_notifier_register_begin(); for_each_online_cpu(cpu) zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu); __unregister_cpu_notifier(&zs_cpu_nb); cpu_notifier_register_done(); } static void init_zs_size_classes(void) { int nr; nr = (ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / ZS_SIZE_CLASS_DELTA + 1; if ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) % ZS_SIZE_CLASS_DELTA) nr += 1; zs_size_classes = nr; } static bool can_merge(struct size_class *prev, int size, int pages_per_zspage) { if (prev->pages_per_zspage != pages_per_zspage) return false; if (get_maxobj_per_zspage(prev->size, prev->pages_per_zspage) != get_maxobj_per_zspage(size, pages_per_zspage)) return false; return true; } static bool zspage_full(struct page *page) { BUG_ON(!is_first_page(page)); return page->inuse == page->objects; } unsigned long zs_get_total_pages(struct zs_pool *pool) { return atomic_long_read(&pool->pages_allocated); } EXPORT_SYMBOL_GPL(zs_get_total_pages); /** * zs_map_object - get address of allocated object from handle. * @pool: pool from which the object was allocated * @handle: handle returned from zs_malloc * * Before using an object allocated from zs_malloc, it must be mapped using * this function. When done with the object, it must be unmapped using * zs_unmap_object. * * Only one object can be mapped per cpu at a time. There is no protection * against nested mappings. * * This function returns with preemption and page faults disabled. */ void *zs_map_object(struct zs_pool *pool, unsigned long handle, enum zs_mapmode mm) { struct page *page; unsigned long obj, obj_idx, off; unsigned int class_idx; enum fullness_group fg; struct size_class *class; struct mapping_area *area; struct page *pages[2]; void *ret; BUG_ON(!handle); /* * Because we use per-cpu mapping areas shared among the * pools/users, we can't allow mapping in interrupt context * because it can corrupt another users mappings. 
*/ BUG_ON(in_interrupt()); /* From now on, migration cannot move the object */ pin_tag(handle); obj = handle_to_obj(handle); obj_to_location(obj, &page, &obj_idx); get_zspage_mapping(get_first_page(page), &class_idx, &fg); class = pool->size_class[class_idx]; off = obj_idx_to_offset(page, obj_idx, class->size); area = &get_cpu_var(zs_map_area); area->vm_mm = mm; if (off + class->size <= PAGE_SIZE) { /* this object is contained entirely within a page */ area->vm_addr = kmap_atomic(page); ret = area->vm_addr + off; goto out; } /* this object spans two pages */ pages[0] = page; pages[1] = get_next_page(page); BUG_ON(!pages[1]); ret = __zs_map_object(area, pages, off, class->size); out: if (!class->huge) ret += ZS_HANDLE_SIZE; return ret; } EXPORT_SYMBOL_GPL(zs_map_object); void zs_unmap_object(struct zs_pool *pool, unsigned long handle) { struct page *page; unsigned long obj, obj_idx, off; unsigned int class_idx; enum fullness_group fg; struct size_class *class; struct mapping_area *area; BUG_ON(!handle); obj = handle_to_obj(handle); obj_to_location(obj, &page, &obj_idx); get_zspage_mapping(get_first_page(page), &class_idx, &fg); class = pool->size_class[class_idx]; off = obj_idx_to_offset(page, obj_idx, class->size); area = this_cpu_ptr(&zs_map_area); if (off + class->size <= PAGE_SIZE) kunmap_atomic(area->vm_addr); else { struct page *pages[2]; pages[0] = page; pages[1] = get_next_page(page); BUG_ON(!pages[1]); __zs_unmap_object(area, pages, off, class->size); } put_cpu_var(zs_map_area); unpin_tag(handle); } EXPORT_SYMBOL_GPL(zs_unmap_object); static unsigned long obj_malloc(struct page *first_page, struct size_class *class, unsigned long handle) { unsigned long obj; struct link_free *link; struct page *m_page; unsigned long m_objidx, m_offset; void *vaddr; handle |= OBJ_ALLOCATED_TAG; obj = (unsigned long)first_page->freelist; obj_to_location(obj, &m_page, &m_objidx); m_offset = obj_idx_to_offset(m_page, m_objidx, class->size); vaddr = kmap_atomic(m_page); link = (struct link_free *)vaddr + m_offset / sizeof(*link); first_page->freelist = link->next; if (!class->huge) /* record handle in the header of allocated chunk */ link->handle = handle; else /* record handle in first_page->private */ set_page_private(first_page, handle); kunmap_atomic(vaddr); first_page->inuse++; zs_stat_inc(class, OBJ_USED, 1); return obj; } /** * zs_malloc - Allocate block of given size from pool. * @pool: pool to allocate from * @size: size of block to allocate * * On success, handle to the allocated object is returned, * otherwise 0. * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail. 
*/ unsigned long zs_malloc(struct zs_pool *pool, size_t size) { unsigned long handle, obj; struct size_class *class; struct page *first_page; if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE)) return 0; handle = alloc_handle(pool); if (!handle) return 0; /* extra space in chunk to keep the handle */ size += ZS_HANDLE_SIZE; class = pool->size_class[get_size_class_index(size)]; spin_lock(&class->lock); first_page = find_get_zspage(class); if (!first_page) { spin_unlock(&class->lock); first_page = alloc_zspage(class, pool->flags); if (unlikely(!first_page)) { free_handle(pool, handle); return 0; } set_zspage_mapping(first_page, class->index, ZS_EMPTY); atomic_long_add(class->pages_per_zspage, &pool->pages_allocated); spin_lock(&class->lock); zs_stat_inc(class, OBJ_ALLOCATED, get_maxobj_per_zspage( class->size, class->pages_per_zspage)); } obj = obj_malloc(first_page, class, handle); /* Now move the zspage to another fullness group, if required */ fix_fullness_group(class, first_page); record_obj(handle, obj); spin_unlock(&class->lock); return handle; } EXPORT_SYMBOL_GPL(zs_malloc); static void obj_free(struct zs_pool *pool, struct size_class *class, unsigned long obj) { struct link_free *link; struct page *first_page, *f_page; unsigned long f_objidx, f_offset; void *vaddr; int class_idx; enum fullness_group fullness; BUG_ON(!obj); obj &= ~OBJ_ALLOCATED_TAG; obj_to_location(obj, &f_page, &f_objidx); first_page = get_first_page(f_page); get_zspage_mapping(first_page, &class_idx, &fullness); f_offset = obj_idx_to_offset(f_page, f_objidx, class->size); vaddr = kmap_atomic(f_page); /* Insert this object in containing zspage's freelist */ link = (struct link_free *)(vaddr + f_offset); link->next = first_page->freelist; if (class->huge) set_page_private(first_page, 0); kunmap_atomic(vaddr); first_page->freelist = (void *)obj; first_page->inuse--; zs_stat_dec(class, OBJ_USED, 1); } void zs_free(struct zs_pool *pool, unsigned long handle) { struct page *first_page, *f_page; unsigned long obj, f_objidx; int class_idx; struct size_class *class; enum fullness_group fullness; if (unlikely(!handle)) return; pin_tag(handle); obj = handle_to_obj(handle); obj_to_location(obj, &f_page, &f_objidx); first_page = get_first_page(f_page); get_zspage_mapping(first_page, &class_idx, &fullness); class = pool->size_class[class_idx]; spin_lock(&class->lock); obj_free(pool, class, obj); fullness = fix_fullness_group(class, first_page); if (fullness == ZS_EMPTY) { zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage( class->size, class->pages_per_zspage)); atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated); free_zspage(first_page); } spin_unlock(&class->lock); unpin_tag(handle); free_handle(pool, handle); } EXPORT_SYMBOL_GPL(zs_free); static void zs_object_copy(unsigned long src, unsigned long dst, struct size_class *class) { struct page *s_page, *d_page; unsigned long s_objidx, d_objidx; unsigned long s_off, d_off; void *s_addr, *d_addr; int s_size, d_size, size; int written = 0; s_size = d_size = class->size; obj_to_location(src, &s_page, &s_objidx); obj_to_location(dst, &d_page, &d_objidx); s_off = obj_idx_to_offset(s_page, s_objidx, class->size); d_off = obj_idx_to_offset(d_page, d_objidx, class->size); if (s_off + class->size > PAGE_SIZE) s_size = PAGE_SIZE - s_off; if (d_off + class->size > PAGE_SIZE) d_size = PAGE_SIZE - d_off; s_addr = kmap_atomic(s_page); d_addr = kmap_atomic(d_page); while (1) { size = min(s_size, d_size); memcpy(d_addr + d_off, s_addr + s_off, size); written += size; if (written 
== class->size) break; s_off += size; s_size -= size; d_off += size; d_size -= size; if (s_off >= PAGE_SIZE) { kunmap_atomic(d_addr); kunmap_atomic(s_addr); s_page = get_next_page(s_page); BUG_ON(!s_page); s_addr = kmap_atomic(s_page); d_addr = kmap_atomic(d_page); s_size = class->size - written; s_off = 0; } if (d_off >= PAGE_SIZE) { kunmap_atomic(d_addr); d_page = get_next_page(d_page); BUG_ON(!d_page); d_addr = kmap_atomic(d_page); d_size = class->size - written; d_off = 0; } } kunmap_atomic(d_addr); kunmap_atomic(s_addr); } /* * Find alloced object in zspage from index object and * return handle. */ static unsigned long find_alloced_obj(struct page *page, int index, struct size_class *class) { unsigned long head; int offset = 0; unsigned long handle = 0; void *addr = kmap_atomic(page); if (!is_first_page(page)) offset = page->index; offset += class->size * index; while (offset < PAGE_SIZE) { head = obj_to_head(class, page, addr + offset); if (head & OBJ_ALLOCATED_TAG) { handle = head & ~OBJ_ALLOCATED_TAG; if (trypin_tag(handle)) break; handle = 0; } offset += class->size; index++; } kunmap_atomic(addr); return handle; } struct zs_compact_control { /* Source page for migration which could be a subpage of zspage. */ struct page *s_page; /* Destination page for migration which should be a first page * of zspage. */ struct page *d_page; /* Starting object index within @s_page which used for live object * in the subpage. */ int index; /* how many of objects are migrated */ int nr_migrated; }; static int migrate_zspage(struct zs_pool *pool, struct size_class *class, struct zs_compact_control *cc) { unsigned long used_obj, free_obj; unsigned long handle; struct page *s_page = cc->s_page; struct page *d_page = cc->d_page; unsigned long index = cc->index; int nr_migrated = 0; int ret = 0; while (1) { handle = find_alloced_obj(s_page, index, class); if (!handle) { s_page = get_next_page(s_page); if (!s_page) break; index = 0; continue; } /* Stop if there is no more space */ if (zspage_full(d_page)) { unpin_tag(handle); ret = -ENOMEM; break; } used_obj = handle_to_obj(handle); free_obj = obj_malloc(d_page, class, handle); zs_object_copy(used_obj, free_obj, class); index++; /* * record_obj updates handle's value to free_obj and it will * invalidate lock bit(ie, HANDLE_PIN_BIT) of handle, which * breaks synchronization using pin_tag(e,g, zs_free) so * let's keep the lock bit. 
*/ free_obj |= BIT(HANDLE_PIN_BIT); record_obj(handle, free_obj); unpin_tag(handle); obj_free(pool, class, used_obj); nr_migrated++; } /* Remember last position in this iteration */ cc->s_page = s_page; cc->index = index; cc->nr_migrated = nr_migrated; return ret; } static struct page *alloc_target_page(struct size_class *class) { int i; struct page *page; for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) { page = class->fullness_list[i]; if (page) { remove_zspage(page, class, i); break; } } return page; } static void putback_zspage(struct zs_pool *pool, struct size_class *class, struct page *first_page) { enum fullness_group fullness; BUG_ON(!is_first_page(first_page)); fullness = get_fullness_group(first_page); insert_zspage(first_page, class, fullness); set_zspage_mapping(first_page, class->index, fullness); if (fullness == ZS_EMPTY) { zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage( class->size, class->pages_per_zspage)); atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated); free_zspage(first_page); } } static struct page *isolate_source_page(struct size_class *class) { struct page *page; page = class->fullness_list[ZS_ALMOST_EMPTY]; if (page) remove_zspage(page, class, ZS_ALMOST_EMPTY); return page; } static unsigned long __zs_compact(struct zs_pool *pool, struct size_class *class) { int nr_to_migrate; struct zs_compact_control cc; struct page *src_page; struct page *dst_page = NULL; unsigned long nr_total_migrated = 0; spin_lock(&class->lock); while ((src_page = isolate_source_page(class))) { BUG_ON(!is_first_page(src_page)); /* The goal is to migrate all live objects in source page */ nr_to_migrate = src_page->inuse; cc.index = 0; cc.s_page = src_page; while ((dst_page = alloc_target_page(class))) { cc.d_page = dst_page; /* * If there is no more space in dst_page, try to * allocate another zspage. */ if (!migrate_zspage(pool, class, &cc)) break; putback_zspage(pool, class, dst_page); nr_total_migrated += cc.nr_migrated; nr_to_migrate -= cc.nr_migrated; } /* Stop if we couldn't find slot */ if (dst_page == NULL) break; putback_zspage(pool, class, dst_page); putback_zspage(pool, class, src_page); spin_unlock(&class->lock); nr_total_migrated += cc.nr_migrated; cond_resched(); spin_lock(&class->lock); } if (src_page) putback_zspage(pool, class, src_page); spin_unlock(&class->lock); return nr_total_migrated; } unsigned long zs_compact(struct zs_pool *pool) { int i; unsigned long nr_migrated = 0; struct size_class *class; for (i = zs_size_classes - 1; i >= 0; i--) { class = pool->size_class[i]; if (!class) continue; if (class->index != i) continue; nr_migrated += __zs_compact(pool, class); } return nr_migrated; } EXPORT_SYMBOL_GPL(zs_compact); /** * zs_create_pool - Creates an allocation pool to work from. * @flags: allocation flags used to allocate pool metadata * * This function must be called before anything when using * the zsmalloc allocator. * * On success, a pointer to the newly created pool is returned, * otherwise NULL. 
*/ struct zs_pool *zs_create_pool(char *name, gfp_t flags) { int i; struct zs_pool *pool; struct size_class *prev_class = NULL; pool = kzalloc(sizeof(*pool), GFP_KERNEL); if (!pool) return NULL; pool->size_class = kcalloc(zs_size_classes, sizeof(struct size_class *), GFP_KERNEL); if (!pool->size_class) { kfree(pool); return NULL; } pool->name = kstrdup(name, GFP_KERNEL); if (!pool->name) goto err; if (create_handle_cache(pool)) goto err; /* * Iterate in reverse, because the size of the size_class that we want * to use for merging should be larger than or equal to the current size. */ for (i = zs_size_classes - 1; i >= 0; i--) { int size; int pages_per_zspage; struct size_class *class; size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA; if (size > ZS_MAX_ALLOC_SIZE) size = ZS_MAX_ALLOC_SIZE; pages_per_zspage = get_pages_per_zspage(size); /* * size_class is used for normal zsmalloc operation such * as alloc/free for that size. Although it is natural that we * have one size_class for each size, there is a chance that we * can get more memory utilization if we use one size_class for * many different sizes whose size_classes have the same * characteristics. So we make a size_class point to the * previous size_class if possible. */ if (prev_class) { if (can_merge(prev_class, size, pages_per_zspage)) { pool->size_class[i] = prev_class; continue; } } class = kzalloc(sizeof(struct size_class), GFP_KERNEL); if (!class) goto err; class->size = size; class->index = i; class->pages_per_zspage = pages_per_zspage; if (pages_per_zspage == 1 && get_maxobj_per_zspage(size, pages_per_zspage) == 1) class->huge = true; spin_lock_init(&class->lock); pool->size_class[i] = class; prev_class = class; } pool->flags = flags; if (zs_pool_stat_create(name, pool)) goto err; return pool; err: zs_destroy_pool(pool); return NULL; } EXPORT_SYMBOL_GPL(zs_create_pool); void zs_destroy_pool(struct zs_pool *pool) { int i; zs_pool_stat_destroy(pool); for (i = 0; i < zs_size_classes; i++) { int fg; struct size_class *class = pool->size_class[i]; if (!class) continue; if (class->index != i) continue; for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) { if (class->fullness_list[fg]) { pr_info("Freeing non-empty class with size %db, fullness group %d\n", class->size, fg); } } kfree(class); } destroy_handle_cache(pool); kfree(pool->size_class); kfree(pool->name); kfree(pool); } EXPORT_SYMBOL_GPL(zs_destroy_pool); static int __init zs_init(void) { int ret = zs_register_cpu_notifier(); if (ret) goto notifier_fail; init_zs_size_classes(); #ifdef CONFIG_ZPOOL zpool_register_driver(&zs_zpool_driver); #endif ret = zs_stat_init(); if (ret) { pr_err("zs stat initialization failed\n"); goto stat_fail; } return 0; stat_fail: #ifdef CONFIG_ZPOOL zpool_unregister_driver(&zs_zpool_driver); #endif notifier_fail: zs_unregister_cpu_notifier(); return ret; } static void __exit zs_exit(void) { #ifdef CONFIG_ZPOOL zpool_unregister_driver(&zs_zpool_driver); #endif zs_unregister_cpu_notifier(); zs_stat_exit(); } module_init(zs_init); module_exit(zs_exit); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
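/*
 * Editor's illustrative sketch (not part of the original file): a minimal
 * zsmalloc client, assuming the public API exported by
 * include/linux/zsmalloc.h in this kernel era (zs_malloc, zs_map_object,
 * zs_unmap_object, zs_free, and the ZS_MM_WO mapping mode). zs_malloc
 * returns an opaque handle rather than a pointer; the object must be
 * mapped before its memory can be touched, and a mapped object is pinned,
 * which is exactly what find_alloced_obj() above skips via trypin_tag()
 * during compaction.
 */
#if 0 /* example only, kept out of the build */
static int zsmalloc_example(void)
{
	struct zs_pool *pool;
	unsigned long handle;
	void *obj;

	pool = zs_create_pool("example", GFP_KERNEL);
	if (!pool)
		return -ENOMEM;

	handle = zs_malloc(pool, 128);		/* allocate a 128 byte object */
	if (!handle) {
		zs_destroy_pool(pool);
		return -ENOMEM;
	}

	obj = zs_map_object(pool, handle, ZS_MM_WO);	/* pin + map */
	memset(obj, 0xaa, 128);
	zs_unmap_object(pool, handle);	/* unpin, so compaction may move it */

	zs_compact(pool);	/* the handle stays valid across migration */

	zs_free(pool, handle);
	zs_destroy_pool(pool);
	return 0;
}
#endif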
amarchandole/capprobe_mptcp
mm/zsmalloc.c
C
gpl-2.0
46,986
/* * QEMU i8255x (PRO100) emulation * * Copyright (C) 2006-2011 Stefan Weil * * Portions of the code are copies from grub / etherboot eepro100.c * and linux e100.c. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or * (at your option) version 3 or any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * Tested features (i82559): * PXE boot (i386 guest, i386 / mips / mipsel / ppc host) ok * Linux networking (i386) ok * * Untested: * Windows networking * * References: * * Intel 8255x 10/100 Mbps Ethernet Controller Family * Open Source Software Developer Manual * * TODO: * * PHY emulation should be separated from nic emulation. * Most nic emulations could share the same phy code. * * i82550 is untested. It is programmed like the i82559. * * i82562 is untested. It is programmed like the i82559. * * Power management (i82558 and later) is not implemented. * * Wake-on-LAN is not implemented. */ #include <stddef.h> /* offsetof */ #include "hw.h" #include "pci.h" #include "net.h" #include "eeprom93xx.h" #include "sysemu.h" #include "dma.h" /* QEMU sends frames smaller than 60 bytes to ethernet nics. * Such frames are rejected by real nics and their emulations. * To avoid this behaviour, other nic emulations pad received * frames. The following definition enables this padding for * eepro100, too. We keep the define around in case it might * become useful in the future if the core networking is ever * changed to pad short packets itself. */ #define CONFIG_PAD_RECEIVED_FRAMES #define KiB 1024 /* Debug EEPRO100 card. */ #if 0 # define DEBUG_EEPRO100 #endif #ifdef DEBUG_EEPRO100 #define logout(fmt, ...) fprintf(stderr, "EE100\t%-24s" fmt, __func__, ## __VA_ARGS__) #else #define logout(fmt, ...) ((void)0) #endif /* Set flags to 0 to disable debug output. */ #define INT 1 /* interrupt related actions */ #define MDI 1 /* mdi related actions */ #define OTHER 1 #define RXTX 1 #define EEPROM 1 /* eeprom related actions */ #define TRACE(flag, command) ((flag) ? (command) : (void)0) #define missing(text) fprintf(stderr, "eepro100: feature is missing in this emulation: " text "\n") #define MAX_ETH_FRAME_SIZE 1514 /* This driver supports several different devices which are declared here. */ #define i82550 0x82550 #define i82551 0x82551 #define i82557A 0x82557a #define i82557B 0x82557b #define i82557C 0x82557c #define i82558A 0x82558a #define i82558B 0x82558b #define i82559A 0x82559a #define i82559B 0x82559b #define i82559C 0x82559c #define i82559ER 0x82559e #define i82562 0x82562 #define i82801 0x82801 /* Use 64 word EEPROM. TODO: could be a runtime option. */ #define EEPROM_SIZE 64 #define PCI_MEM_SIZE (4 * KiB) #define PCI_IO_SIZE 64 #define PCI_FLASH_SIZE (128 * KiB) #define BIT(n) (1 << (n)) #define BITS(n, m) (((0xffffffffU << (31 - n)) >> (31 - n + m)) << m) /* The SCB accepts the following controls for the Tx and Rx units: */ #define CU_NOP 0x0000 /* No operation. */ #define CU_START 0x0010 /* CU start. */ #define CU_RESUME 0x0020 /* CU resume. */ #define CU_STATSADDR 0x0040 /* Load dump counters address. 
*/ #define CU_SHOWSTATS 0x0050 /* Dump statistical counters. */ #define CU_CMD_BASE 0x0060 /* Load CU base address. */ #define CU_DUMPSTATS 0x0070 /* Dump and reset statistical counters. */ #define CU_SRESUME 0x00a0 /* CU static resume. */ #define RU_NOP 0x0000 #define RX_START 0x0001 #define RX_RESUME 0x0002 #define RU_ABORT 0x0004 #define RX_ADDR_LOAD 0x0006 #define RX_RESUMENR 0x0007 #define INT_MASK 0x0100 #define DRVR_INT 0x0200 /* Driver generated interrupt. */ typedef struct { const char *name; const char *desc; uint16_t device_id; uint8_t revision; uint16_t subsystem_vendor_id; uint16_t subsystem_id; uint32_t device; uint8_t stats_size; bool has_extended_tcb_support; bool power_management; } E100PCIDeviceInfo; /* Offsets to the various registers. All accesses need not be longword aligned. */ typedef enum { SCBStatus = 0, /* Status Word. */ SCBAck = 1, SCBCmd = 2, /* Rx/Command Unit command and status. */ SCBIntmask = 3, SCBPointer = 4, /* General purpose pointer. */ SCBPort = 8, /* Misc. commands and operands. */ SCBflash = 12, /* Flash memory control. */ SCBeeprom = 14, /* EEPROM control. */ SCBCtrlMDI = 16, /* MDI interface control. */ SCBEarlyRx = 20, /* Early receive byte count. */ SCBFlow = 24, /* Flow Control. */ SCBpmdr = 27, /* Power Management Driver. */ SCBgctrl = 28, /* General Control. */ SCBgstat = 29, /* General Status. */ } E100RegisterOffset; /* A speedo3 transmit buffer descriptor with two buffers... */ typedef struct { uint16_t status; uint16_t command; uint32_t link; /* void * */ uint32_t tbd_array_addr; /* transmit buffer descriptor array address. */ uint16_t tcb_bytes; /* transmit command block byte count (in lower 14 bits */ uint8_t tx_threshold; /* transmit threshold */ uint8_t tbd_count; /* TBD number */ #if 0 /* This constitutes two "TBD" entries: hdr and data */ uint32_t tx_buf_addr0; /* void *, header of frame to be transmitted. */ int32_t tx_buf_size0; /* Length of Tx hdr. */ uint32_t tx_buf_addr1; /* void *, data to be transmitted. */ int32_t tx_buf_size1; /* Length of Tx data. */ #endif } eepro100_tx_t; /* Receive frame descriptor. */ typedef struct { int16_t status; uint16_t command; uint32_t link; /* struct RxFD * */ uint32_t rx_buf_addr; /* void * */ uint16_t count; uint16_t size; /* Ethernet frame data follows. */ } eepro100_rx_t; typedef enum { COMMAND_EL = BIT(15), COMMAND_S = BIT(14), COMMAND_I = BIT(13), COMMAND_NC = BIT(4), COMMAND_SF = BIT(3), COMMAND_CMD = BITS(2, 0), } scb_command_bit; typedef enum { STATUS_C = BIT(15), STATUS_OK = BIT(13), } scb_status_bit; typedef struct { uint32_t tx_good_frames, tx_max_collisions, tx_late_collisions, tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions, tx_multiple_collisions, tx_total_collisions; uint32_t rx_good_frames, rx_crc_errors, rx_alignment_errors, rx_resource_errors, rx_overrun_errors, rx_cdt_errors, rx_short_frame_errors; uint32_t fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported; uint16_t xmt_tco_frames, rcv_tco_frames; /* TODO: i82559 has six reserved statistics but a total of 24 dwords. */ uint32_t reserved[4]; } eepro100_stats_t; typedef enum { cu_idle = 0, cu_suspended = 1, cu_active = 2, cu_lpq_active = 2, cu_hqp_active = 3 } cu_state_t; typedef enum { ru_idle = 0, ru_suspended = 1, ru_no_resources = 2, ru_ready = 4 } ru_state_t; typedef struct { PCIDevice dev; /* Hash register (multicast mask array, multiple individual addresses). 
*/ uint8_t mult[8]; MemoryRegion mmio_bar; MemoryRegion io_bar; MemoryRegion flash_bar; NICState *nic; NICConf conf; uint8_t scb_stat; /* SCB stat/ack byte */ uint8_t int_stat; /* PCI interrupt status */ /* region must not be saved by nic_save. */ uint16_t mdimem[32]; eeprom_t *eeprom; uint32_t device; /* device variant */ /* (cu_base + cu_offset) address the next command block in the command block list. */ uint32_t cu_base; /* CU base address */ uint32_t cu_offset; /* CU address offset */ /* (ru_base + ru_offset) address the RFD in the Receive Frame Area. */ uint32_t ru_base; /* RU base address */ uint32_t ru_offset; /* RU address offset */ uint32_t statsaddr; /* pointer to eepro100_stats_t */ /* Temporary status information (no need to save these values), * used while processing CU commands. */ eepro100_tx_t tx; /* transmit buffer descriptor */ uint32_t cb_address; /* = cu_base + cu_offset */ /* Statistical counters. Also used for wake-up packet (i82559). */ eepro100_stats_t statistics; /* Data in mem is always in the byte order of the controller (le). * It must be dword aligned to allow direct access to 32 bit values. */ uint8_t mem[PCI_MEM_SIZE] __attribute__((aligned(8))); /* Configuration bytes. */ uint8_t configuration[22]; /* vmstate for each particular nic */ VMStateDescription *vmstate; /* Quasi static device properties (no need to save them). */ uint16_t stats_size; bool has_extended_tcb_support; } EEPRO100State; /* Word indices in EEPROM. */ typedef enum { EEPROM_CNFG_MDIX = 0x03, EEPROM_ID = 0x05, EEPROM_PHY_ID = 0x06, EEPROM_VENDOR_ID = 0x0c, EEPROM_CONFIG_ASF = 0x0d, EEPROM_DEVICE_ID = 0x23, EEPROM_SMBUS_ADDR = 0x90, } EEPROMOffset; /* Bit values for EEPROM ID word. */ typedef enum { EEPROM_ID_MDM = BIT(0), /* Modem */ EEPROM_ID_STB = BIT(1), /* Standby Enable */ EEPROM_ID_WMR = BIT(2), /* ??? */ EEPROM_ID_WOL = BIT(5), /* Wake on LAN */ EEPROM_ID_DPD = BIT(6), /* Deep Power Down */ EEPROM_ID_ALT = BIT(7), /* */ /* BITS(10, 8) device revision */ EEPROM_ID_BD = BIT(11), /* boot disable */ EEPROM_ID_ID = BIT(13), /* id bit */ /* BITS(15, 14) signature */ EEPROM_ID_VALID = BIT(14), /* signature for valid eeprom */ } eeprom_id_bit; /* Default values for MDI (PHY) registers */ static const uint16_t eepro100_mdi_default[] = { /* MDI Registers 0 - 6, 7 */ 0x3000, 0x780d, 0x02a8, 0x0154, 0x05e1, 0x0000, 0x0000, 0x0000, /* MDI Registers 8 - 15 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* MDI Registers 16 - 31 */ 0x0003, 0x0000, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; /* Readonly mask for MDI (PHY) registers */ static const uint16_t eepro100_mdi_mask[] = { 0x0000, 0xffff, 0xffff, 0xffff, 0xc01f, 0xffff, 0xffff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0fff, 0x0000, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; #define POLYNOMIAL 0x04c11db6 static E100PCIDeviceInfo *eepro100_get_class(EEPRO100State *s); /* From FreeBSD (locally modified). */ static unsigned e100_compute_mcast_idx(const uint8_t *ep) { uint32_t crc; int carry, i, j; uint8_t b; crc = 0xffffffff; for (i = 0; i < 6; i++) { b = *ep++; for (j = 0; j < 8; j++) { carry = ((crc & 0x80000000L) ? 1 : 0) ^ (b & 0x01); crc <<= 1; b >>= 1; if (carry) { crc = ((crc ^ POLYNOMIAL) | carry); } } } return (crc & BITS(7, 2)) >> 2; } /* Read a 16 bit control/status (CSR) register. 
*/ static uint16_t e100_read_reg2(EEPRO100State *s, E100RegisterOffset addr) { assert(!((uintptr_t)&s->mem[addr] & 1)); return le16_to_cpup((uint16_t *)&s->mem[addr]); } /* Read a 32 bit control/status (CSR) register. */ static uint32_t e100_read_reg4(EEPRO100State *s, E100RegisterOffset addr) { assert(!((uintptr_t)&s->mem[addr] & 3)); return le32_to_cpup((uint32_t *)&s->mem[addr]); } /* Write a 16 bit control/status (CSR) register. */ static void e100_write_reg2(EEPRO100State *s, E100RegisterOffset addr, uint16_t val) { assert(!((uintptr_t)&s->mem[addr] & 1)); cpu_to_le16w((uint16_t *)&s->mem[addr], val); } /* Write a 32 bit control/status (CSR) register. */ static void e100_write_reg4(EEPRO100State *s, E100RegisterOffset addr, uint32_t val) { assert(!((uintptr_t)&s->mem[addr] & 3)); cpu_to_le32w((uint32_t *)&s->mem[addr], val); } #if defined(DEBUG_EEPRO100) static const char *nic_dump(const uint8_t * buf, unsigned size) { static char dump[3 * 16 + 1]; char *p = &dump[0]; if (size > 16) { size = 16; } while (size-- > 0) { p += sprintf(p, " %02x", *buf++); } return dump; } #endif /* DEBUG_EEPRO100 */ enum scb_stat_ack { stat_ack_not_ours = 0x00, stat_ack_sw_gen = 0x04, stat_ack_rnr = 0x10, stat_ack_cu_idle = 0x20, stat_ack_frame_rx = 0x40, stat_ack_cu_cmd_done = 0x80, stat_ack_not_present = 0xFF, stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx), stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done), }; static void disable_interrupt(EEPRO100State * s) { if (s->int_stat) { TRACE(INT, logout("interrupt disabled\n")); qemu_irq_lower(s->dev.irq[0]); s->int_stat = 0; } } static void enable_interrupt(EEPRO100State * s) { if (!s->int_stat) { TRACE(INT, logout("interrupt enabled\n")); qemu_irq_raise(s->dev.irq[0]); s->int_stat = 1; } } static void eepro100_acknowledge(EEPRO100State * s) { s->scb_stat &= ~s->mem[SCBAck]; s->mem[SCBAck] = s->scb_stat; if (s->scb_stat == 0) { disable_interrupt(s); } } static void eepro100_interrupt(EEPRO100State * s, uint8_t status) { uint8_t mask = ~s->mem[SCBIntmask]; s->mem[SCBAck] |= status; status = s->scb_stat = s->mem[SCBAck]; status &= (mask | 0x0f); #if 0 status &= (~s->mem[SCBIntmask] | 0x0f); #endif if (status && (mask & 0x01)) { /* SCB mask and SCB Bit M do not disable interrupt. */ enable_interrupt(s); } else if (s->int_stat) { disable_interrupt(s); } } static void eepro100_cx_interrupt(EEPRO100State * s) { /* CU completed action command. */ /* Transmit not ok (82557 only, not in emulation). */ eepro100_interrupt(s, 0x80); } static void eepro100_cna_interrupt(EEPRO100State * s) { /* CU left the active state. */ eepro100_interrupt(s, 0x20); } static void eepro100_fr_interrupt(EEPRO100State * s) { /* RU received a complete frame. */ eepro100_interrupt(s, 0x40); } static void eepro100_rnr_interrupt(EEPRO100State * s) { /* RU is not ready. */ eepro100_interrupt(s, 0x10); } static void eepro100_mdi_interrupt(EEPRO100State * s) { /* MDI completed read or write cycle. */ eepro100_interrupt(s, 0x08); } static void eepro100_swi_interrupt(EEPRO100State * s) { /* Software has requested an interrupt. */ eepro100_interrupt(s, 0x04); } #if 0 static void eepro100_fcp_interrupt(EEPRO100State * s) { /* Flow control pause interrupt (82558 and later). 
*/ eepro100_interrupt(s, 0x01); } #endif static void e100_pci_reset(EEPRO100State * s) { E100PCIDeviceInfo *info = eepro100_get_class(s); uint32_t device = s->device; uint8_t *pci_conf = s->dev.config; TRACE(OTHER, logout("%p\n", s)); /* PCI Status */ pci_set_word(pci_conf + PCI_STATUS, PCI_STATUS_DEVSEL_MEDIUM | PCI_STATUS_FAST_BACK); /* PCI Latency Timer */ pci_set_byte(pci_conf + PCI_LATENCY_TIMER, 0x20); /* latency timer = 32 clocks */ /* Capability Pointer is set by PCI framework. */ /* Interrupt Line */ /* Interrupt Pin */ pci_set_byte(pci_conf + PCI_INTERRUPT_PIN, 1); /* interrupt pin A */ /* Minimum Grant */ pci_set_byte(pci_conf + PCI_MIN_GNT, 0x08); /* Maximum Latency */ pci_set_byte(pci_conf + PCI_MAX_LAT, 0x18); s->stats_size = info->stats_size; s->has_extended_tcb_support = info->has_extended_tcb_support; switch (device) { case i82550: case i82551: case i82557A: case i82557B: case i82557C: case i82558A: case i82558B: case i82559A: case i82559B: case i82559ER: case i82562: case i82801: case i82559C: break; default: logout("Device %X is undefined!\n", device); } /* Standard TxCB. */ s->configuration[6] |= BIT(4); /* Standard statistical counters. */ s->configuration[6] |= BIT(5); if (s->stats_size == 80) { /* TODO: check TCO Statistical Counters bit. Documentation not clear. */ if (s->configuration[6] & BIT(2)) { /* TCO statistical counters. */ assert(s->configuration[6] & BIT(5)); } else { if (s->configuration[6] & BIT(5)) { /* No extended statistical counters, i82557 compatible. */ s->stats_size = 64; } else { /* i82558 compatible. */ s->stats_size = 76; } } } else { if (s->configuration[6] & BIT(5)) { /* No extended statistical counters. */ s->stats_size = 64; } } assert(s->stats_size > 0 && s->stats_size <= sizeof(s->statistics)); if (info->power_management) { /* Power Management Capabilities */ int cfg_offset = 0xdc; int r = pci_add_capability(&s->dev, PCI_CAP_ID_PM, cfg_offset, PCI_PM_SIZEOF); assert(r >= 0); pci_set_word(pci_conf + cfg_offset + PCI_PM_PMC, 0x7e21); #if 0 /* TODO: replace dummy code for power management emulation. */ /* TODO: Power Management Control / Status. */ pci_set_word(pci_conf + cfg_offset + PCI_PM_CTRL, 0x0000); /* TODO: Ethernet Power Consumption Registers (i82559 and later). */ pci_set_byte(pci_conf + cfg_offset + PCI_PM_PPB_EXTENSIONS, 0x0000); #endif } #if EEPROM_SIZE > 0 if (device == i82557C || device == i82558B || device == i82559C) { /* TODO: get vendor id from EEPROM for i82557C or later. TODO: get device id from EEPROM for i82557C or later. TODO: status bit 4 can be disabled by EEPROM for i82558, i82559. TODO: header type is determined by EEPROM for i82559. TODO: get subsystem id from EEPROM for i82557C or later. TODO: get subsystem vendor id from EEPROM for i82557C or later. TODO: exp. rom baddr depends on a bit in EEPROM for i82558 or later. TODO: capability pointer depends on EEPROM for i82558. 
*/ logout("Get device id and revision from EEPROM!!!\n"); } #endif /* EEPROM_SIZE > 0 */ } static void nic_selective_reset(EEPRO100State * s) { size_t i; uint16_t *eeprom_contents = eeprom93xx_data(s->eeprom); #if 0 eeprom93xx_reset(s->eeprom); #endif memcpy(eeprom_contents, s->conf.macaddr.a, 6); eeprom_contents[EEPROM_ID] = EEPROM_ID_VALID; if (s->device == i82557B || s->device == i82557C) eeprom_contents[5] = 0x0100; eeprom_contents[EEPROM_PHY_ID] = 1; uint16_t sum = 0; for (i = 0; i < EEPROM_SIZE - 1; i++) { sum += eeprom_contents[i]; } eeprom_contents[EEPROM_SIZE - 1] = 0xbaba - sum; TRACE(EEPROM, logout("checksum=0x%04x\n", eeprom_contents[EEPROM_SIZE - 1])); memset(s->mem, 0, sizeof(s->mem)); e100_write_reg4(s, SCBCtrlMDI, BIT(21)); assert(sizeof(s->mdimem) == sizeof(eepro100_mdi_default)); memcpy(&s->mdimem[0], &eepro100_mdi_default[0], sizeof(s->mdimem)); } static void nic_reset(void *opaque) { EEPRO100State *s = opaque; TRACE(OTHER, logout("%p\n", s)); /* TODO: Clearing of hash register for selective reset, too? */ memset(&s->mult[0], 0, sizeof(s->mult)); nic_selective_reset(s); } #if defined(DEBUG_EEPRO100) static const char * const e100_reg[PCI_IO_SIZE / 4] = { "Command/Status", "General Pointer", "Port", "EEPROM/Flash Control", "MDI Control", "Receive DMA Byte Count", "Flow Control", "General Status/Control" }; static char *regname(uint32_t addr) { static char buf[32]; if (addr < PCI_IO_SIZE) { const char *r = e100_reg[addr / 4]; if (r != 0) { snprintf(buf, sizeof(buf), "%s+%u", r, addr % 4); } else { snprintf(buf, sizeof(buf), "0x%02x", addr); } } else { snprintf(buf, sizeof(buf), "??? 0x%08x", addr); } return buf; } #endif /* DEBUG_EEPRO100 */ /***************************************************************************** * * Command emulation. * ****************************************************************************/ #if 0 static uint16_t eepro100_read_command(EEPRO100State * s) { uint16_t val = 0xffff; TRACE(OTHER, logout("val=0x%04x\n", val)); return val; } #endif /* Commands that can be put in a command list entry. */ enum commands { CmdNOp = 0, CmdIASetup = 1, CmdConfigure = 2, CmdMulticastList = 3, CmdTx = 4, CmdTDR = 5, /* load microcode */ CmdDump = 6, CmdDiagnose = 7, /* And some extra flags: */ CmdSuspend = 0x4000, /* Suspend after completion. */ CmdIntr = 0x2000, /* Interrupt after completion. */ CmdTxFlex = 0x0008, /* Use "Flexible mode" for CmdTx command. */ }; static cu_state_t get_cu_state(EEPRO100State * s) { return ((s->mem[SCBStatus] & BITS(7, 6)) >> 6); } static void set_cu_state(EEPRO100State * s, cu_state_t state) { s->mem[SCBStatus] = (s->mem[SCBStatus] & ~BITS(7, 6)) + (state << 6); } static ru_state_t get_ru_state(EEPRO100State * s) { return ((s->mem[SCBStatus] & BITS(5, 2)) >> 2); } static void set_ru_state(EEPRO100State * s, ru_state_t state) { s->mem[SCBStatus] = (s->mem[SCBStatus] & ~BITS(5, 2)) + (state << 2); } static void dump_statistics(EEPRO100State * s) { /* Dump statistical data. Most data is never changed by the emulation * and always 0, so we first just copy the whole block and then those * values which really matter. * Number of data should check configuration!!! 
*/ pci_dma_write(&s->dev, s->statsaddr, &s->statistics, s->stats_size); stl_le_pci_dma(&s->dev, s->statsaddr + 0, s->statistics.tx_good_frames); stl_le_pci_dma(&s->dev, s->statsaddr + 36, s->statistics.rx_good_frames); stl_le_pci_dma(&s->dev, s->statsaddr + 48, s->statistics.rx_resource_errors); stl_le_pci_dma(&s->dev, s->statsaddr + 60, s->statistics.rx_short_frame_errors); #if 0 stw_le_pci_dma(&s->dev, s->statsaddr + 76, s->statistics.xmt_tco_frames); stw_le_pci_dma(&s->dev, s->statsaddr + 78, s->statistics.rcv_tco_frames); missing("CU dump statistical counters"); #endif } static void read_cb(EEPRO100State *s) { pci_dma_read(&s->dev, s->cb_address, &s->tx, sizeof(s->tx)); s->tx.status = le16_to_cpu(s->tx.status); s->tx.command = le16_to_cpu(s->tx.command); s->tx.link = le32_to_cpu(s->tx.link); s->tx.tbd_array_addr = le32_to_cpu(s->tx.tbd_array_addr); s->tx.tcb_bytes = le16_to_cpu(s->tx.tcb_bytes); } static void tx_command(EEPRO100State *s) { uint32_t tbd_array = le32_to_cpu(s->tx.tbd_array_addr); uint16_t tcb_bytes = (le16_to_cpu(s->tx.tcb_bytes) & 0x3fff); /* Sends larger than MAX_ETH_FRAME_SIZE are allowed, up to 2600 bytes. */ uint8_t buf[2600]; uint16_t size = 0; uint32_t tbd_address = s->cb_address + 0x10; TRACE(RXTX, logout ("transmit, TBD array address 0x%08x, TCB byte count 0x%04x, TBD count %u\n", tbd_array, tcb_bytes, s->tx.tbd_count)); if (tcb_bytes > 2600) { logout("TCB byte count too large, using 2600\n"); tcb_bytes = 2600; } if (!((tcb_bytes > 0) || (tbd_array != 0xffffffff))) { logout ("illegal values of TBD array address and TCB byte count!\n"); } assert(tcb_bytes <= sizeof(buf)); while (size < tcb_bytes) { uint32_t tx_buffer_address = ldl_le_pci_dma(&s->dev, tbd_address); uint16_t tx_buffer_size = lduw_le_pci_dma(&s->dev, tbd_address + 4); #if 0 uint16_t tx_buffer_el = lduw_le_pci_dma(&s->dev, tbd_address + 6); #endif tbd_address += 8; TRACE(RXTX, logout ("TBD (simplified mode): buffer address 0x%08x, size 0x%04x\n", tx_buffer_address, tx_buffer_size)); tx_buffer_size = MIN(tx_buffer_size, sizeof(buf) - size); pci_dma_read(&s->dev, tx_buffer_address, &buf[size], tx_buffer_size); size += tx_buffer_size; } if (tbd_array == 0xffffffff) { /* Simplified mode. Was already handled by code above. */ } else { /* Flexible mode. */ uint8_t tbd_count = 0; if (s->has_extended_tcb_support && !(s->configuration[6] & BIT(4))) { /* Extended Flexible TCB. 
*/ for (; tbd_count < 2; tbd_count++) { uint32_t tx_buffer_address = ldl_le_pci_dma(&s->dev, tbd_address); uint16_t tx_buffer_size = lduw_le_pci_dma(&s->dev, tbd_address + 4); uint16_t tx_buffer_el = lduw_le_pci_dma(&s->dev, tbd_address + 6); tbd_address += 8; TRACE(RXTX, logout ("TBD (extended flexible mode): buffer address 0x%08x, size 0x%04x\n", tx_buffer_address, tx_buffer_size)); tx_buffer_size = MIN(tx_buffer_size, sizeof(buf) - size); pci_dma_read(&s->dev, tx_buffer_address, &buf[size], tx_buffer_size); size += tx_buffer_size; if (tx_buffer_el & 1) { break; } } } tbd_address = tbd_array; for (; tbd_count < s->tx.tbd_count; tbd_count++) { uint32_t tx_buffer_address = ldl_le_pci_dma(&s->dev, tbd_address); uint16_t tx_buffer_size = lduw_le_pci_dma(&s->dev, tbd_address + 4); uint16_t tx_buffer_el = lduw_le_pci_dma(&s->dev, tbd_address + 6); tbd_address += 8; TRACE(RXTX, logout ("TBD (flexible mode): buffer address 0x%08x, size 0x%04x\n", tx_buffer_address, tx_buffer_size)); tx_buffer_size = MIN(tx_buffer_size, sizeof(buf) - size); pci_dma_read(&s->dev, tx_buffer_address, &buf[size], tx_buffer_size); size += tx_buffer_size; if (tx_buffer_el & 1) { break; } } } TRACE(RXTX, logout("%p sending frame, len=%d,%s\n", s, size, nic_dump(buf, size))); qemu_send_packet(&s->nic->nc, buf, size); s->statistics.tx_good_frames++; /* Transmit with bad status would raise an CX/TNO interrupt. * (82557 only). Emulation never has bad status. */ #if 0 eepro100_cx_interrupt(s); #endif } static void set_multicast_list(EEPRO100State *s) { uint16_t multicast_count = s->tx.tbd_array_addr & BITS(13, 0); uint16_t i; memset(&s->mult[0], 0, sizeof(s->mult)); TRACE(OTHER, logout("multicast list, multicast count = %u\n", multicast_count)); for (i = 0; i < multicast_count; i += 6) { uint8_t multicast_addr[6]; pci_dma_read(&s->dev, s->cb_address + 10 + i, multicast_addr, 6); TRACE(OTHER, logout("multicast entry %s\n", nic_dump(multicast_addr, 6))); unsigned mcast_idx = e100_compute_mcast_idx(multicast_addr); assert(mcast_idx < 64); s->mult[mcast_idx >> 3] |= (1 << (mcast_idx & 7)); } } static void action_command(EEPRO100State *s) { for (;;) { bool bit_el; bool bit_s; bool bit_i; bool bit_nc; uint16_t ok_status = STATUS_OK; s->cb_address = s->cu_base + s->cu_offset; read_cb(s); bit_el = ((s->tx.command & COMMAND_EL) != 0); bit_s = ((s->tx.command & COMMAND_S) != 0); bit_i = ((s->tx.command & COMMAND_I) != 0); bit_nc = ((s->tx.command & COMMAND_NC) != 0); #if 0 bool bit_sf = ((s->tx.command & COMMAND_SF) != 0); #endif s->cu_offset = s->tx.link; TRACE(OTHER, logout("val=(cu start), status=0x%04x, command=0x%04x, link=0x%08x\n", s->tx.status, s->tx.command, s->tx.link)); switch (s->tx.command & COMMAND_CMD) { case CmdNOp: /* Do nothing. 
*/ break; case CmdIASetup: pci_dma_read(&s->dev, s->cb_address + 8, &s->conf.macaddr.a[0], 6); TRACE(OTHER, logout("macaddr: %s\n", nic_dump(&s->conf.macaddr.a[0], 6))); break; case CmdConfigure: pci_dma_read(&s->dev, s->cb_address + 8, &s->configuration[0], sizeof(s->configuration)); TRACE(OTHER, logout("configuration: %s\n", nic_dump(&s->configuration[0], 16))); TRACE(OTHER, logout("configuration: %s\n", nic_dump(&s->configuration[16], ARRAY_SIZE(s->configuration) - 16))); if (s->configuration[20] & BIT(6)) { TRACE(OTHER, logout("Multiple IA bit\n")); } break; case CmdMulticastList: set_multicast_list(s); break; case CmdTx: if (bit_nc) { missing("CmdTx: NC = 0"); ok_status = 0; break; } tx_command(s); break; case CmdTDR: TRACE(OTHER, logout("load microcode\n")); /* Starting with offset 8, the command contains * 64 dwords microcode which we just ignore here. */ break; case CmdDiagnose: TRACE(OTHER, logout("diagnose\n")); /* Make sure error flag is not set. */ s->tx.status = 0; break; default: missing("undefined command"); ok_status = 0; break; } /* Write new status. */ stw_le_pci_dma(&s->dev, s->cb_address, s->tx.status | ok_status | STATUS_C); if (bit_i) { /* CU completed action. */ eepro100_cx_interrupt(s); } if (bit_el) { /* CU becomes idle. Terminate command loop. */ set_cu_state(s, cu_idle); eepro100_cna_interrupt(s); break; } else if (bit_s) { /* CU becomes suspended. Terminate command loop. */ set_cu_state(s, cu_suspended); eepro100_cna_interrupt(s); break; } else { /* More entries in list. */ TRACE(OTHER, logout("CU list with at least one more entry\n")); } } TRACE(OTHER, logout("CU list empty\n")); /* List is empty. Now CU is idle or suspended. */ } static void eepro100_cu_command(EEPRO100State * s, uint8_t val) { cu_state_t cu_state; switch (val) { case CU_NOP: /* No operation. */ break; case CU_START: cu_state = get_cu_state(s); if (cu_state != cu_idle && cu_state != cu_suspended) { /* Intel documentation says that CU must be idle or suspended * for the CU start command. */ logout("unexpected CU state is %u\n", cu_state); } set_cu_state(s, cu_active); s->cu_offset = e100_read_reg4(s, SCBPointer); action_command(s); break; case CU_RESUME: if (get_cu_state(s) != cu_suspended) { logout("bad CU resume from CU state %u\n", get_cu_state(s)); /* Workaround for bad Linux eepro100 driver which resumes * from idle state. */ #if 0 missing("cu resume"); #endif set_cu_state(s, cu_suspended); } if (get_cu_state(s) == cu_suspended) { TRACE(OTHER, logout("CU resuming\n")); set_cu_state(s, cu_active); action_command(s); } break; case CU_STATSADDR: /* Load dump counters address. */ s->statsaddr = e100_read_reg4(s, SCBPointer); TRACE(OTHER, logout("val=0x%02x (dump counters address)\n", val)); if (s->statsaddr & 3) { /* Memory must be Dword aligned. */ logout("unaligned dump counters address\n"); /* Handling of misaligned addresses is undefined. * Here we align the address by ignoring the lower bits. */ /* TODO: Test unaligned dump counter address on real hardware. */ s->statsaddr &= ~3; } break; case CU_SHOWSTATS: /* Dump statistical counters. */ TRACE(OTHER, logout("val=0x%02x (dump stats)\n", val)); dump_statistics(s); stl_le_pci_dma(&s->dev, s->statsaddr + s->stats_size, 0xa005); break; case CU_CMD_BASE: /* Load CU base. */ TRACE(OTHER, logout("val=0x%02x (CU base address)\n", val)); s->cu_base = e100_read_reg4(s, SCBPointer); break; case CU_DUMPSTATS: /* Dump and reset statistical counters. 
*/ TRACE(OTHER, logout("val=0x%02x (dump stats and reset)\n", val)); dump_statistics(s); stl_le_pci_dma(&s->dev, s->statsaddr + s->stats_size, 0xa007); memset(&s->statistics, 0, sizeof(s->statistics)); break; case CU_SRESUME: /* CU static resume. */ missing("CU static resume"); break; default: missing("Undefined CU command"); } } static void eepro100_ru_command(EEPRO100State * s, uint8_t val) { switch (val) { case RU_NOP: /* No operation. */ break; case RX_START: /* RU start. */ if (get_ru_state(s) != ru_idle) { logout("RU state is %u, should be %u\n", get_ru_state(s), ru_idle); #if 0 assert(!"wrong RU state"); #endif } set_ru_state(s, ru_ready); s->ru_offset = e100_read_reg4(s, SCBPointer); qemu_flush_queued_packets(&s->nic->nc); TRACE(OTHER, logout("val=0x%02x (rx start)\n", val)); break; case RX_RESUME: /* Restart RU. */ if (get_ru_state(s) != ru_suspended) { logout("RU state is %u, should be %u\n", get_ru_state(s), ru_suspended); #if 0 assert(!"wrong RU state"); #endif } set_ru_state(s, ru_ready); break; case RU_ABORT: /* RU abort. */ if (get_ru_state(s) == ru_ready) { eepro100_rnr_interrupt(s); } set_ru_state(s, ru_idle); break; case RX_ADDR_LOAD: /* Load RU base. */ TRACE(OTHER, logout("val=0x%02x (RU base address)\n", val)); s->ru_base = e100_read_reg4(s, SCBPointer); break; default: logout("val=0x%02x (undefined RU command)\n", val); missing("Undefined RU command"); } } static void eepro100_write_command(EEPRO100State * s, uint8_t val) { eepro100_ru_command(s, val & 0x0f); eepro100_cu_command(s, val & 0xf0); if ((val) == 0) { TRACE(OTHER, logout("val=0x%02x\n", val)); } /* Clear command byte after command was accepted. */ s->mem[SCBCmd] = 0; } /***************************************************************************** * * EEPROM emulation. * ****************************************************************************/ #define EEPROM_CS 0x02 #define EEPROM_SK 0x01 #define EEPROM_DI 0x04 #define EEPROM_DO 0x08 static uint16_t eepro100_read_eeprom(EEPRO100State * s) { uint16_t val = e100_read_reg2(s, SCBeeprom); if (eeprom93xx_read(s->eeprom)) { val |= EEPROM_DO; } else { val &= ~EEPROM_DO; } TRACE(EEPROM, logout("val=0x%04x\n", val)); return val; } static void eepro100_write_eeprom(eeprom_t * eeprom, uint8_t val) { TRACE(EEPROM, logout("val=0x%02x\n", val)); /* mask unwritable bits */ #if 0 val = SET_MASKED(val, 0x31, eeprom->value); #endif int eecs = ((val & EEPROM_CS) != 0); int eesk = ((val & EEPROM_SK) != 0); int eedi = ((val & EEPROM_DI) != 0); eeprom93xx_write(eeprom, eecs, eesk, eedi); } /***************************************************************************** * * MDI emulation. 
* ****************************************************************************/ #if defined(DEBUG_EEPRO100) static const char * const mdi_op_name[] = { "opcode 0", "write", "read", "opcode 3" }; static const char * const mdi_reg_name[] = { "Control", "Status", "PHY Identification (Word 1)", "PHY Identification (Word 2)", "Auto-Negotiation Advertisement", "Auto-Negotiation Link Partner Ability", "Auto-Negotiation Expansion" }; static const char *reg2name(uint8_t reg) { static char buffer[10]; const char *p = buffer; if (reg < ARRAY_SIZE(mdi_reg_name)) { p = mdi_reg_name[reg]; } else { snprintf(buffer, sizeof(buffer), "reg=0x%02x", reg); } return p; } #endif /* DEBUG_EEPRO100 */ static uint32_t eepro100_read_mdi(EEPRO100State * s) { uint32_t val = e100_read_reg4(s, SCBCtrlMDI); #ifdef DEBUG_EEPRO100 uint8_t raiseint = (val & BIT(29)) >> 29; uint8_t opcode = (val & BITS(27, 26)) >> 26; uint8_t phy = (val & BITS(25, 21)) >> 21; uint8_t reg = (val & BITS(20, 16)) >> 16; uint16_t data = (val & BITS(15, 0)); #endif /* Emulation takes no time to finish MDI transaction. */ val |= BIT(28); TRACE(MDI, logout("val=0x%08x (int=%u, %s, phy=%u, %s, data=0x%04x\n", val, raiseint, mdi_op_name[opcode], phy, reg2name(reg), data)); return val; } static void eepro100_write_mdi(EEPRO100State *s) { uint32_t val = e100_read_reg4(s, SCBCtrlMDI); uint8_t raiseint = (val & BIT(29)) >> 29; uint8_t opcode = (val & BITS(27, 26)) >> 26; uint8_t phy = (val & BITS(25, 21)) >> 21; uint8_t reg = (val & BITS(20, 16)) >> 16; uint16_t data = (val & BITS(15, 0)); TRACE(MDI, logout("val=0x%08x (int=%u, %s, phy=%u, %s, data=0x%04x\n", val, raiseint, mdi_op_name[opcode], phy, reg2name(reg), data)); if (phy != 1) { /* Unsupported PHY address. */ #if 0 logout("phy must be 1 but is %u\n", phy); #endif data = 0; } else if (opcode != 1 && opcode != 2) { /* Unsupported opcode. */ logout("opcode must be 1 or 2 but is %u\n", opcode); data = 0; } else if (reg > 6) { /* Unsupported register. */ logout("register must be 0...6 but is %u\n", reg); data = 0; } else { TRACE(MDI, logout("val=0x%08x (int=%u, %s, phy=%u, %s, data=0x%04x\n", val, raiseint, mdi_op_name[opcode], phy, reg2name(reg), data)); if (opcode == 1) { /* MDI write */ switch (reg) { case 0: /* Control Register */ if (data & 0x8000) { /* Reset status and control registers to default. */ s->mdimem[0] = eepro100_mdi_default[0]; s->mdimem[1] = eepro100_mdi_default[1]; data = s->mdimem[reg]; } else { /* Restart Auto Configuration = Normal Operation */ data &= ~0x0200; } break; case 1: /* Status Register */ missing("not writable"); data = s->mdimem[reg]; break; case 2: /* PHY Identification Register (Word 1) */ case 3: /* PHY Identification Register (Word 2) */ missing("not implemented"); break; case 4: /* Auto-Negotiation Advertisement Register */ case 5: /* Auto-Negotiation Link Partner Ability Register */ break; case 6: /* Auto-Negotiation Expansion Register */ default: missing("not implemented"); } s->mdimem[reg] = data; } else if (opcode == 2) { /* MDI read */ switch (reg) { case 0: /* Control Register */ if (data & 0x8000) { /* Reset status and control registers to default. 
*/ s->mdimem[0] = eepro100_mdi_default[0]; s->mdimem[1] = eepro100_mdi_default[1]; } break; case 1: /* Status Register */ s->mdimem[reg] |= 0x0020; break; case 2: /* PHY Identification Register (Word 1) */ case 3: /* PHY Identification Register (Word 2) */ case 4: /* Auto-Negotiation Advertisement Register */ break; case 5: /* Auto-Negotiation Link Partner Ability Register */ s->mdimem[reg] = 0x41fe; break; case 6: /* Auto-Negotiation Expansion Register */ s->mdimem[reg] = 0x0001; break; } data = s->mdimem[reg]; } /* Emulation takes no time to finish MDI transaction. * Set MDI bit in SCB status register. */ s->mem[SCBAck] |= 0x08; val |= BIT(28); if (raiseint) { eepro100_mdi_interrupt(s); } } val = (val & 0xffff0000) + data; e100_write_reg4(s, SCBCtrlMDI, val); } /***************************************************************************** * * Port emulation. * ****************************************************************************/ #define PORT_SOFTWARE_RESET 0 #define PORT_SELFTEST 1 #define PORT_SELECTIVE_RESET 2 #define PORT_DUMP 3 #define PORT_SELECTION_MASK 3 typedef struct { uint32_t st_sign; /* Self Test Signature */ uint32_t st_result; /* Self Test Results */ } eepro100_selftest_t; static uint32_t eepro100_read_port(EEPRO100State * s) { return 0; } static void eepro100_write_port(EEPRO100State *s) { uint32_t val = e100_read_reg4(s, SCBPort); uint32_t address = (val & ~PORT_SELECTION_MASK); uint8_t selection = (val & PORT_SELECTION_MASK); switch (selection) { case PORT_SOFTWARE_RESET: nic_reset(s); break; case PORT_SELFTEST: TRACE(OTHER, logout("selftest address=0x%08x\n", address)); eepro100_selftest_t data; pci_dma_read(&s->dev, address, (uint8_t *) &data, sizeof(data)); data.st_sign = 0xffffffff; data.st_result = 0; pci_dma_write(&s->dev, address, (uint8_t *) &data, sizeof(data)); break; case PORT_SELECTIVE_RESET: TRACE(OTHER, logout("selective reset, selftest address=0x%08x\n", address)); nic_selective_reset(s); break; default: logout("val=0x%08x\n", val); missing("unknown port selection"); } } /***************************************************************************** * * General hardware emulation. 
* ****************************************************************************/ static uint8_t eepro100_read1(EEPRO100State * s, uint32_t addr) { uint8_t val = 0; if (addr <= sizeof(s->mem) - sizeof(val)) { val = s->mem[addr]; } switch (addr) { case SCBStatus: case SCBAck: TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); break; case SCBCmd: TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); #if 0 val = eepro100_read_command(s); #endif break; case SCBIntmask: TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); break; case SCBPort + 3: TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); break; case SCBeeprom: val = eepro100_read_eeprom(s); break; case SCBCtrlMDI: case SCBCtrlMDI + 1: case SCBCtrlMDI + 2: case SCBCtrlMDI + 3: val = (uint8_t)(eepro100_read_mdi(s) >> (8 * (addr & 3))); TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); break; case SCBpmdr: /* Power Management Driver Register */ val = 0; TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); break; case SCBgctrl: /* General Control Register */ TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); break; case SCBgstat: /* General Status Register */ /* 100 Mbps full duplex, valid link */ val = 0x07; TRACE(OTHER, logout("addr=General Status val=%02x\n", val)); break; default: logout("addr=%s val=0x%02x\n", regname(addr), val); missing("unknown byte read"); } return val; } static uint16_t eepro100_read2(EEPRO100State * s, uint32_t addr) { uint16_t val = 0; if (addr <= sizeof(s->mem) - sizeof(val)) { val = e100_read_reg2(s, addr); } switch (addr) { case SCBStatus: case SCBCmd: TRACE(OTHER, logout("addr=%s val=0x%04x\n", regname(addr), val)); break; case SCBeeprom: val = eepro100_read_eeprom(s); TRACE(OTHER, logout("addr=%s val=0x%04x\n", regname(addr), val)); break; case SCBCtrlMDI: case SCBCtrlMDI + 2: val = (uint16_t)(eepro100_read_mdi(s) >> (8 * (addr & 3))); TRACE(OTHER, logout("addr=%s val=0x%04x\n", regname(addr), val)); break; default: logout("addr=%s val=0x%04x\n", regname(addr), val); missing("unknown word read"); } return val; } static uint32_t eepro100_read4(EEPRO100State * s, uint32_t addr) { uint32_t val = 0; if (addr <= sizeof(s->mem) - sizeof(val)) { val = e100_read_reg4(s, addr); } switch (addr) { case SCBStatus: TRACE(OTHER, logout("addr=%s val=0x%08x\n", regname(addr), val)); break; case SCBPointer: TRACE(OTHER, logout("addr=%s val=0x%08x\n", regname(addr), val)); break; case SCBPort: val = eepro100_read_port(s); TRACE(OTHER, logout("addr=%s val=0x%08x\n", regname(addr), val)); break; case SCBflash: val = eepro100_read_eeprom(s); TRACE(OTHER, logout("addr=%s val=0x%08x\n", regname(addr), val)); break; case SCBCtrlMDI: val = eepro100_read_mdi(s); break; default: logout("addr=%s val=0x%08x\n", regname(addr), val); missing("unknown longword read"); } return val; } static void eepro100_write1(EEPRO100State * s, uint32_t addr, uint8_t val) { /* SCBStatus is readonly. 
*/ if (addr > SCBStatus && addr <= sizeof(s->mem) - sizeof(val)) { s->mem[addr] = val; } switch (addr) { case SCBStatus: TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); break; case SCBAck: TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); eepro100_acknowledge(s); break; case SCBCmd: TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); eepro100_write_command(s, val); break; case SCBIntmask: TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); if (val & BIT(1)) { eepro100_swi_interrupt(s); } eepro100_interrupt(s, 0); break; case SCBPointer: case SCBPointer + 1: case SCBPointer + 2: case SCBPointer + 3: TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); break; case SCBPort: case SCBPort + 1: case SCBPort + 2: TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); break; case SCBPort + 3: TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); eepro100_write_port(s); break; case SCBFlow: /* does not exist on 82557 */ case SCBFlow + 1: case SCBFlow + 2: case SCBpmdr: /* does not exist on 82557 */ TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); break; case SCBeeprom: TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); eepro100_write_eeprom(s->eeprom, val); break; case SCBCtrlMDI: case SCBCtrlMDI + 1: case SCBCtrlMDI + 2: TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); break; case SCBCtrlMDI + 3: TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); eepro100_write_mdi(s); break; default: logout("addr=%s val=0x%02x\n", regname(addr), val); missing("unknown byte write"); } } static void eepro100_write2(EEPRO100State * s, uint32_t addr, uint16_t val) { /* SCBStatus is readonly. */ if (addr > SCBStatus && addr <= sizeof(s->mem) - sizeof(val)) { e100_write_reg2(s, addr, val); } switch (addr) { case SCBStatus: TRACE(OTHER, logout("addr=%s val=0x%04x\n", regname(addr), val)); s->mem[SCBAck] = (val >> 8); eepro100_acknowledge(s); break; case SCBCmd: TRACE(OTHER, logout("addr=%s val=0x%04x\n", regname(addr), val)); eepro100_write_command(s, val); eepro100_write1(s, SCBIntmask, val >> 8); break; case SCBPointer: case SCBPointer + 2: TRACE(OTHER, logout("addr=%s val=0x%04x\n", regname(addr), val)); break; case SCBPort: TRACE(OTHER, logout("addr=%s val=0x%04x\n", regname(addr), val)); break; case SCBPort + 2: TRACE(OTHER, logout("addr=%s val=0x%04x\n", regname(addr), val)); eepro100_write_port(s); break; case SCBeeprom: TRACE(OTHER, logout("addr=%s val=0x%04x\n", regname(addr), val)); eepro100_write_eeprom(s->eeprom, val); break; case SCBCtrlMDI: TRACE(OTHER, logout("addr=%s val=0x%04x\n", regname(addr), val)); break; case SCBCtrlMDI + 2: TRACE(OTHER, logout("addr=%s val=0x%04x\n", regname(addr), val)); eepro100_write_mdi(s); break; default: logout("addr=%s val=0x%04x\n", regname(addr), val); missing("unknown word write"); } } static void eepro100_write4(EEPRO100State * s, uint32_t addr, uint32_t val) { if (addr <= sizeof(s->mem) - sizeof(val)) { e100_write_reg4(s, addr, val); } switch (addr) { case SCBPointer: TRACE(OTHER, logout("addr=%s val=0x%08x\n", regname(addr), val)); break; case SCBPort: TRACE(OTHER, logout("addr=%s val=0x%08x\n", regname(addr), val)); eepro100_write_port(s); break; case SCBflash: TRACE(OTHER, logout("addr=%s val=0x%08x\n", regname(addr), val)); val = val >> 16; eepro100_write_eeprom(s->eeprom, val); break; case SCBCtrlMDI: TRACE(OTHER, logout("addr=%s val=0x%08x\n", regname(addr), val)); eepro100_write_mdi(s); break; default: 
logout("addr=%s val=0x%08x\n", regname(addr), val); missing("unknown longword write"); } } static uint64_t eepro100_read(void *opaque, hwaddr addr, unsigned size) { EEPRO100State *s = opaque; switch (size) { case 1: return eepro100_read1(s, addr); case 2: return eepro100_read2(s, addr); case 4: return eepro100_read4(s, addr); default: abort(); } } static void eepro100_write(void *opaque, hwaddr addr, uint64_t data, unsigned size) { EEPRO100State *s = opaque; switch (size) { case 1: eepro100_write1(s, addr, data); break; case 2: eepro100_write2(s, addr, data); break; case 4: eepro100_write4(s, addr, data); break; default: abort(); } } static const MemoryRegionOps eepro100_ops = { .read = eepro100_read, .write = eepro100_write, .endianness = DEVICE_LITTLE_ENDIAN, }; static int nic_can_receive(NetClientState *nc) { EEPRO100State *s = DO_UPCAST(NICState, nc, nc)->opaque; TRACE(RXTX, logout("%p\n", s)); return get_ru_state(s) == ru_ready; #if 0 return !eepro100_buffer_full(s); #endif } static ssize_t nic_receive(NetClientState *nc, const uint8_t * buf, size_t size) { /* TODO: * - Magic packets should set bit 30 in power management driver register. * - Interesting packets should set bit 29 in power management driver register. */ EEPRO100State *s = DO_UPCAST(NICState, nc, nc)->opaque; uint16_t rfd_status = 0xa000; #if defined(CONFIG_PAD_RECEIVED_FRAMES) uint8_t min_buf[60]; #endif static const uint8_t broadcast_macaddr[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; #if defined(CONFIG_PAD_RECEIVED_FRAMES) /* Pad to minimum Ethernet frame length */ if (size < sizeof(min_buf)) { memcpy(min_buf, buf, size); memset(&min_buf[size], 0, sizeof(min_buf) - size); buf = min_buf; size = sizeof(min_buf); } #endif if (s->configuration[8] & 0x80) { /* CSMA is disabled. */ logout("%p received while CSMA is disabled\n", s); return -1; #if !defined(CONFIG_PAD_RECEIVED_FRAMES) } else if (size < 64 && (s->configuration[7] & BIT(0))) { /* Short frame and configuration byte 7/0 (discard short receive) set: * Short frame is discarded */ logout("%p received short frame (%zu byte)\n", s, size); s->statistics.rx_short_frame_errors++; return -1; #endif } else if ((size > MAX_ETH_FRAME_SIZE + 4) && !(s->configuration[18] & BIT(3))) { /* Long frame and configuration byte 18/3 (long receive ok) not set: * Long frames are discarded. */ logout("%p received long frame (%zu byte), ignored\n", s, size); return -1; } else if (memcmp(buf, s->conf.macaddr.a, 6) == 0) { /* !!! */ /* Frame matches individual address. */ /* TODO: check configuration byte 15/4 (ignore U/L). */ TRACE(RXTX, logout("%p received frame for me, len=%zu\n", s, size)); } else if (memcmp(buf, broadcast_macaddr, 6) == 0) { /* Broadcast frame. */ TRACE(RXTX, logout("%p received broadcast, len=%zu\n", s, size)); rfd_status |= 0x0002; } else if (buf[0] & 0x01) { /* Multicast frame. */ TRACE(RXTX, logout("%p received multicast, len=%zu,%s\n", s, size, nic_dump(buf, size))); if (s->configuration[21] & BIT(3)) { /* Multicast all bit is set, receive all multicast frames. */ } else { unsigned mcast_idx = e100_compute_mcast_idx(buf); assert(mcast_idx < 64); if (s->mult[mcast_idx >> 3] & (1 << (mcast_idx & 7))) { /* Multicast frame is allowed in hash table. */ } else if (s->configuration[15] & BIT(0)) { /* Promiscuous: receive all. */ rfd_status |= 0x0004; } else { TRACE(RXTX, logout("%p multicast ignored\n", s)); return -1; } } /* TODO: Next not for promiscuous mode? */ rfd_status |= 0x0002; } else if (s->configuration[15] & BIT(0)) { /* Promiscuous: receive all. 
*/ TRACE(RXTX, logout("%p received frame in promiscuous mode, len=%zu\n", s, size)); rfd_status |= 0x0004; } else if (s->configuration[20] & BIT(6)) { /* Multiple IA bit set. */ unsigned mcast_idx = compute_mcast_idx(buf); assert(mcast_idx < 64); if (s->mult[mcast_idx >> 3] & (1 << (mcast_idx & 7))) { TRACE(RXTX, logout("%p accepted, multiple IA bit set\n", s)); } else { TRACE(RXTX, logout("%p frame ignored, multiple IA bit set\n", s)); return -1; } } else { TRACE(RXTX, logout("%p received frame, ignored, len=%zu,%s\n", s, size, nic_dump(buf, size))); return size; } if (get_ru_state(s) != ru_ready) { /* No resources available. */ logout("no resources, state=%u\n", get_ru_state(s)); /* TODO: RNR interrupt only at first failed frame? */ eepro100_rnr_interrupt(s); s->statistics.rx_resource_errors++; #if 0 assert(!"no resources"); #endif return -1; } /* !!! */ eepro100_rx_t rx; pci_dma_read(&s->dev, s->ru_base + s->ru_offset, &rx, sizeof(eepro100_rx_t)); uint16_t rfd_command = le16_to_cpu(rx.command); uint16_t rfd_size = le16_to_cpu(rx.size); if (size > rfd_size) { logout("Receive buffer (%" PRId16 " bytes) too small for data " "(%zu bytes); data truncated\n", rfd_size, size); size = rfd_size; } #if !defined(CONFIG_PAD_RECEIVED_FRAMES) if (size < 64) { rfd_status |= 0x0080; } #endif TRACE(OTHER, logout("command 0x%04x, link 0x%08x, addr 0x%08x, size %u\n", rfd_command, rx.link, rx.rx_buf_addr, rfd_size)); stw_le_pci_dma(&s->dev, s->ru_base + s->ru_offset + offsetof(eepro100_rx_t, status), rfd_status); stw_le_pci_dma(&s->dev, s->ru_base + s->ru_offset + offsetof(eepro100_rx_t, count), size); /* Early receive interrupt not supported. */ #if 0 eepro100_er_interrupt(s); #endif /* Receive CRC Transfer not supported. */ if (s->configuration[18] & BIT(2)) { missing("Receive CRC Transfer"); return -1; } /* TODO: check stripping enable bit. */ #if 0 assert(!(s->configuration[17] & BIT(0))); #endif pci_dma_write(&s->dev, s->ru_base + s->ru_offset + sizeof(eepro100_rx_t), buf, size); s->statistics.rx_good_frames++; eepro100_fr_interrupt(s); s->ru_offset = le32_to_cpu(rx.link); if (rfd_command & COMMAND_EL) { /* EL bit is set, so this was the last frame. */ logout("receive: Running out of frames\n"); set_ru_state(s, ru_no_resources); eepro100_rnr_interrupt(s); } if (rfd_command & COMMAND_S) { /* S bit is set. */ set_ru_state(s, ru_suspended); } return size; } static const VMStateDescription vmstate_eepro100 = { .version_id = 3, .minimum_version_id = 2, .minimum_version_id_old = 2, .fields = (VMStateField []) { VMSTATE_PCI_DEVICE(dev, EEPRO100State), VMSTATE_UNUSED(32), VMSTATE_BUFFER(mult, EEPRO100State), VMSTATE_BUFFER(mem, EEPRO100State), /* Save all members of struct between scb_stat and mem. */ VMSTATE_UINT8(scb_stat, EEPRO100State), VMSTATE_UINT8(int_stat, EEPRO100State), VMSTATE_UNUSED(3*4), VMSTATE_MACADDR(conf.macaddr, EEPRO100State), VMSTATE_UNUSED(19*4), VMSTATE_UINT16_ARRAY(mdimem, EEPRO100State, 32), /* The eeprom should be saved and restored by its own routines. */ VMSTATE_UINT32(device, EEPRO100State), /* TODO check device. */ VMSTATE_UINT32(cu_base, EEPRO100State), VMSTATE_UINT32(cu_offset, EEPRO100State), VMSTATE_UINT32(ru_base, EEPRO100State), VMSTATE_UINT32(ru_offset, EEPRO100State), VMSTATE_UINT32(statsaddr, EEPRO100State), /* Save eepro100_stats_t statistics. 
*/ VMSTATE_UINT32(statistics.tx_good_frames, EEPRO100State), VMSTATE_UINT32(statistics.tx_max_collisions, EEPRO100State), VMSTATE_UINT32(statistics.tx_late_collisions, EEPRO100State), VMSTATE_UINT32(statistics.tx_underruns, EEPRO100State), VMSTATE_UINT32(statistics.tx_lost_crs, EEPRO100State), VMSTATE_UINT32(statistics.tx_deferred, EEPRO100State), VMSTATE_UINT32(statistics.tx_single_collisions, EEPRO100State), VMSTATE_UINT32(statistics.tx_multiple_collisions, EEPRO100State), VMSTATE_UINT32(statistics.tx_total_collisions, EEPRO100State), VMSTATE_UINT32(statistics.rx_good_frames, EEPRO100State), VMSTATE_UINT32(statistics.rx_crc_errors, EEPRO100State), VMSTATE_UINT32(statistics.rx_alignment_errors, EEPRO100State), VMSTATE_UINT32(statistics.rx_resource_errors, EEPRO100State), VMSTATE_UINT32(statistics.rx_overrun_errors, EEPRO100State), VMSTATE_UINT32(statistics.rx_cdt_errors, EEPRO100State), VMSTATE_UINT32(statistics.rx_short_frame_errors, EEPRO100State), VMSTATE_UINT32(statistics.fc_xmt_pause, EEPRO100State), VMSTATE_UINT32(statistics.fc_rcv_pause, EEPRO100State), VMSTATE_UINT32(statistics.fc_rcv_unsupported, EEPRO100State), VMSTATE_UINT16(statistics.xmt_tco_frames, EEPRO100State), VMSTATE_UINT16(statistics.rcv_tco_frames, EEPRO100State), /* Configuration bytes. */ VMSTATE_BUFFER(configuration, EEPRO100State), VMSTATE_END_OF_LIST() } }; static void nic_cleanup(NetClientState *nc) { EEPRO100State *s = DO_UPCAST(NICState, nc, nc)->opaque; s->nic = NULL; } static void pci_nic_uninit(PCIDevice *pci_dev) { EEPRO100State *s = DO_UPCAST(EEPRO100State, dev, pci_dev); memory_region_destroy(&s->mmio_bar); memory_region_destroy(&s->io_bar); memory_region_destroy(&s->flash_bar); vmstate_unregister(&pci_dev->qdev, s->vmstate, s); eeprom93xx_free(&pci_dev->qdev, s->eeprom); qemu_del_net_client(&s->nic->nc); } static NetClientInfo net_eepro100_info = { .type = NET_CLIENT_OPTIONS_KIND_NIC, .size = sizeof(NICState), .can_receive = nic_can_receive, .receive = nic_receive, .cleanup = nic_cleanup, }; static int e100_nic_init(PCIDevice *pci_dev) { EEPRO100State *s = DO_UPCAST(EEPRO100State, dev, pci_dev); E100PCIDeviceInfo *info = eepro100_get_class(s); TRACE(OTHER, logout("\n")); s->device = info->device; e100_pci_reset(s); /* Add 64 * 2 EEPROM. i82557 and i82558 support a 64 word EEPROM, * i82559 and later support 64 or 256 word EEPROM. */ s->eeprom = eeprom93xx_new(&pci_dev->qdev, EEPROM_SIZE); /* Handler for memory-mapped I/O */ memory_region_init_io(&s->mmio_bar, &eepro100_ops, s, "eepro100-mmio", PCI_MEM_SIZE); pci_register_bar(&s->dev, 0, PCI_BASE_ADDRESS_MEM_PREFETCH, &s->mmio_bar); memory_region_init_io(&s->io_bar, &eepro100_ops, s, "eepro100-io", PCI_IO_SIZE); pci_register_bar(&s->dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &s->io_bar); /* FIXME: flash aliases to mmio?! 
*/ memory_region_init_io(&s->flash_bar, &eepro100_ops, s, "eepro100-flash", PCI_FLASH_SIZE); pci_register_bar(&s->dev, 2, 0, &s->flash_bar); qemu_macaddr_default_if_unset(&s->conf.macaddr); logout("macaddr: %s\n", nic_dump(&s->conf.macaddr.a[0], 6)); nic_reset(s); s->nic = qemu_new_nic(&net_eepro100_info, &s->conf, object_get_typename(OBJECT(pci_dev)), pci_dev->qdev.id, s); qemu_format_nic_info_str(&s->nic->nc, s->conf.macaddr.a); TRACE(OTHER, logout("%s\n", s->nic->nc.info_str)); qemu_register_reset(nic_reset, s); s->vmstate = g_malloc(sizeof(vmstate_eepro100)); memcpy(s->vmstate, &vmstate_eepro100, sizeof(vmstate_eepro100)); s->vmstate->name = s->nic->nc.model; vmstate_register(&pci_dev->qdev, -1, s->vmstate, s); add_boot_device_path(s->conf.bootindex, &pci_dev->qdev, "/ethernet-phy@0"); return 0; } static E100PCIDeviceInfo e100_devices[] = { { .name = "i82550", .desc = "Intel i82550 Ethernet", .device = i82550, /* TODO: check device id. */ .device_id = PCI_DEVICE_ID_INTEL_82551IT, /* Revision ID: 0x0c, 0x0d, 0x0e. */ .revision = 0x0e, /* TODO: check size of statistical counters. */ .stats_size = 80, /* TODO: check extended tcb support. */ .has_extended_tcb_support = true, .power_management = true, },{ .name = "i82551", .desc = "Intel i82551 Ethernet", .device = i82551, .device_id = PCI_DEVICE_ID_INTEL_82551IT, /* Revision ID: 0x0f, 0x10. */ .revision = 0x0f, /* TODO: check size of statistical counters. */ .stats_size = 80, .has_extended_tcb_support = true, .power_management = true, },{ .name = "i82557a", .desc = "Intel i82557A Ethernet", .device = i82557A, .device_id = PCI_DEVICE_ID_INTEL_82557, .revision = 0x01, .power_management = false, },{ .name = "i82557b", .desc = "Intel i82557B Ethernet", .device = i82557B, .device_id = PCI_DEVICE_ID_INTEL_82557, .revision = 0x02, .power_management = false, },{ .name = "i82557c", .desc = "Intel i82557C Ethernet", .device = i82557C, .device_id = PCI_DEVICE_ID_INTEL_82557, .revision = 0x03, .power_management = false, },{ .name = "i82558a", .desc = "Intel i82558A Ethernet", .device = i82558A, .device_id = PCI_DEVICE_ID_INTEL_82557, .revision = 0x04, .stats_size = 76, .has_extended_tcb_support = true, .power_management = true, },{ .name = "i82558b", .desc = "Intel i82558B Ethernet", .device = i82558B, .device_id = PCI_DEVICE_ID_INTEL_82557, .revision = 0x05, .stats_size = 76, .has_extended_tcb_support = true, .power_management = true, },{ .name = "i82559a", .desc = "Intel i82559A Ethernet", .device = i82559A, .device_id = PCI_DEVICE_ID_INTEL_82557, .revision = 0x06, .stats_size = 80, .has_extended_tcb_support = true, .power_management = true, },{ .name = "i82559b", .desc = "Intel i82559B Ethernet", .device = i82559B, .device_id = PCI_DEVICE_ID_INTEL_82557, .revision = 0x07, .stats_size = 80, .has_extended_tcb_support = true, .power_management = true, },{ .name = "i82559c", .desc = "Intel i82559C Ethernet", .device = i82559C, .device_id = PCI_DEVICE_ID_INTEL_82557, #if 0 .revision = 0x08, #endif /* TODO: Windows wants revision id 0x0c. */ .revision = 0x0c, #if EEPROM_SIZE > 0 .subsystem_vendor_id = PCI_VENDOR_ID_INTEL, .subsystem_id = 0x0040, #endif .stats_size = 80, .has_extended_tcb_support = true, .power_management = true, },{ .name = "i82559er", .desc = "Intel i82559ER Ethernet", .device = i82559ER, .device_id = PCI_DEVICE_ID_INTEL_82551IT, .revision = 0x09, .stats_size = 80, .has_extended_tcb_support = true, .power_management = true, },{ .name = "i82562", .desc = "Intel i82562 Ethernet", .device = i82562, /* TODO: check device id. 
*/ .device_id = PCI_DEVICE_ID_INTEL_82551IT, /* TODO: wrong revision id. */ .revision = 0x0e, .stats_size = 80, .has_extended_tcb_support = true, .power_management = true, },{ /* Toshiba Tecra 8200. */ .name = "i82801", .desc = "Intel i82801 Ethernet", .device = i82801, .device_id = 0x2449, .revision = 0x03, .stats_size = 80, .has_extended_tcb_support = true, .power_management = true, } }; static E100PCIDeviceInfo *eepro100_get_class_by_name(const char *typename) { E100PCIDeviceInfo *info = NULL; int i; /* This is admittedly awkward but also temporary. QOM allows for * parameterized typing and for subclassing both of which would suitably * handle what's going on here. But class_data is already being used as * a stop-gap hack to allow incremental qdev conversion so we cannot use it * right now. Once we merge the final QOM series, we can come back here and * do this in a much more elegant fashion. */ for (i = 0; i < ARRAY_SIZE(e100_devices); i++) { if (strcmp(e100_devices[i].name, typename) == 0) { info = &e100_devices[i]; break; } } assert(info != NULL); return info; } static E100PCIDeviceInfo *eepro100_get_class(EEPRO100State *s) { return eepro100_get_class_by_name(object_get_typename(OBJECT(s))); } static Property e100_properties[] = { DEFINE_NIC_PROPERTIES(EEPRO100State, conf), DEFINE_PROP_END_OF_LIST(), }; static void eepro100_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); E100PCIDeviceInfo *info; info = eepro100_get_class_by_name(object_class_get_name(klass)); dc->props = e100_properties; dc->desc = info->desc; k->vendor_id = PCI_VENDOR_ID_INTEL; k->class_id = PCI_CLASS_NETWORK_ETHERNET; k->romfile = "pxe-eepro100.rom"; k->init = e100_nic_init; k->exit = pci_nic_uninit; k->device_id = info->device_id; k->revision = info->revision; k->subsystem_vendor_id = info->subsystem_vendor_id; k->subsystem_id = info->subsystem_id; } static void eepro100_register_types(void) { size_t i; for (i = 0; i < ARRAY_SIZE(e100_devices); i++) { TypeInfo type_info = {}; E100PCIDeviceInfo *info = &e100_devices[i]; type_info.name = info->name; type_info.parent = TYPE_PCI_DEVICE; type_info.class_init = eepro100_class_init; type_info.instance_size = sizeof(EEPRO100State); type_register(&type_info); } } type_init(eepro100_register_types)
piyushroshan/xen-4.3.2
tools/qemu-xen/hw/eepro100.c
C
gpl-2.0
70,695
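The device table and class-init pattern above boil down to a linear, name-keyed table lookup: eepro100_register_types() registers one QOM type per row of e100_devices[], and eepro100_get_class_by_name() later recovers the row from the instance's type name. A minimal standalone sketch of that lookup follows; the struct, field values, and main() are illustrative stand-ins, not the QEMU definitions.

/* Table-driven lookup in the shape of eepro100_get_class_by_name():
 * linear scan keyed on the type name, asserting every registered type
 * has a table entry. Struct and values are hypothetical stand-ins. */
#include <assert.h>
#include <stdio.h>
#include <string.h>

struct dev_info {
    const char *name;
    unsigned short device_id;
    unsigned char revision;
};

static const struct dev_info devices[] = {
    { "i82557a",  0x1229, 0x01 },
    { "i82559er", 0x1209, 0x09 },
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const struct dev_info *get_class_by_name(const char *typename)
{
    size_t i;
    for (i = 0; i < ARRAY_SIZE(devices); i++) {
        if (strcmp(devices[i].name, typename) == 0)
            return &devices[i];
    }
    assert(0 && "unknown type name");   /* mirrors assert(info != NULL) */
    return NULL;
}

int main(void)
{
    const struct dev_info *info = get_class_by_name("i82559er");
    printf("%s: device_id=0x%04x rev=0x%02x\n",
           info->name, info->device_id, info->revision);
    return 0;
}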
/* * (C) Copyright David Brownell 2000-2002 * Copyright (c) 2005 MontaVista Software * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Ported to 834x by Randy Vinson <rvinson@mvista.com> using code provided * by Hunter Wu. */ #include <linux/platform_device.h> #include <linux/fsl_devices.h> #include "ehci-fsl.h" /* FIXME: Power Management is un-ported so temporarily disable it */ #undef CONFIG_PM /* PCI-based HCs are common, but plenty of non-PCI HCs are used too */ /* configure so an HC device and id are always provided */ /* always called with process context; sleeping is OK */ /** * usb_hcd_fsl_probe - initialize FSL-based HCDs * @driver: Driver to be used for this HCD * @pdev: USB Host Controller being probed * Context: !in_interrupt() * * Allocates basic resources for this USB host controller. * */ int usb_hcd_fsl_probe(const struct hc_driver *driver, struct platform_device *pdev) { struct fsl_usb2_platform_data *pdata; struct usb_hcd *hcd; struct resource *res; int irq; int retval; unsigned int temp; pr_debug("initializing FSL-SOC USB Controller\n"); /* Need platform data for setup */ pdata = (struct fsl_usb2_platform_data *)pdev->dev.platform_data; if (!pdata) { dev_err(&pdev->dev, "No platform data for %s.\n", pdev->dev.bus_id); return -ENODEV; } /* * This is a host mode driver, verify that we're supposed to be * in host mode. */ if (!((pdata->operating_mode == FSL_USB2_DR_HOST) || (pdata->operating_mode == FSL_USB2_MPH_HOST))) { dev_err(&pdev->dev, "Non Host Mode configured for %s. Wrong driver linked.\n", pdev->dev.bus_id); return -ENODEV; } res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { dev_err(&pdev->dev, "Found HC with no IRQ. Check %s setup!\n", pdev->dev.bus_id); return -ENODEV; } irq = res->start; hcd = usb_create_hcd(driver, &pdev->dev, pdev->dev.bus_id); if (!hcd) { retval = -ENOMEM; goto err1; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "Found HC with no register addr. 
Check %s setup!\n", pdev->dev.bus_id); retval = -ENODEV; goto err2; } hcd->rsrc_start = res->start; hcd->rsrc_len = res->end - res->start + 1; if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, driver->description)) { dev_dbg(&pdev->dev, "controller already in use\n"); retval = -EBUSY; goto err2; } hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); if (hcd->regs == NULL) { dev_dbg(&pdev->dev, "error mapping memory\n"); retval = -EFAULT; goto err3; } /* Enable USB controller */ temp = in_be32(hcd->regs + 0x500); out_be32(hcd->regs + 0x500, temp | 0x4); /* Set to Host mode */ temp = in_le32(hcd->regs + 0x1a8); out_le32(hcd->regs + 0x1a8, temp | 0x3); retval = usb_add_hcd(hcd, irq, IRQF_SHARED); if (retval != 0) goto err4; return retval; err4: iounmap(hcd->regs); err3: release_mem_region(hcd->rsrc_start, hcd->rsrc_len); err2: usb_put_hcd(hcd); err1: dev_err(&pdev->dev, "init %s fail, %d\n", pdev->dev.bus_id, retval); return retval; } /* may be called without controller electrically present */ /* may be called with controller, bus, and devices active */ /** * usb_hcd_fsl_remove - shutdown processing for FSL-based HCDs * @dev: USB Host Controller being removed * Context: !in_interrupt() * * Reverses the effect of usb_hcd_fsl_probe(). * */ void usb_hcd_fsl_remove(struct usb_hcd *hcd, struct platform_device *pdev) { usb_remove_hcd(hcd); iounmap(hcd->regs); release_mem_region(hcd->rsrc_start, hcd->rsrc_len); usb_put_hcd(hcd); } static void mpc83xx_setup_phy(struct ehci_hcd *ehci, enum fsl_usb2_phy_modes phy_mode, unsigned int port_offset) { u32 portsc = 0; switch (phy_mode) { case FSL_USB2_PHY_ULPI: portsc |= PORT_PTS_ULPI; break; case FSL_USB2_PHY_SERIAL: portsc |= PORT_PTS_SERIAL; break; case FSL_USB2_PHY_UTMI_WIDE: portsc |= PORT_PTS_PTW; /* fall through */ case FSL_USB2_PHY_UTMI: portsc |= PORT_PTS_UTMI; break; case FSL_USB2_PHY_NONE: break; } ehci_writel(ehci, portsc, &ehci->regs->port_status[port_offset]); } static void mpc83xx_usb_setup(struct usb_hcd *hcd) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); struct fsl_usb2_platform_data *pdata; void __iomem *non_ehci = hcd->regs; pdata = (struct fsl_usb2_platform_data *)hcd->self.controller-> platform_data; /* Enable PHY interface in the control reg. */ out_be32(non_ehci + FSL_SOC_USB_CTRL, 0x00000004); out_be32(non_ehci + FSL_SOC_USB_SNOOP1, 0x0000001b); #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) /* * Turn on cache snooping hardware, since some PowerPC platforms * wholly rely on hardware to deal with cache coherency */ /* Setup Snooping for all the 4GB space */ /* SNOOP1 starts from 0x0, size 2G */ out_be32(non_ehci + FSL_SOC_USB_SNOOP1, 0x0 | SNOOP_SIZE_2GB); /* SNOOP2 starts from 0x80000000, size 2G */ out_be32(non_ehci + FSL_SOC_USB_SNOOP2, 0x80000000 | SNOOP_SIZE_2GB); #endif if (pdata->operating_mode == FSL_USB2_DR_HOST) mpc83xx_setup_phy(ehci, pdata->phy_mode, 0); if (pdata->operating_mode == FSL_USB2_MPH_HOST) { unsigned int chip, rev, svr; svr = mfspr(SPRN_SVR); chip = svr >> 16; rev = (svr >> 4) & 0xf; /* Deal with USB Erratum #14 on MPC834x Rev 1.0 & 1.1 chips */ if ((rev == 1) && (chip >= 0x8050) && (chip <= 0x8055)) ehci->has_fsl_port_bug = 1; if (pdata->port_enables & FSL_USB2_PORT0_ENABLED) mpc83xx_setup_phy(ehci, pdata->phy_mode, 0); if (pdata->port_enables & FSL_USB2_PORT1_ENABLED) mpc83xx_setup_phy(ehci, pdata->phy_mode, 1); } /* put controller in host mode. 
*/ ehci_writel(ehci, 0x00000003, non_ehci + FSL_SOC_USB_USBMODE); out_be32(non_ehci + FSL_SOC_USB_PRICTRL, 0x0000000c); out_be32(non_ehci + FSL_SOC_USB_AGECNTTHRSH, 0x00000040); out_be32(non_ehci + FSL_SOC_USB_SICTRL, 0x00000001); } /* called after powerup, by probe or system-pm "wakeup" */ static int ehci_fsl_reinit(struct ehci_hcd *ehci) { mpc83xx_usb_setup(ehci_to_hcd(ehci)); ehci_port_power(ehci, 0); return 0; } /* called during probe() after chip reset completes */ static int ehci_fsl_setup(struct usb_hcd *hcd) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); int retval; /* EHCI registers start at offset 0x100 */ ehci->caps = hcd->regs + 0x100; ehci->regs = hcd->regs + 0x100 + HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); dbg_hcs_params(ehci, "reset"); dbg_hcc_params(ehci, "reset"); /* cache this readonly data; minimize chip reads */ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); retval = ehci_halt(ehci); if (retval) return retval; /* data structure init */ retval = ehci_init(hcd); if (retval) return retval; ehci->is_tdi_rh_tt = 1; ehci->sbrn = 0x20; ehci_reset(ehci); retval = ehci_fsl_reinit(ehci); return retval; } static const struct hc_driver ehci_fsl_hc_driver = { .description = hcd_name, .product_desc = "Freescale On-Chip EHCI Host Controller", .hcd_priv_size = sizeof(struct ehci_hcd), /* * generic hardware linkage */ .irq = ehci_irq, .flags = HCD_USB2, /* * basic lifecycle operations */ .reset = ehci_fsl_setup, .start = ehci_run, #ifdef CONFIG_PM .suspend = ehci_bus_suspend, .resume = ehci_bus_resume, #endif .stop = ehci_stop, .shutdown = ehci_shutdown, /* * managing i/o requests and associated device resources */ .urb_enqueue = ehci_urb_enqueue, .urb_dequeue = ehci_urb_dequeue, .endpoint_disable = ehci_endpoint_disable, /* * scheduling support */ .get_frame_number = ehci_get_frame, /* * root hub support */ .hub_status_data = ehci_hub_status_data, .hub_control = ehci_hub_control, .bus_suspend = ehci_bus_suspend, .bus_resume = ehci_bus_resume, }; static int ehci_fsl_drv_probe(struct platform_device *pdev) { if (usb_disabled()) return -ENODEV; return usb_hcd_fsl_probe(&ehci_fsl_hc_driver, pdev); } static int ehci_fsl_drv_remove(struct platform_device *pdev) { struct usb_hcd *hcd = platform_get_drvdata(pdev); usb_hcd_fsl_remove(hcd, pdev); return 0; } MODULE_ALIAS("fsl-ehci"); static struct platform_driver ehci_fsl_driver = { .probe = ehci_fsl_drv_probe, .remove = ehci_fsl_drv_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "fsl-ehci", }, };
ut-osa/laminar
linux-2.6.22.6/drivers/usb/host/ehci-fsl.c
C
bsd-3-clause
9,011
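usb_hcd_fsl_probe() above follows the kernel's usual goto-unwind convention: resources are acquired in order and released in reverse order from labeled error exits, while on success everything is kept for the device's lifetime. A self-contained sketch of the same shape, with hypothetical acquire()/release() placeholders standing in for usb_create_hcd(), request_mem_region(), and ioremap():

/* Goto-unwind sketch: each failure jumps to the label that releases
 * everything acquired so far, in reverse order. Placeholders only. */
#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *what) { printf("acquire %s\n", what); return malloc(1); }
static void release(const char *what, void *p) { printf("release %s\n", what); free(p); }

static int probe(void)
{
    void *hcd, *region, *regs;
    int retval;

    hcd = acquire("hcd");
    if (!hcd) { retval = -1; goto err1; }

    region = acquire("mem region");
    if (!region) { retval = -2; goto err2; }

    regs = acquire("ioremap");
    if (!regs) { retval = -3; goto err3; }

    /* success: resources stay held for the device's lifetime */
    printf("probe ok, regs at %p\n", regs);
    return 0;

err3:
    release("mem region", region);
err2:
    release("hcd", hcd);
err1:
    fprintf(stderr, "probe failed: %d\n", retval);
    return retval;
}

int main(void) { return probe() ? 1 : 0; }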
/* * pci_irq.c - ACPI PCI Interrupt Routing ($Revision: 11 $) * * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * Copyright (C) 2002 Dominik Brodowski <devel@brodo.de> * (c) Copyright 2008 Hewlett-Packard Development Company, L.P. * Bjorn Helgaas <bjorn.helgaas@hp.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/dmi.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/spinlock.h> #include <linux/pm.h> #include <linux/pci.h> #include <linux/acpi.h> #include <linux/slab.h> #include <linux/interrupt.h> #define PREFIX "ACPI: " #define _COMPONENT ACPI_PCI_COMPONENT ACPI_MODULE_NAME("pci_irq"); struct acpi_prt_entry { struct acpi_pci_id id; u8 pin; acpi_handle link; u32 index; /* GSI, or link _CRS index */ }; static inline char pin_name(int pin) { return 'A' + pin - 1; } /* -------------------------------------------------------------------------- PCI IRQ Routing Table (PRT) Support -------------------------------------------------------------------------- */ /* http://bugzilla.kernel.org/show_bug.cgi?id=4773 */ static const struct dmi_system_id medion_md9580[] = { { .ident = "Medion MD9580-F laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "MEDIONNB"), DMI_MATCH(DMI_PRODUCT_NAME, "A555"), }, }, { } }; /* http://bugzilla.kernel.org/show_bug.cgi?id=5044 */ static const struct dmi_system_id dell_optiplex[] = { { .ident = "Dell Optiplex GX1", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex GX1 600S+"), }, }, { } }; /* http://bugzilla.kernel.org/show_bug.cgi?id=10138 */ static const struct dmi_system_id hp_t5710[] = { { .ident = "HP t5710", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "hp t5000 series"), DMI_MATCH(DMI_BOARD_NAME, "098Ch"), }, }, { } }; struct prt_quirk { const struct dmi_system_id *system; unsigned int segment; unsigned int bus; unsigned int device; unsigned char pin; const char *source; /* according to BIOS */ const char *actual_source; }; #define PCI_INTX_PIN(c) (c - 'A' + 1) /* * These systems have incorrect _PRT entries. The BIOS claims the PCI * interrupt at the listed segment/bus/device/pin is connected to the first * link device, but it is actually connected to the second. 
*/ static const struct prt_quirk prt_quirks[] = { { medion_md9580, 0, 0, 9, PCI_INTX_PIN('A'), "\\_SB_.PCI0.ISA_.LNKA", "\\_SB_.PCI0.ISA_.LNKB"}, { dell_optiplex, 0, 0, 0xd, PCI_INTX_PIN('A'), "\\_SB_.LNKB", "\\_SB_.LNKA"}, { hp_t5710, 0, 0, 1, PCI_INTX_PIN('A'), "\\_SB_.PCI0.LNK1", "\\_SB_.PCI0.LNK3"}, }; static void do_prt_fixups(struct acpi_prt_entry *entry, struct acpi_pci_routing_table *prt) { int i; const struct prt_quirk *quirk; for (i = 0; i < ARRAY_SIZE(prt_quirks); i++) { quirk = &prt_quirks[i]; /* All current quirks involve link devices, not GSIs */ if (dmi_check_system(quirk->system) && entry->id.segment == quirk->segment && entry->id.bus == quirk->bus && entry->id.device == quirk->device && entry->pin == quirk->pin && !strcmp(prt->source, quirk->source) && strlen(prt->source) >= strlen(quirk->actual_source)) { printk(KERN_WARNING PREFIX "firmware reports " "%04x:%02x:%02x PCI INT %c connected to %s; " "changing to %s\n", entry->id.segment, entry->id.bus, entry->id.device, pin_name(entry->pin), prt->source, quirk->actual_source); strcpy(prt->source, quirk->actual_source); } } } static int acpi_pci_irq_check_entry(acpi_handle handle, struct pci_dev *dev, int pin, struct acpi_pci_routing_table *prt, struct acpi_prt_entry **entry_ptr) { int segment = pci_domain_nr(dev->bus); int bus = dev->bus->number; int device = pci_ari_enabled(dev->bus) ? 0 : PCI_SLOT(dev->devfn); struct acpi_prt_entry *entry; if (((prt->address >> 16) & 0xffff) != device || prt->pin + 1 != pin) return -ENODEV; entry = kzalloc(sizeof(struct acpi_prt_entry), GFP_KERNEL); if (!entry) return -ENOMEM; /* * Note that the _PRT uses 0=INTA, 1=INTB, etc, while PCI uses * 1=INTA, 2=INTB. We use the PCI encoding throughout, so convert * it here. */ entry->id.segment = segment; entry->id.bus = bus; entry->id.device = (prt->address >> 16) & 0xFFFF; entry->pin = prt->pin + 1; do_prt_fixups(entry, prt); entry->index = prt->source_index; /* * Type 1: Dynamic * --------------- * The 'source' field specifies the PCI interrupt link device used to * configure the IRQ assigned to this slot|dev|pin. The 'source_index' * indicates which resource descriptor in the resource template (of * the link device) this interrupt is allocated from. * * NOTE: Don't query the Link Device for IRQ information at this time * because Link Device enumeration may not have occurred yet * (e.g. exists somewhere 'below' this _PRT entry in the ACPI * namespace). */ if (prt->source[0]) acpi_get_handle(handle, prt->source, &entry->link); /* * Type 2: Static * -------------- * The 'source' field is NULL, and the 'source_index' field specifies * the IRQ value, which is hardwired to specific interrupt inputs on * the interrupt controller. 
*/ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INFO, " %04x:%02x:%02x[%c] -> %s[%d]\n", entry->id.segment, entry->id.bus, entry->id.device, pin_name(entry->pin), prt->source, entry->index)); *entry_ptr = entry; return 0; } static int acpi_pci_irq_find_prt_entry(struct pci_dev *dev, int pin, struct acpi_prt_entry **entry_ptr) { acpi_status status; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_pci_routing_table *entry; acpi_handle handle = NULL; if (dev->bus->bridge) handle = ACPI_HANDLE(dev->bus->bridge); if (!handle) return -ENODEV; /* 'handle' is the _PRT's parent (root bridge or PCI-PCI bridge) */ status = acpi_get_irq_routing_table(handle, &buffer); if (ACPI_FAILURE(status)) { kfree(buffer.pointer); return -ENODEV; } entry = buffer.pointer; while (entry && (entry->length > 0)) { if (!acpi_pci_irq_check_entry(handle, dev, pin, entry, entry_ptr)) break; entry = (struct acpi_pci_routing_table *) ((unsigned long)entry + entry->length); } kfree(buffer.pointer); return 0; } /* -------------------------------------------------------------------------- PCI Interrupt Routing Support -------------------------------------------------------------------------- */ #ifdef CONFIG_X86_IO_APIC extern int noioapicquirk; extern int noioapicreroute; static int bridge_has_boot_interrupt_variant(struct pci_bus *bus) { struct pci_bus *bus_it; for (bus_it = bus ; bus_it ; bus_it = bus_it->parent) { if (!bus_it->self) return 0; if (bus_it->self->irq_reroute_variant) return bus_it->self->irq_reroute_variant; } return 0; } /* * Some chipsets (e.g. Intel 6700PXH) generate a legacy INTx when the IRQ * entry in the chipset's IO-APIC is masked (as, e.g. the RT kernel does * during interrupt handling). When this INTx generation cannot be disabled, * we reroute these interrupts to their legacy equivalent to get rid of * spurious interrupts. */ static int acpi_reroute_boot_interrupt(struct pci_dev *dev, struct acpi_prt_entry *entry) { if (noioapicquirk || noioapicreroute) { return 0; } else { switch (bridge_has_boot_interrupt_variant(dev->bus)) { case 0: /* no rerouting necessary */ return 0; case INTEL_IRQ_REROUTE_VARIANT: /* * Remap according to INTx routing table in 6700PXH * specs, intel order number 302628-002, section * 2.15.2. Other chipsets (80332, ...) have the same * mapping and are handled here as well. */ dev_info(&dev->dev, "PCI IRQ %d -> rerouted to legacy " "IRQ %d\n", entry->index, (entry->index % 4) + 16); entry->index = (entry->index % 4) + 16; return 1; default: dev_warn(&dev->dev, "Cannot reroute IRQ %d to legacy " "IRQ: unknown mapping\n", entry->index); return -1; } } } #endif /* CONFIG_X86_IO_APIC */ static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin) { struct acpi_prt_entry *entry = NULL; struct pci_dev *bridge; u8 bridge_pin, orig_pin = pin; int ret; ret = acpi_pci_irq_find_prt_entry(dev, pin, &entry); if (!ret && entry) { #ifdef CONFIG_X86_IO_APIC acpi_reroute_boot_interrupt(dev, entry); #endif /* CONFIG_X86_IO_APIC */ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %s[%c] _PRT entry\n", pci_name(dev), pin_name(pin))); return entry; } /* * Attempt to derive an IRQ for this device from a parent bridge's * PCI interrupt routing entry (eg. yenta bridge and add-in card bridge). 
*/ bridge = dev->bus->self; while (bridge) { pin = pci_swizzle_interrupt_pin(dev, pin); if ((bridge->class >> 8) == PCI_CLASS_BRIDGE_CARDBUS) { /* PC card has the same IRQ as its cardbridge */ bridge_pin = bridge->pin; if (!bridge_pin) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No interrupt pin configured for device %s\n", pci_name(bridge))); return NULL; } pin = bridge_pin; } ret = acpi_pci_irq_find_prt_entry(bridge, pin, &entry); if (!ret && entry) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Derived GSI for %s INT %c from %s\n", pci_name(dev), pin_name(orig_pin), pci_name(bridge))); return entry; } dev = bridge; bridge = dev->bus->self; } dev_warn(&dev->dev, "can't derive routing for PCI INT %c\n", pin_name(orig_pin)); return NULL; } #if IS_ENABLED(CONFIG_ISA) || IS_ENABLED(CONFIG_EISA) static int acpi_isa_register_gsi(struct pci_dev *dev) { u32 dev_gsi; /* Interrupt Line values above 0xF are forbidden */ if (dev->irq > 0 && (dev->irq <= 0xF) && acpi_isa_irq_available(dev->irq) && (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) { dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n", pin_name(dev->pin), dev->irq); acpi_register_gsi(&dev->dev, dev_gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW); return 0; } return -EINVAL; } #else static inline int acpi_isa_register_gsi(struct pci_dev *dev) { return -ENODEV; } #endif static inline bool acpi_pci_irq_valid(struct pci_dev *dev, u8 pin) { #ifdef CONFIG_X86 /* * On x86 irq line 0xff means "unknown" or "no connection" * (PCI 3.0, Section 6.2.4, footnote on page 223). */ if (dev->irq == 0xff) { dev->irq = IRQ_NOTCONNECTED; dev_warn(&dev->dev, "PCI INT %c: not connected\n", pin_name(pin)); return false; } #endif return true; } int acpi_pci_irq_enable(struct pci_dev *dev) { struct acpi_prt_entry *entry; int gsi; u8 pin; int triggering = ACPI_LEVEL_SENSITIVE; /* * On ARM systems with the GIC interrupt model, level interrupts * are always polarity high by specification; PCI legacy * IRQs lines are inverted before reaching the interrupt * controller and must therefore be considered active high * as default. */ int polarity = acpi_irq_model == ACPI_IRQ_MODEL_GIC ? ACPI_ACTIVE_HIGH : ACPI_ACTIVE_LOW; char *link = NULL; char link_desc[16]; int rc; pin = dev->pin; if (!pin) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No interrupt pin configured for device %s\n", pci_name(dev))); return 0; } if (dev->irq_managed && dev->irq > 0) return 0; entry = acpi_pci_irq_lookup(dev, pin); if (!entry) { /* * IDE legacy mode controller IRQs are magic. Why do compat * extensions always make such a nasty mess. */ if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE && (dev->class & 0x05) == 0) return 0; } if (entry) { if (entry->link) gsi = acpi_pci_link_allocate_irq(entry->link, entry->index, &triggering, &polarity, &link); else gsi = entry->index; } else gsi = -1; if (gsi < 0) { /* * No IRQ known to the ACPI subsystem - maybe the BIOS / * driver reported one, then use it. Exit in any case. 
*/ if (!acpi_pci_irq_valid(dev, pin)) { kfree(entry); return 0; } if (acpi_isa_register_gsi(dev)) dev_warn(&dev->dev, "PCI INT %c: no GSI\n", pin_name(pin)); kfree(entry); return 0; } rc = acpi_register_gsi(&dev->dev, gsi, triggering, polarity); if (rc < 0) { dev_warn(&dev->dev, "PCI INT %c: failed to register GSI\n", pin_name(pin)); kfree(entry); return rc; } dev->irq = rc; dev->irq_managed = 1; if (link) snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link); else link_desc[0] = '\0'; dev_dbg(&dev->dev, "PCI INT %c%s -> GSI %u (%s, %s) -> IRQ %d\n", pin_name(pin), link_desc, gsi, (triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge", (polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq); kfree(entry); return 0; } void acpi_pci_irq_disable(struct pci_dev *dev) { struct acpi_prt_entry *entry; int gsi; u8 pin; pin = dev->pin; if (!pin || !dev->irq_managed || dev->irq <= 0) return; /* Keep IOAPIC pin configuration when suspending */ if (dev->dev.power.is_prepared) return; #ifdef CONFIG_PM if (dev->dev.power.runtime_status == RPM_SUSPENDING) return; #endif entry = acpi_pci_irq_lookup(dev, pin); if (!entry) return; if (entry->link) gsi = acpi_pci_link_free_irq(entry->link); else gsi = entry->index; kfree(entry); /* * TBD: It might be worth clearing dev->irq by magic constant * (e.g. PCI_UNDEFINED_IRQ). */ dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin)); if (gsi >= 0) { acpi_unregister_gsi(gsi); dev->irq_managed = 0; } }
Fe-Pi/linux
drivers/acpi/pci_irq.c
C
gpl-2.0
14,413
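When no _PRT entry matches the device itself, acpi_pci_irq_lookup() above walks up the bridge chain and swizzles the pin at each hop via pci_swizzle_interrupt_pin(). A standalone sketch of the standard PCI-to-PCI bridge rotation that call implements (the pin seen upstream depends on the device's slot number); the slot numbers below are made up for illustration:

/* INTx swizzle: pin is 1..4 (INTA..INTD); returns the pin as seen on
 * the parent bus for a device in the given slot. */
#include <stdio.h>

static int swizzle_interrupt_pin(int slot, int pin)
{
    return ((pin - 1 + slot) % 4) + 1;
}

int main(void)
{
    int pin = 1;                      /* device asserts INTA */
    int slots[] = { 3, 5 };           /* two hypothetical bridge hops */
    size_t i;

    for (i = 0; i < sizeof(slots) / sizeof(slots[0]); i++) {
        pin = swizzle_interrupt_pin(slots[i], pin);
        printf("after hop %zu: INT%c\n", i + 1, 'A' + pin - 1);
    }
    return 0;
}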
/* aes256_dec.c */ /* This file is part of the AVR-Crypto-Lib. Copyright (C) 2008 Daniel Otte (daniel.otte@rub.de) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ /** * \file aes256_dec.c * \email daniel.otte@rub.de * \author Daniel Otte * \date 2008-12-31 * \license GPLv3 or later * */ #include "aes.h" #include "aes_dec.h" void aes256_dec(void* buffer, aes256_ctx_t* ctx){ aes_decrypt_core(buffer, (aes_genctx_t*)ctx, 14); }
muccc/luftschleuse2
software/avr-crypto-lib/aes/aes256_dec.c
C
gpl-3.0
1,072
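aes256_dec() above is a thin wrapper that runs the shared decryption core over a prepared key schedule for the 14 rounds of AES-256. A hedged usage sketch, assuming the companion aes256_init() key-schedule routine that AVR-Crypto-Lib declares in aes.h; the key and ciphertext bytes are dummies:

/* Decrypt one 16-byte block in place with AES-256. Assumes the
 * library's aes256_init() key-schedule API; values are placeholders. */
#include <stdint.h>
#include "aes.h"

int main(void)
{
    uint8_t key[32] = { 0 };        /* 256-bit key, all zero for the demo */
    uint8_t block[16] = { 0 };      /* one ciphertext block, decrypted in place */
    aes256_ctx_t ctx;

    aes256_init(key, &ctx);         /* expand the key schedule once */
    aes256_dec(block, &ctx);        /* 14-round decryption, as above */
    return 0;
}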
/* * Copyright (C) Igor Sysoev * Copyright (C) Nginx, Inc. */ #include <ngx_config.h> #include <ngx_core.h> #include <ngx_http.h> typedef struct { uint32_t percent; ngx_http_variable_value_t value; } ngx_http_split_clients_part_t; typedef struct { ngx_http_complex_value_t value; ngx_array_t parts; } ngx_http_split_clients_ctx_t; static char *ngx_conf_split_clients_block(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); static char *ngx_http_split_clients(ngx_conf_t *cf, ngx_command_t *dummy, void *conf); static ngx_command_t ngx_http_split_clients_commands[] = { { ngx_string("split_clients"), NGX_HTTP_MAIN_CONF|NGX_CONF_BLOCK|NGX_CONF_TAKE2, ngx_conf_split_clients_block, NGX_HTTP_MAIN_CONF_OFFSET, 0, NULL }, ngx_null_command }; static ngx_http_module_t ngx_http_split_clients_module_ctx = { NULL, /* preconfiguration */ NULL, /* postconfiguration */ NULL, /* create main configuration */ NULL, /* init main configuration */ NULL, /* create server configuration */ NULL, /* merge server configuration */ NULL, /* create location configuration */ NULL /* merge location configuration */ }; ngx_module_t ngx_http_split_clients_module = { NGX_MODULE_V1, &ngx_http_split_clients_module_ctx, /* module context */ ngx_http_split_clients_commands, /* module directives */ NGX_HTTP_MODULE, /* module type */ NULL, /* init master */ NULL, /* init module */ NULL, /* init process */ NULL, /* init thread */ NULL, /* exit thread */ NULL, /* exit process */ NULL, /* exit master */ NGX_MODULE_V1_PADDING }; static ngx_int_t ngx_http_split_clients_variable(ngx_http_request_t *r, ngx_http_variable_value_t *v, uintptr_t data) { ngx_http_split_clients_ctx_t *ctx = (ngx_http_split_clients_ctx_t *) data; uint32_t hash; ngx_str_t val; ngx_uint_t i; ngx_http_split_clients_part_t *part; *v = ngx_http_variable_null_value; if (ngx_http_complex_value(r, &ctx->value, &val) != NGX_OK) { return NGX_OK; } hash = ngx_murmur_hash2(val.data, val.len); part = ctx->parts.elts; for (i = 0; i < ctx->parts.nelts; i++) { ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, "http split: %uD %uD", hash, part[i].percent); if (hash < part[i].percent) { *v = part[i].value; return NGX_OK; } } return NGX_OK; } static char * ngx_conf_split_clients_block(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) { char *rv; ngx_str_t *value, name; ngx_uint_t i, sum, last; ngx_conf_t save; ngx_http_variable_t *var; ngx_http_split_clients_ctx_t *ctx; ngx_http_split_clients_part_t *part; ngx_http_compile_complex_value_t ccv; ctx = ngx_pcalloc(cf->pool, sizeof(ngx_http_split_clients_ctx_t)); if (ctx == NULL) { return NGX_CONF_ERROR; } value = cf->args->elts; ngx_memzero(&ccv, sizeof(ngx_http_compile_complex_value_t)); ccv.cf = cf; ccv.value = &value[1]; ccv.complex_value = &ctx->value; if (ngx_http_compile_complex_value(&ccv) != NGX_OK) { return NGX_CONF_ERROR; } name = value[2]; name.len--; name.data++; var = ngx_http_add_variable(cf, &name, NGX_HTTP_VAR_CHANGEABLE); if (var == NULL) { return NGX_CONF_ERROR; } var->get_handler = ngx_http_split_clients_variable; var->data = (uintptr_t) ctx; if (ngx_array_init(&ctx->parts, cf->pool, 2, sizeof(ngx_http_split_clients_part_t)) != NGX_OK) { return NGX_CONF_ERROR; } save = *cf; cf->ctx = ctx; cf->handler = ngx_http_split_clients; cf->handler_conf = conf; rv = ngx_conf_parse(cf, NULL); *cf = save; if (rv != NGX_CONF_OK) { return rv; } sum = 0; last = 0; part = ctx->parts.elts; for (i = 0; i < ctx->parts.nelts; i++) { sum = part[i].percent ? 
sum + part[i].percent : 10000; if (sum > 10000) { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "percent sum is more than 100%%"); return NGX_CONF_ERROR; } if (part[i].percent) { part[i].percent = (uint32_t) (last + 0xffffffff / 10000 * part[i].percent); } else { part[i].percent = 0xffffffff; } last = part[i].percent; } return rv; } static char * ngx_http_split_clients(ngx_conf_t *cf, ngx_command_t *dummy, void *conf) { ngx_int_t n; ngx_str_t *value; ngx_http_split_clients_ctx_t *ctx; ngx_http_split_clients_part_t *part; ctx = cf->ctx; value = cf->args->elts; part = ngx_array_push(&ctx->parts); if (part == NULL) { return NGX_CONF_ERROR; } if (value[0].len == 1 && value[0].data[0] == '*') { part->percent = 0; } else { if (value[0].data[value[0].len - 1] != '%') { goto invalid; } n = ngx_atofp(value[0].data, value[0].len - 1, 2); if (n == NGX_ERROR || n == 0) { goto invalid; } part->percent = (uint32_t) n; } part->value.len = value[1].len; part->value.valid = 1; part->value.no_cacheable = 0; part->value.not_found = 0; part->value.data = value[1].data; return NGX_CONF_OK; invalid: ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "invalid percent value \"%V\"", &value[0]); return NGX_CONF_ERROR; }
huangjilaiqin/nginx
src/http/modules/ngx_http_split_clients_module.c
C
mit
6,443
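The threshold arithmetic in ngx_conf_split_clients_block() above deserves a worked example: percentages are stored in hundredths (ngx_atofp with two decimal places, so "51.2%" becomes 5120), each part's cumulative threshold is last + 0xffffffff / 10000 * percent, and a request lands in the first part whose threshold exceeds its murmur hash. A standalone sketch with an arbitrary hash value:

/* Map percent-in-hundredths onto the 32-bit hash range, then classify
 * one hash. Integer truncation leaves a tiny residue at the top of the
 * range, just as in nginx itself. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t percents[] = { 5120, 4880 };   /* 51.2% + 48.8% */
    uint32_t thresholds[2];
    uint32_t last = 0, hash = 0x90000000u;  /* pretend murmur2 output */
    size_t i;

    for (i = 0; i < 2; i++) {
        thresholds[i] = last + 0xffffffffu / 10000 * percents[i];
        last = thresholds[i];
        printf("part %zu threshold: %#x\n", i, thresholds[i]);
    }
    for (i = 0; i < 2; i++) {
        if (hash < thresholds[i]) {
            printf("hash %#x -> part %zu\n", hash, i);
            break;
        }
    }
    return 0;
}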
/* * Ioctl handler * Linux ethernet bridge * * Authors: * Lennert Buytenhek <buytenh@gnu.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/capability.h> #include <linux/kernel.h> #include <linux/if_bridge.h> #include <linux/netdevice.h> #include <linux/times.h> #include <net/net_namespace.h> #include <asm/uaccess.h> #include "br_private.h" /* called with RTNL */ static int get_bridge_ifindices(int *indices, int num) { struct net_device *dev; int i = 0; for_each_netdev(&init_net, dev) { if (i >= num) break; if (dev->priv_flags & IFF_EBRIDGE) indices[i++] = dev->ifindex; } return i; } /* called with RTNL */ static void get_port_ifindices(struct net_bridge *br, int *ifindices, int num) { struct net_bridge_port *p; list_for_each_entry(p, &br->port_list, list) { if (p->port_no < num) ifindices[p->port_no] = p->dev->ifindex; } } /* * Format up to a page worth of forwarding table entries * userbuf -- where to copy result * maxnum -- maximum number of entries desired * (limited to a page for sanity) * offset -- number of records to skip */ static int get_fdb_entries(struct net_bridge *br, void __user *userbuf, unsigned long maxnum, unsigned long offset) { int num; void *buf; size_t size; /* Clamp size to PAGE_SIZE, test maxnum to avoid overflow */ if (maxnum > PAGE_SIZE/sizeof(struct __fdb_entry)) maxnum = PAGE_SIZE/sizeof(struct __fdb_entry); size = maxnum * sizeof(struct __fdb_entry); buf = kmalloc(size, GFP_USER); if (!buf) return -ENOMEM; num = br_fdb_fillbuf(br, buf, maxnum, offset); if (num > 0) { if (copy_to_user(userbuf, buf, num*sizeof(struct __fdb_entry))) num = -EFAULT; } kfree(buf); return num; } static int add_del_if(struct net_bridge *br, int ifindex, int isadd) { struct net_device *dev; int ret; if (!capable(CAP_NET_ADMIN)) return -EPERM; dev = dev_get_by_index(&init_net, ifindex); if (dev == NULL) return -EINVAL; if (isadd) ret = br_add_if(br, dev); else ret = br_del_if(br, dev); dev_put(dev); return ret; } /* * Legacy ioctl's through SIOCDEVPRIVATE * This interface is deprecated because it was too difficult * to do the translation for 32/64bit ioctl compatibility. 
*/ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct net_bridge *br = netdev_priv(dev); unsigned long args[4]; if (copy_from_user(args, rq->ifr_data, sizeof(args))) return -EFAULT; switch (args[0]) { case BRCTL_ADD_IF: case BRCTL_DEL_IF: return add_del_if(br, args[1], args[0] == BRCTL_ADD_IF); case BRCTL_GET_BRIDGE_INFO: { struct __bridge_info b; memset(&b, 0, sizeof(struct __bridge_info)); rcu_read_lock(); memcpy(&b.designated_root, &br->designated_root, 8); memcpy(&b.bridge_id, &br->bridge_id, 8); b.root_path_cost = br->root_path_cost; b.max_age = jiffies_to_clock_t(br->max_age); b.hello_time = jiffies_to_clock_t(br->hello_time); b.forward_delay = br->forward_delay; b.bridge_max_age = br->bridge_max_age; b.bridge_hello_time = br->bridge_hello_time; b.bridge_forward_delay = jiffies_to_clock_t(br->bridge_forward_delay); b.topology_change = br->topology_change; b.topology_change_detected = br->topology_change_detected; b.root_port = br->root_port; b.stp_enabled = (br->stp_enabled != BR_NO_STP); b.ageing_time = jiffies_to_clock_t(br->ageing_time); b.hello_timer_value = br_timer_value(&br->hello_timer); b.tcn_timer_value = br_timer_value(&br->tcn_timer); b.topology_change_timer_value = br_timer_value(&br->topology_change_timer); b.gc_timer_value = br_timer_value(&br->gc_timer); rcu_read_unlock(); if (copy_to_user((void __user *)args[1], &b, sizeof(b))) return -EFAULT; return 0; } case BRCTL_GET_PORT_LIST: { int num, *indices; num = args[2]; if (num < 0) return -EINVAL; if (num == 0) num = 256; if (num > BR_MAX_PORTS) num = BR_MAX_PORTS; indices = kcalloc(num, sizeof(int), GFP_KERNEL); if (indices == NULL) return -ENOMEM; get_port_ifindices(br, indices, num); if (copy_to_user((void __user *)args[1], indices, num*sizeof(int))) num = -EFAULT; kfree(indices); return num; } case BRCTL_SET_BRIDGE_FORWARD_DELAY: if (!capable(CAP_NET_ADMIN)) return -EPERM; spin_lock_bh(&br->lock); br->bridge_forward_delay = clock_t_to_jiffies(args[1]); if (br_is_root_bridge(br)) br->forward_delay = br->bridge_forward_delay; spin_unlock_bh(&br->lock); return 0; case BRCTL_SET_BRIDGE_HELLO_TIME: { unsigned long t = clock_t_to_jiffies(args[1]); if (!capable(CAP_NET_ADMIN)) return -EPERM; if (t < HZ) return -EINVAL; spin_lock_bh(&br->lock); br->bridge_hello_time = t; if (br_is_root_bridge(br)) br->hello_time = br->bridge_hello_time; spin_unlock_bh(&br->lock); return 0; } case BRCTL_SET_BRIDGE_MAX_AGE: if (!capable(CAP_NET_ADMIN)) return -EPERM; spin_lock_bh(&br->lock); br->bridge_max_age = clock_t_to_jiffies(args[1]); if (br_is_root_bridge(br)) br->max_age = br->bridge_max_age; spin_unlock_bh(&br->lock); return 0; case BRCTL_SET_AGEING_TIME: if (!capable(CAP_NET_ADMIN)) return -EPERM; br->ageing_time = clock_t_to_jiffies(args[1]); return 0; case BRCTL_GET_PORT_INFO: { struct __port_info p; struct net_bridge_port *pt; rcu_read_lock(); if ((pt = br_get_port(br, args[2])) == NULL) { rcu_read_unlock(); return -EINVAL; } memset(&p, 0, sizeof(struct __port_info)); memcpy(&p.designated_root, &pt->designated_root, 8); memcpy(&p.designated_bridge, &pt->designated_bridge, 8); p.port_id = pt->port_id; p.designated_port = pt->designated_port; p.path_cost = pt->path_cost; p.designated_cost = pt->designated_cost; p.state = pt->state; p.top_change_ack = pt->topology_change_ack; p.config_pending = pt->config_pending; p.message_age_timer_value = br_timer_value(&pt->message_age_timer); p.forward_delay_timer_value = br_timer_value(&pt->forward_delay_timer); p.hold_timer_value = 
br_timer_value(&pt->hold_timer); rcu_read_unlock(); if (copy_to_user((void __user *)args[1], &p, sizeof(p))) return -EFAULT; return 0; } case BRCTL_SET_BRIDGE_STP_STATE: if (!capable(CAP_NET_ADMIN)) return -EPERM; br_stp_set_enabled(br, args[1]); return 0; case BRCTL_SET_BRIDGE_PRIORITY: if (!capable(CAP_NET_ADMIN)) return -EPERM; spin_lock_bh(&br->lock); br_stp_set_bridge_priority(br, args[1]); spin_unlock_bh(&br->lock); return 0; case BRCTL_SET_PORT_PRIORITY: { struct net_bridge_port *p; int ret = 0; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (args[2] >= (1<<(16-BR_PORT_BITS))) return -ERANGE; spin_lock_bh(&br->lock); if ((p = br_get_port(br, args[1])) == NULL) ret = -EINVAL; else br_stp_set_port_priority(p, args[2]); spin_unlock_bh(&br->lock); return ret; } case BRCTL_SET_PATH_COST: { struct net_bridge_port *p; int ret = 0; if (!capable(CAP_NET_ADMIN)) return -EPERM; if ((p = br_get_port(br, args[1])) == NULL) ret = -EINVAL; else br_stp_set_path_cost(p, args[2]); return ret; } case BRCTL_GET_FDB_ENTRIES: return get_fdb_entries(br, (void __user *)args[1], args[2], args[3]); } return -EOPNOTSUPP; } static int old_deviceless(void __user *uarg) { unsigned long args[3]; if (copy_from_user(args, uarg, sizeof(args))) return -EFAULT; switch (args[0]) { case BRCTL_GET_VERSION: return BRCTL_VERSION; case BRCTL_GET_BRIDGES: { int *indices; int ret = 0; if (args[2] >= 2048) return -ENOMEM; indices = kcalloc(args[2], sizeof(int), GFP_KERNEL); if (indices == NULL) return -ENOMEM; args[2] = get_bridge_ifindices(indices, args[2]); ret = copy_to_user((void __user *)args[1], indices, args[2]*sizeof(int)) ? -EFAULT : args[2]; kfree(indices); return ret; } case BRCTL_ADD_BRIDGE: case BRCTL_DEL_BRIDGE: { char buf[IFNAMSIZ]; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (copy_from_user(buf, (void __user *)args[1], IFNAMSIZ)) return -EFAULT; buf[IFNAMSIZ-1] = 0; if (args[0] == BRCTL_ADD_BRIDGE) return br_add_bridge(buf); return br_del_bridge(buf); } } return -EOPNOTSUPP; } int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *uarg) { switch (cmd) { case SIOCGIFBR: case SIOCSIFBR: return old_deviceless(uarg); case SIOCBRADDBR: case SIOCBRDELBR: { char buf[IFNAMSIZ]; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (copy_from_user(buf, uarg, IFNAMSIZ)) return -EFAULT; buf[IFNAMSIZ-1] = 0; if (cmd == SIOCBRADDBR) return br_add_bridge(buf); return br_del_bridge(buf); } } return -EOPNOTSUPP; } int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct net_bridge *br = netdev_priv(dev); switch(cmd) { case SIOCDEVPRIVATE: return old_dev_ioctl(dev, rq, cmd); case SIOCBRADDIF: case SIOCBRDELIF: return add_del_if(br, rq->ifr_ifindex, cmd == SIOCBRADDIF); } pr_debug("Bridge does not support ioctl 0x%x\n", cmd); return -EOPNOTSUPP; }
getitnowmarketing/archos_kernel_27
net/bridge/br_ioctl.c
C
gpl-2.0
9,288
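The deviceless half of the interface above (old_deviceless()) expects userspace to pack an unsigned long argument array and issue SIOCGIFBR on any socket, which is what bridge-utils does. A hedged userspace sketch querying BRCTL_GET_VERSION, which the handler answers with BRCTL_VERSION; this relies on the legacy ioctl still being wired up on kernels of this vintage:

/* Query the bridge ioctl version through the deviceless legacy path.
 * BRCTL_* come from <linux/if_bridge.h>, SIOCGIFBR from <linux/sockios.h>. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/if_bridge.h>
#include <linux/sockios.h>

int main(void)
{
    unsigned long args[3] = { BRCTL_GET_VERSION, 0, 0 };
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    int version;

    if (fd < 0)
        return 1;
    version = ioctl(fd, SIOCGIFBR, args);   /* kernel copies args[] in */
    if (version < 0)
        perror("SIOCGIFBR");
    else
        printf("bridge ioctl version: %d\n", version);
    close(fd);
    return 0;
}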
#include <linux/string.h> #include <linux/timer.h> #include <linux/workqueue.h> #include <linux/platform_device.h> #include <mach/am_regs.h> #include <linux/amports/canvas.h> #include <linux/amports/vframe.h> #include <linux/amports/vframe_provider.h> #include "deinterlace.h" #ifdef DEBUG unsigned di_pre_underflow = 0, di_pre_overflow = 0; unsigned long debug_array[4 * 1024]; #endif #if 1 #define RECEIVER_NAME "amvideo" #else #define RECEIVER_NAME "deinterlace" #endif #define PATTERN32_NUM 2 #define PATTERN22_NUM 32 #if (PATTERN22_NUM < 32) #define PATTERN22_MARK ((1LL<<PATTERN22_NUM)-1) #elif (PATTERN22_NUM < 64) #define PATTERN22_MARK ((0x100000000LL<<(PATTERN22_NUM-32))-1) #else #define PATTERN22_MARK 0xffffffffffffffffLL #endif #define PRE_HOLD_LINE 4 #define DI_PRE_INTERVAL (HZ/100) // 0 - off // 1 - pre-post link // 2 - pre-post separate, only post in vsync static int deinterlace_mode = 0; #if defined(CONFIG_ARCH_MESON2) static int noise_reduction_level = 2; #endif static struct timer_list di_pre_timer; static struct work_struct di_pre_work; int di_pre_recycle_buf = -1; int prev_struct = 0; int prog_field_count = 0; int buf_recycle_done = 1; int di_pre_post_done = 1; int field_counter = 0, pre_field_counter = 0, di_checked_field = 0; int pattern_len = 0; int di_p32_counter = 0; unsigned int last_big_data = 0, last_big_num = 0; unsigned long blend_mode, pattern_22, di_info[4][83]; unsigned long long di_p32_info, di_p22_info, di_p32_info_2, di_p22_info_2; vframe_t *cur_buf; vframe_t di_buf_pool[DI_BUF_NUM]; DI_MIF_t di_inp_top_mif; DI_MIF_t di_inp_bot_mif; DI_MIF_t di_mem_mif; DI_MIF_t di_buf0_mif; DI_MIF_t di_buf1_mif; DI_MIF_t di_chan2_mif; DI_SIM_MIF_t di_nrwr_mif; DI_SIM_MIF_t di_mtnwr_mif; DI_SIM_MIF_t di_mtncrd_mif; DI_SIM_MIF_t di_mtnprd_mif; unsigned di_mem_start; int vdin_en = 0; vframe_t dummy_buf; int get_deinterlace_mode(void) { return deinterlace_mode; } void set_deinterlace_mode(int mode) { deinterlace_mode = mode; } #if defined(CONFIG_ARCH_MESON2) int get_noise_reduction_level(void) { return noise_reduction_level; } void set_noise_reduction_level(int level) { noise_reduction_level = level; } #endif int get_di_pre_recycle_buf(void) { return di_pre_recycle_buf; } vframe_t *peek_di_out_buf(void) { if (field_counter <= pre_field_counter - 2) { return &(di_buf_pool[field_counter % DI_BUF_NUM]); } else { return NULL; } } void inc_field_counter(void) { field_counter++; } void set_post_di_mem(int mode) { unsigned temp = di_mem_start + (MAX_CANVAS_WIDTH * MAX_CANVAS_HEIGHT * 5 / 4) * ((field_counter + di_checked_field) % DI_BUF_NUM); canvas_config(di_buf0_mif.canvas0_addr0, temp, MAX_CANVAS_WIDTH * 2, MAX_CANVAS_HEIGHT / 2, 0, 0); if (mode == 1) { temp = di_mem_start + (MAX_CANVAS_WIDTH * MAX_CANVAS_HEIGHT * 5 / 4) * ((field_counter + di_checked_field + 1) % DI_BUF_NUM); } else { temp = di_mem_start + (MAX_CANVAS_WIDTH * MAX_CANVAS_HEIGHT * 5 / 4) * ((field_counter + di_checked_field - 1) % DI_BUF_NUM); } canvas_config(di_buf1_mif.canvas0_addr0, temp, MAX_CANVAS_WIDTH * 2, MAX_CANVAS_HEIGHT / 2, 0, 0); temp = di_mem_start + (MAX_CANVAS_WIDTH * MAX_CANVAS_HEIGHT * 5 / 4) * ((field_counter + di_checked_field) % DI_BUF_NUM) + (MAX_CANVAS_WIDTH * MAX_CANVAS_HEIGHT); canvas_config(di_mtncrd_mif.canvas_num, temp, MAX_CANVAS_WIDTH / 2, MAX_CANVAS_HEIGHT / 2, 0, 0); temp = di_mem_start + (MAX_CANVAS_WIDTH * MAX_CANVAS_HEIGHT * 5 / 4) * ((field_counter + di_checked_field + 1) % DI_BUF_NUM) + (MAX_CANVAS_WIDTH * MAX_CANVAS_HEIGHT); canvas_config(di_mtnprd_mif.canvas_num, temp, 
MAX_CANVAS_WIDTH / 2, MAX_CANVAS_HEIGHT / 2, 0, 0); } void disable_deinterlace(void) { WRITE_MPEG_REG(DI_PRE_CTRL, 0x3 << 30); WRITE_MPEG_REG(DI_POST_CTRL, 0x3 << 30); WRITE_MPEG_REG(DI_PRE_SIZE, (32 - 1) | ((64 - 1) << 16)); WRITE_MPEG_REG(DI_POST_SIZE, (32 - 1) | ((128 - 1) << 16)); WRITE_MPEG_REG(DI_INP_GEN_REG, READ_MPEG_REG(DI_INP_GEN_REG) & 0xfffffffe); WRITE_MPEG_REG(DI_MEM_GEN_REG, READ_MPEG_REG(DI_MEM_GEN_REG) & 0xfffffffe); WRITE_MPEG_REG(DI_CHAN2_GEN_REG, READ_MPEG_REG(DI_CHAN2_GEN_REG) & 0xfffffffe); WRITE_MPEG_REG(DI_IF1_GEN_REG, READ_MPEG_REG(DI_IF1_GEN_REG) & 0xfffffffe); } void disable_pre_deinterlace(void) { unsigned status = READ_MPEG_REG(DI_PRE_CTRL) & 0x2; if (prev_struct > 0) { unsigned temp = READ_MPEG_REG(DI_PRE_SIZE); unsigned total = (temp & 0xffff) * ((temp >> 16) & 0xffff); unsigned count = 0; while ((READ_MPEG_REG(DI_INTR_CTRL) & 0xf) != (status | 0x9)) { if (count++ >= total) { break; } } WRITE_MPEG_REG(DI_INTR_CTRL, READ_MPEG_REG(DI_INTR_CTRL)); } WRITE_MPEG_REG(DI_INP_GEN_REG, READ_MPEG_REG(DI_INP_GEN_REG) & 0xfffffffe); WRITE_MPEG_REG(DI_MEM_GEN_REG, READ_MPEG_REG(DI_MEM_GEN_REG) & 0xfffffffe); WRITE_MPEG_REG(DI_CHAN2_GEN_REG, READ_MPEG_REG(DI_CHAN2_GEN_REG) & 0xfffffffe); #ifdef DEBUG di_pre_underflow = 0; di_pre_overflow = 0; #endif prev_struct = 0; prog_field_count = 0; buf_recycle_done = 1; di_pre_post_done = 1; pre_field_counter = field_counter; di_pre_recycle_buf = -1; WRITE_MPEG_REG(DI_PRE_CTRL, 0x3 << 30); WRITE_MPEG_REG(DI_PRE_SIZE, (32 - 1) | ((64 - 1) << 16)); } void disable_post_deinterlace(void) { WRITE_MPEG_REG(DI_POST_CTRL, 0x3 << 30); WRITE_MPEG_REG(DI_POST_SIZE, (32 - 1) | ((128 - 1) << 16)); WRITE_MPEG_REG(DI_IF1_GEN_REG, READ_MPEG_REG(DI_IF1_GEN_REG) & 0xfffffffe); } void set_vd1_fmt_more( int hfmt_en, int hz_yc_ratio, //2bit int hz_ini_phase, //4bit int vfmt_en, int vt_yc_ratio, //2bit int vt_ini_phase, //4bit int y_length, int c_length, int hz_rpt //1bit ) { int vt_phase_step = (16 >> vt_yc_ratio); WRITE_MPEG_REG(VIU_VD1_FMT_CTRL, (hz_rpt << 28) | // hz rpt pixel (hz_ini_phase << 24) | // hz ini phase (0 << 23) | // repeat p0 enable (hz_yc_ratio << 21) | // hz yc ratio (hfmt_en << 20) | // hz enable (1 << 17) | // nrpt_phase0 enable (0 << 16) | // repeat l0 enable (0 << 12) | // skip line num (vt_ini_phase << 8) | // vt ini phase (vt_phase_step << 1) | // vt phase step (3.4) (vfmt_en << 0) // vt enable ); WRITE_MPEG_REG(VIU_VD1_FMT_W, (y_length << 16) | // hz format width (c_length << 0) // vt format width ); } void initial_di_prepost(int hsize_pre, int vsize_pre, int hsize_post, int vsize_post, int hold_line) { WRITE_MPEG_REG(DI_PRE_SIZE, (hsize_pre - 1) | ((vsize_pre - 1) << 16)); WRITE_MPEG_REG(DI_POST_SIZE, (hsize_post - 1) | ((vsize_post - 1) << 16)); WRITE_MPEG_REG(DI_BLEND_CTRL, (0x2 << 20) | // top mode. EI only 25); // KDEINT WRITE_MPEG_REG(DI_EI_CTRL0, (255 << 16) | // ei_filter. (5 << 8) | // ei_threshold. (1 << 2) | // ei bypass cf2. (1 << 1)); // ei bypass far1 WRITE_MPEG_REG(DI_EI_CTRL1, (180 << 24) | // ei diff (10 << 16) | // ei ang45 (15 << 8) | // ei peak. 45); // ei cross. WRITE_MPEG_REG(DI_EI_CTRL2, (10 << 23) | // close2 (10 << 16) | // close1 (10 << 8) | // far2 10); // far1 WRITE_MPEG_REG(DI_PRE_CTRL, 0 | // NR enable (0 << 1) | // MTN_EN (0 << 2) | // check 3:2 pulldown (0 << 3) | // check 2:2 pulldown (0 << 4) | // 2:2 check mid pixel come from next field after MTN. (0 << 5) | // hist check enable (0 << 6) | // hist check not use chan2. (0 << 7) | // hist check use data before noise reduction. 
(0 << 8) | // chan 2 enable for 2:2 pull down check. (0 << 9) | // line buffer 2 enable (0 << 10) | // pre drop first. (0 << 11) | // pre repeat. (1 << 12) | // pre viu link (hold_line << 16) | // pre hold line number (0 << 29) | // pre field number. (0x3 << 30) // pre soft rst, pre frame rst. ); WRITE_MPEG_REG(DI_POST_CTRL, (0 << 0) | // line buffer 0 enable (0 << 1) | // line buffer 1 enable (0 << 2) | // ei enable (0 << 3) | // mtn line buffer enable (0 << 4) | // mtnp read mif enable (0 << 5) | // di blend enable. (0 << 6) | // di mux output enable (0 << 7) | // di write to SDRAM enable. (1 << 8) | // di to VPP enable. (0 << 9) | // mif0 to VPP enable. (0 << 10) | // post drop first. (0 << 11) | // post repeat. (1 << 12) | // post viu link (1 << 13) | // prepost_link (hold_line << 16) | // post hold line number (0 << 29) | // post field number. (0x3 << 30) // post soft rst post frame rst. ); WRITE_MPEG_REG(DI_MC_22LVL0, (READ_MPEG_REG(DI_MC_22LVL0) & 0xffff0000) | 256); // field 22 level WRITE_MPEG_REG(DI_MC_32LVL0, (READ_MPEG_REG(DI_MC_32LVL0) & 0xffffff00) | 16); // field 32 level // set hold line for all ddr req interface. WRITE_MPEG_REG(DI_INP_GEN_REG, (hold_line << 19)); WRITE_MPEG_REG(DI_MEM_GEN_REG, (hold_line << 19)); WRITE_MPEG_REG(VD1_IF0_GEN_REG, (hold_line << 19)); WRITE_MPEG_REG(DI_IF1_GEN_REG, (hold_line << 19)); WRITE_MPEG_REG(DI_CHAN2_GEN_REG, (hold_line << 19)); } void initial_di_pre(int hsize_pre, int vsize_pre, int hold_line) { WRITE_MPEG_REG(DI_PRE_SIZE, (hsize_pre - 1) | ((vsize_pre - 1) << 16)); WRITE_MPEG_REG(DI_PRE_CTRL, 0 | // NR enable (0 << 1) | // MTN_EN (0 << 2) | // check 3:2 pulldown (0 << 3) | // check 2:2 pulldown (0 << 4) | // 2:2 check mid pixel come from next field after MTN. (0 << 5) | // hist check enable (0 << 6) | // hist check not use chan2. (0 << 7) | // hist check use data before noise reduction. (0 << 8) | // chan 2 enable for 2:2 pull down check. (0 << 9) | // line buffer 2 enable (0 << 10) | // pre drop first. (0 << 11) | // pre repeat. (0 << 12) | // pre viu link (hold_line << 16) | // pre hold line number (0 << 29) | // pre field number. (0x3 << 30) // pre soft rst, pre frame rst. ); WRITE_MPEG_REG(DI_MC_22LVL0, (READ_MPEG_REG(DI_MC_22LVL0) & 0xffff0000) | 256); // field 22 level WRITE_MPEG_REG(DI_MC_32LVL0, (READ_MPEG_REG(DI_MC_32LVL0) & 0xffffff00) | 16); // field 32 level } void initial_di_post(int hsize_post, int vsize_post, int hold_line) { WRITE_MPEG_REG(DI_POST_SIZE, (hsize_post - 1) | ((vsize_post - 1) << 16)); WRITE_MPEG_REG(DI_BLEND_CTRL, (0x2 << 20) | // top mode. EI only 25); // KDEINT WRITE_MPEG_REG(DI_EI_CTRL0, (255 << 16) | // ei_filter. (5 << 8) | // ei_threshold. (1 << 2) | // ei bypass cf2. (1 << 1)); // ei bypass far1 WRITE_MPEG_REG(DI_EI_CTRL1, (180 << 24) | // ei diff (10 << 16) | // ei ang45 (15 << 8) | // ei peak. 45); // ei cross. WRITE_MPEG_REG(DI_EI_CTRL2, (10 << 23) | // close2 (10 << 16) | // close1 (10 << 8) | // far2 10); // far1 WRITE_MPEG_REG(DI_POST_CTRL, (0 << 0) | // line buffer 0 enable (0 << 1) | // line buffer 1 enable (0 << 2) | // ei enable (0 << 3) | // mtn line buffer enable (0 << 4) | // mtnp read mif enable (0 << 5) | // di blend enable. (0 << 6) | // di mux output enable (0 << 7) | // di write to SDRAM enable. (1 << 8) | // di to VPP enable. (0 << 9) | // mif0 to VPP enable. (0 << 10) | // post drop first. (0 << 11) | // post repeat. (1 << 12) | // post viu link (hold_line << 16) | // post hold line number (0 << 29) | // post field number. (0x3 << 30) // post soft rst post frame rst. 
); } void enable_di_mode_check(int win0_start_x, int win0_end_x, int win0_start_y, int win0_end_y, int win1_start_x, int win1_end_x, int win1_start_y, int win1_end_y, int win2_start_x, int win2_end_x, int win2_start_y, int win2_end_y, int win3_start_x, int win3_end_x, int win3_start_y, int win3_end_y, int win4_start_x, int win4_end_x, int win4_start_y, int win4_end_y, int win0_32lvl, int win1_32lvl, int win2_32lvl, int win3_32lvl, int win4_32lvl, int win0_22lvl, int win1_22lvl, int win2_22lvl, int win3_22lvl, int win4_22lvl, int field_32lvl, int field_22lvl) { WRITE_MPEG_REG(DI_MC_REG0_X, (win0_start_x << 16) | // start_x win0_end_x); // end_x WRITE_MPEG_REG(DI_MC_REG0_Y, (win0_start_y << 16) | // start_y win0_end_y); // end_y WRITE_MPEG_REG(DI_MC_REG1_X, (win1_start_x << 16) | // start_x win1_end_x); // end_x WRITE_MPEG_REG(DI_MC_REG1_Y, (win1_start_y << 16) | // start_y win1_end_y); // end_y WRITE_MPEG_REG(DI_MC_REG2_X, (win2_start_x << 16) | // start_x win2_end_x); // end_x WRITE_MPEG_REG(DI_MC_REG2_Y, (win2_start_y << 16) | // start_y win2_end_y); // end_y WRITE_MPEG_REG(DI_MC_REG3_X, (win3_start_x << 16) | // start_x win3_end_x); // end_x WRITE_MPEG_REG(DI_MC_REG3_Y, (win3_start_y << 16) | // start_y win3_end_y); // end_y WRITE_MPEG_REG(DI_MC_REG4_X, (win4_start_x << 16) | // start_x win4_end_x); // end_x WRITE_MPEG_REG(DI_MC_REG4_Y, (win4_start_y << 16) | // start_y win4_end_y); // end_y WRITE_MPEG_REG(DI_MC_32LVL1, win3_32lvl | //region 3 (win4_32lvl << 8)); //region 4 WRITE_MPEG_REG(DI_MC_32LVL0, field_32lvl | //field 32 level (win0_32lvl << 8) | //region 0 (win1_32lvl << 16) | //region 1 (win2_32lvl << 24)); //region 2. WRITE_MPEG_REG(DI_MC_22LVL0, field_22lvl | // field 22 level (win0_22lvl << 16)); // region 0. WRITE_MPEG_REG(DI_MC_22LVL1, win1_22lvl | // region 1 (win2_22lvl << 16)); // region 2. WRITE_MPEG_REG(DI_MC_22LVL2, win3_22lvl | // region 3 (win4_22lvl << 16)); // region 4. WRITE_MPEG_REG(DI_MC_CTRL, 0x1f); // enable region level // handle all cases of prepost link. 
void enable_di_prepost_full( DI_MIF_t *di_inp_mif, DI_MIF_t *di_mem_mif, DI_MIF_t *di_buf0_mif, DI_MIF_t *di_buf1_mif, DI_MIF_t *di_chan2_mif, DI_SIM_MIF_t *di_nrwr_mif, DI_SIM_MIF_t *di_diwr_mif, DI_SIM_MIF_t *di_mtnwr_mif, DI_SIM_MIF_t *di_mtncrd_mif, DI_SIM_MIF_t *di_mtnprd_mif, int nr_en, int mtn_en, int pd32_check_en, int pd22_check_en, int hist_check_en, int ei_en, int blend_en, int blend_mtn_en, int blend_mode, int di_vpp_en, int di_ddr_en, #if defined(CONFIG_ARCH_MESON) #elif defined(CONFIG_ARCH_MESON2) int nr_hfilt_en, int nr_hfilt_mb_en, int mtn_modify_en, int blend_mtn_filt_en, int blend_data_filt_en, int post_mb_en, #endif int post_field_num, int pre_field_num, int prepost_link, int hold_line) { int hist_check_only; int ei_only; int buf1_en; #if defined(CONFIG_ARCH_MESON2) int nr_zone_0, nr_zone_1, nr_zone_2; if (noise_reduction_level == 0) { nr_zone_0 = 1; nr_zone_1 = 3; nr_zone_2 = 5; } else { nr_zone_0 = 3; nr_zone_1 = 6; nr_zone_2 = 10; } #endif hist_check_only = hist_check_en && !nr_en && !mtn_en && !pd22_check_en && !pd32_check_en; ei_only = ei_en && !blend_en && (di_vpp_en || di_ddr_en); #if defined(CONFIG_ARCH_MESON) buf1_en = (!prepost_link && !ei_only && (di_ddr_en || di_vpp_en)); #elif defined(CONFIG_ARCH_MESON2) if (ei_only) { buf1_en = 0; } else { buf1_en = 1; } #endif if (nr_en | mtn_en | pd22_check_en || pd32_check_en) { set_di_inp_mif(di_inp_mif, di_vpp_en && prepost_link , hold_line); set_di_mem_mif(di_mem_mif, di_vpp_en && prepost_link, hold_line); } if (pd22_check_en || hist_check_only) { set_di_chan2_mif(di_chan2_mif, di_vpp_en && prepost_link, hold_line); } #if defined(CONFIG_ARCH_MESON) if (ei_en || di_vpp_en || di_ddr_en) { set_di_if0_mif(di_buf0_mif, di_vpp_en, hold_line); } if (!prepost_link && !ei_only && (di_ddr_en || di_vpp_en)) { set_di_if1_mif(di_buf1_mif, di_vpp_en, hold_line); } #elif defined(CONFIG_ARCH_MESON2) if (prepost_link && !ei_only && (di_ddr_en || di_vpp_en)) { set_di_if1_mif(di_buf1_mif, di_vpp_en, hold_line); } else if (!prepost_link && (ei_en || di_vpp_en || di_ddr_en)) { set_di_if0_mif(di_buf0_mif, di_vpp_en, hold_line); set_di_if1_mif(di_buf1_mif, di_vpp_en, hold_line); } #endif // set nr wr mif interface. if (nr_en) { WRITE_MPEG_REG(DI_NRWR_X, (di_nrwr_mif->start_x << 16) | (di_nrwr_mif->end_x)); // start_x 0 end_x 719. WRITE_MPEG_REG(DI_NRWR_Y, (di_nrwr_mif->start_y << 16) | (di_nrwr_mif->end_y)); // start_y 0 end_y 239. WRITE_MPEG_REG(DI_NRWR_CTRL, di_nrwr_mif->canvas_num | // canvas index. ((prepost_link && di_vpp_en) << 8)); // urgent. #if defined(CONFIG_ARCH_MESON) #elif defined(CONFIG_ARCH_MESON2) WRITE_MPEG_REG(DI_NR_CTRL0, (1 << 31) | // nr yuv enable. (1 << 30) | // nr range. 3 point (0 << 29) | // max of 3 point. (nr_hfilt_en << 28) | // nr hfilter enable. (nr_hfilt_mb_en << 27) | // nr hfilter motion_blur enable. (nr_zone_2 << 16) | // zone 2 (nr_zone_1 << 8) | // zone 1 (nr_zone_0 << 0)); // zone 0 WRITE_MPEG_REG(DI_NR_CTRL2, (10 << 24) | //intra noise level (1 << 16) | // intra no noise level. (10 << 8) | // inter noise level. (1 << 0)); // inter no noise level. WRITE_MPEG_REG(DI_NR_CTRL3, (16 << 16) | // if any one of 3 point mtn larger than 16 don't use 3 point. 720); // if one line eq cnt is larger than this number, this line is not counted. #endif } // motion wr mif. if (mtn_en) { WRITE_MPEG_REG(DI_MTNWR_X, (di_mtnwr_mif->start_x << 16) | (di_mtnwr_mif->end_x)); // start_x 0 end_x 719. WRITE_MPEG_REG(DI_MTNWR_Y, (di_mtnwr_mif->start_y << 16) | (di_mtnwr_mif->end_y)); // start_y 0 end_y 239. 
WRITE_MPEG_REG(DI_MTNWR_CTRL, di_mtnwr_mif->canvas_num | // canvas index. ((prepost_link && di_vpp_en) << 8)); // urgent. #if defined(CONFIG_ARCH_MESON) #elif defined(CONFIG_ARCH_MESON2) WRITE_MPEG_REG(DI_MTN_CTRL, (1 << 31) | // lpf enable. (1 << 30) | // mtn uv enable. (mtn_modify_en << 29) | // no mtn modify. (2 << 24) | // char diff count. (40 << 16) | // black level. (196 << 8) | // white level. (64 << 0)); // char diff level. WRITE_MPEG_REG(DI_MTN_CTRL1, (3 << 8) | // mtn shift if mtn modify_en 0); // mtn reduce before shift. #endif } // motion for current display field. #if defined(CONFIG_ARCH_MESON) if (blend_mtn_en) { WRITE_MPEG_REG(DI_MTNCRD_X, (di_mtncrd_mif->start_x << 16) | (di_mtncrd_mif->end_x)); // start_x 0 end_x 719. WRITE_MPEG_REG(DI_MTNCRD_Y, (di_mtncrd_mif->start_y << 16) | (di_mtncrd_mif->end_y)); // start_y 0 end_y 239. if (!prepost_link) { WRITE_MPEG_REG(DI_MTNRD_CTRL, (di_mtnprd_mif->canvas_num << 8) | //mtnp canvas index. (0 << 16) | // urgent di_mtncrd_mif->canvas_num); // current field mtn canvas index. } else { WRITE_MPEG_REG(DI_MTNRD_CTRL, (0 << 8) | //mtnp canvas index. ((prepost_link && di_vpp_en) << 16) | // urgent di_mtncrd_mif->canvas_num); // current field mtn canvas index. } } if (blend_mtn_en && !prepost_link) { WRITE_MPEG_REG(DI_MTNPRD_X, (di_mtnprd_mif->start_x << 16) | (di_mtnprd_mif->end_x)); // start_x 0 end_x 719. WRITE_MPEG_REG(DI_MTNPRD_Y, (di_mtnprd_mif->start_y << 16) | (di_mtnprd_mif->end_y)); // start_y 0 end_y 239. } #elif defined(CONFIG_ARCH_MESON2) if (blend_mtn_en) { WRITE_MPEG_REG(DI_MTNCRD_X, (di_mtncrd_mif->start_x << 16) | (di_mtncrd_mif->end_x)); // start_x 0 end_x 719. WRITE_MPEG_REG(DI_MTNCRD_Y, (di_mtncrd_mif->start_y << 16) | (di_mtncrd_mif->end_y)); // start_y 0 end_y 239. WRITE_MPEG_REG(DI_MTNPRD_X, (di_mtnprd_mif->start_x << 16) | (di_mtnprd_mif->end_x)); // start_x 0 end_x 719. WRITE_MPEG_REG(DI_MTNPRD_Y, (di_mtnprd_mif->start_y << 16) | (di_mtnprd_mif->end_y)); // start_y 0 end_y 239. WRITE_MPEG_REG(DI_MTNRD_CTRL, (di_mtnprd_mif->canvas_num << 8) | //mtnp canvas index. ((prepost_link && di_vpp_en) << 16) | // urgent di_mtncrd_mif->canvas_num); // current field mtn canvas index. } #endif if (di_ddr_en) { WRITE_MPEG_REG(DI_DIWR_X, (di_diwr_mif->start_x << 16) | (di_diwr_mif->end_x)); // start_x 0 end_x 719. WRITE_MPEG_REG(DI_DIWR_Y, (di_diwr_mif->start_y << 16) | (di_diwr_mif->end_y * 2 + 1)); // start_y 0 end_y 479. WRITE_MPEG_REG(DI_DIWR_CTRL, di_diwr_mif->canvas_num | // canvas index. (di_vpp_en << 8)); // urgent. } #if defined(CONFIG_ARCH_MESON) WRITE_MPEG_REG(DI_PRE_CTRL, nr_en | // NR enable (mtn_en << 1) | // MTN_EN (pd32_check_en << 2) | // check 3:2 pulldown (pd22_check_en << 3) | // check 2:2 pulldown (1 << 4) | // 2:2 check mid pixel come from next field after MTN. (hist_check_en << 5) | // hist check enable (0 << 6) | // hist check not use chan2. ((!nr_en) << 7) | // hist check use data before noise reduction. (pd22_check_en << 8) | // chan 2 enable for 2:2 pull down check. (pd22_check_en << 9) | // line buffer 2 enable (0 << 10) | // pre drop first. (0 << 11) | // pre repeat. (di_vpp_en << 12) | // pre viu link (hold_line << 16) | // pre hold line number (pre_field_num << 29) | // pre field number. (0x1 << 30) // pre soft rst, pre frame rst. 
); WRITE_MPEG_REG(DI_POST_CTRL, ((ei_en || di_vpp_en || di_ddr_en) << 0) | // line buffer 0 enable (buf1_en << 1) | // line buffer 1 enable (ei_en << 2) | // ei enable (blend_mtn_en << 3) | // mtn line buffer enable ((blend_mtn_en && !prepost_link) << 4) | // mtnp read mif enable (blend_en << 5) | // di blend enble. (1 << 6) | // di mux output enable (di_ddr_en << 7) | // di write to SDRAM enable. (di_vpp_en << 8) | // di to VPP enable. (0 << 9) | // mif0 to VPP enable. (0 << 10) | // post drop first. (0 << 11) | // post repeat. (1 << 12) | // post viu link (prepost_link << 13) | (hold_line << 16) | // post hold line number (post_field_num << 29) | // post field number. (0x1 << 30) // post soft rst post frame rst. ); #elif defined(CONFIG_ARCH_MESON2) WRITE_MPEG_REG(DI_PRE_CTRL, nr_en | // NR enable (mtn_en << 1) | // MTN_EN (pd32_check_en << 2) | // check 3:2 pulldown (pd22_check_en << 3) | // check 2:2 pulldown (nr_en << 4) | // 2:2 check mid pixel come from next field after MTN. (hist_check_en << 5) | // hist check enable (1 << 6) | // hist check not use chan2. ((!nr_en) << 7) | // hist check use data before noise reduction. (pd22_check_en << 8) | // chan 2 enable for 2:2 pull down check. (pd22_check_en << 9) | // line buffer 2 enable (0 << 10) | // pre drop first. (0 << 11) | // pre repeat. (di_vpp_en << 12) | // pre viu link (hold_line << 16) | // pre hold line number (nr_en << 22) | // MTN after NR. (pre_field_num << 29) | // pre field number. (0x1 << 30) // pre soft rst, pre frame rst. ); WRITE_MPEG_REG(DI_POST_CTRL, ((ei_en || blend_en) << 0) | // line buffer 0 enable (buf1_en << 1) | // line buffer 1 enable (ei_en << 2) | // ei enable (blend_mtn_en << 3) | // mtn line buffer enable (blend_mtn_en << 4) | // mtnp read mif enable (blend_en << 5) | // di blend enble. (1 << 6) | // di mux output enable (di_ddr_en << 7) | // di write to SDRAM enable. (di_vpp_en << 8) | // di to VPP enable. (0 << 9) | // mif0 to VPP enable. (0 << 10) | // post drop first. (0 << 11) | // post repeat. (di_vpp_en << 12) | // post viu link (prepost_link << 13) | (hold_line << 16) | // post hold line number (post_field_num << 29) | // post field number. (0x1 << 30) // post soft rst post frame rst. ); #endif if (ei_only == 0) { #if defined(CONFIG_ARCH_MESON) WRITE_MPEG_REG(DI_BLEND_CTRL, (READ_MPEG_REG(DI_BLEND_CTRL) & (~((1 << 25) | (3 << 20)))) | // clean some bit we need to set. (blend_mtn_en << 26) | // blend mtn enable. (0 << 25) | // blend with the mtn of the pre display field and next display field. (1 << 24) | // blend with pre display field. (blend_mode << 20) // motion adaptive blend. ); #elif defined(CONFIG_ARCH_MESON2) WRITE_MPEG_REG(DI_BLEND_CTRL, (post_mb_en << 28) | // post motion blur enable. (0 << 27) | // mtn3p(l, c, r) max. (0 << 26) | // mtn3p(l, c, r) min. (0 << 25) | // mtn3p(l, c, r) ave. (1 << 24) | // mtntopbot max (blend_mtn_filt_en << 23) | // blend mtn filter enable. (blend_data_filt_en << 22) | // blend data filter enable. (blend_mode << 20) | // motion adaptive blend. 25 // kdeint. ); WRITE_MPEG_REG(DI_BLEND_CTRL1, (196 << 24) | // char level (64 << 16) | // angle thredhold. (40 << 8) | // all_af filt thd. (64)); // all 4 equal WRITE_MPEG_REG(DI_BLEND_CTRL2, (4 << 8) | // mtn no mov level. (48)); //black level. 
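        /*
         * For reference, simple arithmetic on the constants above:
         * DI_BLEND_CTRL1 is written with (196 << 24) | (64 << 16) |
         * (40 << 8) | 64 = 0xC4402840, and DI_BLEND_CTRL2 with
         * (4 << 8) | 48 = 0x00000430.
         */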
#endif } } int di_mode_check(int cur_field) { int i; WRITE_MPEG_REG(DI_INFO_ADDR, 0); #if defined(CONFIG_ARCH_MESON) for (i = 0; i <= 76; i++) #elif defined(CONFIG_ARCH_MESON2) for (i = 0; i <= 82; i++) #endif { di_info[cur_field][i] = READ_MPEG_REG(DI_INFO_DATA); } WRITE_MPEG_REG(DI_PRE_CTRL, READ_MPEG_REG(DI_PRE_CTRL) | (0x1 << 30)); // pre soft rst WRITE_MPEG_REG(DI_POST_CTRL, READ_MPEG_REG(DI_POST_CTRL) | (0x1 << 30)); // post soft rst return (0); } void set_di_inp_fmt_more(int hfmt_en, int hz_yc_ratio, //2bit int hz_ini_phase, //4bit int vfmt_en, int vt_yc_ratio, //2bit int vt_ini_phase, //4bit int y_length, int c_length, int hz_rpt //1bit ) { int repeat_l0_en = 1, nrpt_phase0_en = 0; int vt_phase_step = (16 >> vt_yc_ratio); WRITE_MPEG_REG(DI_INP_FMT_CTRL, (hz_rpt << 28) | //hz rpt pixel (hz_ini_phase << 24) | //hz ini phase (0 << 23) | //repeat p0 enable (hz_yc_ratio << 21) | //hz yc ratio (hfmt_en << 20) | //hz enable (nrpt_phase0_en << 17) | //nrpt_phase0 enable (repeat_l0_en << 16) | //repeat l0 enable (0 << 12) | //skip line num (vt_ini_phase << 8) | //vt ini phase (vt_phase_step << 1) | //vt phase step (3.4) (vfmt_en << 0) //vt enable ); WRITE_MPEG_REG(DI_INP_FMT_W, (y_length << 16) | //hz format width (c_length << 0) //vt format width ); } void set_di_inp_mif(DI_MIF_t *mif, int urgent, int hold_line) { unsigned long bytes_per_pixel; unsigned long demux_mode; unsigned long chro_rpt_lastl_ctrl; unsigned long luma0_rpt_loop_start; unsigned long luma0_rpt_loop_end; unsigned long luma0_rpt_loop_pat; unsigned long chroma0_rpt_loop_start; unsigned long chroma0_rpt_loop_end; unsigned long chroma0_rpt_loop_pat; unsigned long vt_ini_phase = 0; if (mif->set_separate_en == 1 && mif->src_field_mode == 1) { chro_rpt_lastl_ctrl = 1; luma0_rpt_loop_start = 1; luma0_rpt_loop_end = 1; chroma0_rpt_loop_start = 1; chroma0_rpt_loop_end = 1; luma0_rpt_loop_pat = 0x80; chroma0_rpt_loop_pat = 0x80; if (mif->output_field_num == 0) { vt_ini_phase = 0xe; } else { vt_ini_phase = 0xa; } } else if (mif->set_separate_en == 1 && mif->src_field_mode == 0) { chro_rpt_lastl_ctrl = 1; luma0_rpt_loop_start = 0; luma0_rpt_loop_end = 0; chroma0_rpt_loop_start = 0; chroma0_rpt_loop_end = 0; luma0_rpt_loop_pat = 0x0; chroma0_rpt_loop_pat = 0x0; } else if (mif->set_separate_en == 0 && mif->src_field_mode == 1) { chro_rpt_lastl_ctrl = 1; luma0_rpt_loop_start = 1; luma0_rpt_loop_end = 1; chroma0_rpt_loop_start = 0; chroma0_rpt_loop_end = 0; luma0_rpt_loop_pat = 0x80; chroma0_rpt_loop_pat = 0x00; } else { chro_rpt_lastl_ctrl = 0; luma0_rpt_loop_start = 0; luma0_rpt_loop_end = 0; chroma0_rpt_loop_start = 0; chroma0_rpt_loop_end = 0; luma0_rpt_loop_pat = 0x00; chroma0_rpt_loop_pat = 0x00; } bytes_per_pixel = mif->set_separate_en ? 0 : (mif->video_mode ? 2 : 1); demux_mode = mif->video_mode; // ---------------------- // General register // ---------------------- WRITE_MPEG_REG(DI_INP_GEN_REG, (urgent << 28) | // chroma urgent bit (urgent << 27) | // luma urgent bit. (1 << 25) | // no dummy data. 
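/*
 * Editorial note: the fields below (hold lines, dummy-pixel push, demux
 * mode, burst sizes, enable) recur with the same layout in the DI_MEM,
 * DI_IF1, DI_CHAN2 and VD1_IF0 general registers programmed later in this
 * file; only the burst sizes, urgent bits and enables differ per instance.
 */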
(hold_line << 19) | // hold lines (1 << 18) | // push dummy pixel (demux_mode << 16) | // demux_mode (bytes_per_pixel << 14) | (mif->burst_size_cr << 12) | (mif->burst_size_cb << 10) | (mif->burst_size_y << 8) | (chro_rpt_lastl_ctrl << 6) | (mif->set_separate_en << 1) | (1 << 0) // cntl_enable ); // ---------------------- // Canvas // ---------------------- WRITE_MPEG_REG(DI_INP_CANVAS0, (mif->canvas0_addr2 << 16) | // cntl_canvas0_addr2 (mif->canvas0_addr1 << 8) | // cntl_canvas0_addr1 (mif->canvas0_addr0 << 0) // cntl_canvas0_addr0 ); // ---------------------- // Picture 0 X/Y start,end // ---------------------- WRITE_MPEG_REG(DI_INP_LUMA_X0, (mif->luma_x_end0 << 16) | // cntl_luma_x_end0 (mif->luma_x_start0 << 0) // cntl_luma_x_start0 ); WRITE_MPEG_REG(DI_INP_LUMA_Y0, (mif->luma_y_end0 << 16) | // cntl_luma_y_end0 (mif->luma_y_start0 << 0) // cntl_luma_y_start0 ); WRITE_MPEG_REG(DI_INP_CHROMA_X0, (mif->chroma_x_end0 << 16) | (mif->chroma_x_start0 << 0) ); WRITE_MPEG_REG(DI_INP_CHROMA_Y0, (mif->chroma_y_end0 << 16) | (mif->chroma_y_start0 << 0) ); // ---------------------- // Repeat or skip // ---------------------- WRITE_MPEG_REG(DI_INP_RPT_LOOP, (0 << 28) | (0 << 24) | (0 << 20) | (0 << 16) | (chroma0_rpt_loop_start << 12) | (chroma0_rpt_loop_end << 8) | (luma0_rpt_loop_start << 4) | (luma0_rpt_loop_end << 0) ) ; WRITE_MPEG_REG(DI_INP_LUMA0_RPT_PAT, luma0_rpt_loop_pat); WRITE_MPEG_REG(DI_INP_CHROMA0_RPT_PAT, chroma0_rpt_loop_pat); // Dummy pixel value WRITE_MPEG_REG(DI_INP_DUMMY_PIXEL, 0x00808000); if ((mif->set_separate_en == 1)) { // 4:2:0 block mode. set_di_inp_fmt_more( 1, // hfmt_en 1, // hz_yc_ratio 0, // hz_ini_phase 1, // vfmt_en 1, // vt_yc_ratio vt_ini_phase, // vt_ini_phase mif->luma_x_end0 - mif->luma_x_start0 + 1, // y_length mif->chroma_x_end0 - mif->chroma_x_start0 + 1 , // c length 0); // hz repeat. } else { set_di_inp_fmt_more( 1, // hfmt_en 1, // hz_yc_ratio 0, // hz_ini_phase 0, // vfmt_en 0, // vt_yc_ratio 0, // vt_ini_phase mif->luma_x_end0 - mif->luma_x_start0 + 1, // y_length ((mif->luma_x_end0 >> 1) - (mif->luma_x_start0 >> 1) + 1), // c length 0); // hz repeat. 
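    /*
     * Worked example of the format-conversion setup above: in the 4:2:0
     * branch vt_yc_ratio = 1, so set_di_inp_fmt_more() computes
     * vt_phase_step = 16 >> 1 = 8.  With 4 fractional bits (the "3.4"
     * format noted in that function) 8 means the chroma read phase
     * advances 0.5 lines per output line, i.e. a 2x vertical upsample.
     * The 4:2:2 branch disables vertical filtering entirely (vfmt_en = 0).
     */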
} } void set_di_mem_fmt_more(int hfmt_en, int hz_yc_ratio, //2bit int hz_ini_phase, //4bit int vfmt_en, int vt_yc_ratio, //2bit int vt_ini_phase, //4bit int y_length, int c_length, int hz_rpt //1bit ) { int vt_phase_step = (16 >> vt_yc_ratio); WRITE_MPEG_REG(DI_MEM_FMT_CTRL, (hz_rpt << 28) | //hz rpt pixel (hz_ini_phase << 24) | //hz ini phase (0 << 23) | //repeat p0 enable (hz_yc_ratio << 21) | //hz yc ratio (hfmt_en << 20) | //hz enable (1 << 17) | //nrpt_phase0 enable (0 << 16) | //repeat l0 enable (0 << 12) | //skip line num (vt_ini_phase << 8) | //vt ini phase (vt_phase_step << 1) | //vt phase step (3.4) (vfmt_en << 0) //vt enable ); WRITE_MPEG_REG(DI_MEM_FMT_W, (y_length << 16) | //hz format width (c_length << 0) //vt format width ); } void set_di_mem_mif(DI_MIF_t *mif, int urgent, int hold_line) { unsigned long bytes_per_pixel; unsigned long demux_mode; unsigned long chro_rpt_lastl_ctrl; unsigned long luma0_rpt_loop_start; unsigned long luma0_rpt_loop_end; unsigned long luma0_rpt_loop_pat; unsigned long chroma0_rpt_loop_start; unsigned long chroma0_rpt_loop_end; unsigned long chroma0_rpt_loop_pat; if (mif->set_separate_en == 1 && mif->src_field_mode == 1) { chro_rpt_lastl_ctrl = 1; luma0_rpt_loop_start = 1; luma0_rpt_loop_end = 1; chroma0_rpt_loop_start = 1; chroma0_rpt_loop_end = 1; luma0_rpt_loop_pat = 0x80; chroma0_rpt_loop_pat = 0x80; } else if (mif->set_separate_en == 1 && mif->src_field_mode == 0) { chro_rpt_lastl_ctrl = 1; luma0_rpt_loop_start = 0; luma0_rpt_loop_end = 0; chroma0_rpt_loop_start = 0; chroma0_rpt_loop_end = 0; luma0_rpt_loop_pat = 0x0; chroma0_rpt_loop_pat = 0x0; } else if (mif->set_separate_en == 0 && mif->src_field_mode == 1) { chro_rpt_lastl_ctrl = 1; luma0_rpt_loop_start = 1; luma0_rpt_loop_end = 1; chroma0_rpt_loop_start = 0; chroma0_rpt_loop_end = 0; luma0_rpt_loop_pat = 0x80; chroma0_rpt_loop_pat = 0x00; } else { chro_rpt_lastl_ctrl = 0; luma0_rpt_loop_start = 0; luma0_rpt_loop_end = 0; chroma0_rpt_loop_start = 0; chroma0_rpt_loop_end = 0; luma0_rpt_loop_pat = 0x00; chroma0_rpt_loop_pat = 0x00; } bytes_per_pixel = mif->set_separate_en ? 0 : (mif->video_mode ? 2 : 1); demux_mode = mif->video_mode; // ---------------------- // General register // ---------------------- WRITE_MPEG_REG(DI_MEM_GEN_REG, (urgent << 28) | // urgent bit. (urgent << 27) | // urgent bit. (1 << 25) | // no dummy data. 
(hold_line << 19) | // hold lines (1 << 18) | // push dummy pixel (demux_mode << 16) | // demux_mode (bytes_per_pixel << 14) | (mif->burst_size_cr << 12) | (mif->burst_size_cb << 10) | (mif->burst_size_y << 8) | (chro_rpt_lastl_ctrl << 6) | (mif->set_separate_en << 1) | (1 << 0) // cntl_enable ); // ---------------------- // Canvas // ---------------------- WRITE_MPEG_REG(DI_MEM_CANVAS0, (mif->canvas0_addr2 << 16) | // cntl_canvas0_addr2 (mif->canvas0_addr1 << 8) | // cntl_canvas0_addr1 (mif->canvas0_addr0 << 0) // cntl_canvas0_addr0 ); // ---------------------- // Picture 0 X/Y start,end // ---------------------- WRITE_MPEG_REG(DI_MEM_LUMA_X0, (mif->luma_x_end0 << 16) | // cntl_luma_x_end0 (mif->luma_x_start0 << 0) // cntl_luma_x_start0 ); WRITE_MPEG_REG(DI_MEM_LUMA_Y0, (mif->luma_y_end0 << 16) | // cntl_luma_y_end0 (mif->luma_y_start0 << 0) // cntl_luma_y_start0 ); WRITE_MPEG_REG(DI_MEM_CHROMA_X0, (mif->chroma_x_end0 << 16) | (mif->chroma_x_start0 << 0) ); WRITE_MPEG_REG(DI_MEM_CHROMA_Y0, (mif->chroma_y_end0 << 16) | (mif->chroma_y_start0 << 0) ); // ---------------------- // Repeat or skip // ---------------------- WRITE_MPEG_REG(DI_MEM_RPT_LOOP, (0 << 28) | (0 << 24) | (0 << 20) | (0 << 16) | (chroma0_rpt_loop_start << 12) | (chroma0_rpt_loop_end << 8) | (luma0_rpt_loop_start << 4) | (luma0_rpt_loop_end << 0) ) ; WRITE_MPEG_REG(DI_MEM_LUMA0_RPT_PAT, luma0_rpt_loop_pat); WRITE_MPEG_REG(DI_MEM_CHROMA0_RPT_PAT, chroma0_rpt_loop_pat); // Dummy pixel value WRITE_MPEG_REG(DI_MEM_DUMMY_PIXEL, 0x00808000); if ((mif->set_separate_en == 1)) { // 4:2:0 block mode. set_di_mem_fmt_more( 1, // hfmt_en 1, // hz_yc_ratio 0, // hz_ini_phase 1, // vfmt_en 1, // vt_yc_ratio 0, // vt_ini_phase mif->luma_x_end0 - mif->luma_x_start0 + 1, // y_length mif->chroma_x_end0 - mif->chroma_x_start0 + 1, // c length 0); // hz repeat. } else { set_di_mem_fmt_more( 1, // hfmt_en 1, // hz_yc_ratio 0, // hz_ini_phase 0, // vfmt_en 0, // vt_yc_ratio 0, // vt_ini_phase mif->luma_x_end0 - mif->luma_x_start0 + 1, // y_length ((mif->luma_x_end0 >> 1) - (mif->luma_x_start0 >> 1) + 1), // c length 0); // hz repeat. 
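    /*
     * Note: this function mirrors set_di_inp_mif() except that
     * vt_ini_phase is always 0 here, whereas the input MIF selects 0xe or
     * 0xa from output_field_num, and the repeat-line-0 / no-repeat-phase0
     * bits appear inverted relative to the input path -- presumably
     * because the mem channel re-reads data that is already field-aligned.
     */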
} } void set_di_if1_fmt_more(int hfmt_en, int hz_yc_ratio, //2bit int hz_ini_phase, //4bit int vfmt_en, int vt_yc_ratio, //2bit int vt_ini_phase, //4bit int y_length, int c_length, int hz_rpt //1bit ) { int vt_phase_step = (16 >> vt_yc_ratio); WRITE_MPEG_REG(DI_IF1_FMT_CTRL, (hz_rpt << 28) | //hz rpt pixel (hz_ini_phase << 24) | //hz ini phase (0 << 23) | //repeat p0 enable (hz_yc_ratio << 21) | //hz yc ratio (hfmt_en << 20) | //hz enable (1 << 17) | //nrpt_phase0 enable (0 << 16) | //repeat l0 enable (0 << 12) | //skip line num (vt_ini_phase << 8) | //vt ini phase (vt_phase_step << 1) | //vt phase step (3.4) (vfmt_en << 0) //vt enable ); WRITE_MPEG_REG(DI_IF1_FMT_W, (y_length << 16) | //hz format width (c_length << 0) //vt format width ); } void set_di_if1_mif(DI_MIF_t *mif, int urgent, int hold_line) { unsigned long bytes_per_pixel; unsigned long demux_mode; unsigned long chro_rpt_lastl_ctrl; unsigned long luma0_rpt_loop_start; unsigned long luma0_rpt_loop_end; unsigned long luma0_rpt_loop_pat; unsigned long chroma0_rpt_loop_start; unsigned long chroma0_rpt_loop_end; unsigned long chroma0_rpt_loop_pat; if (mif->set_separate_en == 1 && mif->src_field_mode == 1) { chro_rpt_lastl_ctrl = 1; luma0_rpt_loop_start = 1; luma0_rpt_loop_end = 1; chroma0_rpt_loop_start = 1; chroma0_rpt_loop_end = 1; luma0_rpt_loop_pat = 0x80; chroma0_rpt_loop_pat = 0x80; } else if (mif->set_separate_en == 1 && mif->src_field_mode == 0) { chro_rpt_lastl_ctrl = 1; luma0_rpt_loop_start = 0; luma0_rpt_loop_end = 0; chroma0_rpt_loop_start = 0; chroma0_rpt_loop_end = 0; luma0_rpt_loop_pat = 0x0; chroma0_rpt_loop_pat = 0x0; } else if (mif->set_separate_en == 0 && mif->src_field_mode == 1) { chro_rpt_lastl_ctrl = 1; luma0_rpt_loop_start = 1; luma0_rpt_loop_end = 1; chroma0_rpt_loop_start = 0; chroma0_rpt_loop_end = 0; luma0_rpt_loop_pat = 0x80; chroma0_rpt_loop_pat = 0x00; } else { chro_rpt_lastl_ctrl = 0; luma0_rpt_loop_start = 0; luma0_rpt_loop_end = 0; chroma0_rpt_loop_start = 0; chroma0_rpt_loop_end = 0; luma0_rpt_loop_pat = 0x00; chroma0_rpt_loop_pat = 0x00; } bytes_per_pixel = mif->set_separate_en ? 0 : (mif->video_mode ? 2 : 1); demux_mode = mif->video_mode; // ---------------------- // General register // ---------------------- WRITE_MPEG_REG(DI_IF1_GEN_REG, (urgent << 28) | // urgent (urgent << 27) | // luma urgent (1 << 25) | // no dummy data. 
(hold_line << 19) | // hold lines (1 << 18) | // push dummy pixel (demux_mode << 16) | // demux_mode (bytes_per_pixel << 14) | (mif->burst_size_cr << 12) | (mif->burst_size_cb << 10) | (mif->burst_size_y << 8) | (chro_rpt_lastl_ctrl << 6) | (mif->set_separate_en << 1) | (1 << 0) // cntl_enable ); // ---------------------- // Canvas // ---------------------- WRITE_MPEG_REG(DI_IF1_CANVAS0, (mif->canvas0_addr2 << 16) | // cntl_canvas0_addr2 (mif->canvas0_addr1 << 8) | // cntl_canvas0_addr1 (mif->canvas0_addr0 << 0) // cntl_canvas0_addr0 ); // ---------------------- // Picture 0 X/Y start,end // ---------------------- WRITE_MPEG_REG(DI_IF1_LUMA_X0, (mif->luma_x_end0 << 16) | // cntl_luma_x_end0 (mif->luma_x_start0 << 0) // cntl_luma_x_start0 ); WRITE_MPEG_REG(DI_IF1_LUMA_Y0, (mif->luma_y_end0 << 16) | // cntl_luma_y_end0 (mif->luma_y_start0 << 0) // cntl_luma_y_start0 ); WRITE_MPEG_REG(DI_IF1_CHROMA_X0, (mif->chroma_x_end0 << 16) | (mif->chroma_x_start0 << 0) ); WRITE_MPEG_REG(DI_IF1_CHROMA_Y0, (mif->chroma_y_end0 << 16) | (mif->chroma_y_start0 << 0) ); // ---------------------- // Repeat or skip // ---------------------- WRITE_MPEG_REG(DI_IF1_RPT_LOOP, (0 << 28) | (0 << 24) | (0 << 20) | (0 << 16) | (chroma0_rpt_loop_start << 12) | (chroma0_rpt_loop_end << 8) | (luma0_rpt_loop_start << 4) | (luma0_rpt_loop_end << 0) ) ; WRITE_MPEG_REG(DI_IF1_LUMA0_RPT_PAT, luma0_rpt_loop_pat); WRITE_MPEG_REG(DI_IF1_CHROMA0_RPT_PAT, chroma0_rpt_loop_pat); // Dummy pixel value WRITE_MPEG_REG(DI_IF1_DUMMY_PIXEL, 0x00808000); if ((mif->set_separate_en == 1)) { // 4:2:0 block mode. set_di_if1_fmt_more( 1, // hfmt_en 1, // hz_yc_ratio 0, // hz_ini_phase 1, // vfmt_en 1, // vt_yc_ratio 0, // vt_ini_phase mif->luma_x_end0 - mif->luma_x_start0 + 1, // y_length mif->chroma_x_end0 - mif->chroma_x_start0 + 1 , // c length 0); // hz repeat. } else { set_di_if1_fmt_more( 1, // hfmt_en 1, // hz_yc_ratio 0, // hz_ini_phase 0, // vfmt_en 0, // vt_yc_ratio 0, // vt_ini_phase mif->luma_x_end0 - mif->luma_x_start0 + 1, // y_length ((mif->luma_x_end0 >> 1) - (mif->luma_x_start0 >> 1) + 1), // c length 0); // hz repeat. } } void set_di_chan2_mif(DI_MIF_t *mif, int urgent, int hold_line) { unsigned long bytes_per_pixel; unsigned long demux_mode; unsigned long luma0_rpt_loop_start; unsigned long luma0_rpt_loop_end; unsigned long luma0_rpt_loop_pat; bytes_per_pixel = mif->set_separate_en ? 0 : ((mif->video_mode == 1) ? 2 : 1); demux_mode = mif->video_mode & 1; if (mif->src_field_mode == 1) { luma0_rpt_loop_start = 1; luma0_rpt_loop_end = 1; luma0_rpt_loop_pat = 0x80; } else { luma0_rpt_loop_start = 0; luma0_rpt_loop_end = 0; luma0_rpt_loop_pat = 0; } // ---------------------- // General register // ---------------------- WRITE_MPEG_REG(DI_CHAN2_GEN_REG, (urgent << 28) | // urgent (urgent << 27) | // luma urgent (1 << 25) | // no dummy data. (hold_line << 19) | // hold lines (1 << 18) | // push dummy pixel (demux_mode << 16) | // demux_mode (bytes_per_pixel << 14) | (0 << 12) | (0 << 10) | (mif->burst_size_y << 8) | ((hold_line == 0 ? 1 : 0) << 7) | //manual start. 
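                   /*
                    * chan2 is a luma-only channel: the cb/cr burst sizes
                    * above are forced to 0, DI_CHAN2_CANVAS below carries
                    * a single canvas address, and only luma window
                    * registers are programmed.
                    */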
(0 << 6) | (0 << 1) | (1 << 0) // cntl_enable ); // ---------------------- // Canvas // ---------------------- WRITE_MPEG_REG(DI_CHAN2_CANVAS, (0 << 16) | // cntl_canvas0_addr2 (0 << 8) | // cntl_canvas0_addr1 (mif->canvas0_addr0 << 0) // cntl_canvas0_addr0 ); // ---------------------- // Picture 0 X/Y start,end // ---------------------- WRITE_MPEG_REG(DI_CHAN2_LUMA_X, (mif->luma_x_end0 << 16) | // cntl_luma_x_end0 (mif->luma_x_start0 << 0) // cntl_luma_x_start0 ); WRITE_MPEG_REG(DI_CHAN2_LUMA_Y, (mif->luma_y_end0 << 16) | // cntl_luma_y_end0 (mif->luma_y_start0 << 0) // cntl_luma_y_start0 ); // ---------------------- // Repeat or skip // ---------------------- WRITE_MPEG_REG(DI_CHAN2_RPT_LOOP, (0 << 28) | (0 << 24) | (0 << 20) | (0 << 16) | (0 << 12) | (0 << 8) | (luma0_rpt_loop_start << 4) | (luma0_rpt_loop_end << 0) ); WRITE_MPEG_REG(DI_CHAN2_LUMA_RPT_PAT, luma0_rpt_loop_pat); // Dummy pixel value WRITE_MPEG_REG(DI_CHAN2_DUMMY_PIXEL, 0x00808000); } void set_di_if0_mif(DI_MIF_t *mif, int urgent, int hold_line) { unsigned long bytes_per_pixel; unsigned long demux_mode; unsigned long chro_rpt_lastl_ctrl; unsigned long luma0_rpt_loop_start; unsigned long luma0_rpt_loop_end; unsigned long luma0_rpt_loop_pat; unsigned long chroma0_rpt_loop_start; unsigned long chroma0_rpt_loop_end; unsigned long chroma0_rpt_loop_pat; if (mif->set_separate_en == 1 && mif->src_field_mode == 1) { chro_rpt_lastl_ctrl = 1; luma0_rpt_loop_start = 1; luma0_rpt_loop_end = 1; chroma0_rpt_loop_start = 1; chroma0_rpt_loop_end = 1; luma0_rpt_loop_pat = 0x80; chroma0_rpt_loop_pat = 0x80; } else if (mif->set_separate_en == 1 && mif->src_field_mode == 0) { chro_rpt_lastl_ctrl = 1; luma0_rpt_loop_start = 0; luma0_rpt_loop_end = 0; chroma0_rpt_loop_start = 0; chroma0_rpt_loop_end = 0; luma0_rpt_loop_pat = 0x0; chroma0_rpt_loop_pat = 0x0; } else if (mif->set_separate_en == 0 && mif->src_field_mode == 1) { chro_rpt_lastl_ctrl = 1; luma0_rpt_loop_start = 1; luma0_rpt_loop_end = 1; chroma0_rpt_loop_start = 0; chroma0_rpt_loop_end = 0; luma0_rpt_loop_pat = 0x80; chroma0_rpt_loop_pat = 0x00; } else { chro_rpt_lastl_ctrl = 0; luma0_rpt_loop_start = 0; luma0_rpt_loop_end = 0; chroma0_rpt_loop_start = 0; chroma0_rpt_loop_end = 0; luma0_rpt_loop_pat = 0x00; chroma0_rpt_loop_pat = 0x00; } bytes_per_pixel = mif->set_separate_en ? 0 : (mif->video_mode ? 2 : 1); demux_mode = mif->video_mode; // ---------------------- // General register // ---------------------- WRITE_MPEG_REG(VD1_IF0_GEN_REG, (urgent << 28) | // urgent (urgent << 27) | // luma urgent (1 << 25) | // no dummy data. 
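    /*
     * Note that if0 is programmed through the VD1_IF0_* registers, i.e.
     * the VPP video-layer-1 input MIF, rather than a DI-private MIF;
     * the post stage apparently borrows the display read path for buf0.
     */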
(hold_line << 19) | // hold lines (1 << 18) | // push dummy pixel (demux_mode << 16) | // demux_mode (bytes_per_pixel << 14) | (mif->burst_size_cr << 12) | (mif->burst_size_cb << 10) | (mif->burst_size_y << 8) | (chro_rpt_lastl_ctrl << 6) | (mif->set_separate_en << 1) | (1 << 0) // cntl_enable ); // ---------------------- // Canvas // ---------------------- WRITE_MPEG_REG(VD1_IF0_CANVAS0, (mif->canvas0_addr2 << 16) | // cntl_canvas0_addr2 (mif->canvas0_addr1 << 8) | // cntl_canvas0_addr1 (mif->canvas0_addr0 << 0) // cntl_canvas0_addr0 ); // ---------------------- // Picture 0 X/Y start,end // ---------------------- WRITE_MPEG_REG(VD1_IF0_LUMA_X0, (mif->luma_x_end0 << 16) | // cntl_luma_x_end0 (mif->luma_x_start0 << 0) // cntl_luma_x_start0 ); WRITE_MPEG_REG(VD1_IF0_LUMA_Y0, (mif->luma_y_end0 << 16) | // cntl_luma_y_end0 (mif->luma_y_start0 << 0) // cntl_luma_y_start0 ); WRITE_MPEG_REG(VD1_IF0_CHROMA_X0, (mif->chroma_x_end0 << 16) | (mif->chroma_x_start0 << 0) ); WRITE_MPEG_REG(VD1_IF0_CHROMA_Y0, (mif->chroma_y_end0 << 16) | (mif->chroma_y_start0 << 0) ); // ---------------------- // Repeat or skip // ---------------------- WRITE_MPEG_REG(VD1_IF0_RPT_LOOP, (0 << 28) | (0 << 24) | (0 << 20) | (0 << 16) | (chroma0_rpt_loop_start << 12) | (chroma0_rpt_loop_end << 8) | (luma0_rpt_loop_start << 4) | (luma0_rpt_loop_end << 0) ) ; WRITE_MPEG_REG(VD1_IF0_LUMA0_RPT_PAT, luma0_rpt_loop_pat); WRITE_MPEG_REG(VD1_IF0_CHROMA0_RPT_PAT, chroma0_rpt_loop_pat); // Dummy pixel value WRITE_MPEG_REG(VD1_IF0_DUMMY_PIXEL, 0x00808000); // ---------------------- // Picture 1 unused // ---------------------- WRITE_MPEG_REG(VD1_IF0_LUMA_X1, 0); // unused WRITE_MPEG_REG(VD1_IF0_LUMA_Y1, 0); // unused WRITE_MPEG_REG(VD1_IF0_CHROMA_X1, 0); // unused WRITE_MPEG_REG(VD1_IF0_CHROMA_Y1, 0); // unused WRITE_MPEG_REG(VD1_IF0_LUMA_PSEL, 0); // unused only one picture WRITE_MPEG_REG(VD1_IF0_CHROMA_PSEL, 0); // unused only one picture if ((mif->set_separate_en == 1)) { // 4:2:0 block mode. set_vd1_fmt_more( 1, // hfmt_en 1, // hz_yc_ratio 0, // hz_ini_phase 1, // vfmt_en 1, // vt_yc_ratio 0, // vt_ini_phase mif->luma_x_end0 - mif->luma_x_start0 + 1, // y_length mif->chroma_x_end0 - mif->chroma_x_start0 + 1 , // c length 0); // hz repeat. } else { set_vd1_fmt_more( 1, // hfmt_en 1, // hz_yc_ratio 0, // hz_ini_phase 0, // vfmt_en 0, // vt_yc_ratio 0, // vt_ini_phase mif->luma_x_end0 - mif->luma_x_start0 + 1, // y_length ((mif->luma_x_end0 >> 1) - (mif->luma_x_start0 >> 1) + 1) , //c length 0); // hz repeat. } } //enable deinterlace pre module separated for pre post separate tests. 
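/*
 * Pre/post overview (editorial summary of the code below): the pre stage
 * (enable_di_pre) runs noise reduction and motion detection on the incoming
 * field and writes the results to DDR canvases, while per-field statistics
 * are read back via DI_INFO_ADDR/DI_INFO_DATA and shifted into the
 * di_p32_info/di_p22_info bit streams by check_p32_p22() further below; a
 * repeating mask in those streams (e.g. a 5-field period for 3:2 film
 * cadence) selects the blend_mode used by the post stage (enable_di_post),
 * which performs edge interpolation and blending, either prepost-linked or
 * from the DDR buffers.
 */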
void enable_di_pre(
    DI_MIF_t *di_inp_mif,
    DI_MIF_t *di_mem_mif,
    DI_MIF_t *di_chan2_mif,
    DI_SIM_MIF_t *di_nrwr_mif,
    DI_SIM_MIF_t *di_mtnwr_mif,
    int nr_en,
    int mtn_en,
    int pd32_check_en,
    int pd22_check_en,
    int hist_check_en,
#if defined(CONFIG_ARCH_MESON)
#elif defined(CONFIG_ARCH_MESON2)
    int nr_hfilt_en,
    int nr_hfilt_mb_en,
    int mtn_modify_en,
#endif
    int pre_field_num,
    int pre_viu_link,
    int hold_line)
{
    int hist_check_only;

#if defined(CONFIG_ARCH_MESON2)
    int nr_zone_0, nr_zone_1, nr_zone_2;

    if (noise_reduction_level == 0) {
        nr_zone_0 = 1;
        nr_zone_1 = 3;
        nr_zone_2 = 5;
    } else {
        nr_zone_0 = 3;
        nr_zone_1 = 6;
        nr_zone_2 = 10;
    }
#endif

    hist_check_only = hist_check_en && !nr_en && !mtn_en
                      && !pd22_check_en && !pd32_check_en;

    if (nr_en || mtn_en || pd22_check_en || pd32_check_en) {
        set_di_mem_mif(di_mem_mif, 0, hold_line);        // set urgent 0
        if (!vdin_en) {
            set_di_inp_mif(di_inp_mif, 0, hold_line);    // set urgent 0
        }
    }

    if (pd22_check_en || hist_check_only) {
        set_di_chan2_mif(di_chan2_mif, 0, hold_line);    // set urgent 0.
    }

    // set nr wr mif interface.
    if (nr_en) {
        WRITE_MPEG_REG(DI_NRWR_X, (di_nrwr_mif->start_x << 16) |
                       (di_nrwr_mif->end_x));            // start_x 0 end_x 719.
        WRITE_MPEG_REG(DI_NRWR_Y, (di_nrwr_mif->start_y << 16) |
                       (di_nrwr_mif->end_y));            // start_y 0 end_y 239.
        WRITE_MPEG_REG(DI_NRWR_CTRL, di_nrwr_mif->canvas_num); // canvas index.
#if defined(CONFIG_ARCH_MESON)
#elif defined(CONFIG_ARCH_MESON2)
        WRITE_MPEG_REG(DI_NR_CTRL0,
                       (1 << 31) |              // nr yuv enable.
                       (1 << 30) |              // nr range. 3 point
                       (0 << 29) |              // max of 3 point.
                       (nr_hfilt_en << 28) |    // nr hfilter enable.
                       (nr_hfilt_mb_en << 27) | // nr hfilter motion_blur enable.
                       (nr_zone_2 << 16) |      // zone 2
                       (nr_zone_1 << 8) |       // zone 1
                       (nr_zone_0 << 0));       // zone 0
        WRITE_MPEG_REG(DI_NR_CTRL2,
                       (10 << 24) |             // intra noise level
                       (1 << 16) |              // intra no noise level.
                       (10 << 8) |              // inter noise level.
                       (1 << 0));               // inter no noise level.
        WRITE_MPEG_REG(DI_NR_CTRL3,
                       (16 << 16) |  // if any of the 3 point mtn values is larger than 16, don't use 3 point.
                       720);         // if one line's eq cnt is larger than this number, the line is not counted.
#endif
    }

    // motion wr mif.
    if (mtn_en) {
        WRITE_MPEG_REG(DI_MTNWR_X, (di_mtnwr_mif->start_x << 16) |
                       (di_mtnwr_mif->end_x));           // start_x 0 end_x 719.
        WRITE_MPEG_REG(DI_MTNWR_Y, (di_mtnwr_mif->start_y << 16) |
                       (di_mtnwr_mif->end_y));           // start_y 0 end_y 239.
        WRITE_MPEG_REG(DI_MTNWR_CTRL,
                       di_mtnwr_mif->canvas_num |        // canvas index.
                       (0 << 8));                        // urgent.
#if defined(CONFIG_ARCH_MESON)
#elif defined(CONFIG_ARCH_MESON2)
        WRITE_MPEG_REG(DI_MTN_CTRL,
                       (1 << 31) |              // lpf enable.
                       (1 << 30) |              // mtn uv enable.
                       (mtn_modify_en << 29) |  // no mtn modify.
                       (2 << 24) |              // char diff count.
                       (40 << 16) |             // black level.
                       (196 << 8) |             // white level.
                       (64 << 0));              // char diff level.
        WRITE_MPEG_REG(DI_MTN_CTRL1,
                       (3 << 8) |               // mtn shift if mtn_modify_en.
                       0);                      // mtn reduce before shift.
#endif
    }

    // reset pre
    WRITE_MPEG_REG(DI_PRE_CTRL, READ_MPEG_REG(DI_PRE_CTRL) |
                   1 << 31);  // frame reset for the pre modules.

#if defined(CONFIG_ARCH_MESON)
    WRITE_MPEG_REG(DI_PRE_CTRL,
                   nr_en |                  // NR enable
                   (mtn_en << 1) |          // MTN_EN
                   (pd32_check_en << 2) |   // check 3:2 pulldown
                   (pd22_check_en << 3) |   // check 2:2 pulldown
                   (1 << 4) |               // 2:2 check mid pixel come from next field after MTN.
                   (hist_check_en << 5) |   // hist check enable
                   (hist_check_only << 6) | // hist check use chan2.
                   ((!nr_en) << 7) |        // hist check use data before noise reduction.
                   ((pd22_check_en || hist_check_only) << 8) | // chan 2 enable for 2:2 pull down check.
                   (pd22_check_en << 9) |   // line buffer 2 enable
                   (0 << 10) |              // pre drop first.
                   (0 << 11) |              // pre repeat.
                   (0 << 12) |              // pre viu link
                   (hold_line << 16) |      // pre hold line number
                   (pre_field_num << 29) |  // pre field number.
                   (0x1 << 30)              // pre soft rst, pre frame rst.
                  );
#elif defined(CONFIG_ARCH_MESON2)
    WRITE_MPEG_REG(DI_PRE_CTRL,
                   nr_en |                  // NR enable
                   (mtn_en << 1) |          // MTN_EN
                   (pd32_check_en << 2) |   // check 3:2 pulldown
                   (pd22_check_en << 3) |   // check 2:2 pulldown
                   (1 << 4) |               // 2:2 check mid pixel come from next field after MTN.
                   (hist_check_en << 5) |   // hist check enable
                   (1 << 6) |               // hist check use chan2.
                   ((!nr_en) << 7) |        // hist check use data before noise reduction.
                   ((pd22_check_en || hist_check_only) << 8) | // chan 2 enable for 2:2 pull down check.
                   (pd22_check_en << 9) |   // line buffer 2 enable
                   (0 << 10) |              // pre drop first.
                   (0 << 11) |              // pre repeat.
                   (0 << 12) |              // pre viu link
                   (hold_line << 16) |      // pre hold line number
                   (1 << 22) |              // MTN after NR.
                   (pre_field_num << 29) |  // pre field number.
                   (0x1 << 30)              // pre soft rst, pre frame rst.
                  );
#endif
}

// enable di post module for separate test.
void enable_di_post(
    DI_MIF_t *di_buf0_mif,
    DI_MIF_t *di_buf1_mif,
    DI_SIM_MIF_t *di_diwr_mif,
    DI_SIM_MIF_t *di_mtncrd_mif,
    DI_SIM_MIF_t *di_mtnprd_mif,
    int ei_en,
    int blend_en,
    int blend_mtn_en,
    int blend_mode,
    int di_vpp_en,
    int di_ddr_en,
#if defined(CONFIG_ARCH_MESON)
#elif defined(CONFIG_ARCH_MESON2)
    int blend_mtn_filt_en,
    int blend_data_filt_en,
    int post_mb_en,
#endif
    int post_field_num,
    int hold_line)
{
    int ei_only;
    int buf1_en;

    ei_only = ei_en && !blend_en && (di_vpp_en || di_ddr_en);
    buf1_en = (!ei_only && (di_ddr_en || di_vpp_en));

    if (ei_en || di_vpp_en || di_ddr_en) {
        set_di_if0_mif(di_buf0_mif, di_vpp_en, hold_line);
    }
    if (!ei_only && (di_ddr_en || di_vpp_en)) {
        set_di_if1_mif(di_buf1_mif, di_vpp_en, hold_line);
    }

    // motion for current display field.
    if (blend_mtn_en) {
        WRITE_MPEG_REG(DI_MTNPRD_X, (di_mtnprd_mif->start_x << 16) |
                       (di_mtnprd_mif->end_x));          // start_x 0 end_x 719.
        WRITE_MPEG_REG(DI_MTNPRD_Y, (di_mtnprd_mif->start_y << 16) |
                       (di_mtnprd_mif->end_y));          // start_y 0 end_y 239.
        WRITE_MPEG_REG(DI_MTNCRD_X, (di_mtncrd_mif->start_x << 16) |
                       (di_mtncrd_mif->end_x));          // start_x 0 end_x 719.
        WRITE_MPEG_REG(DI_MTNCRD_Y, (di_mtncrd_mif->start_y << 16) |
                       (di_mtncrd_mif->end_y));          // start_y 0 end_y 239.
        WRITE_MPEG_REG(DI_MTNRD_CTRL,
                       (di_mtnprd_mif->canvas_num << 8) | // mtnp canvas index.
                       (1 << 16) |                        // urgent
                       di_mtncrd_mif->canvas_num);        // current field mtn canvas index.
    }

    if (di_ddr_en) {
        WRITE_MPEG_REG(DI_DIWR_X, (di_diwr_mif->start_x << 16) |
                       (di_diwr_mif->end_x));            // start_x 0 end_x 719.
        WRITE_MPEG_REG(DI_DIWR_Y, (di_diwr_mif->start_y << 16) |
                       (di_diwr_mif->end_y * 2 + 1));    // start_y 0 end_y 479.
        WRITE_MPEG_REG(DI_DIWR_CTRL,
                       di_diwr_mif->canvas_num |         // canvas index.
                       (di_vpp_en << 8));                // urgent.
    }

    if (ei_only == 0) {
#if defined(CONFIG_ARCH_MESON)
        WRITE_MPEG_REG(DI_BLEND_CTRL,
                       (READ_MPEG_REG(DI_BLEND_CTRL) &
                        (~((1 << 25) | (3 << 20)))) |  // clear the bits we are about to set.
                       (blend_mtn_en << 26) |          // blend mtn enable.
                       (0 << 25) |                     // blend with the mtn of the pre display field and next display field.
                       (1 << 24) |                     // blend with pre display field.
                       (blend_mode << 20)              // motion adaptive blend.
                      );
#elif defined(CONFIG_ARCH_MESON2)
        WRITE_MPEG_REG(DI_BLEND_CTRL,
                       (post_mb_en << 28) |            // post motion blur enable.
                       (0 << 27) |                     // mtn3p(l, c, r) max.
                       (0 << 26) |                     // mtn3p(l, c, r) min.
                       (0 << 25) |                     // mtn3p(l, c, r) ave.
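                       /*
                        * Bits 27..24 select which motion samples feed the
                        * blender; the mtn3p max/min/ave options above stay
                        * 0 and only the top/bottom max (bit 24, next line)
                        * is enabled here.
                        */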
                       (1 << 24) |                     // mtntopbot max
                       (blend_mtn_filt_en << 23) |     // blend mtn filter enable.
                       (blend_data_filt_en << 22) |    // blend data filter enable.
                       (blend_mode << 20) |            // motion adaptive blend.
                       25                              // kdeint.
                      );
        WRITE_MPEG_REG(DI_BLEND_CTRL1,
                       (196 << 24) |                   // char level
                       (64 << 16) |                    // angle threshold.
                       (40 << 8) |                     // all_af filt thd.
                       (64));                          // all 4 equal
        WRITE_MPEG_REG(DI_BLEND_CTRL2,
                       (4 << 8) |                      // mtn no mov level.
                       (48));                          // black level.
#endif
    }

#if defined(CONFIG_ARCH_MESON)
    WRITE_MPEG_REG(DI_POST_CTRL,
                   ((ei_en || blend_en) << 0) | // line buffer 0 enable
                   (0 << 1) |                   // line buffer 1 enable
                   (ei_en << 2) |               // ei enable
                   (blend_mtn_en << 3) |        // mtn line buffer enable
                   (blend_mtn_en << 4) |        // mtnp read mif enable
                   (blend_en << 5) |            // di blend enable.
                   (1 << 6) |                   // di mux output enable
                   (di_ddr_en << 7) |           // di write to SDRAM enable.
                   (di_vpp_en << 8) |           // di to VPP enable.
                   (0 << 9) |                   // mif0 to VPP enable.
                   (0 << 10) |                  // post drop first.
                   (0 << 11) |                  // post repeat.
                   (1 << 12) |                  // post viu link
                   (hold_line << 16) |          // post hold line number
                   (post_field_num << 29) |     // post field number.
                   (0x1 << 30)                  // post soft rst, post frame rst.
                  );
#elif defined(CONFIG_ARCH_MESON2)
    WRITE_MPEG_REG(DI_POST_CTRL,
                   ((ei_en || blend_en) << 0) | // line buffer 0 enable
                   (0 << 1) |                   // line buffer 1 enable
                   (ei_en << 2) |               // ei enable
                   (blend_mtn_en << 3) |        // mtn line buffer enable
                   (blend_mtn_en << 4) |        // mtnp read mif enable
                   (blend_en << 5) |            // di blend enable.
                   (1 << 6) |                   // di mux output enable
                   (di_ddr_en << 7) |           // di write to SDRAM enable.
                   (di_vpp_en << 8) |           // di to VPP enable.
                   (0 << 9) |                   // mif0 to VPP enable.
                   (0 << 10) |                  // post drop first.
                   (0 << 11) |                  // post repeat.
                   (di_vpp_en << 12) |          // post viu link
                   (hold_line << 16) |          // post hold line number
                   (post_field_num << 29) |     // post field number.
                   (0x1 << 30)                  // post soft rst, post frame rst.
); #endif } int di_pre_mode_check(int cur_field) { int i; WRITE_MPEG_REG(DI_INFO_ADDR, 0); for (i = 0; i <= 68; i++) { di_info[cur_field][i] = READ_MPEG_REG(DI_INFO_DATA); } #if defined(CONFIG_ARCH_MESON) #elif defined(CONFIG_ARCH_MESON2) WRITE_MPEG_REG(DI_INFO_ADDR, 77); for (i = 77; i <= 82; i++) { di_info[cur_field][i] = READ_MPEG_REG(DI_INFO_DATA); } #endif return (0); } int di_post_mode_check(int cur_field) { int i; WRITE_MPEG_REG(DI_INFO_ADDR, 69); for (i = 69; i <= 76; i++) { di_info[cur_field][i] = READ_MPEG_REG(DI_INFO_DATA); } return (0); } void enable_region_blend( int reg0_en, int reg0_start_x, int reg0_end_x, int reg0_start_y, int reg0_end_y, int reg0_mode, int reg1_en, int reg1_start_x, int reg1_end_x, int reg1_start_y, int reg1_end_y, int reg1_mode, int reg2_en, int reg2_start_x, int reg2_end_x, int reg2_start_y, int reg2_end_y, int reg2_mode, int reg3_en, int reg3_start_x, int reg3_end_x, int reg3_start_y, int reg3_end_y, int reg3_mode) { WRITE_MPEG_REG(DI_BLEND_REG0_X, (reg0_start_x << 16) | reg0_end_x); WRITE_MPEG_REG(DI_BLEND_REG0_Y, (reg0_start_y << 16) | reg0_end_y); WRITE_MPEG_REG(DI_BLEND_REG1_X, (reg1_start_x << 16) | reg1_end_x); WRITE_MPEG_REG(DI_BLEND_REG1_Y, (reg1_start_y << 16) | reg1_end_y); WRITE_MPEG_REG(DI_BLEND_REG2_X, (reg2_start_x << 16) | reg2_end_x); WRITE_MPEG_REG(DI_BLEND_REG2_Y, (reg2_start_y << 16) | reg2_end_y); WRITE_MPEG_REG(DI_BLEND_REG3_X, (reg3_start_x << 16) | reg3_end_x); WRITE_MPEG_REG(DI_BLEND_REG3_Y, (reg3_start_y << 16) | reg3_end_y); WRITE_MPEG_REG(DI_BLEND_CTRL, (READ_MPEG_REG(DI_BLEND_CTRL) & (~(0xfff << 8))) | (reg0_mode << 8) | (reg1_mode << 10) | (reg2_mode << 12) | (reg3_mode << 14) | (reg0_en << 16) | (reg1_en << 17) | (reg2_en << 18) | (reg3_en << 19)); } int check_p32_p22(int cur_field, int pre_field, int pre2_field) { unsigned int cur_data, pre_data, pre2_data; unsigned int cur_num, pre_num, pre2_num; unsigned int data_diff, num_diff; di_p22_info = di_p22_info << 1; cur_data = di_info[cur_field][2]; pre_data = di_info[pre_field][2]; pre2_data = di_info[pre2_field][2]; cur_num = di_info[cur_field][4] & 0xffffff; pre_num = di_info[pre_field][4] & 0xffffff; pre2_num = di_info[pre2_field][4] & 0xffffff; if (cur_data * 2 <= pre_data && pre2_data * 2 <= pre_data && cur_num * 2 <= pre_num && pre2_num * 2 <= pre_num) { di_p22_info |= 1; } di_p32_info = di_p32_info << 1; di_p32_info_2 = di_p32_info_2 << 1; di_p22_info_2 = di_p22_info_2 << 1; cur_data = di_info[cur_field][0]; cur_num = di_info[cur_field][1] & 0xffffff; pre_data = di_info[pre_field][0]; pre_num = di_info[pre_field][1] & 0xffffff; data_diff = cur_data > pre_data ? cur_data - pre_data : pre_data - cur_data; num_diff = cur_num > pre_num ? 
cur_num - pre_num : pre_num - cur_num; if ((di_p22_info & 0x1) && data_diff * 10 <= cur_data && num_diff * 10 <= cur_num) { di_p22_info_2 |= 1; } if (di_p32_counter > 0 || di_p32_info == 0) { if (cur_data * 2 <= pre_data && cur_num * 50 <= pre_num) { di_p32_info |= 1; last_big_data = pre_data; last_big_num = pre_num; di_p32_counter = -1; } else { last_big_data = 0; last_big_num = 0; if ((di_p32_counter & 0x1) && data_diff * 5 <= cur_data && num_diff * 5 <= cur_num) { di_p32_info_2 |= 1; } } } else { if (cur_data * 2 <= last_big_data && cur_num * 50 <= last_big_num) { di_p32_info |= 1; di_p32_counter = -1; } } di_p32_counter++; return 0; } void pattern_check_prepost(void) { if (pre_field_counter != di_checked_field) { di_checked_field = pre_field_counter; di_mode_check(pre_field_counter % 4); #ifdef DEBUG debug_array[(pre_field_counter & 0x3ff) * 4] = di_info[pre_field_counter % 4][0]; debug_array[(pre_field_counter & 0x3ff) * 4 + 1] = di_info[pre_field_counter % 4][1] & 0xffffff; debug_array[(pre_field_counter & 0x3ff) * 4 + 2] = di_info[pre_field_counter % 4][2]; debug_array[(pre_field_counter & 0x3ff) * 4 + 3] = di_info[pre_field_counter % 4][4]; #endif if (pre_field_counter >= 3) { check_p32_p22(pre_field_counter % 4, (pre_field_counter + 3) % 4, (pre_field_counter + 2) % 4); #if defined(CONFIG_ARCH_MESON) pattern_22 = pattern_22 << 1; if (di_info[pre_field_counter % 4][4] < di_info[(pre_field_counter + 3) % 4][4]) { pattern_22 |= 1; } #endif } } di_chan2_mif.canvas0_addr0 = DEINTERLACE_CANVAS_BASE_INDEX + (field_counter + 3) % 4; di_mem_mif.canvas0_addr0 = DEINTERLACE_CANVAS_BASE_INDEX + (field_counter + 2) % 4; blend_mode = 3; #if defined(CONFIG_ARCH_MESON) di_buf0_mif.canvas0_addr0 = DEINTERLACE_CANVAS_BASE_INDEX + (field_counter + 3) % 4; // 2:2 check if (((di_p22_info & PATTERN22_MARK) == (0xaaaaaaaaaaaaaaaaLL & PATTERN22_MARK)) && ((di_p22_info_2 & PATTERN22_MARK) == (0xaaaaaaaaaaaaaaaaLL & PATTERN22_MARK))) { blend_mode = 1; } else if (((di_p22_info & PATTERN22_MARK) == (0x5555555555555555LL & PATTERN22_MARK)) && ((di_p22_info_2 & PATTERN22_MARK) == (0x5555555555555555LL & PATTERN22_MARK))) { blend_mode = 0; } #elif defined(CONFIG_ARCH_MESON2) di_buf1_mif.canvas0_addr0 = DEINTERLACE_CANVAS_BASE_INDEX + (field_counter + 1) % 4; if (((di_p22_info & PATTERN22_MARK) == (0x5555555555555555LL & PATTERN22_MARK)) && ((di_p22_info_2 & PATTERN22_MARK) == (0x5555555555555555LL & PATTERN22_MARK))) { di_buf1_mif.canvas0_addr0 = DEINTERLACE_CANVAS_BASE_INDEX + (field_counter + 3) % 4; blend_mode = 1; } else if (((di_p22_info & PATTERN22_MARK) == (0xaaaaaaaaaaaaaaaaLL & PATTERN22_MARK)) && ((di_p22_info_2 & PATTERN22_MARK) == (0xaaaaaaaaaaaaaaaaLL & PATTERN22_MARK))) { di_buf1_mif.canvas0_addr0 = DEINTERLACE_CANVAS_BASE_INDEX + (field_counter + 1) % 4; blend_mode = 0; } #endif // pull down pattern check if (pattern_len == 0) { int i, j, pattern, pattern_2, mask; for (j = 5 ; j < 22 ; j++) { mask = (1 << j) - 1; pattern = di_p32_info & mask; pattern_2 = di_p32_info_2 & mask; if (pattern != 0 && pattern_2 != 0 && pattern != mask) { for (i = j ; i < j * 3 ; i += j) if (((di_p32_info >> i) & mask) != pattern || ((di_p32_info_2 >> i) & mask) != pattern_2) { break; } if (i == j * 3) { #if defined(CONFIG_ARCH_MESON) if (pattern_22 & (1 << (j - 1))) { blend_mode = 1; } else { blend_mode = 0; } #elif defined(CONFIG_ARCH_MESON2) if (di_info[(field_counter + 3) % 4][4] < di_info[(field_counter + 2) % 4][4]) { di_buf1_mif.canvas0_addr0 = DEINTERLACE_CANVAS_BASE_INDEX + (field_counter + 3) % 4; blend_mode = 
1; } else { di_buf1_mif.canvas0_addr0 = DEINTERLACE_CANVAS_BASE_INDEX + (field_counter + 1) % 4; blend_mode = 0; } #endif pattern_len = j; break; } } } } else { int i, pattern, pattern_2, mask; mask = (1 << pattern_len) - 1; pattern = di_p32_info & mask; pattern_2 = di_p32_info_2 & mask; for (i = pattern_len ; i < pattern_len * 3 ; i += pattern_len) if (((di_p32_info >> i) & mask) != pattern || ((di_p32_info_2 >> i) & mask) != pattern_2) { break; } if (i == pattern_len * 3) { #if defined(CONFIG_ARCH_MESON) if (pattern_22 & (1 << (pattern_len - 1))) { blend_mode = 1; } else { blend_mode = 0; } #elif defined(CONFIG_ARCH_MESON2) if (di_info[(field_counter + 3) % 4][4] < di_info[(field_counter + 2) % 4][4]) { di_buf1_mif.canvas0_addr0 = DEINTERLACE_CANVAS_BASE_INDEX + (field_counter + 3) % 4; blend_mode = 1; } else { di_buf1_mif.canvas0_addr0 = DEINTERLACE_CANVAS_BASE_INDEX + (field_counter + 1) % 4; blend_mode = 0; } #endif } else { pattern_len = 0; } } di_nrwr_mif.canvas_num = DEINTERLACE_CANVAS_BASE_INDEX + field_counter % 4; di_mtnwr_mif.canvas_num = DEINTERLACE_CANVAS_BASE_INDEX + 4 + field_counter % 4; di_mtncrd_mif.canvas_num = DEINTERLACE_CANVAS_BASE_INDEX + 4 + (field_counter + 2) % 4; di_mtnprd_mif.canvas_num = DEINTERLACE_CANVAS_BASE_INDEX + 4 + (field_counter + 3) % 4; } void pattern_check_pre(void) { di_pre_mode_check(pre_field_counter % 4); #ifdef DEBUG debug_array[(pre_field_counter & 0x3ff) * 4] = di_info[pre_field_counter % 4][0]; debug_array[(pre_field_counter & 0x3ff) * 4 + 1] = di_info[pre_field_counter % 4][1] & 0xffffff; debug_array[(pre_field_counter & 0x3ff) * 4 + 2] = di_info[pre_field_counter % 4][2]; debug_array[(pre_field_counter & 0x3ff) * 4 + 3] = di_info[pre_field_counter % 4][4]; #endif if (pre_field_counter >= 3) { check_p32_p22(pre_field_counter % 4, (pre_field_counter - 1) % 4, (pre_field_counter - 2) % 4); if (di_buf_pool[(pre_field_counter - 1) % DI_BUF_NUM].blend_mode == 3) { if (((di_p22_info & PATTERN22_MARK) == (0x5555555555555555LL & PATTERN22_MARK)) && ((di_p22_info_2 & PATTERN22_MARK) == (0x5555555555555555LL & PATTERN22_MARK))) { di_buf_pool[(pre_field_counter - 1) % DI_BUF_NUM].blend_mode = 1; } else if (((di_p22_info & PATTERN22_MARK) == (0xaaaaaaaaaaaaaaaaLL & PATTERN22_MARK)) && ((di_p22_info_2 & PATTERN22_MARK) == (0xaaaaaaaaaaaaaaaaLL & PATTERN22_MARK))) { di_buf_pool[(pre_field_counter - 1) % DI_BUF_NUM].blend_mode = 0; } else if (pattern_len == 0) { di_buf_pool[(pre_field_counter - 2) % DI_BUF_NUM].blend_mode = 3; } if (pattern_len == 0) { int i, j, pattern, pattern_2, mask; for (j = 5 ; j < 22 ; j++) { mask = (1 << j) - 1; pattern = di_p32_info & mask; pattern_2 = di_p32_info_2 & mask; if (pattern != 0 && pattern_2 != 0 && pattern != mask) { for (i = j ; i < j * PATTERN32_NUM ; i += j) if (((di_p32_info >> i) & mask) != pattern || ((di_p32_info_2 >> i) & mask) != pattern_2) { break; } if (i == j * PATTERN32_NUM) { if ((pattern_len == 5) && ((pattern & (pattern - 1)) == 0)) { if ((di_p32_info & 0x1) || (di_p32_info & 0x2) || (di_p32_info & 0x8)) { di_buf_pool[(pre_field_counter - 1) % DI_BUF_NUM].blend_mode = 0; } else { di_buf_pool[(pre_field_counter - 1) % DI_BUF_NUM].blend_mode = 1; } } else { if ((pattern & (pattern - 1)) != 0) { if (di_info[pre_field_counter % 4][4] < di_info[(pre_field_counter - 1) % 4][4]) { di_buf_pool[(pre_field_counter - 1) % DI_BUF_NUM].blend_mode = 1; } else { di_buf_pool[(pre_field_counter - 1) % DI_BUF_NUM].blend_mode = 0; } } } pattern_len = j; break; } } } } else { int i, pattern, pattern_2, mask; mask = (1 
<< pattern_len) - 1; pattern = di_p32_info & mask; pattern_2 = di_p32_info_2 & mask; for (i = pattern_len ; i < pattern_len * PATTERN32_NUM ; i += pattern_len) if (((di_p32_info >> i) & mask) != pattern || ((di_p32_info_2 >> i) & mask) != pattern_2) { break; } if (i == pattern_len * PATTERN32_NUM) { if ((pattern_len == 5) && ((pattern & (pattern - 1)) == 0)) { if ((di_p32_info & 0x1) || (di_p32_info & 0x2) || (di_p32_info & 0x8)) { di_buf_pool[(pre_field_counter - 1) % DI_BUF_NUM].blend_mode = 0; } else { di_buf_pool[(pre_field_counter - 1) % DI_BUF_NUM].blend_mode = 1; } } else { if ((pattern & (pattern - 1)) != 0) { if (di_info[pre_field_counter % 4][4] < di_info[(pre_field_counter - 1) % 4][4]) { di_buf_pool[(pre_field_counter - 1) % DI_BUF_NUM].blend_mode = 1; } else { di_buf_pool[(pre_field_counter - 1) % DI_BUF_NUM].blend_mode = 0; } } } } else { pattern_len = 0; di_buf_pool[(pre_field_counter - 2) % DI_BUF_NUM].blend_mode = 3; } } } } } void set_vdin_par(int flag, vframe_t *buf) { vdin_en = flag; memcpy(&dummy_buf, buf, sizeof(vframe_t)); } void di_pre_process(void) { unsigned temp = READ_MPEG_REG(DI_INTR_CTRL); unsigned status = READ_MPEG_REG(DI_PRE_CTRL) & 0x2; #if defined(CONFIG_ARCH_MESON2) int nr_hfilt_en, nr_hfilt_mb_en; if (noise_reduction_level == 2) { nr_hfilt_en = 1; nr_hfilt_mb_en = 1; } else { nr_hfilt_en = 0; nr_hfilt_mb_en = 0; } #endif if (deinterlace_mode != 2) { return; } if ((prev_struct == 0) && (READ_MPEG_REG(DI_PRE_SIZE) != ((32 - 1) | ((64 - 1) << 16)))) { disable_pre_deinterlace(); } if (prev_struct > 0) { #if defined(CONFIG_ARCH_MESON) if ((temp & 0xf) != (status | 0x9)) #elif defined(CONFIG_ARCH_MESON2) if ((temp & 0xf) != (status | 0x1)) #endif return; if (!vdin_en && (prog_field_count == 0) && (buf_recycle_done == 0)) { buf_recycle_done = 1; vf_put(cur_buf, RECEIVER_NAME); } if (di_pre_post_done == 0) { di_pre_post_done = 1; pattern_check_pre(); memcpy((&di_buf_pool[pre_field_counter % DI_BUF_NUM]), cur_buf, sizeof(vframe_t)); di_buf_pool[pre_field_counter % DI_BUF_NUM].blend_mode = blend_mode; di_buf_pool[pre_field_counter % DI_BUF_NUM].canvas0Addr = DEINTERLACE_CANVAS_BASE_INDEX + 4; di_buf_pool[pre_field_counter % DI_BUF_NUM].canvas1Addr = DEINTERLACE_CANVAS_BASE_INDEX + 4; if (prev_struct == 1) { di_buf_pool[pre_field_counter % DI_BUF_NUM].type = VIDTYPE_INTERLACE_TOP | VIDTYPE_VIU_422 | VIDTYPE_VIU_SINGLE_PLANE | VIDTYPE_VIU_FIELD; } else { di_buf_pool[pre_field_counter % DI_BUF_NUM].type = VIDTYPE_INTERLACE_BOTTOM | VIDTYPE_VIU_422 | VIDTYPE_VIU_SINGLE_PLANE | VIDTYPE_VIU_FIELD; } pre_field_counter++; } if ((pre_field_counter >= field_counter + DI_BUF_NUM - 3) && ((pre_field_counter >= field_counter + DI_BUF_NUM - 2) || (field_counter == 0))) { #ifdef DEBUG di_pre_overflow++; #endif return; } if (!vdin_en && (prog_field_count == 0) && (!vf_peek(RECEIVER_NAME))) { #ifdef DEBUG di_pre_underflow++; #endif return; } } if (prog_field_count > 0) { blend_mode = 0; prog_field_count--; prev_struct = 3 - prev_struct; } else { if (vdin_en) { di_pre_recycle_buf = 1; cur_buf = &dummy_buf; } else { cur_buf = vf_peek(RECEIVER_NAME); if (!cur_buf) { return; } if ((cur_buf->duration == 0) #if defined(CONFIG_AM_DEINTERLACE_SD_ONLY) || (cur_buf->width > 720) #endif ) { di_pre_recycle_buf = 0; return; } di_pre_recycle_buf = 1; cur_buf = vf_get(RECEIVER_NAME); } if (((cur_buf->type & VIDTYPE_TYPEMASK) == VIDTYPE_INTERLACE_TOP && prev_struct == 1) || ((cur_buf->type & VIDTYPE_TYPEMASK) == VIDTYPE_INTERLACE_BOTTOM && prev_struct == 2)) { if (!vdin_en) { vf_put(cur_buf, 
RECEIVER_NAME); } return; } di_inp_top_mif.canvas0_addr0 = di_inp_bot_mif.canvas0_addr0 = cur_buf->canvas0Addr & 0xff; di_inp_top_mif.canvas0_addr1 = di_inp_bot_mif.canvas0_addr1 = (cur_buf->canvas0Addr >> 8) & 0xff; di_inp_top_mif.canvas0_addr2 = di_inp_bot_mif.canvas0_addr2 = (cur_buf->canvas0Addr >> 16) & 0xff; blend_mode = 3; if ((cur_buf->type & VIDTYPE_TYPEMASK) == VIDTYPE_INTERLACE_TOP) { prev_struct = 1; prog_field_count = 0; } else if ((cur_buf->type & VIDTYPE_TYPEMASK) == VIDTYPE_INTERLACE_BOTTOM) { prev_struct = 2; prog_field_count = 0; } else { if (prev_struct == 0) { prev_struct = 1; } else { prev_struct = 3 - prev_struct; } if (cur_buf->duration_pulldown > 0) { prog_field_count = 2; } else { prog_field_count = 1; } blend_mode = 1; cur_buf->duration >>= 1; cur_buf->duration_pulldown = 0; } } buf_recycle_done = 0; di_pre_post_done = 0; WRITE_MPEG_REG(DI_INTR_CTRL, temp); if ((READ_MPEG_REG(DI_PRE_SIZE) != ((cur_buf->width - 1) | ((cur_buf->height / 2 - 1) << 16)))) { WRITE_MPEG_REG(DI_INTR_CTRL, 0x000f000f); initial_di_pre(cur_buf->width, cur_buf->height / 2, PRE_HOLD_LINE); di_checked_field = (field_counter + di_checked_field + 1) % DI_BUF_NUM; pre_field_counter = field_counter = 0; di_p32_info = di_p22_info = di_p32_info_2 = di_p22_info_2 = 0; pattern_len = 0; di_mem_mif.luma_x_start0 = 0; di_mem_mif.luma_x_end0 = cur_buf->width - 1; di_mem_mif.luma_y_start0 = 0; di_mem_mif.luma_y_end0 = cur_buf->height / 2 - 1; di_chan2_mif.luma_x_start0 = 0; di_chan2_mif.luma_x_end0 = cur_buf->width - 1; di_chan2_mif.luma_y_start0 = 0; di_chan2_mif.luma_y_end0 = cur_buf->height / 2 - 1; di_nrwr_mif.start_x = 0; di_nrwr_mif.end_x = cur_buf->width - 1; di_nrwr_mif.start_y = 0; di_nrwr_mif.end_y = cur_buf->height / 2 - 1; di_mtnwr_mif.start_x = 0; di_mtnwr_mif.end_x = cur_buf->width - 1; di_mtnwr_mif.start_y = 0; di_mtnwr_mif.end_y = cur_buf->height / 2 - 1; if (cur_buf->type & VIDTYPE_VIU_422) { di_inp_top_mif.video_mode = 0; di_inp_top_mif.set_separate_en = 0; di_inp_top_mif.src_field_mode = 0; di_inp_top_mif.output_field_num = 0; di_inp_top_mif.burst_size_y = 3; di_inp_top_mif.burst_size_cb = 0; di_inp_top_mif.burst_size_cr = 0; memcpy(&di_inp_bot_mif, &di_inp_top_mif, sizeof(DI_MIF_t)); di_inp_top_mif.luma_x_start0 = 0; di_inp_top_mif.luma_x_end0 = cur_buf->width - 1; di_inp_top_mif.luma_y_start0 = 0; di_inp_top_mif.luma_y_end0 = cur_buf->height / 2 - 1; di_inp_top_mif.chroma_x_start0 = 0; di_inp_top_mif.chroma_x_end0 = 0; di_inp_top_mif.chroma_y_start0 = 0; di_inp_top_mif.chroma_y_end0 = 0; di_inp_bot_mif.luma_x_start0 = 0; di_inp_bot_mif.luma_x_end0 = cur_buf->width - 1; di_inp_bot_mif.luma_y_start0 = 0; di_inp_bot_mif.luma_y_end0 = cur_buf->height / 2 - 1; di_inp_bot_mif.chroma_x_start0 = 0; di_inp_bot_mif.chroma_x_end0 = 0; di_inp_bot_mif.chroma_y_start0 = 0; di_inp_bot_mif.chroma_y_end0 = 0; } else { di_inp_top_mif.video_mode = 0; di_inp_top_mif.set_separate_en = 1; di_inp_top_mif.src_field_mode = 1; di_inp_top_mif.burst_size_y = 3; di_inp_top_mif.burst_size_cb = 1; di_inp_top_mif.burst_size_cr = 1; memcpy(&di_inp_bot_mif, &di_inp_top_mif, sizeof(DI_MIF_t)); di_inp_top_mif.output_field_num = 0; // top di_inp_bot_mif.output_field_num = 1; // bottom di_inp_top_mif.luma_x_start0 = 0; di_inp_top_mif.luma_x_end0 = cur_buf->width - 1; di_inp_top_mif.luma_y_start0 = 0; di_inp_top_mif.luma_y_end0 = cur_buf->height - 2; di_inp_top_mif.chroma_x_start0 = 0; di_inp_top_mif.chroma_x_end0 = cur_buf->width / 2 - 1; di_inp_top_mif.chroma_y_start0 = 0; di_inp_top_mif.chroma_y_end0 = cur_buf->height / 
2 - 2; di_inp_bot_mif.luma_x_start0 = 0; di_inp_bot_mif.luma_x_end0 = cur_buf->width - 1; di_inp_bot_mif.luma_y_start0 = 1; di_inp_bot_mif.luma_y_end0 = cur_buf->height - 1; di_inp_bot_mif.chroma_x_start0 = 0; di_inp_bot_mif.chroma_x_end0 = cur_buf->width / 2 - 1; di_inp_bot_mif.chroma_y_start0 = 1; di_inp_bot_mif.chroma_y_end0 = cur_buf->height / 2 - 1; } di_nrwr_mif.canvas_num = DEINTERLACE_CANVAS_BASE_INDEX; di_mtnwr_mif.canvas_num = DEINTERLACE_CANVAS_BASE_INDEX + 1; di_chan2_mif.canvas0_addr0 = DEINTERLACE_CANVAS_BASE_INDEX + 2; di_mem_mif.canvas0_addr0 = DEINTERLACE_CANVAS_BASE_INDEX + 3; di_buf0_mif.canvas0_addr0 = DEINTERLACE_CANVAS_BASE_INDEX + 4; di_buf1_mif.canvas0_addr0 = DEINTERLACE_CANVAS_BASE_INDEX + 5; di_mtncrd_mif.canvas_num = DEINTERLACE_CANVAS_BASE_INDEX + 6; di_mtnprd_mif.canvas_num = DEINTERLACE_CANVAS_BASE_INDEX + 7; enable_di_mode_check( 0, cur_buf->width - 1, 0, cur_buf->height / 2 - 1, // window 0 ( start_x, end_x, start_y, end_y) 0, cur_buf->width - 1, 0, cur_buf->height / 2 - 1, // window 1 ( start_x, end_x, start_y, end_y) 0, cur_buf->width - 1, 0, cur_buf->height / 2 - 1, // window 2 ( start_x, end_x, start_y, end_y) 0, cur_buf->width - 1, 0, cur_buf->height / 2 - 1, // window 3 ( start_x, end_x, start_y, end_y) 0, cur_buf->width - 1, 0, cur_buf->height / 2 - 1, // window 4 ( start_x, end_x, start_y, end_y) 16, 16, 16, 16, 16, // windows 32 level 256, 256, 256, 256, 256, // windows 22 level 16, 256); // field 32 level; field 22 level } temp = di_mem_start + (MAX_CANVAS_WIDTH * MAX_CANVAS_HEIGHT * 5 / 4) * ((pre_field_counter + di_checked_field) % DI_BUF_NUM); canvas_config(di_nrwr_mif.canvas_num, temp, MAX_CANVAS_WIDTH * 2, MAX_CANVAS_HEIGHT / 2, 0, 0); temp = di_mem_start + (MAX_CANVAS_WIDTH * MAX_CANVAS_HEIGHT * 5 / 4) * ((pre_field_counter + di_checked_field) % DI_BUF_NUM) + (MAX_CANVAS_WIDTH * MAX_CANVAS_HEIGHT); canvas_config(di_mtnwr_mif.canvas_num, temp, MAX_CANVAS_WIDTH / 2, MAX_CANVAS_HEIGHT / 2, 0, 0); temp = di_mem_start + (MAX_CANVAS_WIDTH * MAX_CANVAS_HEIGHT * 5 / 4) * ((pre_field_counter + di_checked_field + DI_BUF_NUM - 1) % DI_BUF_NUM); canvas_config(di_chan2_mif.canvas0_addr0, temp, MAX_CANVAS_WIDTH * 2, MAX_CANVAS_HEIGHT / 2, 0, 0); temp = di_mem_start + (MAX_CANVAS_WIDTH * MAX_CANVAS_HEIGHT * 5 / 4) * ((pre_field_counter + di_checked_field + DI_BUF_NUM - 2) % DI_BUF_NUM); canvas_config(di_mem_mif.canvas0_addr0, temp, MAX_CANVAS_WIDTH * 2, MAX_CANVAS_HEIGHT / 2, 0, 0); WRITE_MPEG_REG(DI_PRE_CTRL, 0x3 << 30); enable_di_pre( (prev_struct == 1) ? &di_inp_top_mif : &di_inp_bot_mif, (pre_field_counter < 2) ? ((prev_struct == 1) ? &di_inp_top_mif : &di_inp_bot_mif) : &di_mem_mif, &di_chan2_mif, &di_nrwr_mif, &di_mtnwr_mif, 1, // nr enable (pre_field_counter >= 2), // mtn enable (pre_field_counter >= 2), // 3:2 pulldown check enable (pre_field_counter >= 1), // 2:2 pulldown check enable #if defined(CONFIG_ARCH_MESON) 1, // hist check_en #elif defined(CONFIG_ARCH_MESON2) 0, // hist check_en nr_hfilt_en, // nr_hfilt_en nr_hfilt_mb_en, // nr_hfilt_mb_en 1, // mtn_modify_en, #elif defined(CONFIG_ARCH_MESON3) 1, // hist check_en #endif (prev_struct == 1) ? 1 : 0, // field num for chan2. 1 bottom, 0 top. 0, // pre viu link. 
PRE_HOLD_LINE ); } void di_pre_isr(struct work_struct *work) { if (!vf_get_provider(RECEIVER_NAME)) { return; } di_pre_process(); } void run_deinterlace(unsigned zoom_start_x_lines, unsigned zoom_end_x_lines, unsigned zoom_start_y_lines, unsigned zoom_end_y_lines, unsigned type, int mode, int hold_line) { int di_width, di_height, di_start_x, di_end_x, di_start_y, di_end_y, size_change, position_change; #if defined(CONFIG_ARCH_MESON2) int nr_hfilt_en, nr_hfilt_mb_en, post_mb_en; if (noise_reduction_level == 2) { nr_hfilt_en = 1; nr_hfilt_mb_en = 1; post_mb_en = 1; } else { nr_hfilt_en = 0; nr_hfilt_mb_en = 0; post_mb_en = 0; } #endif di_start_x = zoom_start_x_lines; di_end_x = zoom_end_x_lines; di_width = di_end_x - di_start_x + 1; di_start_y = (zoom_start_y_lines + 1) & 0xfffffffe; di_end_y = (zoom_end_y_lines - 1) | 0x1; di_height = di_end_y - di_start_y + 1; if (deinterlace_mode == 1) { int i; unsigned long addr = di_mem_start; size_change = (READ_MPEG_REG(DI_POST_SIZE) != ((di_width - 1) | ((di_height - 1) << 16))); position_change = ((di_inp_top_mif.luma_x_start0 != di_start_x) || (di_inp_top_mif.luma_y_start0 != di_start_y)); if (size_change || position_change) { if (size_change) { initial_di_prepost(di_width, di_height / 2, di_width, di_height, hold_line); pattern_22 = 0; di_p32_info = di_p22_info = di_p32_info_2 = di_p22_info_2 = 0; pattern_len = 0; } di_mem_mif.luma_x_start0 = di_start_x; di_mem_mif.luma_x_end0 = di_end_x; di_mem_mif.luma_y_start0 = di_start_y / 2; di_mem_mif.luma_y_end0 = (di_end_y + 1) / 2 - 1; di_buf0_mif.luma_x_start0 = di_start_x; di_buf0_mif.luma_x_end0 = di_end_x; di_buf0_mif.luma_y_start0 = di_start_y / 2; di_buf0_mif.luma_y_end0 = (di_end_y + 1) / 2 - 1; di_chan2_mif.luma_x_start0 = di_start_x; di_chan2_mif.luma_x_end0 = di_end_x; di_chan2_mif.luma_y_start0 = di_start_y / 2; di_chan2_mif.luma_y_end0 = (di_end_y + 1) / 2 - 1; di_nrwr_mif.start_x = di_start_x; di_nrwr_mif.end_x = di_end_x; di_nrwr_mif.start_y = di_start_y / 2; di_nrwr_mif.end_y = (di_end_y + 1) / 2 - 1; di_mtnwr_mif.start_x = di_start_x; di_mtnwr_mif.end_x = di_end_x; di_mtnwr_mif.start_y = di_start_y / 2; di_mtnwr_mif.end_y = (di_end_y + 1) / 2 - 1; di_mtncrd_mif.start_x = di_start_x; di_mtncrd_mif.end_x = di_end_x; di_mtncrd_mif.start_y = di_start_y / 2; di_mtncrd_mif.end_y = (di_end_y + 1) / 2 - 1; enable_di_mode_check( di_start_x, di_end_x, di_start_y, (di_end_y + 1) / 2 - 1, // window 0 ( start_x, end_x, start_y, end_y) di_start_x, di_end_x, di_start_y, (di_end_y + 1) / 2 - 1, // window 1 ( start_x, end_x, start_y, end_y) di_start_x, di_end_x, di_start_y, (di_end_y + 1) / 2 - 1, // window 2 ( start_x, end_x, start_y, end_y) di_start_x, di_end_x, di_start_y, (di_end_y + 1) / 2 - 1, // window 3 ( start_x, end_x, start_y, end_y) di_start_x, di_end_x, di_start_y, (di_end_y + 1) / 2 - 1, // window 4 ( start_x, end_x, start_y, end_y) 16, 16, 16, 16, 16, // windows 32 level 256, 256, 256, 256, 256, // windows 22 level 16, 256); // field 32 level; field 22 level pre_field_counter = field_counter = di_checked_field = 0; if (type & VIDTYPE_VIU_422) { di_inp_top_mif.video_mode = 0; di_inp_top_mif.set_separate_en = 0; di_inp_top_mif.src_field_mode = 0; di_inp_top_mif.output_field_num = 0; di_inp_top_mif.burst_size_y = 3; di_inp_top_mif.burst_size_cb = 0; di_inp_top_mif.burst_size_cr = 0; memcpy(&di_inp_bot_mif, &di_inp_top_mif, sizeof(DI_MIF_t)); di_inp_top_mif.luma_x_start0 = di_start_x; di_inp_top_mif.luma_x_end0 = di_end_x; di_inp_top_mif.luma_y_start0 = di_start_y; di_inp_top_mif.luma_y_end0 = 
(di_end_y + 1) / 2 - 1;
                di_inp_top_mif.chroma_x_start0 = 0;
                di_inp_top_mif.chroma_x_end0 = 0;
                di_inp_top_mif.chroma_y_start0 = 0;
                di_inp_top_mif.chroma_y_end0 = 0;
                di_inp_bot_mif.luma_x_start0 = di_start_x;
                di_inp_bot_mif.luma_x_end0 = di_end_x;
                di_inp_bot_mif.luma_y_start0 = di_start_y;
                di_inp_bot_mif.luma_y_end0 = (di_end_y + 1) / 2 - 1;
                di_inp_bot_mif.chroma_x_start0 = 0;
                di_inp_bot_mif.chroma_x_end0 = 0;
                di_inp_bot_mif.chroma_y_start0 = 0;
                di_inp_bot_mif.chroma_y_end0 = 0;
            } else {
                di_inp_top_mif.video_mode = 0;
                di_inp_top_mif.set_separate_en = 1;
                di_inp_top_mif.src_field_mode = 1;
                di_inp_top_mif.burst_size_y = 3;
                di_inp_top_mif.burst_size_cb = 1;
                di_inp_top_mif.burst_size_cr = 1;
                memcpy(&di_inp_bot_mif, &di_inp_top_mif, sizeof(DI_MIF_t));
                di_inp_top_mif.output_field_num = 0; // top
                di_inp_bot_mif.output_field_num = 1; // bottom
                di_inp_top_mif.luma_x_start0 = di_start_x;
                di_inp_top_mif.luma_x_end0 = di_end_x;
                di_inp_top_mif.luma_y_start0 = di_start_y;
                di_inp_top_mif.luma_y_end0 = di_end_y - 1;
                di_inp_top_mif.chroma_x_start0 = di_start_x / 2;
                di_inp_top_mif.chroma_x_end0 = (di_end_x + 1) / 2 - 1;
                di_inp_top_mif.chroma_y_start0 = di_start_y / 2;
                di_inp_top_mif.chroma_y_end0 = (di_end_y + 1) / 2 - 2;
                di_inp_bot_mif.luma_x_start0 = di_start_x;
                di_inp_bot_mif.luma_x_end0 = di_end_x;
                di_inp_bot_mif.luma_y_start0 = di_start_y + 1;
                di_inp_bot_mif.luma_y_end0 = di_end_y;
                di_inp_bot_mif.chroma_x_start0 = di_start_x / 2;
                di_inp_bot_mif.chroma_x_end0 = (di_end_x + 1) / 2 - 1;
                di_inp_bot_mif.chroma_y_start0 = di_start_y / 2 + 1;
                di_inp_bot_mif.chroma_y_end0 = (di_end_y + 1) / 2 - 1;
            }

            for (i = 0 ; i < 4 ; i++) {
                canvas_config(DEINTERLACE_CANVAS_BASE_INDEX + i, addr,
                              MAX_CANVAS_WIDTH * 2, MAX_CANVAS_HEIGHT / 2, 0, 0);
                addr += MAX_CANVAS_WIDTH * MAX_CANVAS_HEIGHT;
            }
            for (i = 4 ; i < 8 ; i++) {
                canvas_config(DEINTERLACE_CANVAS_BASE_INDEX + i, addr,
                              MAX_CANVAS_WIDTH / 2, MAX_CANVAS_HEIGHT / 2, 0, 0);
                addr += MAX_CANVAS_WIDTH * MAX_CANVAS_HEIGHT / 4;
            }

            di_inp_top_mif.canvas0_addr0 = di_inp_bot_mif.canvas0_addr0 = DISPLAY_CANVAS_BASE_INDEX;
            di_inp_top_mif.canvas0_addr1 = di_inp_bot_mif.canvas0_addr1 = DISPLAY_CANVAS_BASE_INDEX + 1;
            di_inp_top_mif.canvas0_addr2 = di_inp_bot_mif.canvas0_addr2 = DISPLAY_CANVAS_BASE_INDEX + 2;
        }

        pattern_check_prepost();

#if defined(CONFIG_ARCH_MESON)
        if ((type & VIDTYPE_TYPEMASK) == VIDTYPE_INTERLACE_TOP) {
            enable_di_prepost_full(
                &di_inp_top_mif, &di_mem_mif,
                (field_counter < 1 ? &di_inp_top_mif : &di_buf0_mif),
                NULL, &di_chan2_mif, &di_nrwr_mif, NULL,
                &di_mtnwr_mif, &di_mtncrd_mif, NULL,
                (pre_field_counter != field_counter || field_counter == 0), // noise reduction enable
                (field_counter >= 2),                                       // motion check enable
                (pre_field_counter != field_counter && field_counter >= 2), // 3:2 pulldown check enable
                (pre_field_counter != field_counter && field_counter >= 1), // 2:2 pulldown check enable
                (pre_field_counter != field_counter || field_counter == 0), // video luma histogram check enable
                1,                     // edge interpolation module enable.
                (field_counter >= 2),  // blend enable.
                (field_counter >= 2),  // blend with mtn.
                (field_counter < 2 ? 2 : blend_mode), // blend mode
                1,                     // deinterlace output to VPP.
                0,                     // deinterlace output to DDR SDRAM at the same time.
                (field_counter >= 1),  // 1 = current display field is a bottom field, we need to generate the top field.
                (field_counter >= 1),  // pre field num: 1 = current chan2 input field is a bottom field.
                (field_counter >= 1),  // prepost link; the first field does not need to be prepost-linked.
                hold_line
            );
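            /*
             * The call above (and its bottom-field twin below) stages the
             * pipeline up over the first fields: NR and histogram run
             * whenever the pre stage is ahead of the post stage or at
             * field 0, motion and 3:2 detection need two fields of
             * history, 2:2 needs one, and blending starts only once two
             * fields have been buffered.
             */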
hold_line ); } else { enable_di_prepost_full( &di_inp_bot_mif, &di_mem_mif, (field_counter < 1 ? &di_inp_bot_mif : &di_buf0_mif), NULL, &di_chan2_mif, &di_nrwr_mif, NULL, &di_mtnwr_mif, &di_mtncrd_mif, NULL, (pre_field_counter != field_counter || field_counter == 0), // noise reduction enable (field_counter >= 2), // motion check enable (pre_field_counter != field_counter && field_counter >= 2), // 3:2 pulldown check enable (pre_field_counter != field_counter && field_counter >= 1), // 2:2 pulldown check enable (pre_field_counter != field_counter || field_counter == 0), // video luma histogram check enable 1, // edge interpolation module enable. (field_counter >= 2), // blend enable. (field_counter >= 2), // blend with mtn. (field_counter < 2 ? 2 : blend_mode), // blend mode: 3 motion adapative blend. 1, // deinterlace output to VPP. 0, // deinterlace output to DDR SDRAM at same time. (field_counter < 1), // 1 = current display field is bottom field, we need generated top field. (field_counter < 1), // pre field num. 1 = current chan2 input field is bottom field. (field_counter >= 1), // prepost link. for the first field it look no need to be propost_link. hold_line ); } #elif defined(CONFIG_ARCH_MESON2) if ((type & VIDTYPE_TYPEMASK) == VIDTYPE_INTERLACE_TOP) { enable_di_prepost_full( &di_inp_top_mif, (field_counter < 2 ? &di_inp_top_mif : &di_mem_mif), NULL, &di_buf1_mif, &di_chan2_mif, &di_nrwr_mif, NULL, &di_mtnwr_mif, &di_mtncrd_mif, &di_mtnprd_mif, (pre_field_counter != field_counter || field_counter == 0), // noise reduction enable (field_counter >= 2), // motion check enable (pre_field_counter != field_counter && field_counter >= 2), // 3:2 pulldown check enable (pre_field_counter != field_counter && field_counter >= 1), // 2:2 pulldown check enable (pre_field_counter != field_counter || field_counter == 0), // video luma histogram check enable 1, // edge interpolation module enable. (field_counter >= 3), // blend enable. (field_counter >= 3), // blend with mtn. (field_counter < 3 ? 2 : blend_mode), // blend mode 1, // deinterlace output to VPP. 0, // deinterlace output to DDR SDRAM at same time. nr_hfilt_en, // nr_hfilt_en nr_hfilt_mb_en, // nr_hfilt_mb_en 1, // mtn_modify_en, 1, // blend_mtn_filt_en 1, // blend_data_filt_en post_mb_en, // post_mb_en 0, // 1 = current display field is bottom field, we need generated top field. 1, // pre field num: 1 = current chan2 input field is bottom field. (field_counter >= 2), // prepost link. for the first field it look no need to be propost_link. hold_line ); } else { enable_di_prepost_full( &di_inp_bot_mif, (field_counter < 2 ? &di_inp_top_mif : &di_mem_mif), NULL, &di_buf1_mif, &di_chan2_mif, &di_nrwr_mif, NULL, &di_mtnwr_mif, &di_mtncrd_mif, &di_mtnprd_mif, (pre_field_counter != field_counter || field_counter == 0), // noise reduction enable (field_counter >= 2), // motion check enable (pre_field_counter != field_counter && field_counter >= 2), // 3:2 pulldown check enable (pre_field_counter != field_counter && field_counter >= 1), // 2:2 pulldown check enable (pre_field_counter != field_counter || field_counter == 0), // video luma histogram check enable 1, // edge interpolation module enable. (field_counter >= 3), // blend enable. (field_counter >= 3), // blend with mtn. (field_counter < 3 ? 2 : blend_mode), // blend mode: 3 motion adapative blend. 1, // deinterlace output to VPP. 0, // deinterlace output to DDR SDRAM at same time. 
nr_hfilt_en, // nr_hfilt_en nr_hfilt_mb_en, // nr_hfilt_mb_en 1, // mtn_modify_en, 1, // blend_mtn_filt_en 1, // blend_data_filt_en post_mb_en, // post_mb_en 1, // 1 = current display field is bottom field, we need generated top field. 0, // pre field num. 1 = current chan2 input field is bottom field. (field_counter >= 2), // prepost link. for the first field it look no need to be propost_link. hold_line ); } #endif pre_field_counter = field_counter; } else { int post_blend_en, post_blend_mode; if (READ_MPEG_REG(DI_POST_SIZE) != ((di_width - 1) | ((di_height - 1) << 16)) || (di_buf0_mif.luma_x_start0 != di_start_x) || (di_buf0_mif.luma_y_start0 != di_start_y / 2)) { initial_di_post(di_width, di_height, hold_line); di_buf0_mif.luma_x_start0 = di_start_x; di_buf0_mif.luma_x_end0 = di_end_x; di_buf0_mif.luma_y_start0 = di_start_y / 2; di_buf0_mif.luma_y_end0 = (di_end_y + 1) / 2 - 1; di_buf1_mif.luma_x_start0 = di_start_x; di_buf1_mif.luma_x_end0 = di_end_x; di_buf1_mif.luma_y_start0 = di_start_y / 2; di_buf1_mif.luma_y_end0 = (di_end_y + 1) / 2 - 1; di_mtncrd_mif.start_x = di_start_x; di_mtncrd_mif.end_x = di_end_x; di_mtncrd_mif.start_y = di_start_y / 2; di_mtncrd_mif.end_y = (di_end_y + 1) / 2 - 1; di_mtnprd_mif.start_x = di_start_x; di_mtnprd_mif.end_x = di_end_x; di_mtnprd_mif.start_y = di_start_y / 2; di_mtnprd_mif.end_y = (di_end_y + 1) / 2 - 1; } post_blend_en = 1; post_blend_mode = mode; if ((post_blend_mode == 3) && (field_counter <= 2)) { post_blend_en = 0; post_blend_mode = 2; } enable_di_post( &di_buf0_mif, &di_buf1_mif, NULL, &di_mtncrd_mif, &di_mtnprd_mif, 1, // ei enable post_blend_en, // blend enable post_blend_en, // blend mtn enable post_blend_mode, // blend mode. 1, // di_vpp_en. 0, // di_ddr_en. #if defined(CONFIG_ARCH_MESON) #elif defined(CONFIG_ARCH_MESON2) 1, // blend_mtn_filt_en 1, // blend_data_filt_en post_mb_en, // post_mb_en #endif (type & VIDTYPE_TYPEMASK) == VIDTYPE_INTERLACE_TOP ? 
0 : 1, // 1 bottom generate top hold_line ); } } void di_pre_timer_func(unsigned long arg) { struct timer_list *timer = (struct timer_list *)arg; schedule_work(&di_pre_work); timer->expires = jiffies + DI_PRE_INTERVAL; add_timer(timer); } void deinterlace_init(void) { di_mem_mif.chroma_x_start0 = 0; di_mem_mif.chroma_x_end0 = 0; di_mem_mif.chroma_y_start0 = 0; di_mem_mif.chroma_y_end0 = 0; di_mem_mif.video_mode = 0; di_mem_mif.set_separate_en = 0; di_mem_mif.src_field_mode = 0; di_mem_mif.output_field_num = 0; di_mem_mif.burst_size_y = 3; di_mem_mif.burst_size_cb = 0; di_mem_mif.burst_size_cr = 0; di_mem_mif.canvas0_addr1 = 0; di_mem_mif.canvas0_addr2 = 0; memcpy(&di_buf0_mif, &di_mem_mif, sizeof(DI_MIF_t)); memcpy(&di_buf1_mif, &di_mem_mif, sizeof(DI_MIF_t)); memcpy(&di_chan2_mif, &di_buf1_mif, sizeof(DI_MIF_t)); WRITE_MPEG_REG(DI_PRE_HOLD, (1 << 31) | (31 << 16) | 31); #if defined(CONFIG_ARCH_MESON) WRITE_MPEG_REG(DI_NRMTN_CTRL0, 0xb00a0603); #endif INIT_WORK(&di_pre_work, di_pre_isr); init_timer(&di_pre_timer); di_pre_timer.data = (ulong) & di_pre_timer; di_pre_timer.function = di_pre_timer_func; di_pre_timer.expires = jiffies + DI_PRE_INTERVAL; add_timer(&di_pre_timer); } static int deinterlace_probe(struct platform_device *pdev) { struct resource *mem; printk("Amlogic deinterlace init\n"); if (!(mem = platform_get_resource(pdev, IORESOURCE_MEM, 0))) { printk("\ndeinterlace memory resource undefined.\n"); return -EFAULT; } // declare deinterlace memory di_mem_start = mem->start; printk("Deinterlace memory: start = 0x%x, end = 0x%x\n", di_mem_start, mem->end); deinterlace_init(); return 0; } static int deinterlace_remove(struct platform_device *pdev) { printk("Amlogic deinterlace release\n"); del_timer_sync(&di_pre_timer); return 0; } static struct platform_driver deinterlace_driver = { .probe = deinterlace_probe, .remove = deinterlace_remove, .driver = { .name = "deinterlace", } }; static int __init deinterlace_module_init(void) { if (platform_driver_register(&deinterlace_driver)) { printk("failed to register deinterlace module\n"); return -ENODEV; } return 0; } static void __exit deinterlace_module_exit(void) { platform_driver_unregister(&deinterlace_driver); return; } MODULE_PARM_DESC(deinterlace_mode, "\n deinterlace mode \n"); module_param(deinterlace_mode, int, 0664); #if defined(CONFIG_ARCH_MESON2) MODULE_PARM_DESC(noise_reduction_level, "\n noise reduction level \n"); module_param(noise_reduction_level, int, 0664); #endif module_init(deinterlace_module_init); module_exit(deinterlace_module_exit); MODULE_DESCRIPTION("AMLOGIC deinterlace driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Qi Wang <qi.wang@amlogic.com>");
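
/*
 * Illustrative sketch (not part of this driver): the polling pattern used
 * above -- a self-rearming kernel timer whose handler defers the real work
 * to a workqueue -- reduced to its minimal form.  All "example_" names are
 * hypothetical; the timer API matches the pre-4.15 kernel interface this
 * driver itself uses.
 */
#if 0
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define EXAMPLE_INTERVAL (HZ / 100)   /* poll roughly every 10 ms */

static struct work_struct example_work;
static struct timer_list example_timer;

static void example_work_func(struct work_struct *work)
{
    /* Heavy lifting runs here, in process context, not in the timer. */
}

static void example_timer_func(unsigned long arg)
{
    struct timer_list *timer = (struct timer_list *)arg;

    schedule_work(&example_work);          /* defer to the workqueue */
    timer->expires = jiffies + EXAMPLE_INTERVAL;
    add_timer(timer);                      /* re-arm for the next poll */
}

static void example_setup(void)
{
    INIT_WORK(&example_work, example_work_func);
    init_timer(&example_timer);
    example_timer.data = (unsigned long)&example_timer;
    example_timer.function = example_timer_func;
    example_timer.expires = jiffies + EXAMPLE_INTERVAL;
    add_timer(&example_timer);
}
#endif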
j1nx/Openlinux.Amlogic.M3
drivers/amlogic/amports/deinterlace.c
C
gpl-2.0
136980
/* Top-level LTO routines. Copyright (C) 2009-2015 Free Software Foundation, Inc. Contributed by CodeSourcery, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "opts.h" #include "toplev.h" #include "hash-set.h" #include "machmode.h" #include "vec.h" #include "double-int.h" #include "input.h" #include "alias.h" #include "symtab.h" #include "options.h" #include "wide-int.h" #include "inchash.h" #include "real.h" #include "fixed-value.h" #include "tree.h" #include "fold-const.h" #include "stor-layout.h" #include "diagnostic-core.h" #include "tm.h" #include "predict.h" #include "basic-block.h" #include "hash-map.h" #include "is-a.h" #include "plugin-api.h" #include "hard-reg-set.h" #include "input.h" #include "function.h" #include "ipa-ref.h" #include "cgraph.h" #include "tree-ssa-operands.h" #include "tree-pass.h" #include "langhooks.h" #include "bitmap.h" #include "inchash.h" #include "alloc-pool.h" #include "symbol-summary.h" #include "ipa-prop.h" #include "common.h" #include "debug.h" #include "tree-ssa-alias.h" #include "internal-fn.h" #include "gimple-expr.h" #include "gimple.h" #include "lto.h" #include "lto-tree.h" #include "lto-streamer.h" #include "lto-section-names.h" #include "tree-streamer.h" #include "splay-tree.h" #include "lto-partition.h" #include "data-streamer.h" #include "context.h" #include "pass_manager.h" #include "ipa-inline.h" #include "params.h" #include "ipa-utils.h" #include "gomp-constants.h" /* Number of parallel tasks to run, -1 if we want to use GNU Make jobserver. */ static int lto_parallelism; static GTY(()) tree first_personality_decl; static GTY(()) const unsigned char *lto_mode_identity_table; /* Returns a hash code for P. */ static hashval_t hash_name (const void *p) { const struct lto_section_slot *ds = (const struct lto_section_slot *) p; return (hashval_t) htab_hash_string (ds->name); } /* Returns nonzero if P1 and P2 are equal. */ static int eq_name (const void *p1, const void *p2) { const struct lto_section_slot *s1 = (const struct lto_section_slot *) p1; const struct lto_section_slot *s2 = (const struct lto_section_slot *) p2; return strcmp (s1->name, s2->name) == 0; } /* Free lto_section_slot */ static void free_with_string (void *arg) { struct lto_section_slot *s = (struct lto_section_slot *)arg; free (CONST_CAST (char *, s->name)); free (arg); } /* Create section hash table */ htab_t lto_obj_create_section_hash_table (void) { return htab_create (37, hash_name, eq_name, free_with_string); } /* Delete an allocated integer KEY in the splay tree. */ static void lto_splay_tree_delete_id (splay_tree_key key) { free ((void *) key); } /* Compare splay tree node ids A and B. 
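*/

/* Illustrative sketch only, not part of GCC: the helpers defined below
   build a splay tree keyed by heap-allocated HOST_WIDE_INT ids, mapping
   each id to an lto_file_decl_data.  Assuming a file_data pointer is in
   scope, a typical round trip looks like:  */
#if 0
  splay_tree ids = lto_splay_tree_new ();
  lto_splay_tree_insert (ids, (unsigned HOST_WIDE_INT) 0x1234, file_data);
  splay_tree_node n = lto_splay_tree_lookup (ids, 0x1234);   /* NULL if absent */
  if (n && lto_splay_tree_id_equal_p (n->key, 0x1234))
    file_data = (struct lto_file_decl_data *) n->value;
#endif

/* Compare splay tree node ids A and B.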
*/ static int lto_splay_tree_compare_ids (splay_tree_key a, splay_tree_key b) { unsigned HOST_WIDE_INT ai; unsigned HOST_WIDE_INT bi; ai = *(unsigned HOST_WIDE_INT *) a; bi = *(unsigned HOST_WIDE_INT *) b; if (ai < bi) return -1; else if (ai > bi) return 1; return 0; } /* Look up splay tree node by ID in splay tree T. */ static splay_tree_node lto_splay_tree_lookup (splay_tree t, unsigned HOST_WIDE_INT id) { return splay_tree_lookup (t, (splay_tree_key) &id); } /* Check if KEY has ID. */ static bool lto_splay_tree_id_equal_p (splay_tree_key key, unsigned HOST_WIDE_INT id) { return *(unsigned HOST_WIDE_INT *) key == id; } /* Insert a splay tree node into tree T with ID as key and FILE_DATA as value. The ID is allocated separately because we need HOST_WIDE_INTs which may be wider than a splay_tree_key. */ static void lto_splay_tree_insert (splay_tree t, unsigned HOST_WIDE_INT id, struct lto_file_decl_data *file_data) { unsigned HOST_WIDE_INT *idp = XCNEW (unsigned HOST_WIDE_INT); *idp = id; splay_tree_insert (t, (splay_tree_key) idp, (splay_tree_value) file_data); } /* Create a splay tree. */ static splay_tree lto_splay_tree_new (void) { return splay_tree_new (lto_splay_tree_compare_ids, lto_splay_tree_delete_id, NULL); } /* Return true when NODE has a clone that is analyzed (i.e. we need to load its body even if the node itself is not needed). */ static bool has_analyzed_clone_p (struct cgraph_node *node) { struct cgraph_node *orig = node; node = node->clones; if (node) while (node != orig) { if (node->analyzed) return true; if (node->clones) node = node->clones; else if (node->next_sibling_clone) node = node->next_sibling_clone; else { while (node != orig && !node->next_sibling_clone) node = node->clone_of; if (node != orig) node = node->next_sibling_clone; } } return false; } /* Read the function body for the function associated with NODE. */ static void lto_materialize_function (struct cgraph_node *node) { tree decl; decl = node->decl; /* Read in functions with body (analyzed nodes) and also functions that are needed to produce virtual clones. */ if ((node->has_gimple_body_p () && node->analyzed) || node->used_as_abstract_origin || has_analyzed_clone_p (node)) { /* Clones don't need to be read. */ if (node->clone_of) return; if (DECL_FUNCTION_PERSONALITY (decl) && !first_personality_decl) first_personality_decl = DECL_FUNCTION_PERSONALITY (decl); } /* Let the middle end know about the function. */ rest_of_decl_compilation (decl, 1, 0); } /* Decode the content of memory pointed to by DATA in the in decl state object STATE. DATA_IN points to a data_in structure for decoding. Return the address after the decoded object in the input. */ static const uint32_t * lto_read_in_decl_state (struct data_in *data_in, const uint32_t *data, struct lto_in_decl_state *state) { uint32_t ix; tree decl; uint32_t i, j; ix = *data++; decl = streamer_tree_cache_get_tree (data_in->reader_cache, ix); if (!VAR_OR_FUNCTION_DECL_P (decl)) { gcc_assert (decl == void_type_node); decl = NULL_TREE; } state->fn_decl = decl; for (i = 0; i < LTO_N_DECL_STREAMS; i++) { uint32_t size = *data++; vec<tree, va_gc> *decls = NULL; vec_alloc (decls, size); for (j = 0; j < size; j++) vec_safe_push (decls, streamer_tree_cache_get_tree (data_in->reader_cache, data[j])); state->streams[i] = decls; data += size; } return data; } /* Global canonical type table. 
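*/

/* Illustrative sketch only, not part of GCC: the uint32 stream decoded by
   lto_read_in_decl_state above has the following shape (per the loop above;
   the bracketed word offsets here are for illustration only):

     [0]        cache index of the owning FUNCTION_DECL (or void_type_node
                when the state is the global one)
     for each of the LTO_N_DECL_STREAMS decl streams:
       [k]      number of trees in this stream
       [k+1..]  that many tree cache indices

   A reader therefore advances exactly 1 + sum_i (1 + size_i) words per
   decl state record.  */

/* Global canonical type table.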
*/ static htab_t gimple_canonical_types; static hash_map<const_tree, hashval_t> *canonical_type_hash_cache; static unsigned long num_canonical_type_hash_entries; static unsigned long num_canonical_type_hash_queries; static void iterative_hash_canonical_type (tree type, inchash::hash &hstate); static hashval_t gimple_canonical_type_hash (const void *p); static void gimple_register_canonical_type_1 (tree t, hashval_t hash); /* Returning a hash value for gimple type TYPE. The hash value returned is equal for types considered compatible by gimple_canonical_types_compatible_p. */ static hashval_t hash_canonical_type (tree type) { inchash::hash hstate; /* Combine a few common features of types so that types are grouped into smaller sets; when searching for existing matching types to merge, only existing types having the same features as the new type will be checked. */ hstate.add_int (TREE_CODE (type)); hstate.add_int (TYPE_MODE (type)); /* Incorporate common features of numerical types. */ if (INTEGRAL_TYPE_P (type) || SCALAR_FLOAT_TYPE_P (type) || FIXED_POINT_TYPE_P (type) || TREE_CODE (type) == OFFSET_TYPE || POINTER_TYPE_P (type)) { hstate.add_int (TYPE_UNSIGNED (type)); hstate.add_int (TYPE_PRECISION (type)); } if (VECTOR_TYPE_P (type)) { hstate.add_int (TYPE_VECTOR_SUBPARTS (type)); hstate.add_int (TYPE_UNSIGNED (type)); } if (TREE_CODE (type) == COMPLEX_TYPE) hstate.add_int (TYPE_UNSIGNED (type)); /* For pointer and reference types, fold in information about the type pointed to but do not recurse to the pointed-to type. */ if (POINTER_TYPE_P (type)) { hstate.add_int (TYPE_ADDR_SPACE (TREE_TYPE (type))); hstate.add_int (TREE_CODE (TREE_TYPE (type))); } /* For integer types hash only the string flag. */ if (TREE_CODE (type) == INTEGER_TYPE) hstate.add_int (TYPE_STRING_FLAG (type)); /* For array types hash the domain bounds and the string flag. */ if (TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type)) { hstate.add_int (TYPE_STRING_FLAG (type)); /* OMP lowering can introduce error_mark_node in place of random local decls in types. */ if (TYPE_MIN_VALUE (TYPE_DOMAIN (type)) != error_mark_node) inchash::add_expr (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), hstate); if (TYPE_MAX_VALUE (TYPE_DOMAIN (type)) != error_mark_node) inchash::add_expr (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), hstate); } /* Recurse for aggregates with a single element type. */ if (TREE_CODE (type) == ARRAY_TYPE || TREE_CODE (type) == COMPLEX_TYPE || TREE_CODE (type) == VECTOR_TYPE) iterative_hash_canonical_type (TREE_TYPE (type), hstate); /* Incorporate function return and argument types. */ if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE) { unsigned na; tree p; /* For method types also incorporate their parent class. */ if (TREE_CODE (type) == METHOD_TYPE) iterative_hash_canonical_type (TYPE_METHOD_BASETYPE (type), hstate); iterative_hash_canonical_type (TREE_TYPE (type), hstate); for (p = TYPE_ARG_TYPES (type), na = 0; p; p = TREE_CHAIN (p)) { iterative_hash_canonical_type (TREE_VALUE (p), hstate); na++; } hstate.add_int (na); } if (RECORD_OR_UNION_TYPE_P (type)) { unsigned nf; tree f; for (f = TYPE_FIELDS (type), nf = 0; f; f = TREE_CHAIN (f)) if (TREE_CODE (f) == FIELD_DECL) { iterative_hash_canonical_type (TREE_TYPE (f), hstate); nf++; } hstate.add_int (nf); } return hstate.end(); } /* Returning a hash value for gimple type TYPE combined with VAL. */ static void iterative_hash_canonical_type (tree type, inchash::hash &hstate) { hashval_t v; /* An already processed type. 
*/ if (TYPE_CANONICAL (type)) { type = TYPE_CANONICAL (type); v = gimple_canonical_type_hash (type); } else { /* Canonical types should not be able to form SCCs by design, this recursion is just because we do not register canonical types in optimal order. To avoid quadratic behavior also register the type here. */ v = hash_canonical_type (type); gimple_register_canonical_type_1 (type, v); } hstate.add_int (v); } /* Returns the hash for a canonical type P. */ static hashval_t gimple_canonical_type_hash (const void *p) { num_canonical_type_hash_queries++; hashval_t *slot = canonical_type_hash_cache->get ((const_tree) p); gcc_assert (slot != NULL); return *slot; } /* The TYPE_CANONICAL merging machinery. It should closely resemble the middle-end types_compatible_p function. It needs to avoid claiming types are different for types that should be treated the same with respect to TBAA. Canonical types are also used for IL consistency checks via the useless_type_conversion_p predicate which does not handle all type kinds itself but falls back to pointer-comparison of TYPE_CANONICAL for aggregates for example. */ /* Return true iff T1 and T2 are structurally identical for what TBAA is concerned. */ static bool gimple_canonical_types_compatible_p (tree t1, tree t2) { /* Before starting to set up the SCC machinery handle simple cases. */ /* Check first for the obvious case of pointer identity. */ if (t1 == t2) return true; /* Check that we have two types to compare. */ if (t1 == NULL_TREE || t2 == NULL_TREE) return false; /* If the types have been previously registered and found equal they still are. */ if (TYPE_CANONICAL (t1) && TYPE_CANONICAL (t1) == TYPE_CANONICAL (t2)) return true; /* Can't be the same type if the types don't have the same code. */ if (TREE_CODE (t1) != TREE_CODE (t2)) return false; /* Qualifiers do not matter for canonical type comparison purposes. */ /* Void types and nullptr types are always the same. */ if (TREE_CODE (t1) == VOID_TYPE || TREE_CODE (t1) == NULLPTR_TYPE) return true; /* Can't be the same type if they have different mode. */ if (TYPE_MODE (t1) != TYPE_MODE (t2)) return false; /* Non-aggregate types can be handled cheaply. */ if (INTEGRAL_TYPE_P (t1) || SCALAR_FLOAT_TYPE_P (t1) || FIXED_POINT_TYPE_P (t1) || TREE_CODE (t1) == VECTOR_TYPE || TREE_CODE (t1) == COMPLEX_TYPE || TREE_CODE (t1) == OFFSET_TYPE || POINTER_TYPE_P (t1)) { /* Can't be the same type if they have different sign or precision. */ if (TYPE_PRECISION (t1) != TYPE_PRECISION (t2) || TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2)) return false; if (TREE_CODE (t1) == INTEGER_TYPE && TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2)) return false; /* For canonical type comparisons we do not want to build SCCs so we cannot compare pointed-to types. But we can, for now, require the same pointed-to type kind and match what useless_type_conversion_p would do. */ if (POINTER_TYPE_P (t1)) { if (TYPE_ADDR_SPACE (TREE_TYPE (t1)) != TYPE_ADDR_SPACE (TREE_TYPE (t2))) return false; if (TREE_CODE (TREE_TYPE (t1)) != TREE_CODE (TREE_TYPE (t2))) return false; } /* Tail-recurse to components. */ if (TREE_CODE (t1) == VECTOR_TYPE || TREE_CODE (t1) == COMPLEX_TYPE) return gimple_canonical_types_compatible_p (TREE_TYPE (t1), TREE_TYPE (t2)); return true; } /* Do type-specific comparisons. */ switch (TREE_CODE (t1)) { case ARRAY_TYPE: /* Array types are the same if the element types are the same and the number of elements are the same. 
*/ if (!gimple_canonical_types_compatible_p (TREE_TYPE (t1), TREE_TYPE (t2)) || TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2) || TYPE_NONALIASED_COMPONENT (t1) != TYPE_NONALIASED_COMPONENT (t2)) return false; else { tree i1 = TYPE_DOMAIN (t1); tree i2 = TYPE_DOMAIN (t2); /* For an incomplete external array, the type domain can be NULL_TREE. Check this condition also. */ if (i1 == NULL_TREE && i2 == NULL_TREE) return true; else if (i1 == NULL_TREE || i2 == NULL_TREE) return false; else { tree min1 = TYPE_MIN_VALUE (i1); tree min2 = TYPE_MIN_VALUE (i2); tree max1 = TYPE_MAX_VALUE (i1); tree max2 = TYPE_MAX_VALUE (i2); /* The minimum/maximum values have to be the same. */ if ((min1 == min2 || (min1 && min2 && ((TREE_CODE (min1) == PLACEHOLDER_EXPR && TREE_CODE (min2) == PLACEHOLDER_EXPR) || operand_equal_p (min1, min2, 0)))) && (max1 == max2 || (max1 && max2 && ((TREE_CODE (max1) == PLACEHOLDER_EXPR && TREE_CODE (max2) == PLACEHOLDER_EXPR) || operand_equal_p (max1, max2, 0))))) return true; else return false; } } case METHOD_TYPE: case FUNCTION_TYPE: /* Function types are the same if the return type and arguments types are the same. */ if (!gimple_canonical_types_compatible_p (TREE_TYPE (t1), TREE_TYPE (t2))) return false; if (!comp_type_attributes (t1, t2)) return false; if (TYPE_ARG_TYPES (t1) == TYPE_ARG_TYPES (t2)) return true; else { tree parms1, parms2; for (parms1 = TYPE_ARG_TYPES (t1), parms2 = TYPE_ARG_TYPES (t2); parms1 && parms2; parms1 = TREE_CHAIN (parms1), parms2 = TREE_CHAIN (parms2)) { if (!gimple_canonical_types_compatible_p (TREE_VALUE (parms1), TREE_VALUE (parms2))) return false; } if (parms1 || parms2) return false; return true; } case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: { tree f1, f2; /* For aggregate types, all the fields must be the same. */ for (f1 = TYPE_FIELDS (t1), f2 = TYPE_FIELDS (t2); f1 || f2; f1 = TREE_CHAIN (f1), f2 = TREE_CHAIN (f2)) { /* Skip non-fields. */ while (f1 && TREE_CODE (f1) != FIELD_DECL) f1 = TREE_CHAIN (f1); while (f2 && TREE_CODE (f2) != FIELD_DECL) f2 = TREE_CHAIN (f2); if (!f1 || !f2) break; /* The fields must have the same name, offset and type. */ if (DECL_NONADDRESSABLE_P (f1) != DECL_NONADDRESSABLE_P (f2) || !gimple_compare_field_offset (f1, f2) || !gimple_canonical_types_compatible_p (TREE_TYPE (f1), TREE_TYPE (f2))) return false; } /* If one aggregate has more fields than the other, they are not the same. */ if (f1 || f2) return false; return true; } default: gcc_unreachable (); } } /* Returns nonzero if P1 and P2 are equal. */ static int gimple_canonical_type_eq (const void *p1, const void *p2) { const_tree t1 = (const_tree) p1; const_tree t2 = (const_tree) p2; return gimple_canonical_types_compatible_p (CONST_CAST_TREE (t1), CONST_CAST_TREE (t2)); } /* Main worker for gimple_register_canonical_type. */ static void gimple_register_canonical_type_1 (tree t, hashval_t hash) { void **slot; gcc_checking_assert (TYPE_P (t) && !TYPE_CANONICAL (t)); slot = htab_find_slot_with_hash (gimple_canonical_types, t, hash, INSERT); if (*slot) { tree new_type = (tree)(*slot); gcc_checking_assert (new_type != t); TYPE_CANONICAL (t) = new_type; } else { TYPE_CANONICAL (t) = t; *slot = (void *) t; /* Cache the just computed hash value. */ num_canonical_type_hash_entries++; bool existed_p = canonical_type_hash_cache->put (t, hash); gcc_assert (!existed_p); } } /* Register type T in the global type table gimple_types and set TYPE_CANONICAL of T accordingly. 
This is used by LTO to merge structurally equivalent types for type-based aliasing purposes across different TUs and languages. ??? This merging does not exactly match how the tree.c middle-end functions will assign TYPE_CANONICAL when new types are created during optimization (which at least happens for pointer and array types). */ static void gimple_register_canonical_type (tree t) { if (TYPE_CANONICAL (t)) return; gimple_register_canonical_type_1 (t, hash_canonical_type (t)); } /* Re-compute TYPE_CANONICAL for NODE and related types. */ static void lto_register_canonical_types (tree node, bool first_p) { if (!node || !TYPE_P (node)) return; if (first_p) TYPE_CANONICAL (node) = NULL_TREE; if (POINTER_TYPE_P (node) || TREE_CODE (node) == COMPLEX_TYPE || TREE_CODE (node) == ARRAY_TYPE) lto_register_canonical_types (TREE_TYPE (node), first_p); if (!first_p) gimple_register_canonical_type (node); } /* Remember trees that contains references to declarations. */ static GTY(()) vec <tree, va_gc> *tree_with_vars; #define CHECK_VAR(tt) \ do \ { \ if ((tt) && VAR_OR_FUNCTION_DECL_P (tt) \ && (TREE_PUBLIC (tt) || DECL_EXTERNAL (tt))) \ return true; \ } while (0) #define CHECK_NO_VAR(tt) \ gcc_checking_assert (!(tt) || !VAR_OR_FUNCTION_DECL_P (tt)) /* Check presence of pointers to decls in fields of a tree_typed T. */ static inline bool mentions_vars_p_typed (tree t) { CHECK_NO_VAR (TREE_TYPE (t)); return false; } /* Check presence of pointers to decls in fields of a tree_common T. */ static inline bool mentions_vars_p_common (tree t) { if (mentions_vars_p_typed (t)) return true; CHECK_NO_VAR (TREE_CHAIN (t)); return false; } /* Check presence of pointers to decls in fields of a decl_minimal T. */ static inline bool mentions_vars_p_decl_minimal (tree t) { if (mentions_vars_p_common (t)) return true; CHECK_NO_VAR (DECL_NAME (t)); CHECK_VAR (DECL_CONTEXT (t)); return false; } /* Check presence of pointers to decls in fields of a decl_common T. */ static inline bool mentions_vars_p_decl_common (tree t) { if (mentions_vars_p_decl_minimal (t)) return true; CHECK_VAR (DECL_SIZE (t)); CHECK_VAR (DECL_SIZE_UNIT (t)); CHECK_VAR (DECL_INITIAL (t)); CHECK_NO_VAR (DECL_ATTRIBUTES (t)); CHECK_VAR (DECL_ABSTRACT_ORIGIN (t)); return false; } /* Check presence of pointers to decls in fields of a decl_with_vis T. */ static inline bool mentions_vars_p_decl_with_vis (tree t) { if (mentions_vars_p_decl_common (t)) return true; /* Accessor macro has side-effects, use field-name here. */ CHECK_NO_VAR (t->decl_with_vis.assembler_name); return false; } /* Check presence of pointers to decls in fields of a decl_non_common T. */ static inline bool mentions_vars_p_decl_non_common (tree t) { if (mentions_vars_p_decl_with_vis (t)) return true; CHECK_NO_VAR (DECL_RESULT_FLD (t)); return false; } /* Check presence of pointers to decls in fields of a decl_non_common T. */ static bool mentions_vars_p_function (tree t) { if (mentions_vars_p_decl_non_common (t)) return true; CHECK_NO_VAR (DECL_ARGUMENTS (t)); CHECK_NO_VAR (DECL_VINDEX (t)); CHECK_VAR (DECL_FUNCTION_PERSONALITY (t)); return false; } /* Check presence of pointers to decls in fields of a field_decl T. */ static bool mentions_vars_p_field_decl (tree t) { if (mentions_vars_p_decl_common (t)) return true; CHECK_VAR (DECL_FIELD_OFFSET (t)); CHECK_NO_VAR (DECL_BIT_FIELD_TYPE (t)); CHECK_NO_VAR (DECL_QUALIFIER (t)); CHECK_NO_VAR (DECL_FIELD_BIT_OFFSET (t)); CHECK_NO_VAR (DECL_FCONTEXT (t)); return false; } /* Check presence of pointers to decls in fields of a type T. 
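*/

/* Illustrative sketch only, not part of GCC: expanding CHECK_VAR by hand
   shows why each mentions_vars_p_* routine above and below can simply fall
   through -- the macro returns true from the *enclosing* function at the
   first public or external VAR_DECL/FUNCTION_DECL it encounters.  For the
   DECL_CONTEXT field of a decl_minimal node, the expansion is roughly:  */
#if 0
static bool
sketch_mentions_vars_p_decl_context (tree t)
{
  tree tt = DECL_CONTEXT (t);
  if (tt && VAR_OR_FUNCTION_DECL_P (tt)
      && (TREE_PUBLIC (tt) || DECL_EXTERNAL (tt)))
    return true;   /* decl reference that needs later fixup */
  return false;
}
#endif

/* Check presence of pointers to decls in fields of a type T.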
*/ static bool mentions_vars_p_type (tree t) { if (mentions_vars_p_common (t)) return true; CHECK_NO_VAR (TYPE_CACHED_VALUES (t)); CHECK_VAR (TYPE_SIZE (t)); CHECK_VAR (TYPE_SIZE_UNIT (t)); CHECK_NO_VAR (TYPE_ATTRIBUTES (t)); CHECK_NO_VAR (TYPE_NAME (t)); CHECK_VAR (TYPE_MINVAL (t)); CHECK_VAR (TYPE_MAXVAL (t)); /* Accessor is for derived node types only. */ CHECK_NO_VAR (t->type_non_common.binfo); CHECK_VAR (TYPE_CONTEXT (t)); CHECK_NO_VAR (TYPE_CANONICAL (t)); CHECK_NO_VAR (TYPE_MAIN_VARIANT (t)); CHECK_NO_VAR (TYPE_NEXT_VARIANT (t)); return false; } /* Check presence of pointers to decls in fields of a BINFO T. */ static bool mentions_vars_p_binfo (tree t) { unsigned HOST_WIDE_INT i, n; if (mentions_vars_p_common (t)) return true; CHECK_VAR (BINFO_VTABLE (t)); CHECK_NO_VAR (BINFO_OFFSET (t)); CHECK_NO_VAR (BINFO_VIRTUALS (t)); CHECK_NO_VAR (BINFO_VPTR_FIELD (t)); n = vec_safe_length (BINFO_BASE_ACCESSES (t)); for (i = 0; i < n; i++) CHECK_NO_VAR (BINFO_BASE_ACCESS (t, i)); /* Do not walk BINFO_INHERITANCE_CHAIN, BINFO_SUBVTT_INDEX and BINFO_VPTR_INDEX; these are used by C++ FE only. */ n = BINFO_N_BASE_BINFOS (t); for (i = 0; i < n; i++) CHECK_NO_VAR (BINFO_BASE_BINFO (t, i)); return false; } /* Check presence of pointers to decls in fields of a CONSTRUCTOR T. */ static bool mentions_vars_p_constructor (tree t) { unsigned HOST_WIDE_INT idx; constructor_elt *ce; if (mentions_vars_p_typed (t)) return true; for (idx = 0; vec_safe_iterate (CONSTRUCTOR_ELTS (t), idx, &ce); idx++) { CHECK_NO_VAR (ce->index); CHECK_VAR (ce->value); } return false; } /* Check presence of pointers to decls in fields of an expression tree T. */ static bool mentions_vars_p_expr (tree t) { int i; if (mentions_vars_p_typed (t)) return true; for (i = TREE_OPERAND_LENGTH (t) - 1; i >= 0; --i) CHECK_VAR (TREE_OPERAND (t, i)); return false; } /* Check presence of pointers to decls in fields of an OMP_CLAUSE T. */ static bool mentions_vars_p_omp_clause (tree t) { int i; if (mentions_vars_p_common (t)) return true; for (i = omp_clause_num_ops[OMP_CLAUSE_CODE (t)] - 1; i >= 0; --i) CHECK_VAR (OMP_CLAUSE_OPERAND (t, i)); return false; } /* Check presence of pointers to decls that needs later fixup in T. */ static bool mentions_vars_p (tree t) { switch (TREE_CODE (t)) { case IDENTIFIER_NODE: break; case TREE_LIST: CHECK_VAR (TREE_VALUE (t)); CHECK_VAR (TREE_PURPOSE (t)); CHECK_NO_VAR (TREE_CHAIN (t)); break; case FIELD_DECL: return mentions_vars_p_field_decl (t); case LABEL_DECL: case CONST_DECL: case PARM_DECL: case RESULT_DECL: case IMPORTED_DECL: case NAMESPACE_DECL: case NAMELIST_DECL: return mentions_vars_p_decl_common (t); case VAR_DECL: return mentions_vars_p_decl_with_vis (t); case TYPE_DECL: return mentions_vars_p_decl_non_common (t); case FUNCTION_DECL: return mentions_vars_p_function (t); case TREE_BINFO: return mentions_vars_p_binfo (t); case PLACEHOLDER_EXPR: return mentions_vars_p_common (t); case BLOCK: case TRANSLATION_UNIT_DECL: case OPTIMIZATION_NODE: case TARGET_OPTION_NODE: break; case CONSTRUCTOR: return mentions_vars_p_constructor (t); case OMP_CLAUSE: return mentions_vars_p_omp_clause (t); default: if (TYPE_P (t)) { if (mentions_vars_p_type (t)) return true; } else if (EXPR_P (t)) { if (mentions_vars_p_expr (t)) return true; } else if (CONSTANT_CLASS_P (t)) CHECK_NO_VAR (TREE_TYPE (t)); else gcc_unreachable (); } return false; } /* Return the resolution for the decl with index INDEX from DATA_IN. 
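*/

/* Illustrative sketch only, not part of GCC: stripped of the vec<>
   wrapper, the lookup below is just a bounds-checked array access with
   LDPR_UNKNOWN as the fallback (the names here are invented):  */
#if 0
static enum ld_plugin_symbol_resolution
sketch_get_resolution (const enum ld_plugin_symbol_resolution *res,
		       unsigned n_res, unsigned index)
{
  if (res == NULL || index >= n_res)
    return LDPR_UNKNOWN;   /* e.g. decls referenced only via
			      DECL_FUNCTION_PERSONALITY */
  return res[index];
}
#endif

/* Return the resolution for the decl with index INDEX from DATA_IN.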
*/ static enum ld_plugin_symbol_resolution get_resolution (struct data_in *data_in, unsigned index) { if (data_in->globals_resolution.exists ()) { ld_plugin_symbol_resolution_t ret; /* We can have references to not emitted functions in DECL_FUNCTION_PERSONALITY at least. So we can and have to indeed return LDPR_UNKNOWN in some cases. */ if (data_in->globals_resolution.length () <= index) return LDPR_UNKNOWN; ret = data_in->globals_resolution[index]; return ret; } else /* Delay resolution finding until decl merging. */ return LDPR_UNKNOWN; } /* We need to record resolutions until symbol table is read. */ static void register_resolution (struct lto_file_decl_data *file_data, tree decl, enum ld_plugin_symbol_resolution resolution) { if (resolution == LDPR_UNKNOWN) return; if (!file_data->resolution_map) file_data->resolution_map = new hash_map<tree, ld_plugin_symbol_resolution>; file_data->resolution_map->put (decl, resolution); } /* Register DECL with the global symbol table and change its name if necessary to avoid name clashes for static globals across different files. */ static void lto_register_var_decl_in_symtab (struct data_in *data_in, tree decl, unsigned ix) { tree context; /* Variable has file scope, not local. */ if (!TREE_PUBLIC (decl) && !((context = decl_function_context (decl)) && auto_var_in_fn_p (decl, context))) rest_of_decl_compilation (decl, 1, 0); /* If this variable has already been declared, queue the declaration for merging. */ if (TREE_PUBLIC (decl)) register_resolution (data_in->file_data, decl, get_resolution (data_in, ix)); } /* Register DECL with the global symbol table and change its name if necessary to avoid name clashes for static globals across different files. DATA_IN contains descriptors and tables for the file being read. */ static void lto_register_function_decl_in_symtab (struct data_in *data_in, tree decl, unsigned ix) { /* If this variable has already been declared, queue the declaration for merging. */ if (TREE_PUBLIC (decl) && !DECL_ABSTRACT_P (decl)) register_resolution (data_in->file_data, decl, get_resolution (data_in, ix)); } /* For the type T re-materialize it in the type variant list and the pointer/reference-to chains. */ static void lto_fixup_prevailing_type (tree t) { /* The following re-creates proper variant lists while fixing up the variant leaders. We do not stream TYPE_NEXT_VARIANT so the variant list state before fixup is broken. */ /* If we are not our own variant leader link us into our new leaders variant list. */ if (TYPE_MAIN_VARIANT (t) != t) { tree mv = TYPE_MAIN_VARIANT (t); TYPE_NEXT_VARIANT (t) = TYPE_NEXT_VARIANT (mv); TYPE_NEXT_VARIANT (mv) = t; } /* The following reconstructs the pointer chains of the new pointed-to type if we are a main variant. We do not stream those so they are broken before fixup. */ if (TREE_CODE (t) == POINTER_TYPE && TYPE_MAIN_VARIANT (t) == t) { TYPE_NEXT_PTR_TO (t) = TYPE_POINTER_TO (TREE_TYPE (t)); TYPE_POINTER_TO (TREE_TYPE (t)) = t; } else if (TREE_CODE (t) == REFERENCE_TYPE && TYPE_MAIN_VARIANT (t) == t) { TYPE_NEXT_REF_TO (t) = TYPE_REFERENCE_TO (TREE_TYPE (t)); TYPE_REFERENCE_TO (TREE_TYPE (t)) = t; } } /* We keep prevailing tree SCCs in a hashtable with manual collision handling (in case all hashes compare the same) and keep the colliding entries in the tree_scc->next chain. */ struct tree_scc { tree_scc *next; /* Hash of the whole SCC. */ hashval_t hash; /* Number of trees in the SCC. 
*/ unsigned len; /* Number of possible entries into the SCC (tree nodes [0..entry_len-1] which share the same individual tree hash). */ unsigned entry_len; /* The members of the SCC. We only need to remember the first entry node candidate for prevailing SCCs (but of course have access to all entries for SCCs we are processing). ??? For prevailing SCCs we really only need hash and the first entry candidate, but that's too awkward to implement. */ tree entries[1]; }; struct tree_scc_hasher : typed_noop_remove <tree_scc> { typedef tree_scc value_type; typedef tree_scc compare_type; static inline hashval_t hash (const value_type *); static inline bool equal (const value_type *, const compare_type *); }; hashval_t tree_scc_hasher::hash (const value_type *scc) { return scc->hash; } bool tree_scc_hasher::equal (const value_type *scc1, const compare_type *scc2) { if (scc1->hash != scc2->hash || scc1->len != scc2->len || scc1->entry_len != scc2->entry_len) return false; return true; } static hash_table<tree_scc_hasher> *tree_scc_hash; static struct obstack tree_scc_hash_obstack; static unsigned long num_merged_types; static unsigned long num_prevailing_types; static unsigned long num_type_scc_trees; static unsigned long total_scc_size; static unsigned long num_sccs_read; static unsigned long total_scc_size_merged; static unsigned long num_sccs_merged; static unsigned long num_scc_compares; static unsigned long num_scc_compare_collisions; /* Compare the two entries T1 and T2 of two SCCs that are possibly equal, recursing through in-SCC tree edges. Returns true if the SCCs entered through T1 and T2 are equal and fills in *MAP with the pairs of SCC entries we visited, starting with (*MAP)[0] = T1 and (*MAP)[1] = T2. */ static bool compare_tree_sccs_1 (tree t1, tree t2, tree **map) { enum tree_code code; /* Mark already visited nodes. */ TREE_ASM_WRITTEN (t2) = 1; /* Push the pair onto map. */ (*map)[0] = t1; (*map)[1] = t2; *map = *map + 2; /* Compare value-fields. */ #define compare_values(X) \ do { \ if (X(t1) != X(t2)) \ return false; \ } while (0) compare_values (TREE_CODE); code = TREE_CODE (t1); if (!TYPE_P (t1)) { compare_values (TREE_SIDE_EFFECTS); compare_values (TREE_CONSTANT); compare_values (TREE_READONLY); compare_values (TREE_PUBLIC); } compare_values (TREE_ADDRESSABLE); compare_values (TREE_THIS_VOLATILE); if (DECL_P (t1)) compare_values (DECL_UNSIGNED); else if (TYPE_P (t1)) compare_values (TYPE_UNSIGNED); if (TYPE_P (t1)) compare_values (TYPE_ARTIFICIAL); else compare_values (TREE_NO_WARNING); compare_values (TREE_NOTHROW); compare_values (TREE_STATIC); if (code != TREE_BINFO) compare_values (TREE_PRIVATE); compare_values (TREE_PROTECTED); compare_values (TREE_DEPRECATED); if (TYPE_P (t1)) { compare_values (TYPE_SATURATING); compare_values (TYPE_ADDR_SPACE); } else if (code == SSA_NAME) compare_values (SSA_NAME_IS_DEFAULT_DEF); if (CODE_CONTAINS_STRUCT (code, TS_INT_CST)) { if (!wi::eq_p (t1, t2)) return false; } if (CODE_CONTAINS_STRUCT (code, TS_REAL_CST)) { /* ??? No suitable compare routine available. 
*/ REAL_VALUE_TYPE r1 = TREE_REAL_CST (t1); REAL_VALUE_TYPE r2 = TREE_REAL_CST (t2); if (r1.cl != r2.cl || r1.decimal != r2.decimal || r1.sign != r2.sign || r1.signalling != r2.signalling || r1.canonical != r2.canonical || r1.uexp != r2.uexp) return false; for (unsigned i = 0; i < SIGSZ; ++i) if (r1.sig[i] != r2.sig[i]) return false; } if (CODE_CONTAINS_STRUCT (code, TS_FIXED_CST)) if (!fixed_compare (EQ_EXPR, TREE_FIXED_CST_PTR (t1), TREE_FIXED_CST_PTR (t2))) return false; /* We don't want to compare locations, so there is nothing do compare for TS_DECL_MINIMAL. */ if (CODE_CONTAINS_STRUCT (code, TS_DECL_COMMON)) { compare_values (DECL_MODE); compare_values (DECL_NONLOCAL); compare_values (DECL_VIRTUAL_P); compare_values (DECL_IGNORED_P); compare_values (DECL_ABSTRACT_P); compare_values (DECL_ARTIFICIAL); compare_values (DECL_USER_ALIGN); compare_values (DECL_PRESERVE_P); compare_values (DECL_EXTERNAL); compare_values (DECL_GIMPLE_REG_P); compare_values (DECL_ALIGN); if (code == LABEL_DECL) { compare_values (EH_LANDING_PAD_NR); compare_values (LABEL_DECL_UID); } else if (code == FIELD_DECL) { compare_values (DECL_PACKED); compare_values (DECL_NONADDRESSABLE_P); compare_values (DECL_OFFSET_ALIGN); } else if (code == VAR_DECL) { compare_values (DECL_HAS_DEBUG_EXPR_P); compare_values (DECL_NONLOCAL_FRAME); } if (code == RESULT_DECL || code == PARM_DECL || code == VAR_DECL) { compare_values (DECL_BY_REFERENCE); if (code == VAR_DECL || code == PARM_DECL) compare_values (DECL_HAS_VALUE_EXPR_P); } } if (CODE_CONTAINS_STRUCT (code, TS_DECL_WRTL)) compare_values (DECL_REGISTER); if (CODE_CONTAINS_STRUCT (code, TS_DECL_WITH_VIS)) { compare_values (DECL_COMMON); compare_values (DECL_DLLIMPORT_P); compare_values (DECL_WEAK); compare_values (DECL_SEEN_IN_BIND_EXPR_P); compare_values (DECL_COMDAT); compare_values (DECL_VISIBILITY); compare_values (DECL_VISIBILITY_SPECIFIED); if (code == VAR_DECL) { compare_values (DECL_HARD_REGISTER); /* DECL_IN_TEXT_SECTION is set during final asm output only. 
*/ compare_values (DECL_IN_CONSTANT_POOL); } } if (CODE_CONTAINS_STRUCT (code, TS_FUNCTION_DECL)) { compare_values (DECL_BUILT_IN_CLASS); compare_values (DECL_STATIC_CONSTRUCTOR); compare_values (DECL_STATIC_DESTRUCTOR); compare_values (DECL_UNINLINABLE); compare_values (DECL_POSSIBLY_INLINED); compare_values (DECL_IS_NOVOPS); compare_values (DECL_IS_RETURNS_TWICE); compare_values (DECL_IS_MALLOC); compare_values (DECL_IS_OPERATOR_NEW); compare_values (DECL_DECLARED_INLINE_P); compare_values (DECL_STATIC_CHAIN); compare_values (DECL_NO_INLINE_WARNING_P); compare_values (DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT); compare_values (DECL_NO_LIMIT_STACK); compare_values (DECL_DISREGARD_INLINE_LIMITS); compare_values (DECL_PURE_P); compare_values (DECL_LOOPING_CONST_OR_PURE_P); compare_values (DECL_FINAL_P); compare_values (DECL_CXX_CONSTRUCTOR_P); compare_values (DECL_CXX_DESTRUCTOR_P); if (DECL_BUILT_IN_CLASS (t1) != NOT_BUILT_IN) compare_values (DECL_FUNCTION_CODE); } if (CODE_CONTAINS_STRUCT (code, TS_TYPE_COMMON)) { compare_values (TYPE_MODE); compare_values (TYPE_STRING_FLAG); compare_values (TYPE_NO_FORCE_BLK); compare_values (TYPE_NEEDS_CONSTRUCTING); if (RECORD_OR_UNION_TYPE_P (t1)) { compare_values (TYPE_TRANSPARENT_AGGR); compare_values (TYPE_FINAL_P); } else if (code == ARRAY_TYPE) compare_values (TYPE_NONALIASED_COMPONENT); compare_values (TYPE_PACKED); compare_values (TYPE_RESTRICT); compare_values (TYPE_USER_ALIGN); compare_values (TYPE_READONLY); compare_values (TYPE_PRECISION); compare_values (TYPE_ALIGN); compare_values (TYPE_ALIAS_SET); } /* We don't want to compare locations, so there is nothing do compare for TS_EXP. */ /* BLOCKs are function local and we don't merge anything there, so simply refuse to merge. */ if (CODE_CONTAINS_STRUCT (code, TS_BLOCK)) return false; if (CODE_CONTAINS_STRUCT (code, TS_TRANSLATION_UNIT_DECL)) if (strcmp (TRANSLATION_UNIT_LANGUAGE (t1), TRANSLATION_UNIT_LANGUAGE (t2)) != 0) return false; if (CODE_CONTAINS_STRUCT (code, TS_TARGET_OPTION)) if (!cl_target_option_eq (TREE_TARGET_OPTION (t1), TREE_TARGET_OPTION (t2))) return false; if (CODE_CONTAINS_STRUCT (code, TS_OPTIMIZATION)) if (memcmp (TREE_OPTIMIZATION (t1), TREE_OPTIMIZATION (t2), sizeof (struct cl_optimization)) != 0) return false; if (CODE_CONTAINS_STRUCT (code, TS_BINFO)) if (vec_safe_length (BINFO_BASE_ACCESSES (t1)) != vec_safe_length (BINFO_BASE_ACCESSES (t2))) return false; if (CODE_CONTAINS_STRUCT (code, TS_CONSTRUCTOR)) compare_values (CONSTRUCTOR_NELTS); if (CODE_CONTAINS_STRUCT (code, TS_IDENTIFIER)) if (IDENTIFIER_LENGTH (t1) != IDENTIFIER_LENGTH (t2) || memcmp (IDENTIFIER_POINTER (t1), IDENTIFIER_POINTER (t2), IDENTIFIER_LENGTH (t1)) != 0) return false; if (CODE_CONTAINS_STRUCT (code, TS_STRING)) if (TREE_STRING_LENGTH (t1) != TREE_STRING_LENGTH (t2) || memcmp (TREE_STRING_POINTER (t1), TREE_STRING_POINTER (t2), TREE_STRING_LENGTH (t1)) != 0) return false; if (code == OMP_CLAUSE) { compare_values (OMP_CLAUSE_CODE); switch (OMP_CLAUSE_CODE (t1)) { case OMP_CLAUSE_DEFAULT: compare_values (OMP_CLAUSE_DEFAULT_KIND); break; case OMP_CLAUSE_SCHEDULE: compare_values (OMP_CLAUSE_SCHEDULE_KIND); break; case OMP_CLAUSE_DEPEND: compare_values (OMP_CLAUSE_DEPEND_KIND); break; case OMP_CLAUSE_MAP: compare_values (OMP_CLAUSE_MAP_KIND); break; case OMP_CLAUSE_PROC_BIND: compare_values (OMP_CLAUSE_PROC_BIND_KIND); break; case OMP_CLAUSE_REDUCTION: compare_values (OMP_CLAUSE_REDUCTION_CODE); compare_values (OMP_CLAUSE_REDUCTION_GIMPLE_INIT); compare_values (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE); 
break; default: break; } } #undef compare_values /* Compare pointer fields. */ /* Recurse. Search & Replaced from DFS_write_tree_body. Folding the early checks into the compare_tree_edges recursion macro makes debugging way quicker as you are able to break on compare_tree_sccs_1 and simply finish until a call returns false to spot the SCC members with the difference. */ #define compare_tree_edges(E1, E2) \ do { \ tree t1_ = (E1), t2_ = (E2); \ if (t1_ != t2_ \ && (!t1_ || !t2_ \ || !TREE_VISITED (t2_) \ || (!TREE_ASM_WRITTEN (t2_) \ && !compare_tree_sccs_1 (t1_, t2_, map)))) \ return false; \ /* Only non-NULL trees outside of the SCC may compare equal. */ \ gcc_checking_assert (t1_ != t2_ || (!t2_ || !TREE_VISITED (t2_))); \ } while (0) if (CODE_CONTAINS_STRUCT (code, TS_TYPED)) { if (code != IDENTIFIER_NODE) compare_tree_edges (TREE_TYPE (t1), TREE_TYPE (t2)); } if (CODE_CONTAINS_STRUCT (code, TS_VECTOR)) { unsigned i; /* Note that the number of elements for EXPR has already been emitted in EXPR's header (see streamer_write_tree_header). */ for (i = 0; i < VECTOR_CST_NELTS (t1); ++i) compare_tree_edges (VECTOR_CST_ELT (t1, i), VECTOR_CST_ELT (t2, i)); } if (CODE_CONTAINS_STRUCT (code, TS_COMPLEX)) { compare_tree_edges (TREE_REALPART (t1), TREE_REALPART (t2)); compare_tree_edges (TREE_IMAGPART (t1), TREE_IMAGPART (t2)); } if (CODE_CONTAINS_STRUCT (code, TS_DECL_MINIMAL)) { compare_tree_edges (DECL_NAME (t1), DECL_NAME (t2)); /* ??? Global decls from different TUs have non-matching TRANSLATION_UNIT_DECLs. Only consider a small set of decls equivalent, we should not end up merging others. */ if ((code == TYPE_DECL || code == NAMESPACE_DECL || code == IMPORTED_DECL || code == CONST_DECL || (VAR_OR_FUNCTION_DECL_P (t1) && (TREE_PUBLIC (t1) || DECL_EXTERNAL (t1)))) && DECL_FILE_SCOPE_P (t1) && DECL_FILE_SCOPE_P (t2)) ; else compare_tree_edges (DECL_CONTEXT (t1), DECL_CONTEXT (t2)); } if (CODE_CONTAINS_STRUCT (code, TS_DECL_COMMON)) { compare_tree_edges (DECL_SIZE (t1), DECL_SIZE (t2)); compare_tree_edges (DECL_SIZE_UNIT (t1), DECL_SIZE_UNIT (t2)); compare_tree_edges (DECL_ATTRIBUTES (t1), DECL_ATTRIBUTES (t2)); if ((code == VAR_DECL || code == PARM_DECL) && DECL_HAS_VALUE_EXPR_P (t1)) compare_tree_edges (DECL_VALUE_EXPR (t1), DECL_VALUE_EXPR (t2)); if (code == VAR_DECL && DECL_HAS_DEBUG_EXPR_P (t1)) compare_tree_edges (DECL_DEBUG_EXPR (t1), DECL_DEBUG_EXPR (t2)); /* LTO specific edges. */ if (code != FUNCTION_DECL && code != TRANSLATION_UNIT_DECL) compare_tree_edges (DECL_INITIAL (t1), DECL_INITIAL (t2)); } if (CODE_CONTAINS_STRUCT (code, TS_DECL_NON_COMMON)) { if (code == FUNCTION_DECL) { tree a1, a2; for (a1 = DECL_ARGUMENTS (t1), a2 = DECL_ARGUMENTS (t2); a1 || a2; a1 = TREE_CHAIN (a1), a2 = TREE_CHAIN (a2)) compare_tree_edges (a1, a2); compare_tree_edges (DECL_RESULT (t1), DECL_RESULT (t2)); } else if (code == TYPE_DECL) compare_tree_edges (DECL_ORIGINAL_TYPE (t1), DECL_ORIGINAL_TYPE (t2)); } if (CODE_CONTAINS_STRUCT (code, TS_DECL_WITH_VIS)) { /* Make sure we don't inadvertently set the assembler name. 
*/ if (DECL_ASSEMBLER_NAME_SET_P (t1)) compare_tree_edges (DECL_ASSEMBLER_NAME (t1), DECL_ASSEMBLER_NAME (t2)); } if (CODE_CONTAINS_STRUCT (code, TS_FIELD_DECL)) { compare_tree_edges (DECL_FIELD_OFFSET (t1), DECL_FIELD_OFFSET (t2)); compare_tree_edges (DECL_BIT_FIELD_TYPE (t1), DECL_BIT_FIELD_TYPE (t2)); compare_tree_edges (DECL_BIT_FIELD_REPRESENTATIVE (t1), DECL_BIT_FIELD_REPRESENTATIVE (t2)); compare_tree_edges (DECL_FIELD_BIT_OFFSET (t1), DECL_FIELD_BIT_OFFSET (t2)); compare_tree_edges (DECL_FCONTEXT (t1), DECL_FCONTEXT (t2)); } if (CODE_CONTAINS_STRUCT (code, TS_FUNCTION_DECL)) { compare_tree_edges (DECL_FUNCTION_PERSONALITY (t1), DECL_FUNCTION_PERSONALITY (t2)); compare_tree_edges (DECL_VINDEX (t1), DECL_VINDEX (t2)); compare_tree_edges (DECL_FUNCTION_SPECIFIC_TARGET (t1), DECL_FUNCTION_SPECIFIC_TARGET (t2)); compare_tree_edges (DECL_FUNCTION_SPECIFIC_OPTIMIZATION (t1), DECL_FUNCTION_SPECIFIC_OPTIMIZATION (t2)); } if (CODE_CONTAINS_STRUCT (code, TS_TYPE_COMMON)) { compare_tree_edges (TYPE_SIZE (t1), TYPE_SIZE (t2)); compare_tree_edges (TYPE_SIZE_UNIT (t1), TYPE_SIZE_UNIT (t2)); compare_tree_edges (TYPE_ATTRIBUTES (t1), TYPE_ATTRIBUTES (t2)); compare_tree_edges (TYPE_NAME (t1), TYPE_NAME (t2)); /* Do not compare TYPE_POINTER_TO or TYPE_REFERENCE_TO. They will be reconstructed during fixup. */ /* Do not compare TYPE_NEXT_VARIANT, we reconstruct the variant lists during fixup. */ compare_tree_edges (TYPE_MAIN_VARIANT (t1), TYPE_MAIN_VARIANT (t2)); /* ??? Global types from different TUs have non-matching TRANSLATION_UNIT_DECLs. Still merge them if they are otherwise equal. */ if (TYPE_FILE_SCOPE_P (t1) && TYPE_FILE_SCOPE_P (t2)) ; else compare_tree_edges (TYPE_CONTEXT (t1), TYPE_CONTEXT (t2)); /* TYPE_CANONICAL is re-computed during type merging, so do not compare it here. */ compare_tree_edges (TYPE_STUB_DECL (t1), TYPE_STUB_DECL (t2)); } if (CODE_CONTAINS_STRUCT (code, TS_TYPE_NON_COMMON)) { if (code == ENUMERAL_TYPE) compare_tree_edges (TYPE_VALUES (t1), TYPE_VALUES (t2)); else if (code == ARRAY_TYPE) compare_tree_edges (TYPE_DOMAIN (t1), TYPE_DOMAIN (t2)); else if (RECORD_OR_UNION_TYPE_P (t1)) { tree f1, f2; for (f1 = TYPE_FIELDS (t1), f2 = TYPE_FIELDS (t2); f1 || f2; f1 = TREE_CHAIN (f1), f2 = TREE_CHAIN (f2)) compare_tree_edges (f1, f2); compare_tree_edges (TYPE_BINFO (t1), TYPE_BINFO (t2)); } else if (code == FUNCTION_TYPE || code == METHOD_TYPE) compare_tree_edges (TYPE_ARG_TYPES (t1), TYPE_ARG_TYPES (t2)); if (!POINTER_TYPE_P (t1)) compare_tree_edges (TYPE_MINVAL (t1), TYPE_MINVAL (t2)); compare_tree_edges (TYPE_MAXVAL (t1), TYPE_MAXVAL (t2)); } if (CODE_CONTAINS_STRUCT (code, TS_LIST)) { compare_tree_edges (TREE_PURPOSE (t1), TREE_PURPOSE (t2)); compare_tree_edges (TREE_VALUE (t1), TREE_VALUE (t2)); compare_tree_edges (TREE_CHAIN (t1), TREE_CHAIN (t2)); } if (CODE_CONTAINS_STRUCT (code, TS_VEC)) for (int i = 0; i < TREE_VEC_LENGTH (t1); i++) compare_tree_edges (TREE_VEC_ELT (t1, i), TREE_VEC_ELT (t2, i)); if (CODE_CONTAINS_STRUCT (code, TS_EXP)) { for (int i = 0; i < TREE_OPERAND_LENGTH (t1); i++) compare_tree_edges (TREE_OPERAND (t1, i), TREE_OPERAND (t2, i)); /* BLOCKs are function local and we don't merge anything there. */ if (TREE_BLOCK (t1) || TREE_BLOCK (t2)) return false; } if (CODE_CONTAINS_STRUCT (code, TS_BINFO)) { unsigned i; tree t; /* Lengths have already been compared above. 
*/ FOR_EACH_VEC_ELT (*BINFO_BASE_BINFOS (t1), i, t) compare_tree_edges (t, BINFO_BASE_BINFO (t2, i)); FOR_EACH_VEC_SAFE_ELT (BINFO_BASE_ACCESSES (t1), i, t) compare_tree_edges (t, BINFO_BASE_ACCESS (t2, i)); compare_tree_edges (BINFO_OFFSET (t1), BINFO_OFFSET (t2)); compare_tree_edges (BINFO_VTABLE (t1), BINFO_VTABLE (t2)); compare_tree_edges (BINFO_VPTR_FIELD (t1), BINFO_VPTR_FIELD (t2)); /* Do not walk BINFO_INHERITANCE_CHAIN, BINFO_SUBVTT_INDEX and BINFO_VPTR_INDEX; these are used by C++ FE only. */ } if (CODE_CONTAINS_STRUCT (code, TS_CONSTRUCTOR)) { unsigned i; tree index, value; /* Lengths have already been compared above. */ FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (t1), i, index, value) { compare_tree_edges (index, CONSTRUCTOR_ELT (t2, i)->index); compare_tree_edges (value, CONSTRUCTOR_ELT (t2, i)->value); } } if (code == OMP_CLAUSE) { int i; for (i = 0; i < omp_clause_num_ops[OMP_CLAUSE_CODE (t1)]; i++) compare_tree_edges (OMP_CLAUSE_OPERAND (t1, i), OMP_CLAUSE_OPERAND (t2, i)); compare_tree_edges (OMP_CLAUSE_CHAIN (t1), OMP_CLAUSE_CHAIN (t2)); } #undef compare_tree_edges return true; } /* Compare the tree scc SCC to the prevailing candidate PSCC, filling out MAP if they are equal. */ static bool compare_tree_sccs (tree_scc *pscc, tree_scc *scc, tree *map) { /* Assume SCC entry hashes are sorted after their cardinality. Which means we can simply take the first n-tuple of equal hashes (which is recorded as entry_len) and do n SCC entry candidate comparisons. */ for (unsigned i = 0; i < pscc->entry_len; ++i) { tree *mapp = map; num_scc_compare_collisions++; if (compare_tree_sccs_1 (pscc->entries[0], scc->entries[i], &mapp)) { /* Equal - no need to reset TREE_VISITED or TREE_ASM_WRITTEN on the scc as all trees will be freed. */ return true; } /* Reset TREE_ASM_WRITTEN on scc for the next compare or in case the SCC prevails. */ for (unsigned j = 0; j < scc->len; ++j) TREE_ASM_WRITTEN (scc->entries[j]) = 0; } return false; } /* QSort sort function to sort a map of two pointers after the 2nd pointer. */ static int cmp_tree (const void *p1_, const void *p2_) { tree *p1 = (tree *)(const_cast<void *>(p1_)); tree *p2 = (tree *)(const_cast<void *>(p2_)); if (p1[1] == p2[1]) return 0; return ((uintptr_t)p1[1] < (uintptr_t)p2[1]) ? -1 : 1; } /* Try to unify the SCC with nodes FROM to FROM + LEN in CACHE and hash value SCC_HASH with an already recorded SCC. Return true if that was successful, otherwise return false. */ static bool unify_scc (struct data_in *data_in, unsigned from, unsigned len, unsigned scc_entry_len, hashval_t scc_hash) { bool unified_p = false; struct streamer_tree_cache_d *cache = data_in->reader_cache; tree_scc *scc = (tree_scc *) alloca (sizeof (tree_scc) + (len - 1) * sizeof (tree)); scc->next = NULL; scc->hash = scc_hash; scc->len = len; scc->entry_len = scc_entry_len; for (unsigned i = 0; i < len; ++i) { tree t = streamer_tree_cache_get_tree (cache, from + i); scc->entries[i] = t; /* Do not merge SCCs with local entities inside them. Also do not merge TRANSLATION_UNIT_DECLs. */ if (TREE_CODE (t) == TRANSLATION_UNIT_DECL || (VAR_OR_FUNCTION_DECL_P (t) && !(TREE_PUBLIC (t) || DECL_EXTERNAL (t))) || TREE_CODE (t) == LABEL_DECL) { /* Avoid doing any work for these cases and do not worry to record the SCCs for further merging. */ return false; } } /* Look for the list of candidate SCCs to compare against. */ tree_scc **slot; slot = tree_scc_hash->find_slot_with_hash (scc, scc_hash, INSERT); if (*slot) { /* Try unifying against each candidate. 
*/ num_scc_compares++; /* Set TREE_VISITED on the scc so we can easily identify tree nodes outside of the scc when following tree edges. Make sure that TREE_ASM_WRITTEN is unset so we can use it as 2nd bit to track whether we visited the SCC member during the compare. We cannot use TREE_VISITED on the pscc members as the extended scc and pscc can overlap. */ for (unsigned i = 0; i < scc->len; ++i) { TREE_VISITED (scc->entries[i]) = 1; gcc_checking_assert (!TREE_ASM_WRITTEN (scc->entries[i])); } tree *map = XALLOCAVEC (tree, 2 * len); for (tree_scc *pscc = *slot; pscc; pscc = pscc->next) { if (!compare_tree_sccs (pscc, scc, map)) continue; /* Found an equal SCC. */ unified_p = true; num_scc_compare_collisions--; num_sccs_merged++; total_scc_size_merged += len; #ifdef ENABLE_CHECKING for (unsigned i = 0; i < len; ++i) { tree t = map[2*i+1]; enum tree_code code = TREE_CODE (t); /* IDENTIFIER_NODEs should be singletons and are merged by the streamer. The others should be singletons, too, and we should not merge them in any way. */ gcc_assert (code != TRANSLATION_UNIT_DECL && code != IDENTIFIER_NODE && !streamer_handle_as_builtin_p (t)); } #endif /* Fixup the streamer cache with the prevailing nodes according to the tree node mapping computed by compare_tree_sccs. */ if (len == 1) streamer_tree_cache_replace_tree (cache, pscc->entries[0], from); else { tree *map2 = XALLOCAVEC (tree, 2 * len); for (unsigned i = 0; i < len; ++i) { map2[i*2] = (tree)(uintptr_t)(from + i); map2[i*2+1] = scc->entries[i]; } qsort (map2, len, 2 * sizeof (tree), cmp_tree); qsort (map, len, 2 * sizeof (tree), cmp_tree); for (unsigned i = 0; i < len; ++i) streamer_tree_cache_replace_tree (cache, map[2*i], (uintptr_t)map2[2*i]); } /* Free the tree nodes from the read SCC. */ data_in->location_cache.revert_location_cache (); for (unsigned i = 0; i < len; ++i) { enum tree_code code; if (TYPE_P (scc->entries[i])) num_merged_types++; code = TREE_CODE (scc->entries[i]); if (CODE_CONTAINS_STRUCT (code, TS_CONSTRUCTOR)) vec_free (CONSTRUCTOR_ELTS (scc->entries[i])); ggc_free (scc->entries[i]); } break; } /* Reset TREE_VISITED if we didn't unify the SCC with another. */ if (!unified_p) for (unsigned i = 0; i < scc->len; ++i) TREE_VISITED (scc->entries[i]) = 0; } /* If we didn't unify it to any candidate duplicate the relevant pieces to permanent storage and link it into the chain. */ if (!unified_p) { tree_scc *pscc = XOBNEWVAR (&tree_scc_hash_obstack, tree_scc, sizeof (tree_scc)); memcpy (pscc, scc, sizeof (tree_scc)); pscc->next = (*slot); *slot = pscc; } return unified_p; } /* Read all the symbols from buffer DATA, using descriptors in DECL_DATA. RESOLUTIONS is the set of symbols picked by the linker (read from the resolution file when the linker plugin is being used). 
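*/

/* Illustrative sketch only, not part of GCC: per the offsets computed at
   the top of lto_read_decls below, a decls section is laid out as

     +------------------------------+  offset 0
     | struct lto_decl_header       |
     +------------------------------+  decl_offset
     | in-decl-state records        |  header->decl_state_size bytes
     +------------------------------+  main_offset
     | main tree/SCC stream         |  header->main_size bytes
     +------------------------------+  string_offset
     | string table                 |  header->string_size bytes
     +------------------------------+                                   */

/* Read all the symbols from buffer DATA, using descriptors in DECL_DATA.
   RESOLUTIONS is the set of symbols picked by the linker (read from the
   resolution file when the linker plugin is being used).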
*/ static void lto_read_decls (struct lto_file_decl_data *decl_data, const void *data, vec<ld_plugin_symbol_resolution_t> resolutions) { const struct lto_decl_header *header = (const struct lto_decl_header *) data; const int decl_offset = sizeof (struct lto_decl_header); const int main_offset = decl_offset + header->decl_state_size; const int string_offset = main_offset + header->main_size; struct data_in *data_in; unsigned int i; const uint32_t *data_ptr, *data_end; uint32_t num_decl_states; lto_input_block ib_main ((const char *) data + main_offset, header->main_size, decl_data->mode_table); data_in = lto_data_in_create (decl_data, (const char *) data + string_offset, header->string_size, resolutions); /* We do not uniquify the pre-loaded cache entries, those are middle-end internal types that should not be merged. */ /* Read the global declarations and types. */ while (ib_main.p < ib_main.len) { tree t; unsigned from = data_in->reader_cache->nodes.length (); /* Read and uniquify SCCs as in the input stream. */ enum LTO_tags tag = streamer_read_record_start (&ib_main); if (tag == LTO_tree_scc) { unsigned len_; unsigned scc_entry_len; hashval_t scc_hash = lto_input_scc (&ib_main, data_in, &len_, &scc_entry_len); unsigned len = data_in->reader_cache->nodes.length () - from; gcc_assert (len == len_); total_scc_size += len; num_sccs_read++; /* We have the special case of size-1 SCCs that are pre-merged by means of identifier and string sharing for example. ??? Maybe we should avoid streaming those as SCCs. */ tree first = streamer_tree_cache_get_tree (data_in->reader_cache, from); if (len == 1 && (TREE_CODE (first) == IDENTIFIER_NODE || TREE_CODE (first) == INTEGER_CST || TREE_CODE (first) == TRANSLATION_UNIT_DECL || streamer_handle_as_builtin_p (first))) continue; /* Try to unify the SCC with already existing ones. */ if (!flag_ltrans && unify_scc (data_in, from, len, scc_entry_len, scc_hash)) continue; /* Tree merging failed, mark entries in location cache as permanent. */ data_in->location_cache.accept_location_cache (); bool seen_type = false; for (unsigned i = 0; i < len; ++i) { tree t = streamer_tree_cache_get_tree (data_in->reader_cache, from + i); /* Reconstruct the type variant and pointer-to/reference-to chains. */ if (TYPE_P (t)) { seen_type = true; num_prevailing_types++; lto_fixup_prevailing_type (t); } /* Compute the canonical type of all types. ??? Should be able to assert that !TYPE_CANONICAL. */ if (TYPE_P (t) && !TYPE_CANONICAL (t)) { gimple_register_canonical_type (t); if (odr_type_p (t)) register_odr_type (t); } /* Link shared INTEGER_CSTs into TYPE_CACHED_VALUEs of its type, which is also a member of this SCC. */ if (TREE_CODE (t) == INTEGER_CST && !TREE_OVERFLOW (t)) cache_integer_cst (t); /* Register TYPE_DECLs with the debuginfo machinery. */ if (!flag_wpa && TREE_CODE (t) == TYPE_DECL) { /* Dwarf2out needs location information. TODO: Moving this out of the streamer loop may noticeably improve ltrans linemap memory use. */ data_in->location_cache.apply_location_cache (); debug_hooks->type_decl (t, !DECL_FILE_SCOPE_P (t)); } if (!flag_ltrans) { /* Register variables and functions with the symbol table. */ if (TREE_CODE (t) == VAR_DECL) lto_register_var_decl_in_symtab (data_in, t, from + i); else if (TREE_CODE (t) == FUNCTION_DECL && !DECL_BUILT_IN (t)) lto_register_function_decl_in_symtab (data_in, t, from + i); /* Scan the tree for references to global functions or variables and record those for later fixup.
*/ if (mentions_vars_p (t)) vec_safe_push (tree_with_vars, t); } } if (seen_type) num_type_scc_trees += len; } else { /* Pickle stray references. */ t = lto_input_tree_1 (&ib_main, data_in, tag, 0); gcc_assert (t && data_in->reader_cache->nodes.length () == from); } } data_in->location_cache.apply_location_cache (); /* Read in lto_in_decl_state objects. */ data_ptr = (const uint32_t *) ((const char*) data + decl_offset); data_end = (const uint32_t *) ((const char*) data_ptr + header->decl_state_size); num_decl_states = *data_ptr++; gcc_assert (num_decl_states > 0); decl_data->global_decl_state = lto_new_in_decl_state (); data_ptr = lto_read_in_decl_state (data_in, data_ptr, decl_data->global_decl_state); /* Read in per-function decl states and enter them in hash table. */ decl_data->function_decl_states = hash_table<decl_state_hasher>::create_ggc (37); for (i = 1; i < num_decl_states; i++) { struct lto_in_decl_state *state = lto_new_in_decl_state (); data_ptr = lto_read_in_decl_state (data_in, data_ptr, state); lto_in_decl_state **slot = decl_data->function_decl_states->find_slot (state, INSERT); gcc_assert (*slot == NULL); *slot = state; } if (data_ptr != data_end) internal_error ("bytecode stream: garbage at the end of symbols section"); /* Set the current decl state to be the global state. */ decl_data->current_decl_state = decl_data->global_decl_state; lto_data_in_delete (data_in); } /* Custom version of strtoll, which is not portable. */ static int64_t lto_parse_hex (const char *p) { int64_t ret = 0; for (; *p != '\0'; ++p) { char c = *p; unsigned char part; ret <<= 4; if (c >= '0' && c <= '9') part = c - '0'; else if (c >= 'a' && c <= 'f') part = c - 'a' + 10; else if (c >= 'A' && c <= 'F') part = c - 'A' + 10; else internal_error ("could not parse hex number"); ret |= part; } return ret; } /* Read resolution for file named FILE_NAME. The resolution is read from RESOLUTION. */ static void lto_resolution_read (splay_tree file_ids, FILE *resolution, lto_file *file) { /* We require that objects in the resolution file are in the same order as the lto1 command line. */ unsigned int name_len; char *obj_name; unsigned int num_symbols; unsigned int i; struct lto_file_decl_data *file_data; splay_tree_node nd = NULL; if (!resolution) return; name_len = strlen (file->filename); obj_name = XNEWVEC (char, name_len + 1); fscanf (resolution, " "); /* Read white space. */ fread (obj_name, sizeof (char), name_len, resolution); obj_name[name_len] = '\0'; if (filename_cmp (obj_name, file->filename) != 0) internal_error ("unexpected file name %s in linker resolution file. 
" "Expected %s", obj_name, file->filename); if (file->offset != 0) { int t; char offset_p[17]; int64_t offset; t = fscanf (resolution, "@0x%16s", offset_p); if (t != 1) internal_error ("could not parse file offset"); offset = lto_parse_hex (offset_p); if (offset != file->offset) internal_error ("unexpected offset"); } free (obj_name); fscanf (resolution, "%u", &num_symbols); for (i = 0; i < num_symbols; i++) { int t; unsigned index; unsigned HOST_WIDE_INT id; char r_str[27]; enum ld_plugin_symbol_resolution r = (enum ld_plugin_symbol_resolution) 0; unsigned int j; unsigned int lto_resolution_str_len = sizeof (lto_resolution_str) / sizeof (char *); res_pair rp; t = fscanf (resolution, "%u " HOST_WIDE_INT_PRINT_HEX_PURE " %26s %*[^\n]\n", &index, &id, r_str); if (t != 3) internal_error ("invalid line in the resolution file"); for (j = 0; j < lto_resolution_str_len; j++) { if (strcmp (lto_resolution_str[j], r_str) == 0) { r = (enum ld_plugin_symbol_resolution) j; break; } } if (j == lto_resolution_str_len) internal_error ("invalid resolution in the resolution file"); if (!(nd && lto_splay_tree_id_equal_p (nd->key, id))) { nd = lto_splay_tree_lookup (file_ids, id); if (nd == NULL) internal_error ("resolution sub id %wx not in object file", id); } file_data = (struct lto_file_decl_data *)nd->value; /* The indexes are very sparse. To save memory save them in a compact format that is only unpacked later when the subfile is processed. */ rp.res = r; rp.index = index; file_data->respairs.safe_push (rp); if (file_data->max_index < index) file_data->max_index = index; } } /* List of file_decl_datas */ struct file_data_list { struct lto_file_decl_data *first, *last; }; /* Is the name for a id'ed LTO section? */ static int lto_section_with_id (const char *name, unsigned HOST_WIDE_INT *id) { const char *s; if (strncmp (name, section_name_prefix, strlen (section_name_prefix))) return 0; s = strrchr (name, '.'); return s && sscanf (s, "." HOST_WIDE_INT_PRINT_HEX_PURE, id) == 1; } /* Create file_data of each sub file id */ static int create_subid_section_table (struct lto_section_slot *ls, splay_tree file_ids, struct file_data_list *list) { struct lto_section_slot s_slot, *new_slot; unsigned HOST_WIDE_INT id; splay_tree_node nd; void **hash_slot; char *new_name; struct lto_file_decl_data *file_data; if (!lto_section_with_id (ls->name, &id)) return 1; /* Find hash table of sub module id */ nd = lto_splay_tree_lookup (file_ids, id); if (nd != NULL) { file_data = (struct lto_file_decl_data *)nd->value; } else { file_data = ggc_alloc<lto_file_decl_data> (); memset(file_data, 0, sizeof (struct lto_file_decl_data)); file_data->id = id; file_data->section_hash_table = lto_obj_create_section_hash_table ();; lto_splay_tree_insert (file_ids, id, file_data); /* Maintain list in linker order */ if (!list->first) list->first = file_data; if (list->last) list->last->next = file_data; list->last = file_data; } /* Copy section into sub module hash table */ new_name = XDUPVEC (char, ls->name, strlen (ls->name) + 1); s_slot.name = new_name; hash_slot = htab_find_slot (file_data->section_hash_table, &s_slot, INSERT); gcc_assert (*hash_slot == NULL); new_slot = XDUP (struct lto_section_slot, ls); new_slot->name = new_name; *hash_slot = new_slot; return 1; } /* Read declarations and other initializations for a FILE_DATA. 
*/ static void lto_file_finalize (struct lto_file_decl_data *file_data, lto_file *file) { const char *data; size_t len; vec<ld_plugin_symbol_resolution_t> resolutions = vNULL; int i; res_pair *rp; /* Create vector for fast access of resolution. We do this lazily to save memory. */ resolutions.safe_grow_cleared (file_data->max_index + 1); for (i = 0; file_data->respairs.iterate (i, &rp); i++) resolutions[rp->index] = rp->res; file_data->respairs.release (); file_data->renaming_hash_table = lto_create_renaming_table (); file_data->file_name = file->filename; #ifdef ACCEL_COMPILER lto_input_mode_table (file_data); #else file_data->mode_table = lto_mode_identity_table; #endif data = lto_get_section_data (file_data, LTO_section_decls, NULL, &len); if (data == NULL) { internal_error ("cannot read LTO decls from %s", file_data->file_name); return; } /* Frees resolutions */ lto_read_decls (file_data, data, resolutions); lto_free_section_data (file_data, LTO_section_decls, NULL, data, len); } /* Finalize FILE_DATA in FILE and increase COUNT. */ static int lto_create_files_from_ids (lto_file *file, struct lto_file_decl_data *file_data, int *count) { lto_file_finalize (file_data, file); if (symtab->dump_file) fprintf (symtab->dump_file, "Creating file %s with sub id " HOST_WIDE_INT_PRINT_HEX "\n", file_data->file_name, file_data->id); (*count)++; return 0; } /* Generate a TREE representation for all types and external decls entities in FILE. Read all of the globals out of the file. Then read the cgraph and process the .o index into the cgraph nodes so that it can open the .o file to load the functions and ipa information. */ static struct lto_file_decl_data * lto_file_read (lto_file *file, FILE *resolution_file, int *count) { struct lto_file_decl_data *file_data = NULL; splay_tree file_ids; htab_t section_hash_table; struct lto_section_slot *section; struct file_data_list file_list; struct lto_section_list section_list; memset (&section_list, 0, sizeof (struct lto_section_list)); section_hash_table = lto_obj_build_section_table (file, &section_list); /* Find all sub modules in the object and put their sections into new hash tables in a splay tree. */ file_ids = lto_splay_tree_new (); memset (&file_list, 0, sizeof (struct file_data_list)); for (section = section_list.first; section != NULL; section = section->next) create_subid_section_table (section, file_ids, &file_list); /* Add resolutions to file ids */ lto_resolution_read (file_ids, resolution_file, file); /* Finalize each lto file for each submodule in the merged object */ for (file_data = file_list.first; file_data != NULL; file_data = file_data->next) lto_create_files_from_ids (file, file_data, count); splay_tree_delete (file_ids); htab_delete (section_hash_table); return file_list.first; } #if HAVE_MMAP_FILE && HAVE_SYSCONF && defined _SC_PAGE_SIZE #define LTO_MMAP_IO 1 #endif #if LTO_MMAP_IO /* Page size of machine is used for mmap and munmap calls. */ static size_t page_mask; #endif /* Get the section data of length LEN from FILENAME starting at OFFSET. The data segment must be freed by the caller when the caller is finished. Returns NULL if all was not well. */ static char * lto_read_section_data (struct lto_file_decl_data *file_data, intptr_t offset, size_t len) { char *result; static int fd = -1; static char *fd_name; #if LTO_MMAP_IO intptr_t computed_len; intptr_t computed_offset; intptr_t diff; #endif /* Keep a single-entry file-descriptor cache. The last file we touched will get closed at exit. ??? 
Eventually we want to add a more sophisticated larger cache or rather fix function body streaming to not stream them in practically random order. */ if (fd != -1 && filename_cmp (fd_name, file_data->file_name) != 0) { free (fd_name); close (fd); fd = -1; } if (fd == -1) { fd = open (file_data->file_name, O_RDONLY|O_BINARY); if (fd == -1) { fatal_error (input_location, "Cannot open %s", file_data->file_name); return NULL; } fd_name = xstrdup (file_data->file_name); } #if LTO_MMAP_IO if (!page_mask) { size_t page_size = sysconf (_SC_PAGE_SIZE); page_mask = ~(page_size - 1); } computed_offset = offset & page_mask; diff = offset - computed_offset; computed_len = len + diff; result = (char *) mmap (NULL, computed_len, PROT_READ, MAP_PRIVATE, fd, computed_offset); if (result == MAP_FAILED) { fatal_error (input_location, "Cannot map %s", file_data->file_name); return NULL; } return result + diff; #else result = (char *) xmalloc (len); if (lseek (fd, offset, SEEK_SET) != offset || read (fd, result, len) != (ssize_t) len) { free (result); fatal_error (input_location, "Cannot read %s", file_data->file_name); result = NULL; } #ifdef __MINGW32__ /* Native Windows doesn't support delayed unlink on an opened file, so we close the file here again. This produces higher I/O load, but at least it keeps dangling file handles from preventing the unlink. */ free (fd_name); fd_name = NULL; close (fd); fd = -1; #endif return result; #endif } /* Get the section data from FILE_DATA of SECTION_TYPE with NAME. NAME will be NULL unless the section type is for a function body. */ static const char * get_section_data (struct lto_file_decl_data *file_data, enum lto_section_type section_type, const char *name, size_t *len) { htab_t section_hash_table = file_data->section_hash_table; struct lto_section_slot *f_slot; struct lto_section_slot s_slot; const char *section_name = lto_get_section_name (section_type, name, file_data); char *data = NULL; *len = 0; s_slot.name = section_name; f_slot = (struct lto_section_slot *) htab_find (section_hash_table, &s_slot); if (f_slot) { data = lto_read_section_data (file_data, f_slot->start, f_slot->len); *len = f_slot->len; } free (CONST_CAST (char *, section_name)); return data; } /* Free the section data from FILE_DATA of SECTION_TYPE with NAME that starts at OFFSET and has LEN bytes. */ static void free_section_data (struct lto_file_decl_data *file_data ATTRIBUTE_UNUSED, enum lto_section_type section_type ATTRIBUTE_UNUSED, const char *name ATTRIBUTE_UNUSED, const char *offset, size_t len ATTRIBUTE_UNUSED) { #if LTO_MMAP_IO intptr_t computed_len; intptr_t computed_offset; intptr_t diff; #endif #if LTO_MMAP_IO computed_offset = ((intptr_t) offset) & page_mask; diff = (intptr_t) offset - computed_offset; computed_len = len + diff; munmap ((caddr_t) computed_offset, computed_len); #else free (CONST_CAST(char *, offset)); #endif } static lto_file *current_lto_file; /* Helper for qsort; compare partitions and return one with smaller size. We sort from greatest to smallest so a parallel build doesn't stall on the longest compilation being executed too late. */ static int cmp_partitions_size (const void *a, const void *b) { const struct ltrans_partition_def *pa = *(struct ltrans_partition_def *const *)a; const struct ltrans_partition_def *pb = *(struct ltrans_partition_def *const *)b; return pb->insns - pa->insns; } /* Helper for qsort; compare partitions and return one with smaller order.
*/ static int cmp_partitions_order (const void *a, const void *b) { const struct ltrans_partition_def *pa = *(struct ltrans_partition_def *const *)a; const struct ltrans_partition_def *pb = *(struct ltrans_partition_def *const *)b; int ordera = -1, orderb = -1; if (lto_symtab_encoder_size (pa->encoder)) ordera = lto_symtab_encoder_deref (pa->encoder, 0)->order; if (lto_symtab_encoder_size (pb->encoder)) orderb = lto_symtab_encoder_deref (pb->encoder, 0)->order; return orderb - ordera; } /* Actually stream out ENCODER into TEMP_FILENAME. */ static void do_stream_out (char *temp_filename, lto_symtab_encoder_t encoder) { lto_file *file = lto_obj_file_open (temp_filename, true); if (!file) fatal_error (input_location, "lto_obj_file_open() failed"); lto_set_current_out_file (file); ipa_write_optimization_summaries (encoder); lto_set_current_out_file (NULL); lto_obj_file_close (file); free (file); } /* Wait for forked process and signal errors. */ #ifdef HAVE_WORKING_FORK static void wait_for_child () { int status; do { #ifndef WCONTINUED #define WCONTINUED 0 #endif int w = waitpid (0, &status, WUNTRACED | WCONTINUED); if (w == -1) fatal_error (input_location, "waitpid failed"); if (WIFEXITED (status) && WEXITSTATUS (status)) fatal_error (input_location, "streaming subprocess failed"); else if (WIFSIGNALED (status)) fatal_error (input_location, "streaming subprocess was killed by signal"); } while (!WIFEXITED (status) && !WIFSIGNALED (status)); } #endif /* Stream out ENCODER into TEMP_FILENAME Fork if that seems to help. */ static void stream_out (char *temp_filename, lto_symtab_encoder_t encoder, bool ARG_UNUSED (last)) { #ifdef HAVE_WORKING_FORK static int nruns; if (lto_parallelism <= 1) { do_stream_out (temp_filename, encoder); return; } /* Do not run more than LTO_PARALLELISM streamings FIXME: we ignore limits on jobserver. */ if (lto_parallelism > 0 && nruns >= lto_parallelism) { wait_for_child (); nruns --; } /* If this is not the last parallel partition, execute new streaming process. */ if (!last) { pid_t cpid = fork (); if (!cpid) { setproctitle ("lto1-wpa-streaming"); do_stream_out (temp_filename, encoder); exit (0); } /* Fork failed; lets do the job ourseleves. */ else if (cpid == -1) do_stream_out (temp_filename, encoder); else nruns++; } /* Last partition; stream it and wait for all children to die. */ else { int i; do_stream_out (temp_filename, encoder); for (i = 0; i < nruns; i++) wait_for_child (); } asm_nodes_output = true; #else do_stream_out (temp_filename, encoder); #endif } /* Write all output files in WPA mode and the file with the list of LTRANS units. */ static void lto_wpa_write_files (void) { unsigned i, n_sets; ltrans_partition part; FILE *ltrans_output_list_stream; char *temp_filename; vec <char *>temp_filenames = vNULL; size_t blen; /* Open the LTRANS output list. */ if (!ltrans_output_list) fatal_error (input_location, "no LTRANS output list filename provided"); timevar_push (TV_WHOPR_WPA); FOR_EACH_VEC_ELT (ltrans_partitions, i, part) lto_stats.num_output_symtab_nodes += lto_symtab_encoder_size (part->encoder); timevar_pop (TV_WHOPR_WPA); timevar_push (TV_WHOPR_WPA_IO); /* Generate a prefix for the LTRANS unit files. 
*/ blen = strlen (ltrans_output_list); temp_filename = (char *) xmalloc (blen + sizeof ("2147483648.o")); strcpy (temp_filename, ltrans_output_list); if (blen > sizeof (".out") && strcmp (temp_filename + blen - sizeof (".out") + 1, ".out") == 0) temp_filename[blen - sizeof (".out") + 1] = '\0'; blen = strlen (temp_filename); n_sets = ltrans_partitions.length (); /* Sort partitions by size so small ones are compiled last. FIXME: Even when not reordering we may want to output one list for parallel make and another for the final link command. */ if (!flag_profile_reorder_functions || !flag_profile_use) ltrans_partitions.qsort (flag_toplevel_reorder ? cmp_partitions_size : cmp_partitions_order); for (i = 0; i < n_sets; i++) { ltrans_partition part = ltrans_partitions[i]; /* Write all the nodes in SET. */ sprintf (temp_filename + blen, "%u.o", i); if (!quiet_flag) fprintf (stderr, " %s (%s %i insns)", temp_filename, part->name, part->insns); if (symtab->dump_file) { lto_symtab_encoder_iterator lsei; fprintf (symtab->dump_file, "Writing partition %s to file %s, %i insns\n", part->name, temp_filename, part->insns); fprintf (symtab->dump_file, " Symbols in partition: "); for (lsei = lsei_start_in_partition (part->encoder); !lsei_end_p (lsei); lsei_next_in_partition (&lsei)) { symtab_node *node = lsei_node (lsei); fprintf (symtab->dump_file, "%s ", node->asm_name ()); } fprintf (symtab->dump_file, "\n Symbols in boundary: "); for (lsei = lsei_start (part->encoder); !lsei_end_p (lsei); lsei_next (&lsei)) { symtab_node *node = lsei_node (lsei); if (!lto_symtab_encoder_in_partition_p (part->encoder, node)) { fprintf (symtab->dump_file, "%s ", node->asm_name ()); cgraph_node *cnode = dyn_cast <cgraph_node *> (node); if (cnode && lto_symtab_encoder_encode_body_p (part->encoder, cnode)) fprintf (symtab->dump_file, "(body included)"); else { varpool_node *vnode = dyn_cast <varpool_node *> (node); if (vnode && lto_symtab_encoder_encode_initializer_p (part->encoder, vnode)) fprintf (symtab->dump_file, "(initializer included)"); } } } fprintf (symtab->dump_file, "\n"); } gcc_checking_assert (lto_symtab_encoder_size (part->encoder) || !i); stream_out (temp_filename, part->encoder, i == n_sets - 1); part->encoder = NULL; temp_filenames.safe_push (xstrdup (temp_filename)); } ltrans_output_list_stream = fopen (ltrans_output_list, "w"); if (ltrans_output_list_stream == NULL) fatal_error (input_location, "opening LTRANS output list %s: %m", ltrans_output_list); for (i = 0; i < n_sets; i++) { unsigned int len = strlen (temp_filenames[i]); if (fwrite (temp_filenames[i], 1, len, ltrans_output_list_stream) < len || fwrite ("\n", 1, 1, ltrans_output_list_stream) < 1) fatal_error (input_location, "writing to LTRANS output list %s: %m", ltrans_output_list); free (temp_filenames[i]); } temp_filenames.release(); lto_stats.num_output_files += n_sets; /* Close the LTRANS output list. */ if (fclose (ltrans_output_list_stream)) fatal_error (input_location, "closing LTRANS output list %s: %m", ltrans_output_list); free_ltrans_partitions(); free (temp_filename); timevar_pop (TV_WHOPR_WPA_IO); } /* If TT is a variable or function decl replace it with its prevailing variant. */ #define LTO_SET_PREVAIL(tt) \ do {\ if ((tt) && VAR_OR_FUNCTION_DECL_P (tt) \ && (TREE_PUBLIC (tt) || DECL_EXTERNAL (tt))) \ { \ tt = lto_symtab_prevailing_decl (tt); \ fixed = true; \ } \ } while (0) /* Ensure that TT isn't a replaceable var or function decl.
*/ #define LTO_NO_PREVAIL(tt) \ gcc_assert (!(tt) || !VAR_OR_FUNCTION_DECL_P (tt)) /* Given a tree T replace all fields referring to variables or functions with their prevailing variant. */ static void lto_fixup_prevailing_decls (tree t) { enum tree_code code = TREE_CODE (t); bool fixed = false; gcc_checking_assert (code != TREE_BINFO); LTO_NO_PREVAIL (TREE_TYPE (t)); if (CODE_CONTAINS_STRUCT (code, TS_COMMON)) LTO_NO_PREVAIL (TREE_CHAIN (t)); if (DECL_P (t)) { LTO_NO_PREVAIL (DECL_NAME (t)); LTO_SET_PREVAIL (DECL_CONTEXT (t)); if (CODE_CONTAINS_STRUCT (code, TS_DECL_COMMON)) { LTO_SET_PREVAIL (DECL_SIZE (t)); LTO_SET_PREVAIL (DECL_SIZE_UNIT (t)); LTO_SET_PREVAIL (DECL_INITIAL (t)); LTO_NO_PREVAIL (DECL_ATTRIBUTES (t)); LTO_SET_PREVAIL (DECL_ABSTRACT_ORIGIN (t)); } if (CODE_CONTAINS_STRUCT (code, TS_DECL_WITH_VIS)) { LTO_NO_PREVAIL (t->decl_with_vis.assembler_name); } if (CODE_CONTAINS_STRUCT (code, TS_DECL_NON_COMMON)) { LTO_NO_PREVAIL (DECL_RESULT_FLD (t)); } if (CODE_CONTAINS_STRUCT (code, TS_FUNCTION_DECL)) { LTO_NO_PREVAIL (DECL_ARGUMENTS (t)); LTO_SET_PREVAIL (DECL_FUNCTION_PERSONALITY (t)); LTO_NO_PREVAIL (DECL_VINDEX (t)); } if (CODE_CONTAINS_STRUCT (code, TS_FIELD_DECL)) { LTO_SET_PREVAIL (DECL_FIELD_OFFSET (t)); LTO_NO_PREVAIL (DECL_BIT_FIELD_TYPE (t)); LTO_NO_PREVAIL (DECL_QUALIFIER (t)); LTO_NO_PREVAIL (DECL_FIELD_BIT_OFFSET (t)); LTO_NO_PREVAIL (DECL_FCONTEXT (t)); } } else if (TYPE_P (t)) { LTO_NO_PREVAIL (TYPE_CACHED_VALUES (t)); LTO_SET_PREVAIL (TYPE_SIZE (t)); LTO_SET_PREVAIL (TYPE_SIZE_UNIT (t)); LTO_NO_PREVAIL (TYPE_ATTRIBUTES (t)); LTO_NO_PREVAIL (TYPE_NAME (t)); LTO_SET_PREVAIL (TYPE_MINVAL (t)); LTO_SET_PREVAIL (TYPE_MAXVAL (t)); LTO_NO_PREVAIL (t->type_non_common.binfo); LTO_SET_PREVAIL (TYPE_CONTEXT (t)); LTO_NO_PREVAIL (TYPE_CANONICAL (t)); LTO_NO_PREVAIL (TYPE_MAIN_VARIANT (t)); LTO_NO_PREVAIL (TYPE_NEXT_VARIANT (t)); } else if (EXPR_P (t)) { int i; for (i = TREE_OPERAND_LENGTH (t) - 1; i >= 0; --i) LTO_SET_PREVAIL (TREE_OPERAND (t, i)); } else if (TREE_CODE (t) == CONSTRUCTOR) { unsigned i; tree val; FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (t), i, val) LTO_SET_PREVAIL (val); } else { switch (code) { case TREE_LIST: LTO_SET_PREVAIL (TREE_VALUE (t)); LTO_SET_PREVAIL (TREE_PURPOSE (t)); LTO_NO_PREVAIL (TREE_PURPOSE (t)); break; default: gcc_unreachable (); } } /* If we fixed nothing, then we missed something seen by mentions_vars_p. */ gcc_checking_assert (fixed); } #undef LTO_SET_PREVAIL #undef LTO_NO_PREVAIL /* Helper function of lto_fixup_decls. Walks the var and fn streams in STATE, replaces var and function decls with the corresponding prevailing def. */ static void lto_fixup_state (struct lto_in_decl_state *state) { unsigned i, si; /* Although we only want to replace FUNCTION_DECLs and VAR_DECLs, we still need to walk from all DECLs to find the reachable FUNCTION_DECLs and VAR_DECLs. */ for (si = 0; si < LTO_N_DECL_STREAMS; si++) { vec<tree, va_gc> *trees = state->streams[si]; for (i = 0; i < vec_safe_length (trees); i++) { tree t = (*trees)[i]; if (VAR_OR_FUNCTION_DECL_P (t) && (TREE_PUBLIC (t) || DECL_EXTERNAL (t))) (*trees)[i] = lto_symtab_prevailing_decl (t); } } } /* Fix the decls from all FILES. Replaces each decl with the corresponding prevailing one. 
*/ static void lto_fixup_decls (struct lto_file_decl_data **files) { unsigned int i; tree t; if (tree_with_vars) FOR_EACH_VEC_ELT ((*tree_with_vars), i, t) lto_fixup_prevailing_decls (t); for (i = 0; files[i]; i++) { struct lto_file_decl_data *file = files[i]; struct lto_in_decl_state *state = file->global_decl_state; lto_fixup_state (state); hash_table<decl_state_hasher>::iterator iter; lto_in_decl_state *elt; FOR_EACH_HASH_TABLE_ELEMENT (*file->function_decl_states, elt, lto_in_decl_state *, iter) lto_fixup_state (elt); } } static GTY((length ("lto_stats.num_input_files + 1"))) struct lto_file_decl_data **all_file_decl_data; /* Turn file datas for sub files into a single array, so that they look like separate files for further passes. */ static void lto_flatten_files (struct lto_file_decl_data **orig, int count, int last_file_ix) { struct lto_file_decl_data *n, *next; int i, k; lto_stats.num_input_files = count; all_file_decl_data = ggc_cleared_vec_alloc<lto_file_decl_data_ptr> (count + 1); /* Set the hooks so that all of the ipa passes can read in their data. */ lto_set_in_hooks (all_file_decl_data, get_section_data, free_section_data); for (i = 0, k = 0; i < last_file_ix; i++) { for (n = orig[i]; n != NULL; n = next) { all_file_decl_data[k++] = n; next = n->next; n->next = NULL; } } all_file_decl_data[k] = NULL; gcc_assert (k == count); } /* Input file data before flattening (i.e. splitting them to subfiles to support incremental linking). */ static int real_file_count; static GTY((length ("real_file_count + 1"))) struct lto_file_decl_data **real_file_decl_data; static void print_lto_report_1 (void); /* Read all the symbols from the input files FNAMES. NFILES is the number of files requested in the command line. Instantiate a global call graph by aggregating all the sub-graphs found in each file. */ static void read_cgraph_and_symbols (unsigned nfiles, const char **fnames) { unsigned int i, last_file_ix; FILE *resolution; int count = 0; struct lto_file_decl_data **decl_data; symtab_node *snode; symtab->initialize (); timevar_push (TV_IPA_LTO_DECL_IN); #ifdef ACCEL_COMPILER section_name_prefix = OFFLOAD_SECTION_NAME_PREFIX; lto_stream_offload_p = true; #endif real_file_decl_data = decl_data = ggc_cleared_vec_alloc<lto_file_decl_data_ptr> (nfiles + 1); real_file_count = nfiles; /* Read the resolution file. */ resolution = NULL; if (resolution_file_name) { int t; unsigned num_objects; resolution = fopen (resolution_file_name, "r"); if (resolution == NULL) fatal_error (input_location, "could not open symbol resolution file: %m"); t = fscanf (resolution, "%u", &num_objects); gcc_assert (t == 1); /* True, since the plugin splits the archives. */ gcc_assert (num_objects == nfiles); } symtab->state = LTO_STREAMING; canonical_type_hash_cache = new hash_map<const_tree, hashval_t> (251); gimple_canonical_types = htab_create (16381, gimple_canonical_type_hash, gimple_canonical_type_eq, NULL); gcc_obstack_init (&tree_scc_hash_obstack); tree_scc_hash = new hash_table<tree_scc_hasher> (4096); /* Register the common node types with the canonical type machinery so we properly share alias-sets across languages and TUs. Do not expose the common nodes as type merge target - those that should be are already exposed so by pre-loading the LTO streamer caches. Do two passes - first clear TYPE_CANONICAL and then re-compute it.
*/ for (i = 0; i < itk_none; ++i) lto_register_canonical_types (integer_types[i], true); for (i = 0; i < stk_type_kind_last; ++i) lto_register_canonical_types (sizetype_tab[i], true); for (i = 0; i < TI_MAX; ++i) lto_register_canonical_types (global_trees[i], true); for (i = 0; i < itk_none; ++i) lto_register_canonical_types (integer_types[i], false); for (i = 0; i < stk_type_kind_last; ++i) lto_register_canonical_types (sizetype_tab[i], false); for (i = 0; i < TI_MAX; ++i) lto_register_canonical_types (global_trees[i], false); if (!quiet_flag) fprintf (stderr, "Reading object files:"); /* Read all of the object files specified on the command line. */ for (i = 0, last_file_ix = 0; i < nfiles; ++i) { struct lto_file_decl_data *file_data = NULL; if (!quiet_flag) { fprintf (stderr, " %s", fnames[i]); fflush (stderr); } current_lto_file = lto_obj_file_open (fnames[i], false); if (!current_lto_file) break; file_data = lto_file_read (current_lto_file, resolution, &count); if (!file_data) { lto_obj_file_close (current_lto_file); free (current_lto_file); current_lto_file = NULL; break; } decl_data[last_file_ix++] = file_data; lto_obj_file_close (current_lto_file); free (current_lto_file); current_lto_file = NULL; } lto_flatten_files (decl_data, count, last_file_ix); lto_stats.num_input_files = count; ggc_free(decl_data); real_file_decl_data = NULL; if (resolution_file_name) fclose (resolution); /* Show the LTO report before launching LTRANS. */ if (flag_lto_report || (flag_wpa && flag_lto_report_wpa)) print_lto_report_1 (); /* Free gimple type merging datastructures. */ delete tree_scc_hash; tree_scc_hash = NULL; obstack_free (&tree_scc_hash_obstack, NULL); htab_delete (gimple_canonical_types); gimple_canonical_types = NULL; delete canonical_type_hash_cache; canonical_type_hash_cache = NULL; /* At this stage we know that the majority of GGC memory is reachable. Growing the limits prevents unnecessary invocation of GGC. */ ggc_grow (); ggc_collect (); /* Set the hooks so that all of the ipa passes can read in their data. */ lto_set_in_hooks (all_file_decl_data, get_section_data, free_section_data); timevar_pop (TV_IPA_LTO_DECL_IN); if (!quiet_flag) fprintf (stderr, "\nReading the callgraph\n"); timevar_push (TV_IPA_LTO_CGRAPH_IO); /* Read the symtab. */ input_symtab (); input_offload_tables (); /* Store resolutions into the symbol table. */ ld_plugin_symbol_resolution_t *res; FOR_EACH_SYMBOL (snode) if (snode->real_symbol_p () && snode->lto_file_data && snode->lto_file_data->resolution_map && (res = snode->lto_file_data->resolution_map->get (snode->decl))) snode->resolution = *res; for (i = 0; all_file_decl_data[i]; i++) if (all_file_decl_data[i]->resolution_map) { delete all_file_decl_data[i]->resolution_map; all_file_decl_data[i]->resolution_map = NULL; } timevar_pop (TV_IPA_LTO_CGRAPH_IO); if (!quiet_flag) fprintf (stderr, "Merging declarations\n"); timevar_push (TV_IPA_LTO_DECL_MERGE); /* Merge global decls. In ltrans mode we read the merged cgraph, so we do not need to care about resolving symbols again; we only need to replace duplicated declarations read from the callgraph and from function sections. */ if (!flag_ltrans) { lto_symtab_merge_decls (); /* If there were errors during symbol merging bail out, we have no good way to recover here. */ if (seen_error ()) fatal_error (input_location, "errors during merging of translation units"); /* Fixup all decls.
*/ lto_fixup_decls (all_file_decl_data); } if (tree_with_vars) ggc_free (tree_with_vars); tree_with_vars = NULL; ggc_collect (); timevar_pop (TV_IPA_LTO_DECL_MERGE); /* Each pass will set the appropriate timer. */ if (!quiet_flag) fprintf (stderr, "Reading summaries\n"); /* Read the IPA summary data. */ if (flag_ltrans) ipa_read_optimization_summaries (); else ipa_read_summaries (); for (i = 0; all_file_decl_data[i]; i++) { gcc_assert (all_file_decl_data[i]->symtab_node_encoder); lto_symtab_encoder_delete (all_file_decl_data[i]->symtab_node_encoder); all_file_decl_data[i]->symtab_node_encoder = NULL; lto_free_function_in_decl_state (all_file_decl_data[i]->global_decl_state); all_file_decl_data[i]->global_decl_state = NULL; all_file_decl_data[i]->current_decl_state = NULL; } /* Finally merge the cgraph according to the decl merging decisions. */ timevar_push (TV_IPA_LTO_CGRAPH_MERGE); if (symtab->dump_file) { fprintf (symtab->dump_file, "Before merging:\n"); symtab_node::dump_table (symtab->dump_file); } if (!flag_ltrans) { lto_symtab_merge_symbols (); /* Removal of unreachable symbols is needed to make verify_symtab pass; we still have duplicated comdat groups containing local statics. We could also just remove them while merging. */ symtab->remove_unreachable_nodes (dump_file); } ggc_collect (); symtab->state = IPA_SSA; /* FIXME: Technically all node removals happening here are useless, because WPA should not stream them. */ if (flag_ltrans) symtab->remove_unreachable_nodes (dump_file); timevar_pop (TV_IPA_LTO_CGRAPH_MERGE); /* Indicate that the cgraph is built and ready. */ symtab->function_flags_ready = true; ggc_free (all_file_decl_data); all_file_decl_data = NULL; } /* Materialize all the bodies for all the nodes in the callgraph. */ static void materialize_cgraph (void) { struct cgraph_node *node; timevar_id_t lto_timer; if (!quiet_flag) fprintf (stderr, flag_wpa ? "Materializing decls:" : "Reading function bodies:"); FOR_EACH_FUNCTION (node) { if (node->lto_file_data) { lto_materialize_function (node); lto_stats.num_input_cgraph_nodes++; } } /* Start the appropriate timer depending on the mode that we are operating in. */ lto_timer = (flag_wpa) ? TV_WHOPR_WPA : (flag_ltrans) ? TV_WHOPR_LTRANS : TV_LTO; timevar_push (lto_timer); current_function_decl = NULL; set_cfun (NULL); if (!quiet_flag) fprintf (stderr, "\n"); timevar_pop (lto_timer); } /* Show various memory usage statistics related to LTO. */ static void print_lto_report_1 (void) { const char *pfx = (flag_lto) ? "LTO" : (flag_wpa) ?
"WPA" : "LTRANS"; fprintf (stderr, "%s statistics\n", pfx); fprintf (stderr, "[%s] read %lu SCCs of average size %f\n", pfx, num_sccs_read, total_scc_size / (double)num_sccs_read); fprintf (stderr, "[%s] %lu tree bodies read in total\n", pfx, total_scc_size); if (flag_wpa && tree_scc_hash) { fprintf (stderr, "[%s] tree SCC table: size %ld, %ld elements, " "collision ratio: %f\n", pfx, (long) tree_scc_hash->size (), (long) tree_scc_hash->elements (), tree_scc_hash->collisions ()); hash_table<tree_scc_hasher>::iterator hiter; tree_scc *scc, *max_scc = NULL; unsigned max_length = 0; FOR_EACH_HASH_TABLE_ELEMENT (*tree_scc_hash, scc, x, hiter) { unsigned length = 0; tree_scc *s = scc; for (; s; s = s->next) length++; if (length > max_length) { max_length = length; max_scc = scc; } } fprintf (stderr, "[%s] tree SCC max chain length %u (size %u)\n", pfx, max_length, max_scc->len); fprintf (stderr, "[%s] Compared %lu SCCs, %lu collisions (%f)\n", pfx, num_scc_compares, num_scc_compare_collisions, num_scc_compare_collisions / (double) num_scc_compares); fprintf (stderr, "[%s] Merged %lu SCCs\n", pfx, num_sccs_merged); fprintf (stderr, "[%s] Merged %lu tree bodies\n", pfx, total_scc_size_merged); fprintf (stderr, "[%s] Merged %lu types\n", pfx, num_merged_types); fprintf (stderr, "[%s] %lu types prevailed (%lu associated trees)\n", pfx, num_prevailing_types, num_type_scc_trees); fprintf (stderr, "[%s] GIMPLE canonical type table: size %ld, " "%ld elements, %ld searches, %ld collisions (ratio: %f)\n", pfx, (long) htab_size (gimple_canonical_types), (long) htab_elements (gimple_canonical_types), (long) gimple_canonical_types->searches, (long) gimple_canonical_types->collisions, htab_collisions (gimple_canonical_types)); fprintf (stderr, "[%s] GIMPLE canonical type pointer-map: " "%lu elements, %ld searches\n", pfx, num_canonical_type_hash_entries, num_canonical_type_hash_queries); } print_lto_report (pfx); } /* Perform whole program analysis (WPA) on the callgraph and write out the optimization plan. */ static void do_whole_program_analysis (void) { symtab_node *node; lto_parallelism = 1; /* TODO: jobserver communicatoin is not supported, yet. */ if (!strcmp (flag_wpa, "jobserver")) lto_parallelism = -1; else { lto_parallelism = atoi (flag_wpa); if (lto_parallelism <= 0) lto_parallelism = 0; } timevar_start (TV_PHASE_OPT_GEN); /* Note that since we are in WPA mode, materialize_cgraph will not actually read in all the function bodies. It only materializes the decls and cgraph nodes so that analysis can be performed. */ materialize_cgraph (); /* Reading in the cgraph uses different timers, start timing WPA now. */ timevar_push (TV_WHOPR_WPA); if (pre_ipa_mem_report) { fprintf (stderr, "Memory consumption before IPA\n"); dump_memory_report (false); } symtab->function_flags_ready = true; if (symtab->dump_file) symtab_node::dump_table (symtab->dump_file); bitmap_obstack_initialize (NULL); symtab->state = IPA_SSA; execute_ipa_pass_list (g->get_passes ()->all_regular_ipa_passes); if (symtab->dump_file) { fprintf (symtab->dump_file, "Optimized "); symtab_node::dump_table (symtab->dump_file); } #ifdef ENABLE_CHECKING symtab_node::verify_symtab_nodes (); #endif bitmap_obstack_release (NULL); /* We are about to launch the final LTRANS phase, stop the WPA timer. 
*/ timevar_pop (TV_WHOPR_WPA); timevar_push (TV_WHOPR_PARTITIONING); if (flag_lto_partition == LTO_PARTITION_1TO1) lto_1_to_1_map (); else if (flag_lto_partition == LTO_PARTITION_MAX) lto_max_map (); else if (flag_lto_partition == LTO_PARTITION_ONE) lto_balanced_map (1); else if (flag_lto_partition == LTO_PARTITION_BALANCED) lto_balanced_map (PARAM_VALUE (PARAM_LTO_PARTITIONS)); else gcc_unreachable (); /* Inline summaries are needed for balanced partitioning. Free them now so the memory can be used for streamer caches. */ inline_free_summary (); /* AUX pointers are used by the partitioning code to keep track of the number of partitions a symbol is in. This is no longer needed. */ FOR_EACH_SYMBOL (node) node->aux = NULL; lto_stats.num_cgraph_partitions += ltrans_partitions.length (); /* Find out statics that need to be promoted to globals with hidden visibility because they are accessed from multiple partitions. */ lto_promote_cross_file_statics (); timevar_pop (TV_WHOPR_PARTITIONING); timevar_stop (TV_PHASE_OPT_GEN); /* Collect a last time - in lto_wpa_write_files we may end up forking with the idea that this doesn't increase memory usage. So we absolutely do not want to collect after that. */ ggc_collect (); timevar_start (TV_PHASE_STREAM_OUT); if (!quiet_flag) { fprintf (stderr, "\nStreaming out"); fflush (stderr); } lto_wpa_write_files (); if (!quiet_flag) fprintf (stderr, "\n"); timevar_stop (TV_PHASE_STREAM_OUT); if (post_ipa_mem_report) { fprintf (stderr, "Memory consumption after IPA\n"); dump_memory_report (false); } /* Show the LTO report before launching LTRANS. */ if (flag_lto_report || (flag_wpa && flag_lto_report_wpa)) print_lto_report_1 (); if (mem_report_wpa) dump_memory_report (true); } static GTY(()) tree lto_eh_personality_decl; /* Return the LTO personality function decl. */ tree lto_eh_personality (void) { if (!lto_eh_personality_decl) { /* Use the first personality DECL for our personality if we don't support multiple ones. This ensures that we don't artificially create the need for them in a single-language program. */ if (first_personality_decl && !dwarf2out_do_cfi_asm ()) lto_eh_personality_decl = first_personality_decl; else lto_eh_personality_decl = lhd_gcc_personality (); } return lto_eh_personality_decl; } /* Set the process name based on the LTO mode. */ static void lto_process_name (void) { if (flag_lto) setproctitle ("lto1-lto"); if (flag_wpa) setproctitle ("lto1-wpa"); if (flag_ltrans) setproctitle ("lto1-ltrans"); } /* Initialize the LTO front end. */ static void lto_init (void) { lto_process_name (); lto_streamer_hooks_init (); lto_reader_init (); lto_set_in_hooks (NULL, get_section_data, free_section_data); memset (&lto_stats, 0, sizeof (lto_stats)); bitmap_obstack_initialize (NULL); gimple_register_cfg_hooks (); #ifndef ACCEL_COMPILER unsigned char *table = ggc_vec_alloc<unsigned char> (MAX_MACHINE_MODE); for (int m = 0; m < MAX_MACHINE_MODE; m++) table[m] = m; lto_mode_identity_table = table; #endif } /* Main entry point for the GIMPLE front end. This front end has three main personalities: - LTO (-flto). All the object files on the command line are loaded in memory and processed as a single translation unit. This is the traditional link-time optimization behavior. - WPA (-fwpa). Only the callgraph and summary information for files in the command file are loaded. A single callgraph (without function bodies) is instantiated for the whole set of files. IPA passes are only allowed to analyze the call graph and make transformation decisions.
The callgraph is partitioned, each partition is written to a new object file together with the transformation decisions. - LTRANS (-fltrans). Similar to -flto but it prevents the IPA summary files from running again. Since WPA computed summary information and decided what transformations to apply, LTRANS simply applies them. */ void lto_main (void) { /* LTO is called as a front end, even though it is not a front end. Because it is called as a front end, TV_PHASE_PARSING and TV_PARSE_GLOBAL are active, and we need to turn them off while doing LTO. Later we turn them back on so they are active up in toplev.c. */ timevar_pop (TV_PARSE_GLOBAL); timevar_stop (TV_PHASE_PARSING); timevar_start (TV_PHASE_SETUP); /* Initialize the LTO front end. */ lto_init (); timevar_stop (TV_PHASE_SETUP); timevar_start (TV_PHASE_STREAM_IN); /* Read all the symbols and call graph from all the files in the command line. */ read_cgraph_and_symbols (num_in_fnames, in_fnames); timevar_stop (TV_PHASE_STREAM_IN); if (!seen_error ()) { /* If WPA is enabled analyze the whole call graph and create an optimization plan. Otherwise, read in all the function bodies and continue with optimization. */ if (flag_wpa) do_whole_program_analysis (); else { timevar_start (TV_PHASE_OPT_GEN); materialize_cgraph (); if (!flag_ltrans) lto_promote_statics_nonwpa (); /* Let the middle end know that we have read and merged all of the input files. */ symtab->compile (); timevar_stop (TV_PHASE_OPT_GEN); /* FIXME lto, if the processes spawned by WPA fail, we miss the chance to print WPA's report, so WPA will call print_lto_report before launching LTRANS. If LTRANS was launched directly by the driver we would not need to do this. */ if (flag_lto_report || (flag_wpa && flag_lto_report_wpa)) print_lto_report_1 (); } } /* Here we make LTO pretend to be a parser. */ timevar_start (TV_PHASE_PARSING); timevar_push (TV_PARSE_GLOBAL); } #include "gt-lto-lto.h"
apc-llc/gcc-5.1.1-knc
gcc/lto/lto.c
C
gpl-2.0
103,409
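A side note on the SCC-merging code in the lto.c file above: when unify_scc finds an equal prevailing SCC, it aligns two pair arrays (slot-index/tree in map2, prevailing/tree in map) by qsort-ing both with cmp_tree on the shared tree pointer, so the i-th prevailing node lines up with the i-th streamer-cache slot and no extra hash map is needed. The sketch below reproduces just that idiom in isolation; all names (struct keyed, cmp_key) are hypothetical illustrations, not GCC code.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* A pair associating an arbitrary payload with a pointer key. */
struct keyed { void *key; unsigned payload; };

/* Order pairs by the pointer key, the way cmp_tree orders map
   entries by their second (tree) pointer.  */
static int cmp_key (const void *a_, const void *b_)
{
  const struct keyed *a = (const struct keyed *) a_;
  const struct keyed *b = (const struct keyed *) b_;
  if (a->key == b->key)
    return 0;
  return ((uintptr_t) a->key < (uintptr_t) b->key) ? -1 : 1;
}

int main (void)
{
  int nodes[4];  /* stand-ins for tree nodes; only their addresses matter */
  /* One array pairs the nodes with cache slots, the other pairs the
     same nodes (in a different order) with "prevailing" ids.  */
  struct keyed slots[4]   = { { &nodes[2], 0 }, { &nodes[0], 1 },
                              { &nodes[3], 2 }, { &nodes[1], 3 } };
  struct keyed prevail[4] = { { &nodes[0], 10 }, { &nodes[1], 11 },
                              { &nodes[2], 12 }, { &nodes[3], 13 } };

  /* After sorting both arrays by the shared key, entry i of one
     corresponds to entry i of the other.  */
  qsort (slots, 4, sizeof (struct keyed), cmp_key);
  qsort (prevail, 4, sizeof (struct keyed), cmp_key);

  for (int i = 0; i < 4; i++)
    printf ("cache slot %u gets prevailing id %u\n",
            slots[i].payload, prevail[i].payload);
  return 0;
}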
/* * LPC32xx SSP interface (SPI mode) * * (C) Copyright 2014 DENX Software Engineering GmbH * Written-by: Albert ARIBAUD <albert.aribaud@3adev.fr> * * SPDX-License-Identifier: GPL-2.0+ */ #include <common.h> #include <linux/compat.h> #include <asm/io.h> #include <malloc.h> #include <spi.h> #include <asm/arch/clk.h> /* SSP chip registers */ struct ssp_regs { u32 cr0; u32 cr1; u32 data; u32 sr; u32 cpsr; u32 imsc; u32 ris; u32 mis; u32 icr; u32 dmacr; }; /* CR1 register defines */ #define SSP_CR1_SSP_ENABLE 0x0002 /* SR register defines */ #define SSP_SR_TNF 0x0002 /* SSP status TX FIFO not full bit */ #define SSP_SR_RNE 0x0004 /* SSP status RX FIFO not empty bit */ /* lpc32xx spi slave */ struct lpc32xx_spi_slave { struct spi_slave slave; struct ssp_regs *regs; }; static inline struct lpc32xx_spi_slave *to_lpc32xx_spi_slave( struct spi_slave *slave) { return container_of(slave, struct lpc32xx_spi_slave, slave); } /* spi_init is called during boot when CONFIG_CMD_SPI is defined */ void spi_init(void) { /* * nothing to do: clocking was enabled in lpc32xx_ssp_enable() * and configuration will be done in spi_setup_slave() */ } /* the following is called in sequence by do_spi_xfer() */ struct spi_slave *spi_setup_slave(uint bus, uint cs, uint max_hz, uint mode) { struct lpc32xx_spi_slave *lslave; /* we only set up SSP0 for now, so ignore bus */ if (mode & SPI_3WIRE) { error("3-wire mode not supported\n"); return NULL; } if (mode & SPI_SLAVE) { error("slave mode not supported\n"); return NULL; } if (mode & SPI_PREAMBLE) { error("preamble byte skipping not supported\n"); return NULL; } lslave = spi_alloc_slave(struct lpc32xx_spi_slave, bus, cs); if (!lslave) { printf("SPI_error: Failed to allocate lpc32xx_spi_slave\n"); return NULL; } lslave->regs = (struct ssp_regs *)SSP0_BASE; /* * 8 bit frame, SPI fmt, 500kbps -> clock divider is 26. * Set SCR to 0 and CPSDVSR to 26. */ writel(0x7, &lslave->regs->cr0); /* 8-bit chunks, SPI, 1 clk/bit */ writel(26, &lslave->regs->cpsr); /* SSP clock = HCLK/26 = 500kbps */ writel(0, &lslave->regs->imsc); /* do not raise any interrupts */ writel(0, &lslave->regs->icr); /* clear any pending interrupt */ writel(0, &lslave->regs->dmacr); /* do not do DMAs */ writel(SSP_CR1_SSP_ENABLE, &lslave->regs->cr1); /* enable SSP0 */ return &lslave->slave; } void spi_free_slave(struct spi_slave *slave) { struct lpc32xx_spi_slave *lslave = to_lpc32xx_spi_slave(slave); debug("(lpc32xx) spi_free_slave: 0x%08x\n", (u32)lslave); free(lslave); } int spi_claim_bus(struct spi_slave *slave) { /* only one bus and slave so far, always available */ return 0; } int spi_xfer(struct spi_slave *slave, unsigned int bitlen, const void *dout, void *din, unsigned long flags) { struct lpc32xx_spi_slave *lslave = to_lpc32xx_spi_slave(slave); int bytelen = bitlen >> 3; int idx_out = 0; int idx_in = 0; int start_time; start_time = get_timer(0); while ((idx_out < bytelen) || (idx_in < bytelen)) { int status = readl(&lslave->regs->sr); if ((idx_out < bytelen) && (status & SSP_SR_TNF)) writel(((u8 *)dout)[idx_out++], &lslave->regs->data); if ((idx_in < bytelen) && (status & SSP_SR_RNE)) ((u8 *)din)[idx_in++] = readl(&lslave->regs->data); if (get_timer(start_time) >= CONFIG_LPC32XX_SSP_TIMEOUT) return -1; } return 0; } void spi_release_bus(struct spi_slave *slave) { /* do nothing */ }
mdxy2010/forlinux-ok6410
u-boot15/drivers/spi/lpc32xx_ssp.c
C
gpl-2.0
3,430
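The spi_xfer routine in the LPC32xx driver above is the usual polled full-duplex FIFO loop: write while the TX FIFO has room, read while the RX FIFO has data, and keep the two byte indexes independent so neither direction can starve the other. Below is a minimal self-contained sketch of the same loop against a simulated one-byte loopback FIFO; all names (read_sr, write_data, xfer_polled) are hypothetical stand-ins, not U-Boot API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SR_TNF 0x0002 /* TX FIFO not full */
#define SR_RNE 0x0004 /* RX FIFO not empty */

/* Simulated single-entry loopback FIFO standing in for the SSP data
   register: a write makes exactly one byte readable. */
static uint8_t fifo_byte;
static int fifo_full;

static uint32_t read_sr (void)       { return fifo_full ? SR_RNE : SR_TNF; }
static void write_data (uint8_t v)   { fifo_byte = v; fifo_full = 1; }
static uint8_t read_data (void)      { fifo_full = 0; return fifo_byte; }

/* Polled full-duplex transfer: independent TX and RX indexes, with a
   loop-count bound playing the role of the driver's timer timeout. */
static int xfer_polled (const uint8_t *out, uint8_t *in, int len)
{
  int tx = 0, rx = 0;
  unsigned spins = 0;

  while (tx < len || rx < len)
    {
      uint32_t status = read_sr ();

      if (tx < len && (status & SR_TNF))
        write_data (out[tx++]);
      if (rx < len && (status & SR_RNE))
        in[rx++] = read_data ();
      if (++spins > 1000000) /* timeout guard */
        return -1;
    }
  return 0;
}

int main (void)
{
  const uint8_t out[4] = { 0xde, 0xad, 0xbe, 0xef };
  uint8_t in[4] = { 0 };

  if (xfer_polled (out, in, 4) == 0 && memcmp (out, in, 4) == 0)
    printf ("loopback transfer ok\n");
  return 0;
}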
/* * TI OMAP I2C master mode driver * * Copyright (C) 2003 MontaVista Software, Inc. * Copyright (C) 2004 Texas Instruments. * * Updated to work with multiple I2C interfaces on 24xx by * Tony Lindgren <tony@atomide.com> and Imre Deak <imre.deak@nokia.com> * Copyright (C) 2005 Nokia Corporation * * Cleaned up by Juha Yrjölä <juha.yrjola@nokia.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/completion.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <asm/io.h> /* timeout waiting for the controller to respond */ #define OMAP_I2C_TIMEOUT (msecs_to_jiffies(1000)) #define OMAP_I2C_REV_REG 0x00 #define OMAP_I2C_IE_REG 0x04 #define OMAP_I2C_STAT_REG 0x08 #define OMAP_I2C_IV_REG 0x0c #define OMAP_I2C_SYSS_REG 0x10 #define OMAP_I2C_BUF_REG 0x14 #define OMAP_I2C_CNT_REG 0x18 #define OMAP_I2C_DATA_REG 0x1c #define OMAP_I2C_SYSC_REG 0x20 #define OMAP_I2C_CON_REG 0x24 #define OMAP_I2C_OA_REG 0x28 #define OMAP_I2C_SA_REG 0x2c #define OMAP_I2C_PSC_REG 0x30 #define OMAP_I2C_SCLL_REG 0x34 #define OMAP_I2C_SCLH_REG 0x38 #define OMAP_I2C_SYSTEST_REG 0x3c /* I2C Interrupt Enable Register (OMAP_I2C_IE): */ #define OMAP_I2C_IE_XRDY (1 << 4) /* TX data ready int enable */ #define OMAP_I2C_IE_RRDY (1 << 3) /* RX data ready int enable */ #define OMAP_I2C_IE_ARDY (1 << 2) /* Access ready int enable */ #define OMAP_I2C_IE_NACK (1 << 1) /* No ack interrupt enable */ #define OMAP_I2C_IE_AL (1 << 0) /* Arbitration lost int ena */ /* I2C Status Register (OMAP_I2C_STAT): */ #define OMAP_I2C_STAT_SBD (1 << 15) /* Single byte data */ #define OMAP_I2C_STAT_BB (1 << 12) /* Bus busy */ #define OMAP_I2C_STAT_ROVR (1 << 11) /* Receive overrun */ #define OMAP_I2C_STAT_XUDF (1 << 10) /* Transmit underflow */ #define OMAP_I2C_STAT_AAS (1 << 9) /* Address as slave */ #define OMAP_I2C_STAT_AD0 (1 << 8) /* Address zero */ #define OMAP_I2C_STAT_XRDY (1 << 4) /* Transmit data ready */ #define OMAP_I2C_STAT_RRDY (1 << 3) /* Receive data ready */ #define OMAP_I2C_STAT_ARDY (1 << 2) /* Register access ready */ #define OMAP_I2C_STAT_NACK (1 << 1) /* No ack interrupt enable */ #define OMAP_I2C_STAT_AL (1 << 0) /* Arbitration lost int ena */ /* I2C Buffer Configuration Register (OMAP_I2C_BUF): */ #define OMAP_I2C_BUF_RDMA_EN (1 << 15) /* RX DMA channel enable */ #define OMAP_I2C_BUF_XDMA_EN (1 << 7) /* TX DMA channel enable */ /* I2C Configuration Register (OMAP_I2C_CON): */ #define OMAP_I2C_CON_EN (1 << 15) /* I2C module enable */ #define OMAP_I2C_CON_BE (1 << 14) /* Big endian mode */ #define OMAP_I2C_CON_STB (1 << 11) /* Start byte mode (master) */ #define OMAP_I2C_CON_MST (1 << 10) /* Master/slave mode */ #define OMAP_I2C_CON_TRX (1 << 9) /* TX/RX mode (master only) */ #define OMAP_I2C_CON_XA (1 << 8) /* Expand address */ #define OMAP_I2C_CON_RM 
(1 << 2) /* Repeat mode (master only) */ #define OMAP_I2C_CON_STP (1 << 1) /* Stop cond (master only) */ #define OMAP_I2C_CON_STT (1 << 0) /* Start condition (master) */ /* I2C System Test Register (OMAP_I2C_SYSTEST): */ #ifdef DEBUG #define OMAP_I2C_SYSTEST_ST_EN (1 << 15) /* System test enable */ #define OMAP_I2C_SYSTEST_FREE (1 << 14) /* Free running mode */ #define OMAP_I2C_SYSTEST_TMODE_MASK (3 << 12) /* Test mode select */ #define OMAP_I2C_SYSTEST_TMODE_SHIFT (12) /* Test mode select */ #define OMAP_I2C_SYSTEST_SCL_I (1 << 3) /* SCL line sense in */ #define OMAP_I2C_SYSTEST_SCL_O (1 << 2) /* SCL line drive out */ #define OMAP_I2C_SYSTEST_SDA_I (1 << 1) /* SDA line sense in */ #define OMAP_I2C_SYSTEST_SDA_O (1 << 0) /* SDA line drive out */ #endif /* I2C System Status register (OMAP_I2C_SYSS): */ #define OMAP_I2C_SYSS_RDONE (1 << 0) /* Reset Done */ /* I2C System Configuration Register (OMAP_I2C_SYSC): */ #define OMAP_I2C_SYSC_SRST (1 << 1) /* Soft Reset */ /* REVISIT: Use platform_data instead of module parameters */ /* Fast Mode = 400 kHz, Standard = 100 kHz */ static int clock = 100; /* Default: 100 kHz */ module_param(clock, int, 0); MODULE_PARM_DESC(clock, "Set I2C clock in kHz: 400=fast mode (default == 100)"); struct omap_i2c_dev { struct device *dev; void __iomem *base; /* virtual */ int irq; struct clk *iclk; /* Interface clock */ struct clk *fclk; /* Functional clock */ struct completion cmd_complete; struct resource *ioarea; u16 cmd_err; u8 *buf; size_t buf_len; struct i2c_adapter adapter; unsigned rev1:1; }; static inline void omap_i2c_write_reg(struct omap_i2c_dev *i2c_dev, int reg, u16 val) { __raw_writew(val, i2c_dev->base + reg); } static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg) { return __raw_readw(i2c_dev->base + reg); } static int omap_i2c_get_clocks(struct omap_i2c_dev *dev) { if (cpu_is_omap16xx() || cpu_is_omap24xx()) { dev->iclk = clk_get(dev->dev, "i2c_ick"); if (IS_ERR(dev->iclk)) { dev->iclk = NULL; return -ENODEV; } } dev->fclk = clk_get(dev->dev, "i2c_fck"); if (IS_ERR(dev->fclk)) { if (dev->iclk != NULL) { clk_put(dev->iclk); dev->iclk = NULL; } dev->fclk = NULL; return -ENODEV; } return 0; } static void omap_i2c_put_clocks(struct omap_i2c_dev *dev) { clk_put(dev->fclk); dev->fclk = NULL; if (dev->iclk != NULL) { clk_put(dev->iclk); dev->iclk = NULL; } } static void omap_i2c_enable_clocks(struct omap_i2c_dev *dev) { if (dev->iclk != NULL) clk_enable(dev->iclk); clk_enable(dev->fclk); } static void omap_i2c_disable_clocks(struct omap_i2c_dev *dev) { if (dev->iclk != NULL) clk_disable(dev->iclk); clk_disable(dev->fclk); } static int omap_i2c_init(struct omap_i2c_dev *dev) { u16 psc = 0; unsigned long fclk_rate = 12000000; unsigned long timeout; if (!dev->rev1) { omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, OMAP_I2C_SYSC_SRST); /* For some reason we need to set the EN bit before the * reset done bit gets set. 
*/ timeout = jiffies + OMAP_I2C_TIMEOUT; omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN); while (!(omap_i2c_read_reg(dev, OMAP_I2C_SYSS_REG) & OMAP_I2C_SYSS_RDONE)) { if (time_after(jiffies, timeout)) { dev_warn(dev->dev, "timeout waiting " "for controller reset\n"); return -ETIMEDOUT; } msleep(1); } } omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0); if (cpu_class_is_omap1()) { struct clk *armxor_ck; armxor_ck = clk_get(NULL, "armxor_ck"); if (IS_ERR(armxor_ck)) dev_warn(dev->dev, "Could not get armxor_ck\n"); else { fclk_rate = clk_get_rate(armxor_ck); clk_put(armxor_ck); } /* TRM for 5912 says the I2C clock must be prescaled to be * between 7 - 12 MHz. The XOR input clock is typically * 12, 13 or 19.2 MHz. So we should have code that produces: * * XOR MHz Divider Prescaler * 12 1 0 * 13 2 1 * 19.2 2 1 */ if (fclk_rate > 12000000) psc = fclk_rate / 12000000; } /* Setup clock prescaler to obtain approx 12MHz I2C module clock: */ omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, psc); /* Program desired operating rate */ fclk_rate /= (psc + 1) * 1000; if (psc > 2) psc = 2; omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, fclk_rate / (clock * 2) - 7 + psc); omap_i2c_write_reg(dev, OMAP_I2C_SCLH_REG, fclk_rate / (clock * 2) - 7 + psc); /* Take the I2C module out of reset: */ omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN); /* Enable interrupts */ omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, (OMAP_I2C_IE_XRDY | OMAP_I2C_IE_RRDY | OMAP_I2C_IE_ARDY | OMAP_I2C_IE_NACK | OMAP_I2C_IE_AL)); return 0; } /* * Waiting on Bus Busy */ static int omap_i2c_wait_for_bb(struct omap_i2c_dev *dev) { unsigned long timeout; timeout = jiffies + OMAP_I2C_TIMEOUT; while (omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG) & OMAP_I2C_STAT_BB) { if (time_after(jiffies, timeout)) { dev_warn(dev->dev, "timeout waiting for bus ready\n"); return -ETIMEDOUT; } msleep(1); } return 0; } /* * Low level master read/write transaction. */ static int omap_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop) { struct omap_i2c_dev *dev = i2c_get_adapdata(adap); int r; u16 w; dev_dbg(dev->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n", msg->addr, msg->len, msg->flags, stop); if (msg->len == 0) return -EINVAL; omap_i2c_write_reg(dev, OMAP_I2C_SA_REG, msg->addr); /* REVISIT: Could the STB bit of I2C_CON be used with probing? */ dev->buf = msg->buf; dev->buf_len = msg->len; omap_i2c_write_reg(dev, OMAP_I2C_CNT_REG, dev->buf_len); init_completion(&dev->cmd_complete); dev->cmd_err = 0; w = OMAP_I2C_CON_EN | OMAP_I2C_CON_MST | OMAP_I2C_CON_STT; if (msg->flags & I2C_M_TEN) w |= OMAP_I2C_CON_XA; if (!(msg->flags & I2C_M_RD)) w |= OMAP_I2C_CON_TRX; if (stop) w |= OMAP_I2C_CON_STP; omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w); r = wait_for_completion_interruptible_timeout(&dev->cmd_complete, OMAP_I2C_TIMEOUT); dev->buf_len = 0; if (r < 0) return r; if (r == 0) { dev_err(dev->dev, "controller timed out\n"); omap_i2c_init(dev); return -ETIMEDOUT; } if (likely(!dev->cmd_err)) return 0; /* We have an error */ if (dev->cmd_err & (OMAP_I2C_STAT_AL | OMAP_I2C_STAT_ROVR | OMAP_I2C_STAT_XUDF)) { omap_i2c_init(dev); return -EIO; } if (dev->cmd_err & OMAP_I2C_STAT_NACK) { if (msg->flags & I2C_M_IGNORE_NAK) return 0; if (stop) { w = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG); w |= OMAP_I2C_CON_STP; omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w); } return -EREMOTEIO; } return -EIO; } /* * Prepare controller for a transaction and call omap_i2c_xfer_msg * to do the work during IRQ processing. 
*/ static int omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct omap_i2c_dev *dev = i2c_get_adapdata(adap); int i; int r; omap_i2c_enable_clocks(dev); /* REVISIT: initialize and use adap->retries. This is an optional * feature */ if ((r = omap_i2c_wait_for_bb(dev)) < 0) goto out; for (i = 0; i < num; i++) { r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1))); if (r != 0) break; } if (r == 0) r = num; out: omap_i2c_disable_clocks(dev); return r; } static u32 omap_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); } static inline void omap_i2c_complete_cmd(struct omap_i2c_dev *dev, u16 err) { dev->cmd_err |= err; complete(&dev->cmd_complete); } static inline void omap_i2c_ack_stat(struct omap_i2c_dev *dev, u16 stat) { omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, stat); } static irqreturn_t omap_i2c_rev1_isr(int this_irq, void *dev_id) { struct omap_i2c_dev *dev = dev_id; u16 iv, w; iv = omap_i2c_read_reg(dev, OMAP_I2C_IV_REG); switch (iv) { case 0x00: /* None */ break; case 0x01: /* Arbitration lost */ dev_err(dev->dev, "Arbitration lost\n"); omap_i2c_complete_cmd(dev, OMAP_I2C_STAT_AL); break; case 0x02: /* No acknowledgement */ omap_i2c_complete_cmd(dev, OMAP_I2C_STAT_NACK); omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_STP); break; case 0x03: /* Register access ready */ omap_i2c_complete_cmd(dev, 0); break; case 0x04: /* Receive data ready */ if (dev->buf_len) { w = omap_i2c_read_reg(dev, OMAP_I2C_DATA_REG); *dev->buf++ = w; dev->buf_len--; if (dev->buf_len) { *dev->buf++ = w >> 8; dev->buf_len--; } } else dev_err(dev->dev, "RRDY IRQ while no data requested\n"); break; case 0x05: /* Transmit data ready */ if (dev->buf_len) { w = *dev->buf++; dev->buf_len--; if (dev->buf_len) { w |= *dev->buf++ << 8; dev->buf_len--; } omap_i2c_write_reg(dev, OMAP_I2C_DATA_REG, w); } else dev_err(dev->dev, "XRDY IRQ while no data to send\n"); break; default: return IRQ_NONE; } return IRQ_HANDLED; } static irqreturn_t omap_i2c_isr(int this_irq, void *dev_id) { struct omap_i2c_dev *dev = dev_id; u16 bits; u16 stat, w; int count = 0; bits = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG); while ((stat = (omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG))) & bits) { dev_dbg(dev->dev, "IRQ (ISR = 0x%04x)\n", stat); if (count++ == 100) { dev_warn(dev->dev, "Too much work in one IRQ\n"); break; } omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, stat); if (stat & OMAP_I2C_STAT_ARDY) { omap_i2c_complete_cmd(dev, 0); continue; } if (stat & OMAP_I2C_STAT_RRDY) { w = omap_i2c_read_reg(dev, OMAP_I2C_DATA_REG); if (dev->buf_len) { *dev->buf++ = w; dev->buf_len--; if (dev->buf_len) { *dev->buf++ = w >> 8; dev->buf_len--; } } else dev_err(dev->dev, "RRDY IRQ while no data " "requested\n"); omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RRDY); continue; } if (stat & OMAP_I2C_STAT_XRDY) { w = 0; if (dev->buf_len) { w = *dev->buf++; dev->buf_len--; if (dev->buf_len) { w |= *dev->buf++ << 8; dev->buf_len--; } } else dev_err(dev->dev, "XRDY IRQ while no " "data to send\n"); omap_i2c_write_reg(dev, OMAP_I2C_DATA_REG, w); omap_i2c_ack_stat(dev, OMAP_I2C_STAT_XRDY); continue; } if (stat & OMAP_I2C_STAT_ROVR) { dev_err(dev->dev, "Receive overrun\n"); dev->cmd_err |= OMAP_I2C_STAT_ROVR; } if (stat & OMAP_I2C_STAT_XUDF) { dev_err(dev->dev, "Transmit overflow\n"); dev->cmd_err |= OMAP_I2C_STAT_XUDF; } if (stat & OMAP_I2C_STAT_NACK) { omap_i2c_complete_cmd(dev, OMAP_I2C_STAT_NACK); omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_STP); } if (stat & OMAP_I2C_STAT_AL) 
{ dev_err(dev->dev, "Arbitration lost\n"); omap_i2c_complete_cmd(dev, OMAP_I2C_STAT_AL); } } return count ? IRQ_HANDLED : IRQ_NONE; } static const struct i2c_algorithm omap_i2c_algo = { .master_xfer = omap_i2c_xfer, .functionality = omap_i2c_func, }; static int omap_i2c_probe(struct platform_device *pdev) { struct omap_i2c_dev *dev; struct i2c_adapter *adap; struct resource *mem, *irq, *ioarea; int r; /* NOTE: driver uses the static register mapping */ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(&pdev->dev, "no mem resource?\n"); return -ENODEV; } irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!irq) { dev_err(&pdev->dev, "no irq resource?\n"); return -ENODEV; } ioarea = request_mem_region(mem->start, (mem->end - mem->start) + 1, pdev->name); if (!ioarea) { dev_err(&pdev->dev, "I2C region already claimed\n"); return -EBUSY; } if (clock > 200) clock = 400; /* Fast mode */ else clock = 100; /* Standard mode */ dev = kzalloc(sizeof(struct omap_i2c_dev), GFP_KERNEL); if (!dev) { r = -ENOMEM; goto err_release_region; } dev->dev = &pdev->dev; dev->irq = irq->start; dev->base = (void __iomem *) IO_ADDRESS(mem->start); platform_set_drvdata(pdev, dev); if ((r = omap_i2c_get_clocks(dev)) != 0) goto err_free_mem; omap_i2c_enable_clocks(dev); if (cpu_is_omap15xx()) dev->rev1 = omap_i2c_read_reg(dev, OMAP_I2C_REV_REG) < 0x20; /* reset ASAP, clearing any IRQs */ omap_i2c_init(dev); r = request_irq(dev->irq, dev->rev1 ? omap_i2c_rev1_isr : omap_i2c_isr, 0, pdev->name, dev); if (r) { dev_err(dev->dev, "failure requesting irq %i\n", dev->irq); goto err_unuse_clocks; } r = omap_i2c_read_reg(dev, OMAP_I2C_REV_REG) & 0xff; dev_info(dev->dev, "bus %d rev%d.%d at %d kHz\n", pdev->id, r >> 4, r & 0xf, clock); adap = &dev->adapter; i2c_set_adapdata(adap, dev); adap->owner = THIS_MODULE; adap->class = I2C_CLASS_HWMON; strncpy(adap->name, "OMAP I2C adapter", sizeof(adap->name)); adap->algo = &omap_i2c_algo; adap->dev.parent = &pdev->dev; /* i2c device drivers may be active on return from add_adapter() */ adap->nr = pdev->id; r = i2c_add_numbered_adapter(adap); if (r) { dev_err(dev->dev, "failure adding adapter\n"); goto err_free_irq; } omap_i2c_disable_clocks(dev); return 0; err_free_irq: free_irq(dev->irq, dev); err_unuse_clocks: omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0); omap_i2c_disable_clocks(dev); omap_i2c_put_clocks(dev); err_free_mem: platform_set_drvdata(pdev, NULL); kfree(dev); err_release_region: release_mem_region(mem->start, (mem->end - mem->start) + 1); return r; } static int omap_i2c_remove(struct platform_device *pdev) { struct omap_i2c_dev *dev = platform_get_drvdata(pdev); struct resource *mem; platform_set_drvdata(pdev, NULL); free_irq(dev->irq, dev); i2c_del_adapter(&dev->adapter); omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0); omap_i2c_put_clocks(dev); kfree(dev); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(mem->start, (mem->end - mem->start) + 1); return 0; } static struct platform_driver omap_i2c_driver = { .probe = omap_i2c_probe, .remove = omap_i2c_remove, .driver = { .name = "i2c_omap", .owner = THIS_MODULE, }, }; /* I2C may be needed to bring up other drivers */ static int __init omap_i2c_init_driver(void) { return platform_driver_register(&omap_i2c_driver); } subsys_initcall(omap_i2c_init_driver); static void __exit omap_i2c_exit_driver(void) { platform_driver_unregister(&omap_i2c_driver); } module_exit(omap_i2c_exit_driver); MODULE_AUTHOR("MontaVista Software, Inc. 
(and others)"); MODULE_DESCRIPTION("TI OMAP I2C bus adapter"); MODULE_LICENSE("GPL");
janrinze/loox7xxport.loox2624
drivers/i2c/busses/i2c-omap.c
C
gpl-2.0
18,070
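/*
 * A standalone sketch of the SCL timing math that omap_i2c_init() above
 * performs before writing OMAP_I2C_PSC_REG and OMAP_I2C_SCLL/SCLH_REG.
 * The arithmetic is easy to misread because psc is first used as the
 * prescaler divider and then, capped at 2, as a fudge term in the divisor.
 * This repeats the same formula so it can be checked in isolation; the
 * helper name and the sample clock values are illustrative, not from the
 * driver.
 */
#include <stdio.h>

static void omap_i2c_timings(unsigned long fclk_hz, unsigned long bus_khz)
{
    unsigned long psc = 0;
    unsigned long rate_khz;

    /* Prescale the functional clock to roughly 12 MHz, as the driver does */
    if (fclk_hz > 12000000)
        psc = fclk_hz / 12000000;

    rate_khz = fclk_hz / ((psc + 1) * 1000);
    if (psc > 2)
        psc = 2;

    /* Same value the driver writes into both SCLL and SCLH */
    printf("fclk=%lu Hz psc=%lu SCLL=SCLH=%lu\n",
           fclk_hz, psc, rate_khz / (bus_khz * 2) - 7 + psc);
}

int main(void)
{
    omap_i2c_timings(19200000, 100); /* 19.2 MHz XOR clock, standard mode */
    omap_i2c_timings(12000000, 400); /* 12 MHz clock, fast mode */
    return 0;
}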
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <drm/amdgpu_drm.h>
#include "pp_instance.h"
#include "smumgr.h"
#include "cgs_common.h"
#include "linux/delay.h"

int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
{
    struct pp_smumgr *smumgr;

    if ((handle == NULL) || (pp_init == NULL))
        return -EINVAL;

    smumgr = kzalloc(sizeof(struct pp_smumgr), GFP_KERNEL);
    if (smumgr == NULL)
        return -ENOMEM;

    smumgr->device = pp_init->device;
    smumgr->chip_family = pp_init->chip_family;
    smumgr->chip_id = pp_init->chip_id;
    smumgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
    smumgr->reload_fw = 1;
    handle->smu_mgr = smumgr;

    switch (smumgr->chip_family) {
    case AMDGPU_FAMILY_CZ:
        cz_smum_init(smumgr);
        break;
    case AMDGPU_FAMILY_VI:
        switch (smumgr->chip_id) {
        case CHIP_TOPAZ:
            iceland_smum_init(smumgr);
            break;
        case CHIP_TONGA:
            tonga_smum_init(smumgr);
            break;
        case CHIP_FIJI:
            fiji_smum_init(smumgr);
            break;
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_POLARIS12:
            polaris10_smum_init(smumgr);
            break;
        default:
            /* unsupported VI chip: drop the half-initialized manager
             * rather than leaking it on this error path */
            kfree(smumgr);
            handle->smu_mgr = NULL;
            return -EINVAL;
        }
        break;
    default:
        kfree(smumgr);
        handle->smu_mgr = NULL;
        return -EINVAL;
    }

    return 0;
}

int smum_fini(struct pp_smumgr *smumgr)
{
    kfree(smumgr->device);
    kfree(smumgr);
    return 0;
}

int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
        void *input, void *output, void *storage, int result)
{
    if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable)
        return hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable(hwmgr);

    return 0;
}

int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
        void *input, void *output, void *storage, int result)
{
    if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table)
        return hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table(hwmgr);

    return 0;
}

int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr)
{
    if (NULL != hwmgr->smumgr->smumgr_funcs->update_sclk_threshold)
        return hwmgr->smumgr->smumgr_funcs->update_sclk_threshold(hwmgr);

    return 0;
}

int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
{
    if (NULL != hwmgr->smumgr->smumgr_funcs->update_smc_table)
        return hwmgr->smumgr->smumgr_funcs->update_smc_table(hwmgr, type);

    return 0;
}

uint32_t smum_get_offsetof(struct pp_smumgr *smumgr, uint32_t type,
        uint32_t member)
{
    if (NULL != smumgr->smumgr_funcs->get_offsetof)
        return smumgr->smumgr_funcs->get_offsetof(type, member);

    return 0;
}

int smum_process_firmware_header(struct pp_hwmgr *hwmgr)
{
    if (NULL != hwmgr->smumgr->smumgr_funcs->process_firmware_header)
        return hwmgr->smumgr->smumgr_funcs->process_firmware_header(hwmgr);

    return 0;
}

int smum_get_argument(struct pp_smumgr *smumgr)
{
    if (NULL != smumgr->smumgr_funcs->get_argument)
        return smumgr->smumgr_funcs->get_argument(smumgr);

    return 0;
}

uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value)
{
    if (NULL != smumgr->smumgr_funcs->get_mac_definition)
        return smumgr->smumgr_funcs->get_mac_definition(value);

    return 0;
}

int smum_download_powerplay_table(struct pp_smumgr *smumgr, void **table)
{
    if (NULL != smumgr->smumgr_funcs->download_pptable_settings)
        return smumgr->smumgr_funcs->download_pptable_settings(smumgr, table);

    return 0;
}

int smum_upload_powerplay_table(struct pp_smumgr *smumgr)
{
    if (NULL != smumgr->smumgr_funcs->upload_pptable_settings)
        return smumgr->smumgr_funcs->upload_pptable_settings(smumgr);

    return 0;
}

int smum_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
{
    if (smumgr == NULL || smumgr->smumgr_funcs->send_msg_to_smc == NULL)
        return -EINVAL;

    return smumgr->smumgr_funcs->send_msg_to_smc(smumgr, msg);
}

int smum_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
        uint16_t msg, uint32_t parameter)
{
    if (smumgr == NULL ||
        smumgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL)
        return -EINVAL;

    return smumgr->smumgr_funcs->send_msg_to_smc_with_parameter(
            smumgr, msg, parameter);
}

/*
 * Returns once the part of the register indicated by the mask has
 * reached the given value.
 */
int smum_wait_on_register(struct pp_smumgr *smumgr, uint32_t index,
        uint32_t value, uint32_t mask)
{
    uint32_t i;
    uint32_t cur_value;

    if (smumgr == NULL || smumgr->device == NULL)
        return -EINVAL;

    for (i = 0; i < smumgr->usec_timeout; i++) {
        cur_value = cgs_read_register(smumgr->device, index);
        if ((cur_value & mask) == (value & mask))
            break;
        udelay(1);
    }

    /* timeout means wrong logic */
    if (i == smumgr->usec_timeout)
        return -1;

    return 0;
}

int smum_wait_for_register_unequal(struct pp_smumgr *smumgr,
        uint32_t index, uint32_t value, uint32_t mask)
{
    uint32_t i;
    uint32_t cur_value;

    if (smumgr == NULL)
        return -EINVAL;

    for (i = 0; i < smumgr->usec_timeout; i++) {
        cur_value = cgs_read_register(smumgr->device, index);
        if ((cur_value & mask) != (value & mask))
            break;
        udelay(1);
    }

    /* timeout means wrong logic */
    if (i == smumgr->usec_timeout)
        return -1;

    return 0;
}

/*
 * Returns once the part of the register indicated by the mask
 * has reached the given value. The indirect space is described by
 * giving the memory-mapped index of the indirect index register.
 */
int smum_wait_on_indirect_register(struct pp_smumgr *smumgr,
        uint32_t indirect_port, uint32_t index,
        uint32_t value, uint32_t mask)
{
    if (smumgr == NULL || smumgr->device == NULL)
        return -EINVAL;

    cgs_write_register(smumgr->device, indirect_port, index);
    return smum_wait_on_register(smumgr, indirect_port + 1, mask, value);
}

void smum_wait_for_indirect_register_unequal(struct pp_smumgr *smumgr,
        uint32_t indirect_port, uint32_t index,
        uint32_t value, uint32_t mask)
{
    if (smumgr == NULL || smumgr->device == NULL)
        return;

    cgs_write_register(smumgr->device, indirect_port, index);
    smum_wait_for_register_unequal(smumgr, indirect_port + 1, value, mask);
}

int smu_allocate_memory(void *device, uint32_t size,
        enum cgs_gpu_mem_type type,
        uint32_t byte_align, uint64_t *mc_addr,
        void **kptr, void *handle)
{
    int ret = 0;
    cgs_handle_t cgs_handle;

    if (device == NULL || handle == NULL ||
        mc_addr == NULL || kptr == NULL)
        return -EINVAL;

    ret = cgs_alloc_gpu_mem(device, type, size, byte_align,
            0, 0, (cgs_handle_t *)handle);
    if (ret)
        return -ENOMEM;

    cgs_handle = *(cgs_handle_t *)handle;

    ret = cgs_gmap_gpu_mem(device, cgs_handle, mc_addr);
    if (ret)
        goto error_gmap;

    ret = cgs_kmap_gpu_mem(device, cgs_handle, kptr);
    if (ret)
        goto error_kmap;

    return 0;

error_kmap:
    cgs_gunmap_gpu_mem(device, cgs_handle);

error_gmap:
    cgs_free_gpu_mem(device, cgs_handle);

    return ret;
}

int smu_free_memory(void *device, void *handle)
{
    cgs_handle_t cgs_handle = (cgs_handle_t)handle;

    if (device == NULL || handle == NULL)
        return -EINVAL;

    cgs_kunmap_gpu_mem(device, cgs_handle);
    cgs_gunmap_gpu_mem(device, cgs_handle);
    cgs_free_gpu_mem(device, cgs_handle);

    return 0;
}

int smum_init_smc_table(struct pp_hwmgr *hwmgr)
{
    if (NULL != hwmgr->smumgr->smumgr_funcs->init_smc_table)
        return hwmgr->smumgr->smumgr_funcs->init_smc_table(hwmgr);

    return 0;
}

int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
    if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels)
        return hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels(hwmgr);

    return 0;
}

int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
    if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels)
        return hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels(hwmgr);

    return 0;
}

/* this interface is needed by island ci/vi */
int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
{
    if (NULL != hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table)
        return hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table(hwmgr);

    return 0;
}

bool smum_is_dpm_running(struct pp_hwmgr *hwmgr)
{
    if (NULL != hwmgr->smumgr->smumgr_funcs->is_dpm_running)
        return hwmgr->smumgr->smumgr_funcs->is_dpm_running(hwmgr);

    return true;
}
endocode/linux
drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
C
gpl-2.0
9,245
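/*
 * A minimal standalone sketch of the NULL-guarded function-pointer dispatch
 * that every smum_*() wrapper above uses. The names here (chip_ops,
 * demo_update, dispatch_update_smc_table) are illustrative and not part of
 * the powerplay code; only the pattern comes from the file above: call the
 * backend if the hook is implemented, otherwise report success as a no-op.
 */
#include <stdio.h>
#include <stddef.h>

struct chip_ops {
    int (*update_smc_table)(int type); /* may be NULL on some chips */
};

static int demo_update(int type)
{
    printf("updating SMC table, type %d\n", type);
    return 0;
}

/* Same shape as smum_update_smc_table(): an unimplemented hook is a no-op */
static int dispatch_update_smc_table(const struct chip_ops *ops, int type)
{
    if (ops->update_smc_table != NULL)
        return ops->update_smc_table(type);
    return 0;
}

int main(void)
{
    struct chip_ops tonga = { .update_smc_table = demo_update };
    struct chip_ops bare = { .update_smc_table = NULL };

    dispatch_update_smc_table(&tonga, 1);          /* calls the backend */
    return dispatch_update_smc_table(&bare, 1);    /* silently succeeds */
}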
/* Try writing a file in the most normal way. */

#include <syscall.h>
#include "tests/userprog/sample.inc"
#include "tests/lib.h"
#include "tests/main.h"

void
test_main (void)
{
  int handle, byte_cnt;

  CHECK (create ("test.txt", sizeof sample - 1), "create \"test.txt\"");
  CHECK ((handle = open ("test.txt")) > 1, "open \"test.txt\"");

  byte_cnt = write (handle, sample, sizeof sample - 1);
  if (byte_cnt != sizeof sample - 1)
    fail ("write() returned %d instead of %zu", byte_cnt,
          sizeof sample - 1);
}
pindexis/pintos-tn
src/tests/userprog/write-normal.c
C
apache-2.0
519
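/*
 * A companion sketch in the same pintos test style: write the sample data,
 * then read it back and compare. This test is illustrative and does not
 * ship with the tree; it assumes the read/close syscall wrappers and the
 * compare_bytes() helper that tests/lib.h provides alongside CHECK/fail.
 */
#include <syscall.h>
#include "tests/userprog/sample.inc"
#include "tests/lib.h"
#include "tests/main.h"

void
test_main (void)
{
  char buf[sizeof sample];
  int handle;

  CHECK (create ("verify.txt", sizeof sample - 1), "create \"verify.txt\"");
  CHECK ((handle = open ("verify.txt")) > 1, "open \"verify.txt\"");
  CHECK (write (handle, sample, sizeof sample - 1) == sizeof sample - 1,
         "write \"verify.txt\"");
  close (handle);

  CHECK ((handle = open ("verify.txt")) > 1, "re-open \"verify.txt\"");
  if (read (handle, buf, sizeof sample - 1) != sizeof sample - 1)
    fail ("short read of \"verify.txt\"");
  compare_bytes (buf, sample, sizeof sample - 1, 0, "verify.txt");
}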
/*
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**===========================================================================

  \file  wlan_hdd_softap_tx_rx.c

  \brief Linux HDD Tx/RX APIs

  ==========================================================================*/

/*---------------------------------------------------------------------------
  Include files
  -------------------------------------------------------------------------*/
#include <linux/semaphore.h>
#include <wlan_hdd_tx_rx.h>
#include <wlan_hdd_softap_tx_rx.h>
#include <wlan_hdd_dp_utils.h>
#include <wlan_qct_tl.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
//#include <vos_list.h>
#include <vos_types.h>
#include <aniGlobal.h>
#include <halTypes.h>
#include <net/ieee80211_radiotap.h>
#include <linux/ratelimit.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
#include <soc/qcom/subsystem_restart.h>
#else
#include <mach/subsystem_restart.h>
#endif

/*---------------------------------------------------------------------------
  Preprocessor definitions and constants
  -------------------------------------------------------------------------*/

/*---------------------------------------------------------------------------
  Type declarations
  -------------------------------------------------------------------------*/

/*---------------------------------------------------------------------------
  Function definitions and documentation
  -------------------------------------------------------------------------*/
#if 0
static void hdd_softap_dump_sk_buff(struct sk_buff *skb)
{
   VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR,
              "%s: head = %p", __func__, skb->head);
   //VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR,
   //           "%s: data = %p", __func__, skb->data);
   VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR,
              "%s: tail = %p", __func__, skb->tail);
   VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR,
              "%s: end = %p", __func__, skb->end);
   VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR,
              "%s: len = %d", __func__, skb->len);
   VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR,
              "%s: data_len = %d", __func__, skb->data_len);
   VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR,
              "%s: mac_len = %d", __func__, skb->mac_len);
   VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR,
              "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
              skb->data[0], skb->data[1], skb->data[2], skb->data[3],
              skb->data[4], skb->data[5], skb->data[6], skb->data[7]);
   VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR,
              "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
              skb->data[8], skb->data[9], skb->data[10], skb->data[11],
              skb->data[12], skb->data[13], skb->data[14], skb->data[15]);
}
#endif

extern void hdd_set_wlan_suspend_mode(bool suspend);

#define HDD_SAP_TX_TIMEOUT_RATELIMIT_INTERVAL 20*HZ
#define HDD_SAP_TX_TIMEOUT_RATELIMIT_BURST    1
#define HDD_SAP_TX_STALL_SSR_THRESHOLD        5

static DEFINE_RATELIMIT_STATE(hdd_softap_tx_timeout_rs,              \
                              HDD_SAP_TX_TIMEOUT_RATELIMIT_INTERVAL, \
                              HDD_SAP_TX_TIMEOUT_RATELIMIT_BURST);

/**============================================================================
  @brief hdd_softap_traffic_monitor_timeout_handler() - SAP/P2P GO traffic
         monitor timeout handler function. If there is no traffic during the
         programmed time, trigger suspend mode.

  @param pUsrData : [in] pointer to hdd context

  @return         : NONE
  ===========================================================================*/
void hdd_softap_traffic_monitor_timeout_handler( void *pUsrData )
{
   hdd_context_t *pHddCtx = (hdd_context_t *)pUsrData;
   v_TIME_t       currentTS;

   if (NULL == pHddCtx)
   {
      VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR,
                 "%s: Invalid user data, context", __func__);
      return;
   }

   currentTS = vos_timer_get_system_time();
   if (pHddCtx->cfg_ini->trafficIdleTimeout <
       (currentTS - pHddCtx->traffic_monitor.lastFrameTs))
   {
      VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO,
                 "%s: No Data Activity calling Wlan Suspend", __func__ );
      hdd_set_wlan_suspend_mode(1);
      atomic_set(&pHddCtx->traffic_monitor.isActiveMode, 0);
   }
   else
   {
      vos_timer_start(&pHddCtx->traffic_monitor.trafficTimer,
                      pHddCtx->cfg_ini->trafficIdleTimeout);
   }

   return;
}

VOS_STATUS hdd_start_trafficMonitor( hdd_adapter_t *pAdapter )
{
    hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
    VOS_STATUS status = VOS_STATUS_SUCCESS;

    status = wlan_hdd_validate_context(pHddCtx);
    if (0 != status)
    {
        VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
                   "%s: HDD context is not valid", __func__);
        return status;
    }

    if ((pHddCtx->cfg_ini->enableTrafficMonitor) &&
        (!pHddCtx->traffic_monitor.isInitialized))
    {
        atomic_set(&pHddCtx->traffic_monitor.isActiveMode, 1);
        vos_timer_init(&pHddCtx->traffic_monitor.trafficTimer,
                       VOS_TIMER_TYPE_SW,
                       hdd_softap_traffic_monitor_timeout_handler,
                       pHddCtx);
        vos_lock_init(&pHddCtx->traffic_monitor.trafficLock);
        pHddCtx->traffic_monitor.isInitialized = 1;
        pHddCtx->traffic_monitor.lastFrameTs = 0;
        /* Start the traffic monitor timer here.
         * If there is no AP association, immediately go into suspend. */
        VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO,
                   "%s Start Traffic Monitor Timer", __func__);
        vos_timer_start(&pHddCtx->traffic_monitor.trafficTimer,
                        pHddCtx->cfg_ini->trafficIdleTimeout);
    }
    else
    {
        VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO,
                   "%s Traffic Monitor is not enabled in the ini file",
                   __func__);
    }
    return status;
}

VOS_STATUS hdd_stop_trafficMonitor(
hdd_adapter_t *pAdapter ) { hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX(pAdapter); VOS_STATUS status = VOS_STATUS_SUCCESS; status = wlan_hdd_validate_context(pHddCtx); if (-ENODEV == status) { VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR, "%s: HDD context is not valid", __func__); return status; } if (pHddCtx->traffic_monitor.isInitialized) { if (VOS_TIMER_STATE_STOPPED != vos_timer_getCurrentState(&pHddCtx->traffic_monitor.trafficTimer)) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO, "%s Stop Traffic Monitor Timer", __func__); vos_timer_stop(&pHddCtx->traffic_monitor.trafficTimer); } VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO, "%s Destroy Traffic Monitor Timer", __func__); vos_timer_destroy(&pHddCtx->traffic_monitor.trafficTimer); vos_lock_destroy(&pHddCtx->traffic_monitor.trafficLock); pHddCtx->traffic_monitor.isInitialized = 0; } return VOS_STATUS_SUCCESS; } /**============================================================================ @brief hdd_softap_flush_tx_queues() - Utility function to flush the TX queues @param pAdapter : [in] pointer to adapter context @return : VOS_STATUS_E_FAILURE if any errors encountered : VOS_STATUS_SUCCESS otherwise ===========================================================================*/ static VOS_STATUS hdd_softap_flush_tx_queues( hdd_adapter_t *pAdapter ) { VOS_STATUS status = VOS_STATUS_SUCCESS; v_SINT_t i = -1; v_U8_t STAId = 0; hdd_list_node_t *anchor = NULL; skb_list_node_t *pktNode = NULL; struct sk_buff *skb = NULL; spin_lock_bh( &pAdapter->staInfo_lock ); for (STAId = 0; STAId < WLAN_MAX_STA_COUNT; STAId++) { if (FALSE == pAdapter->aStaInfo[STAId].isUsed) { continue; } for (i = 0; i < NUM_TX_QUEUES; i ++) { spin_lock_bh(&pAdapter->aStaInfo[STAId].wmm_tx_queue[i].lock); while (true) { status = hdd_list_remove_front ( &pAdapter->aStaInfo[STAId].wmm_tx_queue[i], &anchor); if (VOS_STATUS_E_EMPTY != status) { //If success then we got a valid packet from some AC pktNode = list_entry(anchor, skb_list_node_t, anchor); skb = pktNode->skb; ++pAdapter->stats.tx_dropped; ++pAdapter->hdd_stats.hddTxRxStats.txFlushed; ++pAdapter->hdd_stats.hddTxRxStats.txFlushedAC[i]; kfree_skb(skb); continue; } //current list is empty break; } pAdapter->aStaInfo[STAId].txSuspended[i] = VOS_FALSE; spin_unlock_bh(&pAdapter->aStaInfo[STAId].wmm_tx_queue[i].lock); } pAdapter->aStaInfo[STAId].vosLowResource = VOS_FALSE; } spin_unlock_bh( &pAdapter->staInfo_lock ); return status; } /**============================================================================ @brief hdd_softap_hard_start_xmit() - Function registered with the Linux OS for transmitting packets. There are 2 versions of this function. One that uses locked queue and other that uses lockless queues. 
Both have been retained to do some performance testing @param skb : [in] pointer to OS packet (sk_buff) @param dev : [in] pointer to Libra network device @return : NET_XMIT_DROP if packets are dropped : NET_XMIT_SUCCESS if packet is enqueued succesfully ===========================================================================*/ int hdd_softap_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { VOS_STATUS status; WLANTL_ACEnumType ac = WLANTL_AC_BE; sme_QosWmmUpType up = SME_QOS_WMM_UP_BE; skb_list_node_t *pktNode = NULL; v_SIZE_t pktListSize = 0; v_BOOL_t txSuspended = VOS_FALSE; hdd_adapter_t *pAdapter = (hdd_adapter_t *)netdev_priv(dev); hdd_ap_ctx_t *pHddApCtx = WLAN_HDD_GET_AP_CTX_PTR(pAdapter); vos_list_node_t *anchor = NULL; v_U8_t STAId = WLAN_MAX_STA_COUNT; //Extract the destination address from ethernet frame v_MACADDR_t *pDestMacAddress = (v_MACADDR_t*)skb->data; int os_status = NETDEV_TX_OK; pDestMacAddress = (v_MACADDR_t*)skb->data; ++pAdapter->hdd_stats.hddTxRxStats.txXmitCalled; VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO, "%s: enter", __func__); spin_lock_bh( &pAdapter->staInfo_lock ); if (vos_is_macaddr_broadcast( pDestMacAddress ) || vos_is_macaddr_group(pDestMacAddress)) { //The BC/MC station ID is assigned during BSS starting phase. SAP will return the station //ID used for BC/MC traffic. The station id is registered to TL as well. STAId = pHddApCtx->uBCStaId; /* Setting priority for broadcast packets which doesn't go to select_queue function */ skb->priority = SME_QOS_WMM_UP_BE; skb->queue_mapping = HDD_LINUX_AC_BE; VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO_LOW, "%s: BC/MC packet", __func__); } else { STAId = *(v_U8_t *)(((v_U8_t *)(skb->data)) - 1); if (STAId == HDD_WLAN_INVALID_STA_ID) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_WARN, "%s: Failed to find right station", __func__); ++pAdapter->stats.tx_dropped; ++pAdapter->hdd_stats.hddTxRxStats.txXmitDropped; kfree_skb(skb); goto xmit_done; } else if (FALSE == pAdapter->aStaInfo[STAId].isUsed ) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_WARN, "%s: STA %d is unregistered", __func__, STAId); ++pAdapter->stats.tx_dropped; ++pAdapter->hdd_stats.hddTxRxStats.txXmitDropped; kfree_skb(skb); goto xmit_done; } if ( (WLANTL_STA_CONNECTED != pAdapter->aStaInfo[STAId].tlSTAState) && (WLANTL_STA_AUTHENTICATED != pAdapter->aStaInfo[STAId].tlSTAState) ) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_WARN, "%s: Station not connected yet", __func__); ++pAdapter->stats.tx_dropped; ++pAdapter->hdd_stats.hddTxRxStats.txXmitDropped; kfree_skb(skb); goto xmit_done; } else if(WLANTL_STA_CONNECTED == pAdapter->aStaInfo[STAId].tlSTAState) { if(ntohs(skb->protocol) != HDD_ETHERTYPE_802_1_X) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_WARN, "%s: NON-EAPOL packet in non-Authenticated state", __func__); ++pAdapter->stats.tx_dropped; ++pAdapter->hdd_stats.hddTxRxStats.txXmitDropped; kfree_skb(skb); goto xmit_done; } } } //Get TL AC corresponding to Qdisc queue index/AC. ac = hdd_QdiscAcToTlAC[skb->queue_mapping]; //user priority from IP header, which is already extracted and set from //select_queue call back function up = skb->priority; ++pAdapter->hdd_stats.hddTxRxStats.txXmitClassifiedAC[ac]; VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO, "%s: Classified as ac %d up %d", __func__, ac, up); // If the memory differentiation mode is enabled, the memory limit of each queue will be // checked. Over-limit packets will be dropped. 
spin_lock_bh(&pAdapter->aStaInfo[STAId].wmm_tx_queue[ac].lock); hdd_list_size(&pAdapter->aStaInfo[STAId].wmm_tx_queue[ac], &pktListSize); if(pktListSize >= pAdapter->aTxQueueLimit[ac]) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_WARN, "%s: station %d ac %d queue over limit %d", __func__, STAId, ac, pktListSize); pAdapter->aStaInfo[STAId].txSuspended[ac] = VOS_TRUE; netif_stop_subqueue(dev, skb_get_queue_mapping(skb)); txSuspended = VOS_TRUE; } /* If 3/4th of the max queue size is used then enable the flag. * This flag indicates to place the DHCP packets in VOICE AC queue.*/ if (WLANTL_AC_BE == ac) { if (pAdapter->aStaInfo[STAId].wmm_tx_queue[ac].count >= HDD_TX_QUEUE_LOW_WATER_MARK) { VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN, "%s: TX queue for Best Effort AC is 3/4th full", __func__); pAdapter->aStaInfo[STAId].vosLowResource = VOS_TRUE; } else { pAdapter->aStaInfo[STAId].vosLowResource = VOS_FALSE; } } spin_unlock_bh(&pAdapter->aStaInfo[STAId].wmm_tx_queue[ac].lock); if (VOS_TRUE == txSuspended) { VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN, "%s: TX queue full for AC=%d Disable OS TX queue", __func__, ac ); os_status = NETDEV_TX_BUSY; goto xmit_done; } //Use the skb->cb field to hold the list node information pktNode = (skb_list_node_t *)&skb->cb; //Stick the OS packet inside this node. pktNode->skb = skb; //Stick the User Priority inside this node pktNode->userPriority = up; INIT_LIST_HEAD(&pktNode->anchor); spin_lock_bh(&pAdapter->aStaInfo[STAId].wmm_tx_queue[ac].lock); status = hdd_list_insert_back_size(&pAdapter->aStaInfo[STAId].wmm_tx_queue[ac], &pktNode->anchor, &pktListSize ); spin_unlock_bh(&pAdapter->aStaInfo[STAId].wmm_tx_queue[ac].lock); if ( !VOS_IS_STATUS_SUCCESS( status ) ) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_WARN, "%s:Insert Tx queue failed. Pkt dropped", __func__); ++pAdapter->hdd_stats.hddTxRxStats.txXmitDropped; ++pAdapter->hdd_stats.hddTxRxStats.txXmitDroppedAC[ac]; ++pAdapter->stats.tx_dropped; kfree_skb(skb); goto xmit_done; } ++pAdapter->hdd_stats.hddTxRxStats.txXmitQueued; ++pAdapter->hdd_stats.hddTxRxStats.txXmitQueuedAC[ac]; ++pAdapter->hdd_stats.hddTxRxStats.pkt_tx_count; if (1 == pktListSize) { //Let TL know we have a packet to send for this AC status = WLANTL_STAPktPending( (WLAN_HDD_GET_CTX(pAdapter))->pvosContext, STAId, ac ); if ( !VOS_IS_STATUS_SUCCESS( status ) ) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_WARN, "%s: Failed to signal TL for AC=%d STAId =%d", __func__, ac, STAId ); //Remove the packet from queue. It must be at the back of the queue, as TX thread cannot preempt us in the middle //as we are in a soft irq context. Also it must be the same packet that we just allocated. spin_lock_bh(&pAdapter->aStaInfo[STAId].wmm_tx_queue[ac].lock); status = hdd_list_remove_back( &pAdapter->aStaInfo[STAId].wmm_tx_queue[ac], &anchor); spin_unlock_bh(&pAdapter->aStaInfo[STAId].wmm_tx_queue[ac].lock); ++pAdapter->stats.tx_dropped; ++pAdapter->hdd_stats.hddTxRxStats.txXmitDropped; ++pAdapter->hdd_stats.hddTxRxStats.txXmitDroppedAC[ac]; kfree_skb(skb); goto xmit_done; } } dev->trans_start = jiffies; VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO_LOW, "%s: exit", __func__); xmit_done: spin_unlock_bh( &pAdapter->staInfo_lock ); return os_status; } /**============================================================================ @brief hdd_softap_sta_2_sta_xmit This function for Transmitting the frames when the traffic is between two stations. 
@param skb : [in] pointer to packet (sk_buff) @param dev : [in] pointer to Libra network device @param STAId : [in] Station Id of Destination Station @param up : [in] User Priority @return : NET_XMIT_DROP if packets are dropped : NET_XMIT_SUCCESS if packet is enqueued succesfully ===========================================================================*/ VOS_STATUS hdd_softap_sta_2_sta_xmit(struct sk_buff *skb, struct net_device *dev, v_U8_t STAId, v_U8_t up) { VOS_STATUS status = VOS_STATUS_SUCCESS; skb_list_node_t *pktNode = NULL; v_SIZE_t pktListSize = 0; hdd_adapter_t *pAdapter = (hdd_adapter_t *)netdev_priv(dev); v_U8_t ac; vos_list_node_t *anchor = NULL; ++pAdapter->hdd_stats.hddTxRxStats.txXmitCalled; VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO, "%s: enter", __func__); spin_lock_bh( &pAdapter->staInfo_lock ); if ( FALSE == pAdapter->aStaInfo[STAId].isUsed ) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_WARN, "%s: STA %d is unregistered", __func__, STAId ); kfree_skb(skb); status = VOS_STATUS_E_FAILURE; goto xmit_end; } /* If the QoS is not enabled on the receiving station, then send it with BE priority */ if ( !pAdapter->aStaInfo[STAId].isQosEnabled ) up = SME_QOS_WMM_UP_BE; ac = hddWmmUpToAcMap[up]; ++pAdapter->hdd_stats.hddTxRxStats.txXmitClassifiedAC[ac]; VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO, "%s: Classified as ac %d up %d", __func__, ac, up); skb->queue_mapping = hddLinuxUpToAcMap[up]; //Use the skb->cb field to hold the list node information pktNode = (skb_list_node_t *)&skb->cb; //Stick the OS packet inside this node. pktNode->skb = skb; //Stick the User Priority inside this node pktNode->userPriority = up; INIT_LIST_HEAD(&pktNode->anchor); spin_lock_bh(&pAdapter->aStaInfo[STAId].wmm_tx_queue[ac].lock); hdd_list_size(&pAdapter->aStaInfo[STAId].wmm_tx_queue[ac], &pktListSize); if(pAdapter->aStaInfo[STAId].txSuspended[ac] || pktListSize >= pAdapter->aTxQueueLimit[ac]) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_WARN, "%s: station %d ac %d queue over limit %d", __func__, STAId, ac, pktListSize); /* TODO:Rx Flowchart should be trigerred here to SUPEND SSC on RX side. * SUSPEND should be done based on Threshold. RESUME would be * triggered in fetch cbk after recovery. */ kfree_skb(skb); spin_unlock_bh(&pAdapter->aStaInfo[STAId].wmm_tx_queue[ac].lock); status = VOS_STATUS_E_FAILURE; goto xmit_end; } status = hdd_list_insert_back_size(&pAdapter->aStaInfo[STAId].wmm_tx_queue[ac], &pktNode->anchor, &pktListSize ); spin_unlock_bh(&pAdapter->aStaInfo[STAId].wmm_tx_queue[ac].lock); if ( !VOS_IS_STATUS_SUCCESS( status ) ) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_WARN, "%s:Insert Tx queue failed. Pkt dropped", __func__); ++pAdapter->hdd_stats.hddTxRxStats.txXmitDropped; ++pAdapter->hdd_stats.hddTxRxStats.txXmitDroppedAC[ac]; ++pAdapter->stats.tx_dropped; kfree_skb(skb); status = VOS_STATUS_E_FAILURE; goto xmit_end; } ++pAdapter->hdd_stats.hddTxRxStats.txXmitQueued; ++pAdapter->hdd_stats.hddTxRxStats.txXmitQueuedAC[ac]; if (1 == pktListSize) { //Let TL know we have a packet to send for this AC //VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR,"%s:Indicating Packet to TL", __func__); status = WLANTL_STAPktPending( (WLAN_HDD_GET_CTX(pAdapter))->pvosContext, STAId, ac ); if ( !VOS_IS_STATUS_SUCCESS( status ) ) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_WARN, "%s: Failed to signal TL for AC=%d STAId =%d", __func__, ac, STAId ); //Remove the packet from queue. 
It must be at the back of the queue, as TX thread cannot preempt us in the middle //as we are in a soft irq context. Also it must be the same packet that we just allocated. spin_lock_bh(&pAdapter->aStaInfo[STAId].wmm_tx_queue[ac].lock); status = hdd_list_remove_back( &pAdapter->aStaInfo[STAId].wmm_tx_queue[ac], &anchor); spin_unlock_bh(&pAdapter->aStaInfo[STAId].wmm_tx_queue[ac].lock); ++pAdapter->stats.tx_dropped; ++pAdapter->hdd_stats.hddTxRxStats.txXmitDropped; ++pAdapter->hdd_stats.hddTxRxStats.txXmitDroppedAC[ac]; kfree_skb(skb); status = VOS_STATUS_E_FAILURE; goto xmit_end; } } VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO_LOW, "%s: exit", __func__); xmit_end: spin_unlock_bh( &pAdapter->staInfo_lock ); return status; } /**============================================================================ @brief hdd_softap_tx_timeout() - Function called by OS if there is any timeout during transmission. Since HDD simply enqueues packet and returns control to OS right away, this would never be invoked @param dev : [in] pointer to Libra network device @return : None ===========================================================================*/ void hdd_softap_tx_timeout(struct net_device *dev) { hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev); struct netdev_queue *txq; int i = 0; VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: Transmission timeout occurred", __func__); if ( NULL == pAdapter ) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, FL("pAdapter is NULL")); VOS_ASSERT(0); return; } ++pAdapter->hdd_stats.hddTxRxStats.txTimeoutCount; for (i = 0; i < 8; i++) { txq = netdev_get_tx_queue(dev, i); VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "Queue%d status: %d", i, netif_tx_queue_stopped(txq)); } VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "carrier state: %d", netif_carrier_ok(dev)); ++pAdapter->hdd_stats.hddTxRxStats.continuousTxTimeoutCount; if (pAdapter->hdd_stats.hddTxRxStats.continuousTxTimeoutCount > HDD_SAP_TX_STALL_SSR_THRESHOLD) { // Driver could not recover, issue SSR VOS_TRACE(VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: Cannot recover from Data stall Issue SSR", __func__); WLANTL_FatalError(); return; } /* If Tx stalled for a long time then *hdd_tx_timeout* is called * every 5sec. The TL debug spits out a lot of information on the * serial console, if it is called every time *hdd_tx_timeout* is * called then we may get a watchdog bite on the Application * processor, so ratelimit the TL debug logs. 
 */
    if (__ratelimit(&hdd_softap_tx_timeout_rs))
    {
        hdd_wmm_tx_snapshot(pAdapter);
        WLANTL_TLDebugMessage(VOS_TRUE);
    }
}

/**============================================================================
  @brief hdd_softap_stats() - Function registered with the Linux OS for
  device TX/RX statistics

  @param dev : [in] pointer to Libra network device

  @return    : pointer to net_device_stats structure
  ===========================================================================*/
struct net_device_stats* hdd_softap_stats(struct net_device *dev)
{
   hdd_adapter_t* priv = netdev_priv(dev);
   return &priv->stats;
}

/**============================================================================
  @brief hdd_softap_init_tx_rx() - Init function to initialize Tx/RX
  modules in HDD

  @param pAdapter : [in] pointer to adapter context

  @return         : VOS_STATUS_E_FAILURE if any errors encountered
                  : VOS_STATUS_SUCCESS otherwise
  ===========================================================================*/
VOS_STATUS hdd_softap_init_tx_rx( hdd_adapter_t *pAdapter )
{
   VOS_STATUS status = VOS_STATUS_SUCCESS;
   v_SINT_t i = -1;
   v_SIZE_t size = 0;
   v_U8_t STAId = 0;
   v_U8_t pACWeights[] = {
                           HDD_SOFTAP_BK_WEIGHT_DEFAULT,
                           HDD_SOFTAP_BE_WEIGHT_DEFAULT,
                           HDD_SOFTAP_VI_WEIGHT_DEFAULT,
                           HDD_SOFTAP_VO_WEIGHT_DEFAULT
                         };

   pAdapter->isVosOutOfResource = VOS_FALSE;
   pAdapter->isVosLowResource = VOS_FALSE;

   vos_mem_zero(&pAdapter->stats, sizeof(struct net_device_stats));

   while (++i != NUM_TX_QUEUES)
      hdd_list_init( &pAdapter->wmm_tx_queue[i], HDD_TX_QUEUE_MAX_LEN);

   /* Initial HDD buffer control / flow control fields */
   vos_pkt_get_available_buffer_pool (VOS_PKT_TYPE_TX_802_3_DATA, &size);

   pAdapter->aTxQueueLimit[WLANTL_AC_BK] = HDD_SOFTAP_TX_BK_QUEUE_MAX_LEN;
   pAdapter->aTxQueueLimit[WLANTL_AC_BE] = HDD_SOFTAP_TX_BE_QUEUE_MAX_LEN;
   pAdapter->aTxQueueLimit[WLANTL_AC_VI] = HDD_SOFTAP_TX_VI_QUEUE_MAX_LEN;
   pAdapter->aTxQueueLimit[WLANTL_AC_VO] = HDD_SOFTAP_TX_VO_QUEUE_MAX_LEN;

   spin_lock_init( &pAdapter->staInfo_lock );

   for (STAId = 0; STAId < WLAN_MAX_STA_COUNT; STAId++)
   {
      vos_mem_zero(&pAdapter->aStaInfo[STAId], sizeof(hdd_station_info_t));
      for (i = 0; i < NUM_TX_QUEUES; i ++)
      {
         hdd_list_init(&pAdapter->aStaInfo[STAId].wmm_tx_queue[i],
                       HDD_TX_QUEUE_MAX_LEN);
      }
   }

   /* Update the AC weights suitable for SoftAP mode of operation */
   WLANTL_SetACWeights((WLAN_HDD_GET_CTX(pAdapter))->pvosContext, pACWeights);

   if (VOS_STATUS_SUCCESS != hdd_start_trafficMonitor(pAdapter))
   {
       VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR,
                  "%s: failed to start Traffic Monitor timer", __func__ );
       return VOS_STATUS_E_INVAL;
   }

   return status;
}

/**============================================================================
  @brief hdd_softap_deinit_tx_rx() - Deinit function to clean up Tx/RX
  modules in HDD

  @param pAdapter : [in] pointer to adapter context

  @return         : VOS_STATUS_E_FAILURE if any errors encountered
                  : VOS_STATUS_SUCCESS otherwise
  ===========================================================================*/
VOS_STATUS hdd_softap_deinit_tx_rx( hdd_adapter_t *pAdapter )
{
   VOS_STATUS status = VOS_STATUS_SUCCESS;

   if (VOS_STATUS_SUCCESS != hdd_stop_trafficMonitor(pAdapter))
   {
       VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR,
                  "%s: failed to stop Traffic Monitor timer", __func__ );
       return VOS_STATUS_E_INVAL;
   }

   status = hdd_softap_flush_tx_queues(pAdapter);

   return status;
}

/**============================================================================
  @brief hdd_softap_flush_tx_queues_sta() - Utility function to flush the
  TX queues of a station

  @param pAdapter : [in] pointer to adapter context
  @param STAId    : [in] Station ID to flush

  @return         : VOS_STATUS_E_FAILURE if any errors encountered
                  : VOS_STATUS_SUCCESS otherwise
  ===========================================================================*/
static VOS_STATUS hdd_softap_flush_tx_queues_sta( hdd_adapter_t *pAdapter,
                                                  v_U8_t STAId )
{
   v_U8_t i = -1;
   hdd_list_node_t *anchor = NULL;
   skb_list_node_t *pktNode = NULL;
   struct sk_buff *skb = NULL;

   if (FALSE == pAdapter->aStaInfo[STAId].isUsed)
   {
      return VOS_STATUS_SUCCESS;
   }

   for (i = 0; i < NUM_TX_QUEUES; i ++)
   {
      spin_lock_bh(&pAdapter->aStaInfo[STAId].wmm_tx_queue[i].lock);
      while (true)
      {
         if (VOS_STATUS_E_EMPTY !=
             hdd_list_remove_front(&pAdapter->aStaInfo[STAId].wmm_tx_queue[i],
                                   &anchor))
         {
            //If success then we got a valid packet from some AC
            pktNode = list_entry(anchor, skb_list_node_t, anchor);
            skb = pktNode->skb;
            ++pAdapter->stats.tx_dropped;
            ++pAdapter->hdd_stats.hddTxRxStats.txFlushed;
            ++pAdapter->hdd_stats.hddTxRxStats.txFlushedAC[i];
            kfree_skb(skb);
            continue;
         }

         //current list is empty
         break;
      }
      spin_unlock_bh(&pAdapter->aStaInfo[STAId].wmm_tx_queue[i].lock);
   }

   return VOS_STATUS_SUCCESS;
}

/**============================================================================
  @brief hdd_softap_init_tx_rx_sta() - Init function to initialize a station
  in Tx/RX modules in HDD

  @param pAdapter    : [in] pointer to adapter context
  @param STAId       : [in] Station ID to init
  @param pmacAddrSTA : [in] pointer to the MAC address of the station

  @return            : VOS_STATUS_E_FAILURE if any errors encountered
                     : VOS_STATUS_SUCCESS otherwise
  ===========================================================================*/
VOS_STATUS hdd_softap_init_tx_rx_sta( hdd_adapter_t *pAdapter, v_U8_t STAId,
                                      v_MACADDR_t *pmacAddrSTA)
{
   v_U8_t i = 0;

   spin_lock_bh( &pAdapter->staInfo_lock );
   if (pAdapter->aStaInfo[STAId].isUsed)
   {
      VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR,
                 "%s: Reinit station %d", __func__, STAId );
      spin_unlock_bh( &pAdapter->staInfo_lock );
      return VOS_STATUS_E_FAILURE;
   }

   vos_mem_zero(&pAdapter->aStaInfo[STAId], sizeof(hdd_station_info_t));
   for (i = 0; i < NUM_TX_QUEUES; i ++)
   {
      hdd_list_init(&pAdapter->aStaInfo[STAId].wmm_tx_queue[i],
                    HDD_TX_QUEUE_MAX_LEN);
   }

   pAdapter->aStaInfo[STAId].isUsed = TRUE;
   pAdapter->aStaInfo[STAId].isDeauthInProgress = FALSE;
   vos_copy_macaddr( &pAdapter->aStaInfo[STAId].macAddrSTA, pmacAddrSTA);

   spin_unlock_bh( &pAdapter->staInfo_lock );
   return VOS_STATUS_SUCCESS;
}

/**============================================================================
  @brief hdd_softap_deinit_tx_rx_sta() - Deinit function to clean up a
  station in Tx/RX modules in HDD

  @param pAdapter : [in] pointer to adapter context
  @param STAId    : [in] Station ID to deinit

  @return         : VOS_STATUS_E_FAILURE if any errors encountered
                  : VOS_STATUS_SUCCESS otherwise
  ===========================================================================*/
VOS_STATUS hdd_softap_deinit_tx_rx_sta ( hdd_adapter_t *pAdapter, v_U8_t STAId )
{
   VOS_STATUS status = VOS_STATUS_SUCCESS;
   v_U8_t ac;
   /**Track whether OS TX queue has been disabled.*/
   v_BOOL_t txSuspended[NUM_TX_QUEUES];
   v_U8_t tlAC;
   hdd_hostapd_state_t *pHostapdState;
   v_U8_t i;

   pHostapdState = WLAN_HDD_GET_HOSTAP_STATE_PTR(pAdapter);

   spin_lock_bh( &pAdapter->staInfo_lock );
   if (FALSE == pAdapter->aStaInfo[STAId].isUsed)
   {
      VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR,
                 "%s: Deinit station not inited %d", __func__, STAId );
      spin_unlock_bh( &pAdapter->staInfo_lock );
      return VOS_STATUS_E_FAILURE;
   }

   status =
hdd_softap_flush_tx_queues_sta(pAdapter, STAId); pAdapter->aStaInfo[STAId].isUsed = FALSE; pAdapter->aStaInfo[STAId].isDeauthInProgress = FALSE; /* if this STA had any of its WMM TX queues suspended, then the associated queue on the network interface was disabled. check to see if that is the case, in which case we need to re-enable the interface queue. but we only do this if the BSS is running since, if the BSS is stopped, all of the interfaces have been stopped and should not be re-enabled */ if (BSS_START == pHostapdState->bssState) { for (ac = HDD_LINUX_AC_VO; ac <= HDD_LINUX_AC_BK; ac++) { tlAC = hdd_QdiscAcToTlAC[ac]; txSuspended[ac] = pAdapter->aStaInfo[STAId].txSuspended[tlAC]; } } vos_mem_zero(&pAdapter->aStaInfo[STAId], sizeof(hdd_station_info_t)); /* re-init spin lock, since netdev can still open adapter until * driver gets unloaded */ for (i = 0; i < NUM_TX_QUEUES; i ++) { hdd_list_init(&pAdapter->aStaInfo[STAId].wmm_tx_queue[i], HDD_TX_QUEUE_MAX_LEN); } if (BSS_START == pHostapdState->bssState) { for (ac = HDD_LINUX_AC_VO; ac <= HDD_LINUX_AC_BK; ac++) { if (txSuspended[ac]) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO, "%s: TX queue re-enabled", __func__); netif_wake_subqueue(pAdapter->dev, ac); } } } spin_unlock_bh( &pAdapter->staInfo_lock ); return status; } /**============================================================================ @brief hdd_softap_disconnect_tx_rx() - Disconnect function to clean up Tx/RX modules in HDD @param pAdapter : [in] pointer to adapter context @return : VOS_STATUS_E_FAILURE if any errors encountered : VOS_STATUS_SUCCESS otherwise ===========================================================================*/ VOS_STATUS hdd_softap_disconnect_tx_rx( hdd_adapter_t *pAdapter ) { return hdd_softap_flush_tx_queues(pAdapter); } /**============================================================================ @brief hdd_softap_tx_complete_cbk() - Callback function invoked by TL to indicate that a packet has been transmitted across the bus succesfully. OS packet resources can be released after this cbk. @param vosContext : [in] pointer to VOS context @param pVosPacket : [in] pointer to VOS packet (containing skb) @param vosStatusIn : [in] status of the transmission @return : VOS_STATUS_E_FAILURE if any errors encountered : VOS_STATUS_SUCCESS otherwise ===========================================================================*/ VOS_STATUS hdd_softap_tx_complete_cbk( v_VOID_t *vosContext, vos_pkt_t *pVosPacket, VOS_STATUS vosStatusIn ) { VOS_STATUS status = VOS_STATUS_SUCCESS; hdd_adapter_t *pAdapter = NULL; void* pOsPkt = NULL; if( ( NULL == vosContext ) || ( NULL == pVosPacket ) ) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: Null params being passed", __func__); return VOS_STATUS_E_FAILURE; } //Return the skb to the OS status = vos_pkt_get_os_packet( pVosPacket, &pOsPkt, VOS_TRUE ); if(!VOS_IS_STATUS_SUCCESS( status )) { //This is bad but still try to free the VOSS resources if we can VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: Failure extracting skb from vos pkt", __func__); vos_pkt_return_packet( pVosPacket ); return VOS_STATUS_E_FAILURE; } //Get the Adapter context. 
pAdapter = (hdd_adapter_t *)netdev_priv(((struct sk_buff *)pOsPkt)->dev); if(pAdapter == NULL) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: HDD adapter context is Null", __func__); } else { ++pAdapter->hdd_stats.hddTxRxStats.txCompleted; } kfree_skb((struct sk_buff *)pOsPkt); //Return the VOS packet resources. status = vos_pkt_return_packet( pVosPacket ); if(!VOS_IS_STATUS_SUCCESS( status )) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: Could not return VOS packet to the pool", __func__); } return status; } /**============================================================================ @brief hdd_softap_tx_fetch_packet_cbk() - Callback function invoked by TL to fetch a packet for transmission. @param vosContext : [in] pointer to VOS context @param staId : [in] Station for which TL is requesting a pkt @param ac : [in] access category requested by TL @param pVosPacket : [out] pointer to VOS packet packet pointer @param pPktMetaInfo : [out] pointer to meta info for the pkt @return : VOS_STATUS_E_EMPTY if no packets to transmit : VOS_STATUS_E_FAILURE if any errors encountered : VOS_STATUS_SUCCESS otherwise ===========================================================================*/ VOS_STATUS hdd_softap_tx_fetch_packet_cbk( v_VOID_t *vosContext, v_U8_t *pStaId, WLANTL_ACEnumType ac, vos_pkt_t **ppVosPacket, WLANTL_MetaInfoType *pPktMetaInfo ) { VOS_STATUS status = VOS_STATUS_E_FAILURE; hdd_adapter_t *pAdapter = NULL; hdd_list_node_t *anchor = NULL; skb_list_node_t *pktNode = NULL; struct sk_buff *skb = NULL; vos_pkt_t *pVosPacket = NULL; v_MACADDR_t* pDestMacAddress = NULL; v_TIME_t timestamp; v_SIZE_t size = 0; v_U8_t STAId = WLAN_MAX_STA_COUNT; hdd_context_t *pHddCtx = NULL; v_U8_t proto_type = 0; //Sanity check on inputs if ( ( NULL == vosContext ) || ( NULL == pStaId ) || ( NULL == ppVosPacket ) || ( NULL == pPktMetaInfo ) ) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: Null Params being passed", __func__); return VOS_STATUS_E_FAILURE; } //Get the HDD context. 
pHddCtx = (hdd_context_t *)vos_get_context( VOS_MODULE_ID_HDD, vosContext ); if ( NULL == pHddCtx ) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: HDD adapter context is Null", __func__); return VOS_STATUS_E_FAILURE; } STAId = *pStaId; if (STAId >= WLAN_MAX_STA_COUNT) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: Invalid STAId %d passed by TL", __func__, STAId); return VOS_STATUS_E_FAILURE; } pAdapter = pHddCtx->sta_to_adapter[STAId]; if ((NULL == pAdapter) || (WLAN_HDD_ADAPTER_MAGIC != pAdapter->magic)) { VOS_ASSERT(0); return VOS_STATUS_E_FAILURE; } if (FALSE == pAdapter->aStaInfo[STAId].isUsed ) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: Unregistered STAId %d passed by TL", __func__, STAId); return VOS_STATUS_E_FAILURE; } /* Monitor traffic */ if ( pHddCtx->cfg_ini->enableTrafficMonitor ) { pHddCtx->traffic_monitor.lastFrameTs = vos_timer_get_system_time(); if ( !atomic_read(&pHddCtx->traffic_monitor.isActiveMode) ) { vos_lock_acquire(&pHddCtx->traffic_monitor.trafficLock); /* It was IDLE mode, * this is new state, then switch mode from suspend to resume */ if ( !atomic_read(&pHddCtx->traffic_monitor.isActiveMode) ) { hdd_set_wlan_suspend_mode(0); vos_timer_start(&pHddCtx->traffic_monitor.trafficTimer, pHddCtx->cfg_ini->trafficIdleTimeout); atomic_set(&pHddCtx->traffic_monitor.isActiveMode, 1); } vos_lock_release(&pHddCtx->traffic_monitor.trafficLock); } } ++pAdapter->hdd_stats.hddTxRxStats.txFetched; *ppVosPacket = NULL; //Make sure the AC being asked for is sane if( ac > WLANTL_MAX_AC || ac < 0) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: Invalid AC %d passed by TL", __func__, ac); return VOS_STATUS_E_FAILURE; } ++pAdapter->hdd_stats.hddTxRxStats.txFetchedAC[ac]; VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO, "%s: AC %d passed by TL", __func__, ac); //Get the vos packet. I don't want to dequeue and enqueue again if we are out of VOS resources //This simplifies the locking and unlocking of Tx queue status = vos_pkt_wrap_data_packet( &pVosPacket, VOS_PKT_TYPE_TX_802_3_DATA, NULL, //OS Pkt is not being passed hdd_softap_tx_low_resource_cbk, pAdapter ); if (status == VOS_STATUS_E_ALREADY || status == VOS_STATUS_E_RESOURCES) { //Remember VOS is in a low resource situation pAdapter->isVosOutOfResource = VOS_TRUE; ++pAdapter->hdd_stats.hddTxRxStats.txFetchLowResources; VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_WARN, "%s: VOSS in Low Resource scenario", __func__); //TL needs to handle this case. VOS_STATUS_E_EMPTY is returned when the queue is empty. return VOS_STATUS_E_FAILURE; } /* Only fetch this station and this AC. Return VOS_STATUS_E_EMPTY if nothing there. Do not get next AC as the other branch does. 
*/ spin_lock_bh(&pAdapter->aStaInfo[STAId].wmm_tx_queue[ac].lock); hdd_list_size(&pAdapter->aStaInfo[STAId].wmm_tx_queue[ac], &size); if (0 == size) { spin_unlock_bh(&pAdapter->aStaInfo[STAId].wmm_tx_queue[ac].lock); vos_pkt_return_packet(pVosPacket); return VOS_STATUS_E_EMPTY; } status = hdd_list_remove_front( &pAdapter->aStaInfo[STAId].wmm_tx_queue[ac], &anchor ); spin_unlock_bh(&pAdapter->aStaInfo[STAId].wmm_tx_queue[ac].lock); VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO, "%s: AC %d has packets pending", __func__, ac); if(VOS_STATUS_SUCCESS == status) { //If success then we got a valid packet from some AC pktNode = list_entry(anchor, skb_list_node_t, anchor); skb = pktNode->skb; } else { ++pAdapter->hdd_stats.hddTxRxStats.txFetchDequeueError; VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: Error in de-queuing skb from Tx queue status = %d", __func__, status ); vos_pkt_return_packet(pVosPacket); return VOS_STATUS_E_FAILURE; } //Attach skb to VOS packet. status = vos_pkt_set_os_packet( pVosPacket, skb ); if (status != VOS_STATUS_SUCCESS) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: Error attaching skb", __func__); vos_pkt_return_packet(pVosPacket); ++pAdapter->stats.tx_dropped; ++pAdapter->hdd_stats.hddTxRxStats.txFetchDequeueError; kfree_skb(skb); return VOS_STATUS_E_FAILURE; } //Just being paranoid. To be removed later if(pVosPacket == NULL) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: VOS packet returned by VOSS is NULL", __func__); ++pAdapter->stats.tx_dropped; ++pAdapter->hdd_stats.hddTxRxStats.txFetchDequeueError; kfree_skb(skb); return VOS_STATUS_E_FAILURE; } //Return VOS packet to TL; *ppVosPacket = pVosPacket; //Fill out the meta information needed by TL //FIXME This timestamp is really the time stamp of wrap_data_packet vos_pkt_get_timestamp( pVosPacket, &timestamp ); pPktMetaInfo->usTimeStamp = (v_U16_t)timestamp; if ( 1 < size ) { pPktMetaInfo->bMorePackets = 1; //HDD has more packets to send } else { pPktMetaInfo->bMorePackets = 0; } pPktMetaInfo->ucIsEapol = 0; if(pAdapter->aStaInfo[STAId].tlSTAState != WLANTL_STA_AUTHENTICATED) { if (TRUE == hdd_IsEAPOLPacket( pVosPacket )) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO_HIGH, "%s: VOS packet is EAPOL packet", __func__); pPktMetaInfo->ucIsEapol = 1; } } if (pHddCtx->cfg_ini->gEnableDebugLog) { proto_type = vos_pkt_get_proto_type(skb, pHddCtx->cfg_ini->gEnableDebugLog); if (VOS_PKT_PROTO_TYPE_EAPOL & proto_type) { VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR, "SAP TX EAPOL"); } else if (VOS_PKT_PROTO_TYPE_DHCP & proto_type) { VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR, "SAP TX DHCP"); } } //xg: @@@@: temporarily disble these. will revisit later { pPktMetaInfo->ucUP = pktNode->userPriority; pPktMetaInfo->ucTID = pPktMetaInfo->ucUP; } pPktMetaInfo->ucType = 0; //FIXME Don't know what this is //Extract the destination address from ethernet frame pDestMacAddress = (v_MACADDR_t*)skb->data; // we need 802.3 to 802.11 frame translation // (note that Bcast/Mcast will be translated in SW, unicast in HW) pPktMetaInfo->ucDisableFrmXtl = 0; pPktMetaInfo->ucBcast = vos_is_macaddr_broadcast( pDestMacAddress ) ? 1 : 0; pPktMetaInfo->ucMcast = vos_is_macaddr_group( pDestMacAddress ) ? 
1 : 0; if ( (pAdapter->aStaInfo[STAId].txSuspended[ac]) && (size <= ((pAdapter->aTxQueueLimit[ac]*3)/4) )) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO, "%s: TX queue re-enabled", __func__); pAdapter->aStaInfo[STAId].txSuspended[ac] = VOS_FALSE; netif_wake_subqueue(pAdapter->dev, skb_get_queue_mapping(skb)); } // We're giving the packet to TL so consider it transmitted from // a statistics perspective. We account for it here instead of // when the packet is returned for two reasons. First, TL will // manipulate the skb to the point where the len field is not // accurate, leading to inaccurate byte counts if we account for // it later. Second, TL does not provide any feedback as to // whether or not the packet was successfully sent over the air, // so the packet counts will be the same regardless of where we // account for them pAdapter->stats.tx_bytes += skb->len; ++pAdapter->stats.tx_packets; ++pAdapter->hdd_stats.hddTxRxStats.txFetchDequeued; ++pAdapter->hdd_stats.hddTxRxStats.txFetchDequeuedAC[ac]; pAdapter->hdd_stats.hddTxRxStats.continuousTxTimeoutCount = 0; VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO, "%s: Valid VOS PKT returned to TL", __func__); return status; } /**============================================================================ @brief hdd_softap_tx_low_resource_cbk() - Callback function invoked in the case where VOS packets are not available at the time of the call to get packets. This callback function is invoked by VOS when packets are available. @param pVosPacket : [in] pointer to VOS packet @param userData : [in] opaque user data that was passed initially @return : VOS_STATUS_E_FAILURE if any errors encountered, : VOS_STATUS_SUCCESS otherwise =============================================================================*/ VOS_STATUS hdd_softap_tx_low_resource_cbk( vos_pkt_t *pVosPacket, v_VOID_t *userData ) { VOS_STATUS status; v_SINT_t i = 0; v_SIZE_t size = 0; hdd_adapter_t* pAdapter = (hdd_adapter_t *)userData; v_U8_t STAId = WLAN_MAX_STA_COUNT; if(pAdapter == NULL) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: HDD adapter context is Null", __func__); return VOS_STATUS_E_FAILURE; } //Return the packet to VOS. We just needed to know that VOS is out of the low resource //situation. Here we will only signal TL that there is pending data for a STA. //A VOS packet will be requested (if needed) when TL comes back to fetch data. vos_pkt_return_packet( pVosPacket ); pAdapter->isVosOutOfResource = VOS_FALSE; // Indicate to TL that there is pending data if a queue is non-empty. // This code wasn't included in an earlier version, which resulted in // traffic stalling for (STAId = 0; STAId < WLAN_MAX_STA_COUNT; STAId++) { if ((pAdapter->aStaInfo[STAId].tlSTAState == WLANTL_STA_AUTHENTICATED) || (pAdapter->aStaInfo[STAId].tlSTAState == WLANTL_STA_CONNECTED)) { for( i=NUM_TX_QUEUES-1; i>=0; --i ) { size = 0; hdd_list_size(&pAdapter->aStaInfo[STAId].wmm_tx_queue[i], &size); if ( size > 0 ) { status = WLANTL_STAPktPending( (WLAN_HDD_GET_CTX(pAdapter))->pvosContext, STAId, (WLANTL_ACEnumType)i ); if( !VOS_IS_STATUS_SUCCESS( status ) ) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: Failure in indicating pkt to TL for ac=%d", __func__,i); } } } } } return VOS_STATUS_SUCCESS; } /**============================================================================ @brief hdd_softap_rx_packet_cbk() - Receive callback registered with TL.
TL will call this to notify the HDD when one or more packets were received for a registered STA. @param vosContext : [in] pointer to VOS context @param pVosPacketChain : [in] pointer to VOS packet chain @param staId : [in] Station Id (Address 1 Index) @param pRxMetaInfo : [in] pointer to meta info for the received pkt(s). @return : VOS_STATUS_E_FAILURE if any errors encountered, : VOS_STATUS_SUCCESS otherwise ===========================================================================*/ VOS_STATUS hdd_softap_rx_packet_cbk( v_VOID_t *vosContext, vos_pkt_t *pVosPacketChain, v_U8_t staId, WLANTL_RxMetaInfoType* pRxMetaInfo ) { hdd_adapter_t *pAdapter = NULL; VOS_STATUS status = VOS_STATUS_E_FAILURE; int rxstat; struct sk_buff *skb = NULL; vos_pkt_t* pVosPacket; vos_pkt_t* pNextVosPacket; hdd_context_t *pHddCtx = NULL; v_U8_t proto_type; //Sanity check on inputs if ( ( NULL == vosContext ) || ( NULL == pVosPacketChain ) || ( NULL == pRxMetaInfo ) ) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: Null params being passed", __func__); return VOS_STATUS_E_FAILURE; } pHddCtx = (hdd_context_t *)vos_get_context( VOS_MODULE_ID_HDD, vosContext ); if ( NULL == pHddCtx ) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: HDD context is Null", __func__); return VOS_STATUS_E_FAILURE; } pAdapter = pHddCtx->sta_to_adapter[staId]; if( NULL == pAdapter ) { VOS_ASSERT(0); return VOS_STATUS_E_FAILURE; } /* Monitor traffic */ if ( pHddCtx->cfg_ini->enableTrafficMonitor ) { pHddCtx->traffic_monitor.lastFrameTs = vos_timer_get_system_time(); if ( !atomic_read(&pHddCtx->traffic_monitor.isActiveMode) ) { vos_lock_acquire(&pHddCtx->traffic_monitor.trafficLock); /* It was IDLE mode and traffic is now active, * so switch the mode from suspend to resume */ if ( !atomic_read(&pHddCtx->traffic_monitor.isActiveMode) ) { hdd_set_wlan_suspend_mode(0); vos_timer_start(&pHddCtx->traffic_monitor.trafficTimer, pHddCtx->cfg_ini->trafficIdleTimeout); atomic_set(&pHddCtx->traffic_monitor.isActiveMode, 1); } vos_lock_release(&pHddCtx->traffic_monitor.trafficLock); } } ++pAdapter->hdd_stats.hddTxRxStats.rxChains; // walk the chain until all are processed pVosPacket = pVosPacketChain; do { // get the pointer to the next packet in the chain // (but don't unlink the packet since we free the entire chain later) status = vos_pkt_walk_packet_chain( pVosPacket, &pNextVosPacket, VOS_FALSE); // both "success" and "empty" are acceptable results if (!((status == VOS_STATUS_SUCCESS) || (status == VOS_STATUS_E_EMPTY))) { ++pAdapter->hdd_stats.hddTxRxStats.rxDropped; VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: Failure walking packet chain", __func__); return VOS_STATUS_E_FAILURE; } // Extract the OS packet (skb).
// Tell VOS to detach the OS packet from the VOS packet status = vos_pkt_get_os_packet( pVosPacket, (v_VOID_t **)&skb, VOS_TRUE ); if(!VOS_IS_STATUS_SUCCESS( status )) { ++pAdapter->hdd_stats.hddTxRxStats.rxDropped; VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: Failure extracting skb from vos pkt", __func__); return VOS_STATUS_E_FAILURE; } //hdd_softap_dump_sk_buff(skb); skb->dev = pAdapter->dev; if(skb->dev == NULL) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_FATAL, "ERROR!! Invalid netdevice"); return VOS_STATUS_E_FAILURE; } ++pAdapter->hdd_stats.hddTxRxStats.rxPackets; ++pAdapter->stats.rx_packets; pAdapter->stats.rx_bytes += skb->len; if (pHddCtx->cfg_ini->gEnableDebugLog) { proto_type = vos_pkt_get_proto_type(skb, pHddCtx->cfg_ini->gEnableDebugLog); if (VOS_PKT_PROTO_TYPE_EAPOL & proto_type) { VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR, "SAP RX EAPOL"); } else if (VOS_PKT_PROTO_TYPE_DHCP & proto_type) { VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR, "SAP RX DHCP"); } } if (WLAN_RX_BCMC_STA_ID == pRxMetaInfo->ucDesSTAId) { //MC/BC packets. Duplicate a copy of the packet struct sk_buff *pSkbCopy; hdd_ap_ctx_t *pHddApCtx; pHddApCtx = WLAN_HDD_GET_AP_CTX_PTR(pAdapter); if (!(pHddApCtx->apDisableIntraBssFwd)) { pSkbCopy = skb_copy(skb, GFP_ATOMIC); if (pSkbCopy) { hdd_softap_sta_2_sta_xmit(pSkbCopy, pSkbCopy->dev, pHddApCtx->uBCStaId, (pRxMetaInfo->ucUP)); } else { VOS_TRACE(VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: skb allocation fails", __func__); } } } //(WLAN_RX_BCMC_STA_ID == pRxMetaInfo->ucDesSTAId) if ((WLAN_RX_BCMC_STA_ID == pRxMetaInfo->ucDesSTAId) || (WLAN_RX_SAP_SELF_STA_ID == pRxMetaInfo->ucDesSTAId)) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO_LOW, "%s: send one packet to kernel", __func__); skb->protocol = eth_type_trans(skb, skb->dev); skb->ip_summed = CHECKSUM_NONE; #ifdef WLAN_OPEN_SOURCE #ifdef WLAN_FEATURE_HOLD_RX_WAKELOCK wake_lock_timeout(&pHddCtx->rx_wake_lock, msecs_to_jiffies(HDD_WAKE_LOCK_DURATION)); #endif #endif rxstat = netif_rx_ni(skb); if (NET_RX_SUCCESS == rxstat) { ++pAdapter->hdd_stats.hddTxRxStats.rxDelivered; ++pAdapter->hdd_stats.hddTxRxStats.pkt_rx_count; } else { ++pAdapter->hdd_stats.hddTxRxStats.rxRefused; } } else if ((WLAN_HDD_GET_AP_CTX_PTR(pAdapter))->apDisableIntraBssFwd) { kfree_skb(skb); } else { //loopback traffic status = hdd_softap_sta_2_sta_xmit(skb, skb->dev, pRxMetaInfo->ucDesSTAId, (pRxMetaInfo->ucUP)); } // now process the next packet in the chain pVosPacket = pNextVosPacket; } while (pVosPacket); //Return the entire VOS packet chain to the resource pool status = vos_pkt_return_packet( pVosPacketChain ); if(!VOS_IS_STATUS_SUCCESS( status )) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: Failure returning vos pkt", __func__); } pAdapter->dev->last_rx = jiffies; return status; } VOS_STATUS hdd_softap_DeregisterSTA( hdd_adapter_t *pAdapter, tANI_U8 staId ) { VOS_STATUS vosStatus = VOS_STATUS_SUCCESS; hdd_context_t *pHddCtx; if (NULL == pAdapter) { VOS_TRACE(VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: pAdapter is NULL", __func__); return VOS_STATUS_E_INVAL; } if (WLAN_HDD_ADAPTER_MAGIC != pAdapter->magic) { VOS_TRACE(VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: Invalid pAdapter magic", __func__); return VOS_STATUS_E_INVAL; } pHddCtx = (hdd_context_t*)(pAdapter->pHddCtx); //Clear the station in TL and then update HDD data structures. This helps //to block RX frames from other stations to this station.
vosStatus = WLANTL_ClearSTAClient( pHddCtx->pvosContext, staId ); if ( !VOS_IS_STATUS_SUCCESS( vosStatus ) ) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "WLANTL_ClearSTAClient() failed for staID %d. " "Status= %d [0x%08lX]", staId, vosStatus, vosStatus ); } vosStatus = hdd_softap_deinit_tx_rx_sta ( pAdapter, staId ); if( VOS_STATUS_E_FAILURE == vosStatus ) { VOS_TRACE ( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "hdd_softap_deinit_tx_rx_sta() failed for staID %d. " "Status = %d [0x%08lX]", staId, vosStatus, vosStatus ); return( vosStatus ); } pHddCtx->sta_to_adapter[staId] = NULL; return( vosStatus ); } VOS_STATUS hdd_softap_RegisterSTA( hdd_adapter_t *pAdapter, v_BOOL_t fAuthRequired, v_BOOL_t fPrivacyBit, v_U8_t staId, v_U8_t ucastSig, v_U8_t bcastSig, v_MACADDR_t *pPeerMacAddress, v_BOOL_t fWmmEnabled ) { VOS_STATUS vosStatus = VOS_STATUS_E_FAILURE; WLAN_STADescType staDesc = {0}; hdd_context_t *pHddCtx = pAdapter->pHddCtx; hdd_adapter_t *pmonAdapter = NULL; //eCsrEncryptionType connectedCipherAlgo; //v_BOOL_t fConnected; /* * Clean up the old entry if it was not cleaned up properly */ if ( pAdapter->aStaInfo[staId].isUsed ) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO, "clean up old entry for STA %d", staId); hdd_softap_DeregisterSTA( pAdapter, staId ); } // Get the Station ID from the one saved during the association. staDesc.ucSTAId = staId; /*Save the pAdapter Pointer for this staId*/ pHddCtx->sta_to_adapter[staId] = pAdapter; staDesc.wSTAType = WLAN_STA_SOFTAP; vos_mem_copy( staDesc.vSTAMACAddress.bytes, pPeerMacAddress->bytes,sizeof(pPeerMacAddress->bytes) ); vos_mem_copy( staDesc.vBSSIDforIBSS.bytes, &pAdapter->macAddressCurrent,6 ); vos_copy_macaddr( &staDesc.vSelfMACAddress, &pAdapter->macAddressCurrent ); VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO, "register station"); VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO, "station mac " MAC_ADDRESS_STR, MAC_ADDR_ARRAY(staDesc.vSTAMACAddress.bytes)); VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO, "BSSIDforIBSS " MAC_ADDRESS_STR, MAC_ADDR_ARRAY(staDesc.vBSSIDforIBSS.bytes)); VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO, "SOFTAP SELFMAC " MAC_ADDRESS_STR, MAC_ADDR_ARRAY(staDesc.vSelfMACAddress.bytes)); vosStatus = hdd_softap_init_tx_rx_sta(pAdapter, staId, &staDesc.vSTAMACAddress); staDesc.ucQosEnabled = fWmmEnabled; VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO, "HDD SOFTAP register TL QoS_enabled=%d", staDesc.ucQosEnabled ); staDesc.ucProtectedFrame = (v_U8_t)fPrivacyBit; // For PRIMA, UMA frame translation is not enabled yet. staDesc.ucSwFrameTXXlation = 1; staDesc.ucSwFrameRXXlation = 1; staDesc.ucAddRmvLLC = 1; // Initialize signatures and state staDesc.ucUcastSig = ucastSig; staDesc.ucBcastSig = bcastSig; staDesc.ucInitState = fAuthRequired ? WLANTL_STA_CONNECTED : WLANTL_STA_AUTHENTICATED; staDesc.ucIsReplayCheckValid = VOS_FALSE; // Register the Station with TL... vosStatus = WLANTL_RegisterSTAClient( (WLAN_HDD_GET_CTX(pAdapter))->pvosContext, hdd_softap_rx_packet_cbk, hdd_softap_tx_complete_cbk, hdd_softap_tx_fetch_packet_cbk, &staDesc, 0 ); if ( !VOS_IS_STATUS_SUCCESS( vosStatus ) ) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "SOFTAP WLANTL_RegisterSTAClient() failed to register.
Status= %d [0x%08X]", vosStatus, vosStatus ); return vosStatus; } //Timer value should be in milliseconds if ( pHddCtx->cfg_ini->dynSplitscan && ( VOS_TIMER_STATE_RUNNING != vos_timer_getCurrentState(&pHddCtx->tx_rx_trafficTmr))) { vos_timer_start(&pHddCtx->tx_rx_trafficTmr, pHddCtx->cfg_ini->trafficMntrTmrForSplitScan); } // if ( WPA ), tell TL to go to 'connected' and, after keys come to the driver, // then go to 'authenticated'. For all other authentication types (those that do // not require upper layer authentication) we can put TL directly into the 'authenticated' // state. //VOS_ASSERT( fConnected ); pAdapter->aStaInfo[staId].ucSTAId = staId; pAdapter->aStaInfo[staId].isQosEnabled = fWmmEnabled; if ( !fAuthRequired ) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO, "open/shared auth StaId= %d. Changing TL state to AUTHENTICATED at Join time", pAdapter->aStaInfo[staId].ucSTAId ); // For connections that do not need upper layer auth, transition TL directly // to the 'Authenticated' state. vosStatus = WLANTL_ChangeSTAState( (WLAN_HDD_GET_CTX(pAdapter))->pvosContext, staDesc.ucSTAId, WLANTL_STA_AUTHENTICATED ); pAdapter->aStaInfo[staId].tlSTAState = WLANTL_STA_AUTHENTICATED; pAdapter->sessionCtx.ap.uIsAuthenticated = VOS_TRUE; } else { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO, "ULA auth StaId= %d. Changing TL state to CONNECTED at Join time", pAdapter->aStaInfo[staId].ucSTAId ); vosStatus = WLANTL_ChangeSTAState( (WLAN_HDD_GET_CTX(pAdapter))->pvosContext, staDesc.ucSTAId, WLANTL_STA_CONNECTED ); pAdapter->aStaInfo[staId].tlSTAState = WLANTL_STA_CONNECTED; pAdapter->sessionCtx.ap.uIsAuthenticated = VOS_FALSE; } pmonAdapter = hdd_get_mon_adapter( pAdapter->pHddCtx); if(pmonAdapter) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO_HIGH, "Turning on the monitor carrier"); netif_carrier_on(pmonAdapter->dev); //Enable Tx queue netif_tx_start_all_queues(pmonAdapter->dev); } netif_carrier_on(pAdapter->dev); //Enable Tx queue netif_tx_start_all_queues(pAdapter->dev); return( vosStatus ); } VOS_STATUS hdd_softap_Register_BC_STA( hdd_adapter_t *pAdapter, v_BOOL_t fPrivacyBit) { VOS_STATUS vosStatus = VOS_STATUS_E_FAILURE; hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX(pAdapter); v_MACADDR_t broadcastMacAddr = VOS_MAC_ADDR_BROADCAST_INITIALIZER; pHddCtx->sta_to_adapter[WLAN_RX_BCMC_STA_ID] = pAdapter; pHddCtx->sta_to_adapter[WLAN_RX_SAP_SELF_STA_ID] = pAdapter; vosStatus = hdd_softap_RegisterSTA( pAdapter, VOS_FALSE, fPrivacyBit, (WLAN_HDD_GET_AP_CTX_PTR(pAdapter))->uBCStaId, 0, 1, &broadcastMacAddr,0); return vosStatus; } VOS_STATUS hdd_softap_Deregister_BC_STA( hdd_adapter_t *pAdapter) { return hdd_softap_DeregisterSTA( pAdapter, (WLAN_HDD_GET_AP_CTX_PTR(pAdapter))->uBCStaId); } VOS_STATUS hdd_softap_stop_bss( hdd_adapter_t *pAdapter) { hdd_context_t *pHddCtx; VOS_STATUS vosStatus = VOS_STATUS_E_FAILURE; v_U8_t staId = 0; pHddCtx = WLAN_HDD_GET_CTX(pAdapter); /* BSS deregister is not allowed during WLAN driver loading or unloading */ if (WLAN_HDD_IS_LOAD_UNLOAD_IN_PROGRESS(pHddCtx)) { VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR, "%s: Loading_unloading in Progress.
Ignore!!!",__func__); return VOS_STATUS_E_PERM; } vosStatus = hdd_softap_Deregister_BC_STA( pAdapter); if (!VOS_IS_STATUS_SUCCESS(vosStatus)) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: Failed to deregister BC sta Id %d", __func__, (WLAN_HDD_GET_AP_CTX_PTR(pAdapter))->uBCStaId); } for (staId = 0; staId < WLAN_MAX_STA_COUNT; staId++) { if (pAdapter->aStaInfo[staId].isUsed) // This excludes the BC sta as it is already deregistered { vosStatus = hdd_softap_DeregisterSTA( pAdapter, staId); if (!VOS_IS_STATUS_SUCCESS(vosStatus)) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: Failed to deregister sta Id %d", __func__, staId); } } } return vosStatus; } VOS_STATUS hdd_softap_change_STA_state( hdd_adapter_t *pAdapter, v_MACADDR_t *pDestMacAddress, WLANTL_STAStateType state) { v_U8_t ucSTAId = WLAN_MAX_STA_COUNT; VOS_STATUS vosStatus = VOS_STATUS_SUCCESS; v_CONTEXT_t pVosContext = (WLAN_HDD_GET_CTX(pAdapter))->pvosContext; VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO, "%s: enter", __func__); if (VOS_STATUS_SUCCESS != hdd_softap_GetStaId(pAdapter, pDestMacAddress, &ucSTAId)) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: Failed to find the right station", __func__); return VOS_STATUS_E_FAILURE; } if (FALSE == vos_is_macaddr_equal(&pAdapter->aStaInfo[ucSTAId].macAddrSTA, pDestMacAddress)) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_ERROR, "%s: Station MAC address does not match", __func__); return VOS_STATUS_E_FAILURE; } vosStatus = WLANTL_ChangeSTAState( pVosContext, ucSTAId, state ); if (VOS_STATUS_SUCCESS == vosStatus) { VOS_TRACE( VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO, "%s: change station to state %d succeeded", __func__, state); pAdapter->aStaInfo[ucSTAId].tlSTAState = WLANTL_STA_AUTHENTICATED; } VOS_TRACE(VOS_MODULE_ID_HDD_SAP_DATA, VOS_TRACE_LEVEL_INFO, "%s exit",__func__); return vosStatus; } VOS_STATUS hdd_softap_GetStaId(hdd_adapter_t *pAdapter, v_MACADDR_t *pMacAddress, v_U8_t *staId) { v_U8_t i; for (i = 0; i < WLAN_MAX_STA_COUNT; i++) { if (vos_mem_compare(&pAdapter->aStaInfo[i].macAddrSTA, pMacAddress, sizeof(v_MACADDR_t)) && pAdapter->aStaInfo[i].isUsed) { *staId = i; return VOS_STATUS_SUCCESS; } } return VOS_STATUS_E_FAILURE; } VOS_STATUS hdd_softap_GetConnectedStaId(hdd_adapter_t *pAdapter, v_U8_t *staId) { v_U8_t i; for (i = 0; i < WLAN_MAX_STA_COUNT; i++) { if (pAdapter->aStaInfo[i].isUsed && (!vos_is_macaddr_broadcast(&pAdapter->aStaInfo[i].macAddrSTA))) { *staId = i; return VOS_STATUS_SUCCESS; } } return VOS_STATUS_E_FAILURE; }
monishk10/moshi_cancro
drivers/staging/prima/CORE/HDD/src/wlan_hdd_softap_tx_rx.c
C
gpl-2.0
71,439
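The fetch path in the SoftAP file above re-enables a suspended per-AC queue only once its depth has fallen to 3/4 of the per-AC limit, so the netif subqueue is not toggled on every dequeue. A minimal stand-alone sketch of that hysteresis follows; QUEUE_LIMIT, on_enqueue and on_dequeue are illustrative names standing in for the driver's aTxQueueLimit and txSuspended handling, not driver symbols.

#include <stdbool.h>
#include <stdio.h>

#define QUEUE_LIMIT 64  /* stand-in for pAdapter->aTxQueueLimit[ac] */

struct ac_queue {
    int  depth;      /* packets currently queued */
    bool suspended;  /* stand-in for aStaInfo[].txSuspended[ac] */
};

/* Called on enqueue: suspend once the limit is reached. */
static void on_enqueue(struct ac_queue *q)
{
    if (++q->depth >= QUEUE_LIMIT && !q->suspended) {
        q->suspended = true;
        printf("queue suspended at depth %d\n", q->depth);
    }
}

/* Called on dequeue (the TL fetch): resume only once the depth is at or
 * below 3/4 of the limit, mirroring the (size <= (limit * 3) / 4) check
 * in the fetch callback above. */
static void on_dequeue(struct ac_queue *q)
{
    if (q->depth > 0)
        q->depth--;
    if (q->suspended && q->depth <= (QUEUE_LIMIT * 3) / 4) {
        q->suspended = false;
        printf("queue re-enabled at depth %d\n", q->depth);
    }
}

int main(void)
{
    struct ac_queue q = {0};
    int i;

    for (i = 0; i < QUEUE_LIMIT; i++)
        on_enqueue(&q);
    for (i = 0; i < QUEUE_LIMIT; i++)
        on_dequeue(&q);
    return 0;
}

The gap between the suspend threshold and the resume threshold is what prevents the queue from flapping when traffic hovers near the limit.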
/******************************************************************************* Intel 10 Gigabit PCI Express Linux driver Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Contact Information: Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *******************************************************************************/ #include "ixgbe.h" #include "ixgbe_type.h" #include "ixgbe_dcb.h" #include "ixgbe_dcb_82598.h" /** * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter * @hw: pointer to hardware structure * @refill: refill credits indexed by traffic class * @max: max credits indexed by traffic class * @prio_type: priority type indexed by traffic class * * Configure Rx Data Arbiter and credits for each traffic class. */ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill, u16 *max, u8 *prio_type) { u32 reg = 0; u32 credit_refill = 0; u32 credit_max = 0; u8 i = 0; reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA; IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg); reg = IXGBE_READ_REG(hw, IXGBE_RMCS); /* Enable Arbiter */ reg &= ~IXGBE_RMCS_ARBDIS; /* Enable Receive Recycle within the BWG */ reg |= IXGBE_RMCS_RRM; /* Enable Deficit Fixed Priority arbitration */ reg |= IXGBE_RMCS_DFP; IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); /* Configure traffic class credits and priority */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { credit_refill = refill[i]; credit_max = max[i]; reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT); if (prio_type[i] == prio_link) reg |= IXGBE_RT2CR_LSP; IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg); } reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); reg |= IXGBE_RDRXCTL_RDMTS_1_2; reg |= IXGBE_RDRXCTL_MPBEN; reg |= IXGBE_RDRXCTL_MCEN; IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL); /* Make sure there are enough descriptors before arbitration */ reg &= ~IXGBE_RXCTRL_DMBYPS; IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg); return 0; } /** * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter * @hw: pointer to hardware structure * @refill: refill credits indexed by traffic class * @max: max credits indexed by traffic class * @bwg_id: bandwidth grouping indexed by traffic class * @prio_type: priority type indexed by traffic class * * Configure Tx Descriptor Arbiter and credits for each traffic class.
*/ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type) { u32 reg, max_credits; u8 i; reg = IXGBE_READ_REG(hw, IXGBE_DPMCS); /* Enable arbiter */ reg &= ~IXGBE_DPMCS_ARBDIS; reg |= IXGBE_DPMCS_TSOEF; /* Configure Max TSO packet size 34KB including payload and headers */ reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT); IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg); /* Configure traffic class credits and priority */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { max_credits = max[i]; reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT; reg |= refill[i]; reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT; if (prio_type[i] == prio_group) reg |= IXGBE_TDTQ2TCCR_GSP; if (prio_type[i] == prio_link) reg |= IXGBE_TDTQ2TCCR_LSP; IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg); } return 0; } /** * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter * @hw: pointer to hardware structure * @refill: refill credits indexed by traffic class * @max: max credits indexed by traffic class * @bwg_id: bandwidth grouping indexed by traffic class * @prio_type: priority type indexed by traffic class * * Configure Tx Data Arbiter and credits for each traffic class. */ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type) { u32 reg; u8 i; reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* Enable Data Plane Arbiter */ reg &= ~IXGBE_PDPMCS_ARBDIS; /* Enable DFP and Transmit Recycle Mode */ reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM); IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg); /* Configure traffic class credits and priority */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { reg = refill[i]; reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT; reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT; if (prio_type[i] == prio_group) reg |= IXGBE_TDPT2TCCR_GSP; if (prio_type[i] == prio_link) reg |= IXGBE_TDPT2TCCR_LSP; IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg); } /* Enable Tx packet buffer division */ reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL); reg |= IXGBE_DTXCTL_ENDBUBD; IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg); return 0; } /** * ixgbe_dcb_config_pfc_82598 - Config priority flow control * @hw: pointer to hardware structure * @pfc_en: enabled pfc bitmask * * Configure Priority Flow Control for each traffic class.
*/ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) { u32 fcrtl, reg; u8 i; /* Enable Transmit Priority Flow Control */ reg = IXGBE_READ_REG(hw, IXGBE_RMCS); reg &= ~IXGBE_RMCS_TFCE_802_3X; reg |= IXGBE_RMCS_TFCE_PRIORITY; IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); /* Enable Receive Priority Flow Control */ reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); reg &= ~(IXGBE_FCTRL_RPFCE | IXGBE_FCTRL_RFCE); if (pfc_en) reg |= IXGBE_FCTRL_RPFCE; IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg); /* Configure PFC Tx thresholds per TC */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { if (!(pfc_en & BIT(i))) { IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0); IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0); continue; } fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl); IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg); } /* Configure pause time */ reg = hw->fc.pause_time * 0x00010001; for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); /* Configure flow control refresh threshold value */ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); return 0; } /** * ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics * @hw: pointer to hardware structure * * Configure queue statistics registers; all queues belonging to the same traffic * class use a single set of queue statistics counters. */ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) { u32 reg = 0; u8 i = 0; u8 j = 0; /* Receive Queues stats setting - 8 queues per statistics reg */ for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) { reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i)); reg |= ((0x1010101) * j); IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1)); reg |= ((0x1010101) * j); IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg); } /* Transmit Queues stats setting - 4 queues per statistics reg */ for (i = 0; i < 8; i++) { reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i)); reg |= ((0x1010101) * i); IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg); } return 0; } /** * ixgbe_dcb_hw_config_82598 - Config and enable DCB * @hw: pointer to hardware structure * @pfc_en: enabled pfc bitmask * @refill: refill credits indexed by traffic class * @max: max credits indexed by traffic class * @bwg_id: bandwidth grouping indexed by traffic class * @prio_type: priority type indexed by traffic class * * Configure dcb settings and enable dcb mode. */ s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type) { ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type); ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, prio_type); ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, prio_type); ixgbe_dcb_config_pfc_82598(hw, pfc_en); ixgbe_dcb_config_tc_stats_82598(hw); return 0; }
HarveyHunt/linux
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
C
gpl-2.0
8,776
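Each per-TC arbiter write above packs the refill credits, the max credit limit, a bandwidth-group id, and the GSP/LSP strict-priority flags into a single 32-bit register value. A stand-alone sketch of that packing follows; the shift amounts and flag bits here are illustrative placeholders, not the hardware's actual IXGBE_TDTQ2TCCR_* values from ixgbe_type.h.

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit layout only -- the real shifts and flags live in
 * ixgbe_type.h and may differ. */
#define MCL_SHIFT 12          /* max credit limit field */
#define BWG_SHIFT 9           /* bandwidth group id field */
#define FLAG_GSP  (1u << 30)  /* group strict priority */
#define FLAG_LSP  (1u << 31)  /* link strict priority */

enum prio { prio_none, prio_group, prio_link };

/* Mirror of the per-TC loop body in the Tx descriptor arbiter setup;
 * refill is assumed to fit below MCL_SHIFT, as the hardware limits it. */
static uint32_t pack_tc_credits(uint16_t refill, uint16_t max,
                                uint8_t bwg_id, enum prio type)
{
    uint32_t reg = (uint32_t)max << MCL_SHIFT;

    reg |= refill;
    reg |= (uint32_t)bwg_id << BWG_SHIFT;
    if (type == prio_group)
        reg |= FLAG_GSP;
    if (type == prio_link)
        reg |= FLAG_LSP;
    return reg;
}

int main(void)
{
    /* TC0: 64 refill credits, 512 max credits, group 1, group-strict */
    printf("TC0 arbiter reg = 0x%08x\n",
           (unsigned)pack_tc_credits(64, 512, 1, prio_group));
    return 0;
}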
/* Basic C++ demangling support for GDB. Copyright (C) 1991-2013 Free Software Foundation, Inc. Written by Fred Fish at Cygnus Support. This file is part of GDB. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* This file contains support code for C++ demangling that is common to all styles of demangling, and is GDB specific. */ #include "defs.h" #include "command.h" #include "gdbcmd.h" #include "demangle.h" #include "gdb-demangle.h" #include "gdb_string.h" /* Select the default C++ demangling style to use. The default is "auto", which allows gdb to attempt to pick an appropriate demangling style for the executable it has loaded. It can be set to a specific style ("gnu", "lucid", "arm", "hp", etc.) in which case gdb will never attempt to do auto selection of the style unless you do an explicit "set demangle auto". To select one of these as the default, set DEFAULT_DEMANGLING_STYLE in the appropriate target configuration file. */ #ifndef DEFAULT_DEMANGLING_STYLE #define DEFAULT_DEMANGLING_STYLE AUTO_DEMANGLING_STYLE_STRING #endif /* See documentation in gdb-demangle.h. */ int demangle = 1; static void show_demangle (struct ui_file *file, int from_tty, struct cmd_list_element *c, const char *value) { fprintf_filtered (file, _("Demangling of encoded C++/ObjC names " "when displaying symbols is %s.\n"), value); } /* See documentation in gdb-demangle.h. */ int asm_demangle = 0; static void show_asm_demangle (struct ui_file *file, int from_tty, struct cmd_list_element *c, const char *value) { fprintf_filtered (file, _("Demangling of C++/ObjC names in " "disassembly listings is %s.\n"), value); } /* String name for the current demangling style. Set by the "set demangle-style" command, printed as part of the output by the "show demangle-style" command. */ static const char *current_demangling_style_string; /* The array of names of the known demangling styles. Generated by _initialize_demangler from libiberty_demanglers[] array. */ static const char **demangling_style_names; static void show_demangling_style_names(struct ui_file *file, int from_tty, struct cmd_list_element *c, const char *value) { fprintf_filtered (file, _("The current C++ demangling style is \"%s\".\n"), value); } /* Set current demangling style. Called by the "set demangle-style" command after it has updated the current_demangling_style_string to match what the user has entered. If the user has entered a string that matches a known demangling style name in the demanglers[] array then just leave the string alone and update the current_demangling_style enum value to match. If the user has entered a string that doesn't match, including an empty string, then print a list of the currently known styles and restore the current_demangling_style_string to match the current_demangling_style enum value. Note: Assumes that current_demangling_style_string always points to a malloc'd string, even if it is a null-string.
*/ static void set_demangling_command (char *ignore, int from_tty, struct cmd_list_element *c) { const struct demangler_engine *dem; int i; /* First just try to match whatever style name the user supplied with one of the known ones. Don't bother special casing for an empty name, we just treat it as any other style name that doesn't match. If we match, update the current demangling style enum. */ for (dem = libiberty_demanglers, i = 0; dem->demangling_style != unknown_demangling; dem++) { if (strcmp (current_demangling_style_string, dem->demangling_style_name) == 0) { current_demangling_style = dem->demangling_style; current_demangling_style_string = demangling_style_names[i]; break; } i++; } /* We should have found a match, given we only add known styles to the enumeration list. */ gdb_assert (dem->demangling_style != unknown_demangling); } /* G++ uses a special character to indicate certain internal names. Which character it is depends on the platform: - Usually '$' on systems where the assembler will accept that - Usually '.' otherwise (this includes most sysv4-like systems and most ELF targets) - Occasionally '_' if neither of the above is usable We check '$' first because it is the safest, and '.' often has another meaning. We don't currently try to handle '_' because the precise forms of the names are different on those targets. */ static char cplus_markers[] = {'$', '.', '\0'}; /* See documentation in gdb-demangle.h. */ int is_cplus_marker (int c) { return c && strchr (cplus_markers, c) != NULL; } extern initialize_file_ftype _initialize_demangler; /* -Wmissing-prototypes */ void _initialize_demangler (void) { int i, ndems; /* Fill the demangling_style_names[] array, and set the default demangling style chosen at compilation time. */ for (ndems = 0; libiberty_demanglers[ndems].demangling_style != unknown_demangling; ndems++) ; demangling_style_names = xcalloc (ndems + 1, sizeof (char *)); for (i = 0; libiberty_demanglers[i].demangling_style != unknown_demangling; i++) { demangling_style_names[i] = xstrdup (libiberty_demanglers[i].demangling_style_name); if (current_demangling_style_string == NULL && strcmp (DEFAULT_DEMANGLING_STYLE, demangling_style_names[i]) == 0) current_demangling_style_string = demangling_style_names[i]; } add_setshow_boolean_cmd ("demangle", class_support, &demangle, _("\ Set demangling of encoded C++/ObjC names when displaying symbols."), _("\ Show demangling of encoded C++/ObjC names when displaying symbols."), NULL, NULL, show_demangle, &setprintlist, &showprintlist); add_setshow_boolean_cmd ("asm-demangle", class_support, &asm_demangle, _("\ Set demangling of C++/ObjC names in disassembly listings."), _("\ Show demangling of C++/ObjC names in disassembly listings."), NULL, NULL, show_asm_demangle, &setprintlist, &showprintlist); add_setshow_enum_cmd ("demangle-style", class_support, demangling_style_names, &current_demangling_style_string, _("\ Set the current C++ demangling style."), _("\ Show the current C++ demangling style."), _("\ Use `set demangle-style' without arguments for a list of demangling styles."), set_demangling_command, show_demangling_style_names, &setlist, &showlist); }
dje42/gdb
gdb/demangle.c
C
gpl-2.0
7,136
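set_demangling_command() above resolves the user's style string by a linear scan over a sentinel-terminated table and asserts that a match exists. The same lookup pattern in miniature; the styles[] table here is a hypothetical stand-in for libiberty_demanglers[], not the real libiberty structure.

#include <assert.h>
#include <stdio.h>
#include <string.h>

enum style { STYLE_AUTO, STYLE_GNU, STYLE_UNKNOWN };

struct style_entry {
    const char *name;
    enum style  style;
};

/* Hypothetical stand-in for libiberty_demanglers[]; like the real
 * table it ends with a sentinel entry. */
static const struct style_entry styles[] = {
    { "auto", STYLE_AUTO },
    { "gnu",  STYLE_GNU  },
    { NULL,   STYLE_UNKNOWN },   /* sentinel */
};

static enum style lookup_style(const char *name)
{
    const struct style_entry *e;

    /* Linear scan, exactly as set_demangling_command() walks its
     * table with strcmp() until the sentinel. */
    for (e = styles; e->name != NULL; e++)
        if (strcmp(name, e->name) == 0)
            return e->style;
    return STYLE_UNKNOWN;        /* caller decides how to complain */
}

int main(void)
{
    assert(lookup_style("gnu") == STYLE_GNU);
    assert(lookup_style("bogus") == STYLE_UNKNOWN);
    printf("lookups ok\n");
    return 0;
}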
/* ChibiOS/RT - Copyright (C) 2006,2007,2008,2009,2010, 2011,2012,2013 Giovanni Di Sirio. This file is part of ChibiOS/RT. ChibiOS/RT is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. ChibiOS/RT is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ /** * @file GCC/ARMCMx/STM32L1xx/vectors.c * @brief Interrupt vectors for the STM32 family. * * @defgroup ARMCMx_STM32L1xx_VECTORS STM32L1xx Interrupt Vectors * @ingroup ARMCMx_SPECIFIC * @details Interrupt vectors for the STM32L1xx family. * @{ */ #include "ch.h" /** * @brief Type of an IRQ vector. */ typedef void (*irq_vector_t)(void); /** * @brief Type of a structure representing the whole vectors table. */ typedef struct { uint32_t *init_stack; irq_vector_t reset_vector; irq_vector_t nmi_vector; irq_vector_t hardfault_vector; irq_vector_t memmanage_vector; irq_vector_t busfault_vector; irq_vector_t usagefault_vector; irq_vector_t vector1c; irq_vector_t vector20; irq_vector_t vector24; irq_vector_t vector28; irq_vector_t svcall_vector; irq_vector_t debugmonitor_vector; irq_vector_t vector34; irq_vector_t pendsv_vector; irq_vector_t systick_vector; irq_vector_t vectors[45]; } vectors_t; #if !defined(__DOXYGEN__) extern uint32_t __main_stack_end__; extern void ResetHandler(void); extern void NMIVector(void); extern void HardFaultVector(void); extern void MemManageVector(void); extern void BusFaultVector(void); extern void UsageFaultVector(void); extern void Vector1C(void); extern void Vector20(void); extern void Vector24(void); extern void Vector28(void); extern void SVCallVector(void); extern void DebugMonitorVector(void); extern void Vector34(void); extern void PendSVVector(void); extern void SysTickVector(void); extern void Vector40(void); extern void Vector44(void); extern void Vector48(void); extern void Vector4C(void); extern void Vector50(void); extern void Vector54(void); extern void Vector58(void); extern void Vector5C(void); extern void Vector60(void); extern void Vector64(void); extern void Vector68(void); extern void Vector6C(void); extern void Vector70(void); extern void Vector74(void); extern void Vector78(void); extern void Vector7C(void); extern void Vector80(void); extern void Vector84(void); extern void Vector88(void); extern void Vector8C(void); extern void Vector90(void); extern void Vector94(void); extern void Vector98(void); extern void Vector9C(void); extern void VectorA0(void); extern void VectorA4(void); extern void VectorA8(void); extern void VectorAC(void); extern void VectorB0(void); extern void VectorB4(void); extern void VectorB8(void); extern void VectorBC(void); extern void VectorC0(void); extern void VectorC4(void); extern void VectorC8(void); extern void VectorCC(void); extern void VectorD0(void); extern void VectorD4(void); extern void VectorD8(void); extern void VectorDC(void); extern void VectorE0(void); extern void VectorE4(void); extern void VectorE8(void); extern void VectorEC(void); extern void VectorF0(void); #endif /* !defined(__DOXYGEN__) */ /** * @brief STM32L1xx vectors table. 
*/ #if !defined(__DOXYGEN__) __attribute__ ((section("vectors"))) #endif vectors_t _vectors = { &__main_stack_end__,ResetHandler, NMIVector, HardFaultVector, MemManageVector, BusFaultVector, UsageFaultVector, Vector1C, Vector20, Vector24, Vector28, SVCallVector, DebugMonitorVector, Vector34, PendSVVector, SysTickVector, { Vector40, Vector44, Vector48, Vector4C, Vector50, Vector54, Vector58, Vector5C, Vector60, Vector64, Vector68, Vector6C, Vector70, Vector74, Vector78, Vector7C, Vector80, Vector84, Vector88, Vector8C, Vector90, Vector94, Vector98, Vector9C, VectorA0, VectorA4, VectorA8, VectorAC, VectorB0, VectorB4, VectorB8, VectorBC, VectorC0, VectorC4, VectorC8, VectorCC, VectorD0, VectorD4, VectorD8, VectorDC, VectorE0, VectorE4, VectorE8, VectorEC, VectorF0 } }; /** * @brief Unhandled exceptions handler. * @details Any undefined exception vector points to this function by default. * This function simply stops the system into an infinite loop. * * @notapi */ #if !defined(__DOXYGEN__) __attribute__ ((naked)) #endif void _unhandled_exception(void) { while (TRUE) ; } void NMIVector(void) __attribute__((weak, alias("_unhandled_exception"))); void HardFaultVector(void) __attribute__((weak, alias("_unhandled_exception"))); void MemManageVector(void) __attribute__((weak, alias("_unhandled_exception"))); void BusFaultVector(void) __attribute__((weak, alias("_unhandled_exception"))); void UsageFaultVector(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector1C(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector20(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector24(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector28(void) __attribute__((weak, alias("_unhandled_exception"))); void SVCallVector(void) __attribute__((weak, alias("_unhandled_exception"))); void DebugMonitorVector(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector34(void) __attribute__((weak, alias("_unhandled_exception"))); void PendSVVector(void) __attribute__((weak, alias("_unhandled_exception"))); void SysTickVector(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector40(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector44(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector48(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector4C(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector50(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector54(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector58(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector5C(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector60(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector64(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector68(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector6C(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector70(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector74(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector78(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector7C(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector80(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector84(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector88(void) __attribute__((weak, 
alias("_unhandled_exception"))); void Vector8C(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector90(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector94(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector98(void) __attribute__((weak, alias("_unhandled_exception"))); void Vector9C(void) __attribute__((weak, alias("_unhandled_exception"))); void VectorA0(void) __attribute__((weak, alias("_unhandled_exception"))); void VectorA4(void) __attribute__((weak, alias("_unhandled_exception"))); void VectorA8(void) __attribute__((weak, alias("_unhandled_exception"))); void VectorAC(void) __attribute__((weak, alias("_unhandled_exception"))); void VectorB0(void) __attribute__((weak, alias("_unhandled_exception"))); void VectorB4(void) __attribute__((weak, alias("_unhandled_exception"))); void VectorB8(void) __attribute__((weak, alias("_unhandled_exception"))); void VectorBC(void) __attribute__((weak, alias("_unhandled_exception"))); void VectorC0(void) __attribute__((weak, alias("_unhandled_exception"))); void VectorC4(void) __attribute__((weak, alias("_unhandled_exception"))); void VectorC8(void) __attribute__((weak, alias("_unhandled_exception"))); void VectorCC(void) __attribute__((weak, alias("_unhandled_exception"))); void VectorD0(void) __attribute__((weak, alias("_unhandled_exception"))); void VectorD4(void) __attribute__((weak, alias("_unhandled_exception"))); void VectorD8(void) __attribute__((weak, alias("_unhandled_exception"))); void VectorDC(void) __attribute__((weak, alias("_unhandled_exception"))); void VectorE0(void) __attribute__((weak, alias("_unhandled_exception"))); void VectorE4(void) __attribute__((weak, alias("_unhandled_exception"))); void VectorE8(void) __attribute__((weak, alias("_unhandled_exception"))); void VectorEC(void) __attribute__((weak, alias("_unhandled_exception"))); void VectorF0(void) __attribute__((weak, alias("_unhandled_exception"))); /** @} */
kvzhao/Embedded-ROS
os/ports/GCC/ARMCMx/STM32L1xx/vectors.c
C
gpl-3.0
9,889
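The vector table above gets its safety net from GCC's weak-alias mechanism: every IRQ symbol resolves to _unhandled_exception unless the application links a strong definition of the same name, so no table entry ever has to change. A host-side sketch of the same linkage trick follows (GCC/Clang on ELF targets; the attribute syntax is toolchain-specific).

#include <stdio.h>

/* Default handler: every weakly-aliased vector lands here unless a
 * strong definition is provided, like _unhandled_exception above. */
void default_handler(void)
{
    puts("default handler");
}

/* Weak alias, the same pattern as the VectorNN declarations above. */
void TimerHandler(void) __attribute__((weak, alias("default_handler")));

/* Uncommenting this strong definition would replace the alias at link
 * time with no change to the call sites or the table:
 *
 * void TimerHandler(void) { puts("real timer handler"); }
 */

int main(void)
{
    TimerHandler();   /* prints "default handler" unless overridden */
    return 0;
}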
/* * Copyright (C) 2005-2009 Junjiro R. Okajima * * This program, aufs is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* * inode functions */ #include "aufs.h" struct inode *au_igrab(struct inode *inode) { if (inode) { AuDebugOn(!atomic_read(&inode->i_count)); atomic_inc_return(&inode->i_count); } return inode; } static void au_refresh_hinode_attr(struct inode *inode, int do_version) { au_cpup_attr_all(inode, /*force*/0); au_update_iigen(inode); if (do_version) inode->i_version++; } int au_refresh_hinode_self(struct inode *inode, int do_attr) { int err; aufs_bindex_t bindex, new_bindex; unsigned char update; struct inode *first; struct au_hinode *p, *q, tmp; struct super_block *sb; struct au_iinfo *iinfo; IiMustWriteLock(inode); update = 0; sb = inode->i_sb; iinfo = au_ii(inode); err = au_ii_realloc(iinfo, au_sbend(sb) + 1); if (unlikely(err)) goto out; p = iinfo->ii_hinode + iinfo->ii_bstart; first = p->hi_inode; err = 0; for (bindex = iinfo->ii_bstart; bindex <= iinfo->ii_bend; bindex++, p++) { if (!p->hi_inode) continue; new_bindex = au_br_index(sb, p->hi_id); if (new_bindex == bindex) continue; if (new_bindex < 0) { update++; au_hiput(p); p->hi_inode = NULL; continue; } if (new_bindex < iinfo->ii_bstart) iinfo->ii_bstart = new_bindex; if (iinfo->ii_bend < new_bindex) iinfo->ii_bend = new_bindex; /* swap two lower inode, and loop again */ q = iinfo->ii_hinode + new_bindex; tmp = *q; *q = *p; *p = tmp; if (tmp.hi_inode) { bindex--; p--; } } au_update_brange(inode, /*do_put_zero*/0); if (do_attr) au_refresh_hinode_attr(inode, update && S_ISDIR(inode->i_mode)); out: return err; } int au_refresh_hinode(struct inode *inode, struct dentry *dentry) { int err, update; unsigned int flags; aufs_bindex_t bindex, bend; unsigned char isdir; struct inode *first; struct au_hinode *p; struct au_iinfo *iinfo; err = au_refresh_hinode_self(inode, /*do_attr*/0); if (unlikely(err)) goto out; update = 0; iinfo = au_ii(inode); p = iinfo->ii_hinode + iinfo->ii_bstart; first = p->hi_inode; isdir = S_ISDIR(inode->i_mode); flags = au_hi_flags(inode, isdir); bend = au_dbend(dentry); for (bindex = au_dbstart(dentry); bindex <= bend; bindex++) { struct inode *h_i; struct dentry *h_d; h_d = au_h_dptr(dentry, bindex); if (!h_d || !h_d->d_inode) continue; if (iinfo->ii_bstart <= bindex && bindex <= iinfo->ii_bend) { h_i = au_h_iptr(inode, bindex); if (h_i) { if (h_i == h_d->d_inode) continue; err = -EIO; break; } } if (bindex < iinfo->ii_bstart) iinfo->ii_bstart = bindex; if (iinfo->ii_bend < bindex) iinfo->ii_bend = bindex; au_set_h_iptr(inode, bindex, au_igrab(h_d->d_inode), flags); update = 1; } au_update_brange(inode, /*do_put_zero*/0); if (unlikely(err)) goto out; au_refresh_hinode_attr(inode, update && isdir); out: AuTraceErr(err); return err; } static int set_inode(struct inode *inode, struct dentry *dentry) { int err; unsigned int flags; umode_t mode; aufs_bindex_t bindex, bstart, 
btail; unsigned char isdir; struct dentry *h_dentry; struct inode *h_inode; struct au_iinfo *iinfo; IiMustWriteLock(inode); err = 0; isdir = 0; bstart = au_dbstart(dentry); h_inode = au_h_dptr(dentry, bstart)->d_inode; mode = h_inode->i_mode; switch (mode & S_IFMT) { case S_IFREG: btail = au_dbtail(dentry); inode->i_op = &aufs_iop; inode->i_fop = &aufs_file_fop; inode->i_mapping->a_ops = &aufs_aop; break; case S_IFDIR: isdir = 1; btail = au_dbtaildir(dentry); inode->i_op = &aufs_dir_iop; inode->i_fop = &aufs_dir_fop; break; case S_IFLNK: btail = au_dbtail(dentry); inode->i_op = &aufs_symlink_iop; break; case S_IFBLK: case S_IFCHR: case S_IFIFO: case S_IFSOCK: btail = au_dbtail(dentry); inode->i_op = &aufs_iop; init_special_inode(inode, mode, h_inode->i_rdev); break; default: AuIOErr("Unknown file type 0%o\n", mode); err = -EIO; goto out; } /* do not set inotify for whiteouted dirs (SHWH mode) */ flags = au_hi_flags(inode, isdir); if (au_opt_test(au_mntflags(dentry->d_sb), SHWH) && au_ftest_hi(flags, HINOTIFY) && dentry->d_name.len > AUFS_WH_PFX_LEN && !memcmp(dentry->d_name.name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) au_fclr_hi(flags, HINOTIFY); iinfo = au_ii(inode); iinfo->ii_bstart = bstart; iinfo->ii_bend = btail; for (bindex = bstart; bindex <= btail; bindex++) { h_dentry = au_h_dptr(dentry, bindex); if (h_dentry) au_set_h_iptr(inode, bindex, au_igrab(h_dentry->d_inode), flags); } au_cpup_attr_all(inode, /*force*/1); out: return err; } /* successful returns with iinfo write_locked */ static int reval_inode(struct inode *inode, struct dentry *dentry, int *matched) { int err; aufs_bindex_t bindex, bend; struct inode *h_inode, *h_dinode; *matched = 0; /* * before this function, if aufs got any iinfo lock, it must be only * one, the parent dir. * it can happen by UDBA and the obsoleted inode number. */ err = -EIO; if (unlikely(inode->i_ino == parent_ino(dentry))) goto out; err = 0; ii_write_lock_new_child(inode); h_dinode = au_h_dptr(dentry, au_dbstart(dentry))->d_inode; bend = au_ibend(inode); for (bindex = au_ibstart(inode); bindex <= bend; bindex++) { h_inode = au_h_iptr(inode, bindex); if (h_inode && h_inode == h_dinode) { *matched = 1; err = 0; if (au_iigen(inode) != au_digen(dentry)) err = au_refresh_hinode(inode, dentry); break; } } if (unlikely(err)) ii_write_unlock(inode); out: return err; } int au_ino(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino, unsigned int d_type, ino_t *ino) { int err; struct mutex *mtx; const int isdir = (d_type == DT_DIR); /* prevent hardlinks from race condition */ mtx = NULL; if (!isdir) { mtx = &au_sbr(sb, bindex)->br_xino.xi_nondir_mtx; mutex_lock(mtx); } err = au_xino_read(sb, bindex, h_ino, ino); if (unlikely(err)) goto out; if (!*ino) { err = -EIO; *ino = au_xino_new_ino(sb); if (unlikely(!*ino)) goto out; err = au_xino_write(sb, bindex, h_ino, *ino); if (unlikely(err)) goto out; } out: if (!isdir) mutex_unlock(mtx); return err; } /* successful returns with iinfo write_locked */ /* todo: return with unlocked? 
*/ struct inode *au_new_inode(struct dentry *dentry, int must_new) { struct inode *inode; struct dentry *h_dentry; struct super_block *sb; ino_t h_ino, ino; int err, match; aufs_bindex_t bstart; sb = dentry->d_sb; bstart = au_dbstart(dentry); h_dentry = au_h_dptr(dentry, bstart); h_ino = h_dentry->d_inode->i_ino; err = au_xino_read(sb, bstart, h_ino, &ino); inode = ERR_PTR(err); if (unlikely(err)) goto out; new_ino: if (!ino) { ino = au_xino_new_ino(sb); if (unlikely(!ino)) { inode = ERR_PTR(-EIO); goto out; } } AuDbg("i%lu\n", (unsigned long)ino); inode = au_iget_locked(sb, ino); err = PTR_ERR(inode); if (IS_ERR(inode)) goto out; AuDbg("%lx, new %d\n", inode->i_state, !!(inode->i_state & I_NEW)); if (inode->i_state & I_NEW) { ii_write_lock_new_child(inode); err = set_inode(inode, dentry); unlock_new_inode(inode); if (!err) goto out; /* success */ iget_failed(inode); ii_write_unlock(inode); goto out_iput; } else if (!must_new) { err = reval_inode(inode, dentry, &match); if (!err) goto out; /* success */ else if (match) goto out_iput; } if (unlikely(au_test_fs_unique_ino(h_dentry->d_inode))) AuWarn1("Warning: Un-notified UDBA or repeatedly renamed dir," " b%d, %s, %.*s, hi%lu, i%lu.\n", bstart, au_sbtype(h_dentry->d_sb), AuDLNPair(dentry), (unsigned long)h_ino, (unsigned long)ino); ino = 0; err = au_xino_write(sb, bstart, h_ino, /*ino*/0); if (!err) { iput(inode); goto new_ino; } out_iput: iput(inode); inode = ERR_PTR(err); out: return inode; } /* ---------------------------------------------------------------------- */ int au_test_ro(struct super_block *sb, aufs_bindex_t bindex, struct inode *inode) { int err; err = au_br_rdonly(au_sbr(sb, bindex)); /* pseudo-link after flushed may happen out of bounds */ if (!err && inode && au_ibstart(inode) <= bindex && bindex <= au_ibend(inode)) { /* * permission check is unnecessary since vfsub routine * will be called later */ struct inode *hi = au_h_iptr(inode, bindex); if (hi) err = IS_IMMUTABLE(hi) ? -EROFS : 0; } return err; } int au_test_h_perm(struct inode *h_inode, int mask) { if (!current_fsuid()) return 0; return inode_permission(h_inode, mask); } int au_test_h_perm_sio(struct inode *h_inode, int mask) { if (au_test_nfs(h_inode->i_sb) && (mask & MAY_WRITE) && S_ISDIR(h_inode->i_mode)) mask |= MAY_READ; /* force permission check */ return au_test_h_perm(h_inode, mask); }
ArthySundaram/firstrepo
ubuntu/aufs/inode.c
C
gpl-2.0
9,534
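au_ino() above follows a read-or-allocate idiom: look the lower inode number up in the xino map, mint a fresh aufs inode number on a miss, write it back, and hold a mutex for non-directories so two hardlinks cannot race to different numbers. A userspace sketch of the same idiom under the assumption of a small fixed-size table; map_ino, xino_map and next_ino are illustrative names, and the modulo indexing ignores collisions that the real xino file handles.

#include <pthread.h>
#include <stdio.h>

#define MAP_SIZE 128

static unsigned long xino_map[MAP_SIZE];  /* 0 == unassigned */
static unsigned long next_ino = 100;      /* next fresh number */
static pthread_mutex_t xino_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Mirror of the au_ino() flow: read, allocate on miss, write back.
 * The lock covers the whole read-modify-write, as the xi_nondir_mtx
 * does above to keep hardlink lookups consistent. */
static unsigned long map_ino(unsigned long h_ino)
{
    unsigned long ino;

    pthread_mutex_lock(&xino_mtx);
    ino = xino_map[h_ino % MAP_SIZE];
    if (ino == 0) {
        ino = next_ino++;
        xino_map[h_ino % MAP_SIZE] = ino;
    }
    pthread_mutex_unlock(&xino_mtx);
    return ino;
}

int main(void)
{
    /* two lookups of the same lower inode must agree */
    printf("h_ino 42 -> %lu\n", map_ino(42));
    printf("h_ino 42 -> %lu\n", map_ino(42));
    return 0;
}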
// SPDX-License-Identifier: GPL-2.0 /* * USB4 specific functionality * * Copyright (C) 2019, Intel Corporation * Authors: Mika Westerberg <mika.westerberg@linux.intel.com> * Rajmohan Mani <rajmohan.mani@intel.com> */ #include <linux/delay.h> #include <linux/ktime.h> #include "tb.h" #define USB4_DATA_DWORDS 16 #define USB4_DATA_RETRIES 3 enum usb4_switch_op { USB4_SWITCH_OP_QUERY_DP_RESOURCE = 0x10, USB4_SWITCH_OP_ALLOC_DP_RESOURCE = 0x11, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE = 0x12, USB4_SWITCH_OP_NVM_WRITE = 0x20, USB4_SWITCH_OP_NVM_AUTH = 0x21, USB4_SWITCH_OP_NVM_READ = 0x22, USB4_SWITCH_OP_NVM_SET_OFFSET = 0x23, USB4_SWITCH_OP_DROM_READ = 0x24, USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25, }; #define USB4_NVM_READ_OFFSET_MASK GENMASK(23, 2) #define USB4_NVM_READ_OFFSET_SHIFT 2 #define USB4_NVM_READ_LENGTH_MASK GENMASK(27, 24) #define USB4_NVM_READ_LENGTH_SHIFT 24 #define USB4_NVM_SET_OFFSET_MASK USB4_NVM_READ_OFFSET_MASK #define USB4_NVM_SET_OFFSET_SHIFT USB4_NVM_READ_OFFSET_SHIFT #define USB4_DROM_ADDRESS_MASK GENMASK(14, 2) #define USB4_DROM_ADDRESS_SHIFT 2 #define USB4_DROM_SIZE_MASK GENMASK(19, 15) #define USB4_DROM_SIZE_SHIFT 15 #define USB4_NVM_SECTOR_SIZE_MASK GENMASK(23, 0) typedef int (*read_block_fn)(struct tb_switch *, unsigned int, void *, size_t); typedef int (*write_block_fn)(struct tb_switch *, const void *, size_t); static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit, u32 value, int timeout_msec) { ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec); do { u32 val; int ret; ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1); if (ret) return ret; if ((val & bit) == value) return 0; usleep_range(50, 100); } while (ktime_before(ktime_get(), timeout)); return -ETIMEDOUT; } static int usb4_switch_op_read_data(struct tb_switch *sw, void *data, size_t dwords) { if (dwords > USB4_DATA_DWORDS) return -EINVAL; return tb_sw_read(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords); } static int usb4_switch_op_write_data(struct tb_switch *sw, const void *data, size_t dwords) { if (dwords > USB4_DATA_DWORDS) return -EINVAL; return tb_sw_write(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords); } static int usb4_switch_op_read_metadata(struct tb_switch *sw, u32 *metadata) { return tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1); } static int usb4_switch_op_write_metadata(struct tb_switch *sw, u32 metadata) { return tb_sw_write(sw, &metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1); } static int usb4_switch_do_read_data(struct tb_switch *sw, u16 address, void *buf, size_t size, read_block_fn read_block) { unsigned int retries = USB4_DATA_RETRIES; unsigned int offset; offset = address & 3; address = address & ~3; do { size_t nbytes = min_t(size_t, size, USB4_DATA_DWORDS * 4); unsigned int dwaddress, dwords; u8 data[USB4_DATA_DWORDS * 4]; int ret; dwaddress = address / 4; dwords = ALIGN(nbytes, 4) / 4; ret = read_block(sw, dwaddress, data, dwords); if (ret) { if (ret == -ETIMEDOUT) { if (retries--) continue; ret = -EIO; } return ret; } memcpy(buf, data + offset, nbytes); size -= nbytes; address += nbytes; buf += nbytes; } while (size > 0); return 0; } static int usb4_switch_do_write_data(struct tb_switch *sw, u16 address, const void *buf, size_t size, write_block_fn write_next_block) { unsigned int retries = USB4_DATA_RETRIES; unsigned int offset; offset = address & 3; address = address & ~3; do { u32 nbytes = min_t(u32, size, USB4_DATA_DWORDS * 4); u8 data[USB4_DATA_DWORDS * 4]; int ret; memcpy(data + offset, buf, nbytes); ret = write_next_block(sw, data, nbytes / 4); if 
(ret) { if (ret == -ETIMEDOUT) { if (retries--) continue; ret = -EIO; } return ret; } size -= nbytes; address += nbytes; buf += nbytes; } while (size > 0); return 0; } static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status) { u32 val; int ret; val = opcode | ROUTER_CS_26_OV; ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1); if (ret) return ret; ret = usb4_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500); if (ret) return ret; ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1); if (val & ROUTER_CS_26_ONS) return -EOPNOTSUPP; *status = (val & ROUTER_CS_26_STATUS_MASK) >> ROUTER_CS_26_STATUS_SHIFT; return 0; } /** * usb4_switch_setup() - Additional setup for USB4 device * @sw: USB4 router to setup * * USB4 routers need additional settings in order to enable all the * tunneling. This function enables USB and PCIe tunneling if it can be * enabled (e.g. the parent switch also supports them). If USB tunneling * is not available for some reason (e.g. there is a Thunderbolt 3 * switch upstream) then the internal xHCI controller is enabled * instead. */ int usb4_switch_setup(struct tb_switch *sw) { struct tb_switch *parent; bool tbt3, xhci; u32 val = 0; int ret; if (!tb_route(sw)) return 0; ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1); if (ret) return ret; xhci = val & ROUTER_CS_6_HCI; tbt3 = !(val & ROUTER_CS_6_TNS); tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n", tbt3 ? "yes" : "no", xhci ? "yes" : "no"); ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); if (ret) return ret; parent = tb_switch_parent(sw); if (tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) { val |= ROUTER_CS_5_UTO; xhci = false; } /* Only enable PCIe tunneling if the parent router supports it */ if (tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) { val |= ROUTER_CS_5_PTO; /* * xHCI can be enabled if PCIe tunneling is supported * and the parent does not have any USB3 downstream * adapters (so we cannot do USB 3.x tunneling). */ if (xhci) val |= ROUTER_CS_5_HCO; } /* TBT3 supported by the CM */ val |= ROUTER_CS_5_C3S; /* Tunneling configuration is ready now */ val |= ROUTER_CS_5_CV; ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); if (ret) return ret; return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR, ROUTER_CS_6_CR, 50); } /** * usb4_switch_read_uid() - Read UID from USB4 router * @sw: USB4 router * * Reads 64-bit UID from USB4 router config space. */ int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid) { return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2); } static int usb4_switch_drom_read_block(struct tb_switch *sw, unsigned int dwaddress, void *buf, size_t dwords) { u8 status = 0; u32 metadata; int ret; metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK; metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) & USB4_DROM_ADDRESS_MASK; ret = usb4_switch_op_write_metadata(sw, metadata); if (ret) return ret; ret = usb4_switch_op(sw, USB4_SWITCH_OP_DROM_READ, &status); if (ret) return ret; if (status) return -EIO; return usb4_switch_op_read_data(sw, buf, dwords); } /** * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM * @sw: USB4 router * * Uses USB4 router operations to read router DROM. For devices this * should always work but for hosts it may return %-EOPNOTSUPP in which * case the host router does not have DROM.
*/ int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf, size_t size) { return usb4_switch_do_read_data(sw, address, buf, size, usb4_switch_drom_read_block); } static int usb4_set_port_configured(struct tb_port *port, bool configured) { int ret; u32 val; ret = tb_port_read(port, &val, TB_CFG_PORT, port->cap_usb4 + PORT_CS_19, 1); if (ret) return ret; if (configured) val |= PORT_CS_19_PC; else val &= ~PORT_CS_19_PC; return tb_port_write(port, &val, TB_CFG_PORT, port->cap_usb4 + PORT_CS_19, 1); } /** * usb4_switch_configure_link() - Set upstream USB4 link configured * @sw: USB4 router * * Sets the upstream USB4 link to be configured for power management * purposes. */ int usb4_switch_configure_link(struct tb_switch *sw) { struct tb_port *up; if (!tb_route(sw)) return 0; up = tb_upstream_port(sw); return usb4_set_port_configured(up, true); } /** * usb4_switch_unconfigure_link() - Un-set upstream USB4 link configuration * @sw: USB4 router * * Reverse of usb4_switch_configure_link(). */ void usb4_switch_unconfigure_link(struct tb_switch *sw) { struct tb_port *up; if (sw->is_unplugged || !tb_route(sw)) return; up = tb_upstream_port(sw); usb4_set_port_configured(up, false); } /** * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding * @sw: USB4 router * * Checks whether conditions are met so that lane bonding can be * established with the upstream router. Call only for device routers. */ bool usb4_switch_lane_bonding_possible(struct tb_switch *sw) { struct tb_port *up; int ret; u32 val; up = tb_upstream_port(sw); ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1); if (ret) return false; return !!(val & PORT_CS_18_BE); } /** * usb4_switch_set_sleep() - Prepare the router to enter sleep * @sw: USB4 router * * Enables wakes and sets sleep bit for the router. Returns when the * router sleep ready bit has been asserted. */ int usb4_switch_set_sleep(struct tb_switch *sw) { int ret; u32 val; /* Set sleep bit and wait for sleep ready to be asserted */ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); if (ret) return ret; val |= ROUTER_CS_5_SLP; ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); if (ret) return ret; return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR, ROUTER_CS_6_SLPR, 500); } /** * usb4_switch_nvm_sector_size() - Return router NVM sector size * @sw: USB4 router * * If the router supports NVM operations this function returns the NVM * sector size in bytes. If NVM operations are not supported returns * %-EOPNOTSUPP. */ int usb4_switch_nvm_sector_size(struct tb_switch *sw) { u32 metadata; u8 status; int ret; ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &status); if (ret) return ret; if (status) return status == 0x2 ? 
-EOPNOTSUPP : -EIO; ret = usb4_switch_op_read_metadata(sw, &metadata); if (ret) return ret; return metadata & USB4_NVM_SECTOR_SIZE_MASK; } static int usb4_switch_nvm_read_block(struct tb_switch *sw, unsigned int dwaddress, void *buf, size_t dwords) { u8 status = 0; u32 metadata; int ret; metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) & USB4_NVM_READ_LENGTH_MASK; metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) & USB4_NVM_READ_OFFSET_MASK; ret = usb4_switch_op_write_metadata(sw, metadata); if (ret) return ret; ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_READ, &status); if (ret) return ret; if (status) return -EIO; return usb4_switch_op_read_data(sw, buf, dwords); } /** * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM * @sw: USB4 router * @address: Starting address in bytes * @buf: Read data is placed here * @size: How many bytes to read * * Reads NVM contents of the router. If NVM is not supported returns * %-EOPNOTSUPP. */ int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf, size_t size) { return usb4_switch_do_read_data(sw, address, buf, size, usb4_switch_nvm_read_block); } static int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address) { u32 metadata, dwaddress; u8 status = 0; int ret; dwaddress = address / 4; metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) & USB4_NVM_SET_OFFSET_MASK; ret = usb4_switch_op_write_metadata(sw, metadata); if (ret) return ret; ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &status); if (ret) return ret; return status ? -EIO : 0; } static int usb4_switch_nvm_write_next_block(struct tb_switch *sw, const void *buf, size_t dwords) { u8 status; int ret; ret = usb4_switch_op_write_data(sw, buf, dwords); if (ret) return ret; ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_WRITE, &status); if (ret) return ret; return status ? -EIO : 0; } /** * usb4_switch_nvm_write() - Write to the router NVM * @sw: USB4 router * @address: Start address where to write in bytes * @buf: Pointer to the data to write * @size: Size of @buf in bytes * * Writes @buf to the router NVM using USB4 router operations. If NVM * write is not supported returns %-EOPNOTSUPP. */ int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address, const void *buf, size_t size) { int ret; ret = usb4_switch_nvm_set_offset(sw, address); if (ret) return ret; return usb4_switch_do_write_data(sw, address, buf, size, usb4_switch_nvm_write_next_block); } /** * usb4_switch_nvm_authenticate() - Authenticate new NVM * @sw: USB4 router * * After the new NVM has been written via usb4_switch_nvm_write(), this * function triggers NVM authentication process. If the authentication * is successful the router is power cycled and the new NVM starts * running. In case of failure returns negative errno. */ int usb4_switch_nvm_authenticate(struct tb_switch *sw) { u8 status = 0; int ret; ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, &status); if (ret) return ret; switch (status) { case 0x0: tb_sw_dbg(sw, "NVM authentication successful\n"); return 0; case 0x1: return -EINVAL; case 0x2: return -EAGAIN; case 0x3: return -EOPNOTSUPP; default: return -EIO; } } /** * usb4_switch_query_dp_resource() - Query availability of DP IN resource * @sw: USB4 router * @in: DP IN adapter * * For DP tunneling this function can be used to query availability of * DP IN resource. Returns true if the resource is available for DP * tunneling, false otherwise. 
*/ bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in) { u8 status; int ret; ret = usb4_switch_op_write_metadata(sw, in->port); if (ret) return false; ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &status); /* * If DP resource allocation is not supported assume it is * always available. */ if (ret == -EOPNOTSUPP) return true; else if (ret) return false; return !status; } /** * usb4_switch_alloc_dp_resource() - Allocate DP IN resource * @sw: USB4 router * @in: DP IN adapter * * Allocates DP IN resource for DP tunneling using USB4 router * operations. If the resource was allocated returns %0. Otherwise * returns negative errno, in particular %-EBUSY if the resource is * already allocated. */ int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in) { u8 status; int ret; ret = usb4_switch_op_write_metadata(sw, in->port); if (ret) return ret; ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &status); if (ret == -EOPNOTSUPP) return 0; else if (ret) return ret; return status ? -EBUSY : 0; } /** * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource * @sw: USB4 router * @in: DP IN adapter * * Releases the previously allocated DP IN resource. */ int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in) { u8 status; int ret; ret = usb4_switch_op_write_metadata(sw, in->port); if (ret) return ret; ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &status); if (ret == -EOPNOTSUPP) return 0; else if (ret) return ret; return status ? -EIO : 0; } static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port) { struct tb_port *p; int usb4_idx = 0; /* Assume port is primary */ tb_switch_for_each_port(sw, p) { if (!tb_port_is_null(p)) continue; if (tb_is_upstream_port(p)) continue; if (!p->link_nr) { if (p == port) break; usb4_idx++; } } return usb4_idx; } /** * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter * @sw: USB4 router * @port: USB4 port * * USB4 routers have direct mapping between USB4 ports and PCIe * downstream adapters where the PCIe topology is extended. This * function returns the corresponding downstream PCIe adapter or %NULL * if no such mapping was possible. */ struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw, const struct tb_port *port) { int usb4_idx = usb4_port_idx(sw, port); struct tb_port *p; int pcie_idx = 0; /* Find PCIe down port matching usb4_port */ tb_switch_for_each_port(sw, p) { if (!tb_port_is_pcie_down(p)) continue; if (pcie_idx == usb4_idx && !tb_pci_port_is_enabled(p)) return p; pcie_idx++; } return NULL; } /** * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter * @sw: USB4 router * @port: USB4 port * * USB4 routers have direct mapping between USB4 ports and USB 3.x * downstream adapters where the USB 3.x topology is extended. This * function returns the corresponding downstream USB 3.x adapter or * %NULL if no such mapping was possible. 
*/ struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw, const struct tb_port *port) { int usb4_idx = usb4_port_idx(sw, port); struct tb_port *p; int usb_idx = 0; /* Find USB3 down port matching usb4_port */ tb_switch_for_each_port(sw, p) { if (!tb_port_is_usb3_down(p)) continue; if (usb_idx == usb4_idx && !tb_usb3_port_is_enabled(p)) return p; usb_idx++; } return NULL; } /** * usb4_port_unlock() - Unlock USB4 downstream port * @port: USB4 port to unlock * * Unlocks USB4 downstream port so that the connection manager can * access the router below this port. */ int usb4_port_unlock(struct tb_port *port) { int ret; u32 val; ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1); if (ret) return ret; val &= ~ADP_CS_4_LCK; return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1); }
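/*
 * Illustrative sketch, not part of the driver: one way a connection
 * manager could use the router operations implemented above to probe a
 * router's NVM. The helper name usb4_example_probe_nvm() and the
 * 64-byte probe size are assumptions for illustration only; the calls
 * it makes (usb4_switch_nvm_sector_size(), usb4_switch_nvm_read()) are
 * the functions defined in this file. Kept under #if 0 so it is never
 * compiled into the driver.
 */
#if 0
static int usb4_example_probe_nvm(struct tb_switch *sw)
{
	u8 buf[64];
	int ret;

	/* Negative return means no NVM support (-EOPNOTSUPP) or an I/O error */
	ret = usb4_switch_nvm_sector_size(sw);
	if (ret < 0)
		return ret;
	tb_sw_dbg(sw, "NVM sector size %d bytes\n", ret);

	/*
	 * Reads are byte addressed; usb4_switch_do_read_data() splits them
	 * into dword-sized router operations and retries on timeout.
	 */
	return usb4_switch_nvm_read(sw, 0, buf, sizeof(buf));
}
#endif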
c0d3z3r0/linux-rockchip
drivers/thunderbolt/usb4.c
C
gpl-2.0
17,878
/* Simple DirectMedia Layer Copyright (C) 1997-2018 Sam Lantinga <slouken@libsdl.org> This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #include "../../SDL_internal.h" #if SDL_AUDIO_DRIVER_PSP #include <stdio.h> #include <string.h> #include <stdlib.h> #include <malloc.h> #include "SDL_audio.h" #include "SDL_error.h" #include "SDL_timer.h" #include "../SDL_audio_c.h" #include "../SDL_audiodev_c.h" #include "../SDL_sysaudio.h" #include "SDL_pspaudio.h" #include <pspaudio.h> #include <pspthreadman.h> /* The tag name used by PSP audio */ #define PSPAUDIO_DRIVER_NAME "psp" static int PSPAUDIO_OpenDevice(_THIS, void *handle, const char *devname, int iscapture) { int format, mixlen, i; this->hidden = (struct SDL_PrivateAudioData *) SDL_malloc(sizeof(*this->hidden)); if (this->hidden == NULL) { return SDL_OutOfMemory(); } SDL_zerop(this->hidden); switch (this->spec.format & 0xff) { case 8: case 16: this->spec.format = AUDIO_S16LSB; break; default: return SDL_SetError("Unsupported audio format"); } /* The sample count must be a multiple of 64. */ this->spec.samples = PSP_AUDIO_SAMPLE_ALIGN(this->spec.samples); this->spec.freq = 44100; /* Update the fragment size as size in bytes. */ SDL_CalculateAudioSpec(&this->spec); /* Allocate the mixing buffer. Its size and starting address must be a multiple of 64 bytes. Our sample count is already a multiple of 64, so spec->size should be a multiple of 64 as well. */ mixlen = this->spec.size * NUM_BUFFERS; this->hidden->rawbuf = (Uint8 *) memalign(64, mixlen); if (this->hidden->rawbuf == NULL) { return SDL_SetError("Couldn't allocate mixing buffer"); } /* Setup the hardware channel. 
*/ if (this->spec.channels == 1) { format = PSP_AUDIO_FORMAT_MONO; } else { this->spec.channels = 2; format = PSP_AUDIO_FORMAT_STEREO; } this->hidden->channel = sceAudioChReserve(PSP_AUDIO_NEXT_CHANNEL, this->spec.samples, format); if (this->hidden->channel < 0) { free(this->hidden->rawbuf); this->hidden->rawbuf = NULL; return SDL_SetError("Couldn't reserve hardware channel"); } memset(this->hidden->rawbuf, 0, mixlen); for (i = 0; i < NUM_BUFFERS; i++) { this->hidden->mixbufs[i] = &this->hidden->rawbuf[i * this->spec.size]; } this->hidden->next_buffer = 0; return 0; } static void PSPAUDIO_PlayDevice(_THIS) { Uint8 *mixbuf = this->hidden->mixbufs[this->hidden->next_buffer]; if (this->spec.channels == 1) { sceAudioOutputBlocking(this->hidden->channel, PSP_AUDIO_VOLUME_MAX, mixbuf); } else { sceAudioOutputPannedBlocking(this->hidden->channel, PSP_AUDIO_VOLUME_MAX, PSP_AUDIO_VOLUME_MAX, mixbuf); } this->hidden->next_buffer = (this->hidden->next_buffer + 1) % NUM_BUFFERS; } /* This function waits until it is possible to write a full sound buffer */ static void PSPAUDIO_WaitDevice(_THIS) { /* Because we block when sending audio, there's no need for this function to do anything. */ } static Uint8 *PSPAUDIO_GetDeviceBuf(_THIS) { return this->hidden->mixbufs[this->hidden->next_buffer]; } static void PSPAUDIO_CloseDevice(_THIS) { if (this->hidden->channel >= 0) { sceAudioChRelease(this->hidden->channel); } free(this->hidden->rawbuf); /* this uses memalign(), not SDL_malloc(). */ SDL_free(this->hidden); } static void PSPAUDIO_ThreadInit(_THIS) { /* Increase the priority of this audio thread by 1 to put it ahead of other SDL threads. */ SceUID thid; SceKernelThreadInfo status; thid = sceKernelGetThreadId(); status.size = sizeof(SceKernelThreadInfo); if (sceKernelReferThreadStatus(thid, &status) == 0) { sceKernelChangeThreadPriority(thid, status.currentPriority - 1); } } static int PSPAUDIO_Init(SDL_AudioDriverImpl * impl) { /* Set the function pointers */ impl->OpenDevice = PSPAUDIO_OpenDevice; impl->PlayDevice = PSPAUDIO_PlayDevice; impl->WaitDevice = PSPAUDIO_WaitDevice; impl->GetDeviceBuf = PSPAUDIO_GetDeviceBuf; impl->CloseDevice = PSPAUDIO_CloseDevice; impl->ThreadInit = PSPAUDIO_ThreadInit; /* PSP audio device */ impl->OnlyHasDefaultOutputDevice = 1; /* impl->HasCaptureSupport = 1; impl->OnlyHasDefaultCaptureDevice = 1; */ /* impl->DetectDevices = DSOUND_DetectDevices; impl->Deinitialize = DSOUND_Deinitialize; */ return 1; /* this audio target is available. */ } AudioBootStrap PSPAUDIO_bootstrap = { "psp", "PSP audio driver", PSPAUDIO_Init, 0 }; /* SDL_AUDI */ #endif /* SDL_AUDIO_DRIVER_PSP */ /* vi: set ts=4 sw=4 expandtab: */
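/*
 * Illustrative sketch, not part of the driver: minimal application-side
 * code that would exercise this backend on a PSP build, where SDL picks
 * the "psp" driver automatically. The callback and the requested spec
 * values are assumptions for illustration; only the SDL calls themselves
 * (SDL_Init(), SDL_OpenAudio(), SDL_PauseAudio()) are real API. Kept
 * under #if 0 so it is never compiled.
 */
#if 0
static void example_callback(void *userdata, Uint8 *stream, int len)
{
    SDL_memset(stream, 0, len); /* silence; a real app would mix audio here */
}

static int example_open_audio(void)
{
    SDL_AudioSpec want, have;

    if (SDL_Init(SDL_INIT_AUDIO) < 0)
        return -1;

    SDL_zero(want);
    want.freq = 44100;          /* PSPAUDIO_OpenDevice forces 44100 Hz anyway */
    want.format = AUDIO_S16LSB; /* the only format the backend outputs */
    want.channels = 2;
    want.samples = 1024;        /* rounded to a multiple of 64 by the backend */
    want.callback = example_callback;

    if (SDL_OpenAudio(&want, &have) < 0)
        return -1;
    SDL_PauseAudio(0);          /* start the audio thread / playback */
    return 0;
}
#endif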
joncampbell123/dosbox-rewrite
vs2015/sdl2/src/audio/psp/SDL_pspaudio.c
C
gpl-2.0
5,604
/* * Digital Audio (PCM) abstract layer * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; only version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/mm.h> #include <linux/module.h> #include <linux/file.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/pm_qos.h> #include <linux/uio.h> #include <linux/dma-mapping.h> #include <sound/core.h> #include <sound/control.h> #include <sound/compress_offload.h> #include <sound/info.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/timer.h> #include <sound/minors.h> #include <asm/io.h> #if defined(CONFIG_MIPS) && defined(CONFIG_DMA_NONCOHERENT) #include <dma-coherence.h> #endif //htc audio ++ #include <sound/soc.h> #undef pr_info #undef pr_err #define pr_info(fmt, ...) pr_aud_info(fmt, ##__VA_ARGS__) #define pr_err(fmt, ...) pr_aud_err(fmt, ##__VA_ARGS__) //htc audio -- /* * Compatibility */ struct snd_pcm_hw_params_old { unsigned int flags; unsigned int masks[SNDRV_PCM_HW_PARAM_SUBFORMAT - SNDRV_PCM_HW_PARAM_ACCESS + 1]; struct snd_interval intervals[SNDRV_PCM_HW_PARAM_TICK_TIME - SNDRV_PCM_HW_PARAM_SAMPLE_BITS + 1]; unsigned int rmask; unsigned int cmask; unsigned int info; unsigned int msbits; unsigned int rate_num; unsigned int rate_den; snd_pcm_uframes_t fifo_size; unsigned char reserved[64]; }; #ifdef CONFIG_SND_SUPPORT_OLD_API #define SNDRV_PCM_IOCTL_HW_REFINE_OLD _IOWR('A', 0x10, struct snd_pcm_hw_params_old) #define SNDRV_PCM_IOCTL_HW_PARAMS_OLD _IOWR('A', 0x11, struct snd_pcm_hw_params_old) static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream, struct snd_pcm_hw_params_old __user * _oparams); static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream, struct snd_pcm_hw_params_old __user * _oparams); #endif static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream); /* * */ DEFINE_RWLOCK(snd_pcm_link_rwlock); EXPORT_SYMBOL(snd_pcm_link_rwlock); static DECLARE_RWSEM(snd_pcm_link_rwsem); static inline mm_segment_t snd_enter_user(void) { mm_segment_t fs = get_fs(); set_fs(get_ds()); return fs; } static inline void snd_leave_user(mm_segment_t fs) { set_fs(fs); } int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info) { struct snd_pcm_runtime *runtime; struct snd_pcm *pcm = substream->pcm; struct snd_pcm_str *pstr = substream->pstr; memset(info, 0, sizeof(*info)); info->card = pcm->card->number; info->device = pcm->device; info->stream = substream->stream; info->subdevice = substream->number; strlcpy(info->id, pcm->id, sizeof(info->id)); strlcpy(info->name, pcm->name, sizeof(info->name)); info->dev_class = pcm->dev_class; info->dev_subclass = pcm->dev_subclass; info->subdevices_count = pstr->substream_count; info->subdevices_avail = pstr->substream_count - pstr->substream_opened; strlcpy(info->subname, substream->name, sizeof(info->subname)); runtime = substream->runtime; /* AB: FIXME!!! 
This is definitely nonsense */ if (runtime) { info->sync = runtime->sync; substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_INFO, info); } return 0; } int snd_pcm_info_user(struct snd_pcm_substream *substream, struct snd_pcm_info __user * _info) { struct snd_pcm_info *info; int err; info = kmalloc(sizeof(*info), GFP_KERNEL); if (! info) return -ENOMEM; err = snd_pcm_info(substream, info); if (err >= 0) { if (copy_to_user(_info, info, sizeof(*info))) err = -EFAULT; } kfree(info); return err; } #undef RULES_DEBUG #if 1 //htc audio #define HW_PARAM(v) [SNDRV_PCM_HW_PARAM_##v] = #v static const char * const snd_pcm_hw_param_names[] = { HW_PARAM(ACCESS), HW_PARAM(FORMAT), HW_PARAM(SUBFORMAT), HW_PARAM(SAMPLE_BITS), HW_PARAM(FRAME_BITS), HW_PARAM(CHANNELS), HW_PARAM(RATE), HW_PARAM(PERIOD_TIME), HW_PARAM(PERIOD_SIZE), HW_PARAM(PERIOD_BYTES), HW_PARAM(PERIODS), HW_PARAM(BUFFER_TIME), HW_PARAM(BUFFER_SIZE), HW_PARAM(BUFFER_BYTES), HW_PARAM(TICK_TIME), }; #endif int snd_pcm_hw_refine(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { unsigned int k; struct snd_pcm_hardware *hw; struct snd_interval *i = NULL; struct snd_mask *m = NULL; struct snd_pcm_hw_constraints *constrs = &substream->runtime->hw_constraints; unsigned int rstamps[constrs->rules_num]; unsigned int vstamps[SNDRV_PCM_HW_PARAM_LAST_INTERVAL + 1]; unsigned int stamp = 2; int changed, again; params->info = 0; params->fifo_size = 0; if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_SAMPLE_BITS)) params->msbits = 0; if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_RATE)) { params->rate_num = 0; params->rate_den = 0; } for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) { m = hw_param_mask(params, k); if (snd_mask_empty(m)) return -EINVAL; if (!(params->rmask & (1 << k))) continue; #ifdef RULES_DEBUG printk(KERN_DEBUG "%s = ", snd_pcm_hw_param_names[k]); printk("%04x%04x%04x%04x -> ", m->bits[3], m->bits[2], m->bits[1], m->bits[0]); #endif changed = snd_mask_refine(m, constrs_mask(constrs, k)); #ifdef RULES_DEBUG printk("%04x%04x%04x%04x\n", m->bits[3], m->bits[2], m->bits[1], m->bits[0]); #endif if (changed) params->cmask |= 1 << k; if (changed < 0) { //htc audio ++ pr_info("refine mask %s \n",snd_pcm_hw_param_names[k]); pr_info("fail mask 0x%x 0x%x 0x%x 0x%x\n", m->bits[3], m->bits[2], m->bits[1], m->bits[0]); //htc audio -- return changed; } } for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) { i = hw_param_interval(params, k); if (snd_interval_empty(i)) return -EINVAL; if (!(params->rmask & (1 << k))) continue; #ifdef RULES_DEBUG printk(KERN_DEBUG "%s = ", snd_pcm_hw_param_names[k]); if (i->empty) printk("empty"); else printk("%c%u %u%c", i->openmin ? '(' : '[', i->min, i->max, i->openmax ? ')' : ']'); printk(" -> "); #endif changed = snd_interval_refine(i, constrs_interval(constrs, k)); #ifdef RULES_DEBUG if (i->empty) printk("empty\n"); else printk("%c%u %u%c\n", i->openmin ? '(' : '[', i->min, i->max, i->openmax ? ')' : ']'); #endif if (changed) params->cmask |= 1 << k; if (changed < 0) { //htc audio ++ pr_info("refine interval %s fail\n",snd_pcm_hw_param_names[k]); pr_info("fail max %u min %u\n",i->max,i->min); //htc audio -- return changed; } } for (k = 0; k < constrs->rules_num; k++) rstamps[k] = 0; for (k = 0; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) vstamps[k] = (params->rmask & (1 << k)) ? 
1 : 0; do { again = 0; for (k = 0; k < constrs->rules_num; k++) { struct snd_pcm_hw_rule *r = &constrs->rules[k]; unsigned int d; int doit = 0; if (r->cond && !(r->cond & params->flags)) continue; for (d = 0; r->deps[d] >= 0; d++) { if (vstamps[r->deps[d]] > rstamps[k]) { doit = 1; break; } } if (!doit) continue; #ifdef RULES_DEBUG printk(KERN_DEBUG "Rule %d [%p]: ", k, r->func); if (r->var >= 0) { printk("%s = ", snd_pcm_hw_param_names[r->var]); if (hw_is_mask(r->var)) { m = hw_param_mask(params, r->var); printk("%x", *m->bits); } else { i = hw_param_interval(params, r->var); if (i->empty) printk("empty"); else printk("%c%u %u%c", i->openmin ? '(' : '[', i->min, i->max, i->openmax ? ')' : ']'); } } #endif changed = r->func(params, r); #ifdef RULES_DEBUG if (r->var >= 0) { printk(" -> "); if (hw_is_mask(r->var)) printk("%x", *m->bits); else { if (i->empty) printk("empty"); else printk("%c%u %u%c", i->openmin ? '(' : '[', i->min, i->max, i->openmax ? ')' : ']'); } } printk("\n"); #endif rstamps[k] = stamp; if (changed && r->var >= 0) { params->cmask |= (1 << r->var); vstamps[r->var] = stamp; again = 1; } if (changed < 0) { //htc audio ++ /* rules without a variable have r->var == -1; guard the name lookup */ if (r->var >= 0) pr_info("refine rule %s fail\n", snd_pcm_hw_param_names[r->var]); if (hw_is_mask(r->var)) { m = hw_param_mask(params, r->var); pr_info("fail rule mask %x\n", *m->bits); } else if (r->var >= SNDRV_PCM_HW_PARAM_FIRST_INTERVAL && r->var <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL) { i = hw_param_interval(params, r->var); if (i->empty) pr_info("empty\n"); else pr_info("fail rule max %u min %u\n", i->max, i->min); } //htc audio -- return changed; } stamp++; } } while (again); if (!params->msbits) { i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS); if (snd_interval_single(i)) params->msbits = snd_interval_value(i); } if (!params->rate_den) { i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); if (snd_interval_single(i)) { params->rate_num = snd_interval_value(i); params->rate_den = 1; } } hw = &substream->runtime->hw; if (!params->info) params->info = hw->info & ~SNDRV_PCM_INFO_FIFO_IN_FRAMES; if (!params->fifo_size) { m = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); if (snd_mask_min(m) == snd_mask_max(m) && snd_interval_min(i) == snd_interval_max(i)) { changed = substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_FIFO_SIZE, params); if (changed < 0) return changed; } } params->rmask = 0; return 0; } EXPORT_SYMBOL(snd_pcm_hw_refine); static int snd_pcm_hw_refine_user(struct snd_pcm_substream *substream, struct snd_pcm_hw_params __user * _params) { struct snd_pcm_hw_params *params; int err; pr_info("%s: ++\n", __func__); params = memdup_user(_params, sizeof(*params)); if (IS_ERR(params)) return PTR_ERR(params); err = snd_pcm_hw_refine(substream, params); if (copy_to_user(_params, params, sizeof(*params))) { if (!err) err = -EFAULT; } pr_info("%s: --\n", __func__); kfree(params); return err; } static int period_to_usecs(struct snd_pcm_runtime *runtime) { int usecs; if (!
runtime->rate) return -1; /* invalid */ /* take 75% of period time as the deadline */ usecs = (750000 / runtime->rate) * runtime->period_size; usecs += ((750000 % runtime->rate) * runtime->period_size) / runtime->rate; return usecs; } static int snd_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_pcm_runtime *runtime; int err, usecs; unsigned int bits; snd_pcm_uframes_t frames; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; pr_info("%s ++", __func__); snd_pcm_stream_lock_irq(substream); switch (runtime->status->state) { case SNDRV_PCM_STATE_OPEN: case SNDRV_PCM_STATE_SETUP: case SNDRV_PCM_STATE_PREPARED: break; default: pr_info("%s --", __func__); snd_pcm_stream_unlock_irq(substream); return -EBADFD; } pr_info("%s --", __func__); snd_pcm_stream_unlock_irq(substream); #if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE) if (!substream->oss.oss) #endif if (atomic_read(&substream->mmap_count)) return -EBADFD; params->rmask = ~0U; err = snd_pcm_hw_refine(substream, params); if (err < 0) goto _error; err = snd_pcm_hw_params_choose(substream, params); if (err < 0) goto _error; if (substream->ops->hw_params != NULL) { err = substream->ops->hw_params(substream, params); if (err < 0) goto _error; } runtime->access = params_access(params); runtime->format = params_format(params); runtime->subformat = params_subformat(params); runtime->channels = params_channels(params); runtime->rate = params_rate(params); runtime->period_size = params_period_size(params); runtime->periods = params_periods(params); runtime->buffer_size = params_buffer_size(params); runtime->info = params->info; runtime->rate_num = params->rate_num; runtime->rate_den = params->rate_den; runtime->no_period_wakeup = (params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) && (params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP); bits = snd_pcm_format_physical_width(runtime->format); runtime->sample_bits = bits; bits *= runtime->channels; runtime->frame_bits = bits; frames = 1; while (bits % 8 != 0) { bits *= 2; frames *= 2; } runtime->byte_align = bits / 8; runtime->min_align = frames; /* Default sw params */ runtime->tstamp_mode = SNDRV_PCM_TSTAMP_NONE; runtime->period_step = 1; runtime->control->avail_min = runtime->period_size; runtime->start_threshold = 1; runtime->stop_threshold = runtime->buffer_size; runtime->silence_threshold = 0; runtime->silence_size = 0; runtime->boundary = runtime->buffer_size; while (runtime->boundary * 2 * runtime->channels <= LONG_MAX - runtime->buffer_size) runtime->boundary *= 2; snd_pcm_timer_resolution_change(substream); runtime->status->state = SNDRV_PCM_STATE_SETUP; if (pm_qos_request_active(&substream->latency_pm_qos_req)) pm_qos_remove_request(&substream->latency_pm_qos_req); if ((usecs = period_to_usecs(runtime)) >= 0) pm_qos_add_request(&substream->latency_pm_qos_req, PM_QOS_CPU_DMA_LATENCY, usecs); return 0; _error: /* hardware might be unusable from this time, so we force application to retry to set the correct hardware parameter settings */ runtime->status->state = SNDRV_PCM_STATE_OPEN; if (substream->ops->hw_free != NULL) substream->ops->hw_free(substream); return err; } static int snd_pcm_hw_params_user(struct snd_pcm_substream *substream, struct snd_pcm_hw_params __user * _params) { struct snd_pcm_hw_params *params; int err; pr_info("%s ++\n",__func__); params = memdup_user(_params, sizeof(*params)); if (IS_ERR(params)) return PTR_ERR(params); err = snd_pcm_hw_params(substream, params); if 
(copy_to_user(_params, params, sizeof(*params))) { if (!err) err = -EFAULT; } pr_info("%s --\n",__func__); kfree(params); return err; } static int snd_pcm_hw_free(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime; int result = 0; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; pr_info("%s ++", __func__); snd_pcm_stream_lock_irq(substream); switch (runtime->status->state) { case SNDRV_PCM_STATE_SETUP: case SNDRV_PCM_STATE_PREPARED: break; default: pr_info("%s --", __func__); snd_pcm_stream_unlock_irq(substream); return -EBADFD; } pr_info("%s --", __func__); snd_pcm_stream_unlock_irq(substream); if (atomic_read(&substream->mmap_count)) return -EBADFD; if (substream->ops->hw_free) result = substream->ops->hw_free(substream); runtime->status->state = SNDRV_PCM_STATE_OPEN; pm_qos_remove_request(&substream->latency_pm_qos_req); return result; } static int snd_pcm_sw_params(struct snd_pcm_substream *substream, struct snd_pcm_sw_params *params) { struct snd_pcm_runtime *runtime; int err; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; pr_info("%s ++", __func__); snd_pcm_stream_lock_irq(substream); if (runtime->status->state == SNDRV_PCM_STATE_OPEN) { pr_info("%s --", __func__); snd_pcm_stream_unlock_irq(substream); return -EBADFD; } pr_info("%s --", __func__); snd_pcm_stream_unlock_irq(substream); if (params->tstamp_mode > SNDRV_PCM_TSTAMP_LAST) return -EINVAL; if (params->avail_min == 0) return -EINVAL; if (params->silence_size >= runtime->boundary) { if (params->silence_threshold != 0) return -EINVAL; } else { if (params->silence_size > params->silence_threshold) return -EINVAL; if (params->silence_threshold > runtime->buffer_size) return -EINVAL; } err = 0; pr_info("%s +++", __func__); snd_pcm_stream_lock_irq(substream); runtime->tstamp_mode = params->tstamp_mode; runtime->period_step = params->period_step; runtime->control->avail_min = params->avail_min; runtime->start_threshold = params->start_threshold; runtime->stop_threshold = params->stop_threshold; runtime->silence_threshold = params->silence_threshold; runtime->silence_size = params->silence_size; params->boundary = runtime->boundary; if (snd_pcm_running(substream)) { if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && runtime->silence_size > 0) snd_pcm_playback_silence(substream, ULONG_MAX); err = snd_pcm_update_state(substream, runtime); } pr_info("%s ---", __func__); snd_pcm_stream_unlock_irq(substream); return err; } static int snd_pcm_sw_params_user(struct snd_pcm_substream *substream, struct snd_pcm_sw_params __user * _params) { struct snd_pcm_sw_params params; int err; if (copy_from_user(&params, _params, sizeof(params))) return -EFAULT; err = snd_pcm_sw_params(substream, &params); if (copy_to_user(_params, &params, sizeof(params))) return -EFAULT; return err; } int snd_pcm_status(struct snd_pcm_substream *substream, struct snd_pcm_status *status) { struct snd_pcm_runtime *runtime = substream->runtime; pr_info("%s ++", __func__); snd_pcm_stream_lock_irq(substream); status->state = runtime->status->state; status->suspended_state = runtime->status->suspended_state; if (status->state == SNDRV_PCM_STATE_OPEN) goto _end; status->trigger_tstamp = runtime->trigger_tstamp; if (snd_pcm_running(substream)) { snd_pcm_update_hw_ptr(substream); if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) { status->tstamp = runtime->status->tstamp; goto _tstamp_end; } } snd_pcm_gettime(runtime, &status->tstamp); _tstamp_end: status->appl_ptr = 
runtime->control->appl_ptr; status->hw_ptr = runtime->status->hw_ptr; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { status->avail = snd_pcm_playback_avail(runtime); if (runtime->status->state == SNDRV_PCM_STATE_RUNNING || runtime->status->state == SNDRV_PCM_STATE_DRAINING) { status->delay = runtime->buffer_size - status->avail; status->delay += runtime->delay; } else status->delay = 0; } else { status->avail = snd_pcm_capture_avail(runtime); if (runtime->status->state == SNDRV_PCM_STATE_RUNNING) status->delay = status->avail + runtime->delay; else status->delay = 0; } status->avail_max = runtime->avail_max; status->overrange = runtime->overrange; runtime->avail_max = 0; runtime->overrange = 0; _end: pr_info("%s --", __func__); snd_pcm_stream_unlock_irq(substream); return 0; } static int snd_pcm_status_user(struct snd_pcm_substream *substream, struct snd_pcm_status __user * _status) { struct snd_pcm_status status; int res; memset(&status, 0, sizeof(status)); res = snd_pcm_status(substream, &status); if (res < 0) return res; if (copy_to_user(_status, &status, sizeof(status))) return -EFAULT; return 0; } static int snd_pcm_channel_info(struct snd_pcm_substream *substream, struct snd_pcm_channel_info * info) { struct snd_pcm_runtime *runtime; unsigned int channel; channel = info->channel; runtime = substream->runtime; pr_info("%s ++", __func__); snd_pcm_stream_lock_irq(substream); if (runtime->status->state == SNDRV_PCM_STATE_OPEN) { pr_info("%s --", __func__); snd_pcm_stream_unlock_irq(substream); return -EBADFD; } pr_info("%s --", __func__); snd_pcm_stream_unlock_irq(substream); if (channel >= runtime->channels) return -EINVAL; memset(info, 0, sizeof(*info)); info->channel = channel; return substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_CHANNEL_INFO, info); } static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream, struct snd_pcm_channel_info __user * _info) { struct snd_pcm_channel_info info; int res; if (copy_from_user(&info, _info, sizeof(info))) return -EFAULT; res = snd_pcm_channel_info(substream, &info); if (res < 0) return res; if (copy_to_user(_info, &info, sizeof(info))) return -EFAULT; return 0; } static void snd_pcm_trigger_tstamp(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; if (runtime->trigger_master == NULL) return; if (runtime->trigger_master == substream) { snd_pcm_gettime(runtime, &runtime->trigger_tstamp); } else { snd_pcm_trigger_tstamp(runtime->trigger_master); runtime->trigger_tstamp = runtime->trigger_master->runtime->trigger_tstamp; } runtime->trigger_master = NULL; } struct action_ops { int (*pre_action)(struct snd_pcm_substream *substream, int state); int (*do_action)(struct snd_pcm_substream *substream, int state); void (*undo_action)(struct snd_pcm_substream *substream, int state); void (*post_action)(struct snd_pcm_substream *substream, int state); }; /* * this function is the core for handling of linked streams * Note: the stream state might be changed also on failure * Note2: call with the calling stream lock + the link lock held */ static int snd_pcm_action_group(struct action_ops *ops, struct snd_pcm_substream *substream, int state, int do_lock) { struct snd_pcm_substream *s = NULL; struct snd_pcm_substream *s1; int res = 0; snd_pcm_group_for_each_entry(s, substream) { if (do_lock && s != substream) spin_lock_nested(&s->self_group.lock, SINGLE_DEPTH_NESTING); res = ops->pre_action(s, state); if (res < 0) goto _unlock; } snd_pcm_group_for_each_entry(s, substream) { res = ops->do_action(s, state); if
(res < 0) { if (ops->undo_action) { snd_pcm_group_for_each_entry(s1, substream) { if (s1 == s) /* failed stream */ break; ops->undo_action(s1, state); } } s = NULL; /* unlock all */ goto _unlock; } } snd_pcm_group_for_each_entry(s, substream) { ops->post_action(s, state); } _unlock: if (do_lock) { /* unlock streams */ snd_pcm_group_for_each_entry(s1, substream) { if (s1 != substream) spin_unlock(&s1->self_group.lock); if (s1 == s) /* end */ break; } } return res; } /* * Note: call with stream lock */ static int snd_pcm_action_single(struct action_ops *ops, struct snd_pcm_substream *substream, int state) { int res; res = ops->pre_action(substream, state); if (res < 0) return res; res = ops->do_action(substream, state); if (res == 0) ops->post_action(substream, state); else if (ops->undo_action) ops->undo_action(substream, state); return res; } /* * Note: call with stream lock */ static int snd_pcm_action(struct action_ops *ops, struct snd_pcm_substream *substream, int state) { int res; if (snd_pcm_stream_linked(substream)) { if (!spin_trylock(&substream->group->lock)) { spin_unlock(&substream->self_group.lock); spin_lock(&substream->group->lock); spin_lock(&substream->self_group.lock); } res = snd_pcm_action_group(ops, substream, state, 1); spin_unlock(&substream->group->lock); } else { res = snd_pcm_action_single(ops, substream, state); } return res; } /* * Note: don't use any locks before */ static int snd_pcm_action_lock_irq(struct action_ops *ops, struct snd_pcm_substream *substream, int state) { int res; read_lock_irq(&snd_pcm_link_rwlock); if (snd_pcm_stream_linked(substream)) { spin_lock(&substream->group->lock); spin_lock(&substream->self_group.lock); res = snd_pcm_action_group(ops, substream, state, 1); spin_unlock(&substream->self_group.lock); spin_unlock(&substream->group->lock); } else { spin_lock(&substream->self_group.lock); res = snd_pcm_action_single(ops, substream, state); spin_unlock(&substream->self_group.lock); } read_unlock_irq(&snd_pcm_link_rwlock); return res; } /* */ static int snd_pcm_action_nonatomic(struct action_ops *ops, struct snd_pcm_substream *substream, int state) { int res; down_read(&snd_pcm_link_rwsem); if (snd_pcm_stream_linked(substream)) res = snd_pcm_action_group(ops, substream, state, 0); else res = snd_pcm_action_single(ops, substream, state); up_read(&snd_pcm_link_rwsem); return res; } /* * start callbacks */ static int snd_pcm_pre_start(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; if (runtime->status->state != SNDRV_PCM_STATE_PREPARED) return -EBADFD; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && !substream->hw_no_buffer && !snd_pcm_playback_data(substream)) return -EPIPE; runtime->trigger_master = substream; return 0; } static int snd_pcm_do_start(struct snd_pcm_substream *substream, int state) { if (substream->runtime->trigger_master != substream) return 0; return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START); } static void snd_pcm_undo_start(struct snd_pcm_substream *substream, int state) { if (substream->runtime->trigger_master == substream) substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP); } static void snd_pcm_post_start(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_trigger_tstamp(substream); runtime->hw_ptr_jiffies = jiffies; runtime->hw_ptr_buffer_jiffies = (runtime->buffer_size * HZ) / runtime->rate; runtime->status->state = state; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && 
runtime->silence_size > 0) snd_pcm_playback_silence(substream, ULONG_MAX); if (substream->timer) snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MSTART, &runtime->trigger_tstamp); } static struct action_ops snd_pcm_action_start = { .pre_action = snd_pcm_pre_start, .do_action = snd_pcm_do_start, .undo_action = snd_pcm_undo_start, .post_action = snd_pcm_post_start }; /** * snd_pcm_start - start all linked streams * @substream: the PCM substream instance */ int snd_pcm_start(struct snd_pcm_substream *substream) { return snd_pcm_action(&snd_pcm_action_start, substream, SNDRV_PCM_STATE_RUNNING); } /* * stop callbacks */ static int snd_pcm_pre_stop(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; if (runtime->status->state == SNDRV_PCM_STATE_OPEN) return -EBADFD; runtime->trigger_master = substream; return 0; } static int snd_pcm_do_stop(struct snd_pcm_substream *substream, int state) { if (substream->runtime->trigger_master == substream && snd_pcm_running(substream)) substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP); return 0; /* unconditionally stop all substreams */ } static void snd_pcm_post_stop(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; if (runtime->status->state != state) { snd_pcm_trigger_tstamp(substream); if (substream->timer) snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MSTOP, &runtime->trigger_tstamp); runtime->status->state = state; } wake_up(&runtime->sleep); wake_up(&runtime->tsleep); } static struct action_ops snd_pcm_action_stop = { .pre_action = snd_pcm_pre_stop, .do_action = snd_pcm_do_stop, .post_action = snd_pcm_post_stop }; /** * snd_pcm_stop - try to stop all running streams in the substream group * @substream: the PCM substream instance * @state: PCM state after stopping the stream * * The state of each stream is then changed to the given state unconditionally. */ int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t state) { return snd_pcm_action(&snd_pcm_action_stop, substream, state); } EXPORT_SYMBOL(snd_pcm_stop); /** * snd_pcm_drain_done - stop the DMA only when the given stream is playback * @substream: the PCM substream * * After stopping, the state is changed to SETUP. * Unlike snd_pcm_stop(), this affects only the given stream. */ int snd_pcm_drain_done(struct snd_pcm_substream *substream) { return snd_pcm_action_single(&snd_pcm_action_stop, substream, SNDRV_PCM_STATE_SETUP); } /* * pause callbacks */ static int snd_pcm_pre_pause(struct snd_pcm_substream *substream, int push) { struct snd_pcm_runtime *runtime = substream->runtime; if (!(runtime->info & SNDRV_PCM_INFO_PAUSE)) return -ENOSYS; if (push) { if (runtime->status->state != SNDRV_PCM_STATE_RUNNING) return -EBADFD; } else if (runtime->status->state != SNDRV_PCM_STATE_PAUSED) return -EBADFD; runtime->trigger_master = substream; return 0; } static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push) { if (substream->runtime->trigger_master != substream) return 0; /* some drivers might use hw_ptr to recover from the pause - update the hw_ptr now */ if (push) snd_pcm_update_hw_ptr(substream); /* The jiffies check in snd_pcm_update_hw_ptr*() works on a delta * from the current jiffies; moving hw_ptr_jiffies far into the past * makes that delta large enough to effectively skip the check once. */ substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000; return substream->ops->trigger(substream, push ?
SNDRV_PCM_TRIGGER_PAUSE_PUSH : SNDRV_PCM_TRIGGER_PAUSE_RELEASE); } static void snd_pcm_undo_pause(struct snd_pcm_substream *substream, int push) { if (substream->runtime->trigger_master == substream) substream->ops->trigger(substream, push ? SNDRV_PCM_TRIGGER_PAUSE_RELEASE : SNDRV_PCM_TRIGGER_PAUSE_PUSH); } static void snd_pcm_post_pause(struct snd_pcm_substream *substream, int push) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_trigger_tstamp(substream); if (push) { runtime->status->state = SNDRV_PCM_STATE_PAUSED; if (substream->timer) snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MPAUSE, &runtime->trigger_tstamp); wake_up(&runtime->sleep); wake_up(&runtime->tsleep); } else { runtime->status->state = SNDRV_PCM_STATE_RUNNING; if (substream->timer) snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MCONTINUE, &runtime->trigger_tstamp); } } static struct action_ops snd_pcm_action_pause = { .pre_action = snd_pcm_pre_pause, .do_action = snd_pcm_do_pause, .undo_action = snd_pcm_undo_pause, .post_action = snd_pcm_post_pause }; /* * Push/release the pause for all linked streams. */ static int snd_pcm_pause(struct snd_pcm_substream *substream, int push) { return snd_pcm_action(&snd_pcm_action_pause, substream, push); } #ifdef CONFIG_PM /* suspend */ static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) return -EBUSY; runtime->trigger_master = substream; return 0; } static int snd_pcm_do_suspend(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; if (runtime->trigger_master != substream) return 0; if (! snd_pcm_running(substream)) return 0; substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND); return 0; /* suspend unconditionally */ } static void snd_pcm_post_suspend(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_trigger_tstamp(substream); if (substream->timer) snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MSUSPEND, &runtime->trigger_tstamp); runtime->status->suspended_state = runtime->status->state; runtime->status->state = SNDRV_PCM_STATE_SUSPENDED; wake_up(&runtime->sleep); wake_up(&runtime->tsleep); } static struct action_ops snd_pcm_action_suspend = { .pre_action = snd_pcm_pre_suspend, .do_action = snd_pcm_do_suspend, .post_action = snd_pcm_post_suspend }; /** * snd_pcm_suspend - trigger SUSPEND to all linked streams * @substream: the PCM substream * * After this call, all streams are changed to SUSPENDED state. */ int snd_pcm_suspend(struct snd_pcm_substream *substream) { int err; unsigned long flags; if (! substream) return 0; snd_pcm_stream_lock_irqsave(substream, flags); err = snd_pcm_action(&snd_pcm_action_suspend, substream, 0); snd_pcm_stream_unlock_irqrestore(substream, flags); return err; } EXPORT_SYMBOL(snd_pcm_suspend); /** * snd_pcm_suspend_all - trigger SUSPEND to all substreams in the given pcm * @pcm: the PCM instance * * After this call, all streams are changed to SUSPENDED state. */ int snd_pcm_suspend_all(struct snd_pcm *pcm) { struct snd_pcm_substream *substream; int stream, err = 0; if (! 
pcm) return 0; for (stream = 0; stream < 2; stream++) { for (substream = pcm->streams[stream].substream; substream; substream = substream->next) { /* FIXME: the open/close code should lock this as well */ if (substream->runtime == NULL) continue; err = snd_pcm_suspend(substream); if (err < 0 && err != -EBUSY) return err; } } return 0; } EXPORT_SYMBOL(snd_pcm_suspend_all); /* resume */ static int snd_pcm_pre_resume(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; if (!(runtime->info & SNDRV_PCM_INFO_RESUME)) return -ENOSYS; runtime->trigger_master = substream; return 0; } static int snd_pcm_do_resume(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; if (runtime->trigger_master != substream) return 0; /* DMA not running previously? */ if (runtime->status->suspended_state != SNDRV_PCM_STATE_RUNNING && (runtime->status->suspended_state != SNDRV_PCM_STATE_DRAINING || substream->stream != SNDRV_PCM_STREAM_PLAYBACK)) return 0; return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_RESUME); } static void snd_pcm_undo_resume(struct snd_pcm_substream *substream, int state) { if (substream->runtime->trigger_master == substream && snd_pcm_running(substream)) substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND); } static void snd_pcm_post_resume(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_trigger_tstamp(substream); if (substream->timer) snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MRESUME, &runtime->trigger_tstamp); runtime->status->state = runtime->status->suspended_state; } static struct action_ops snd_pcm_action_resume = { .pre_action = snd_pcm_pre_resume, .do_action = snd_pcm_do_resume, .undo_action = snd_pcm_undo_resume, .post_action = snd_pcm_post_resume }; static int snd_pcm_resume(struct snd_pcm_substream *substream) { struct snd_card *card = substream->pcm->card; int res; snd_power_lock(card); if ((res = snd_power_wait(card, SNDRV_CTL_POWER_D0)) >= 0) res = snd_pcm_action_lock_irq(&snd_pcm_action_resume, substream, 0); snd_power_unlock(card); return res; } #else static int snd_pcm_resume(struct snd_pcm_substream *substream) { return -ENOSYS; } #endif /* CONFIG_PM */ /* * xrun ioctl * * Change the RUNNING stream(s) to XRUN state. 
*/ static int snd_pcm_xrun(struct snd_pcm_substream *substream) { struct snd_card *card = substream->pcm->card; struct snd_pcm_runtime *runtime = substream->runtime; int result; snd_power_lock(card); if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) { result = snd_power_wait(card, SNDRV_CTL_POWER_D0); if (result < 0) goto _unlock; } pr_info("%s ++", __func__); snd_pcm_stream_lock_irq(substream); switch (runtime->status->state) { case SNDRV_PCM_STATE_XRUN: result = 0; /* already there */ break; case SNDRV_PCM_STATE_RUNNING: result = snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN); break; default: result = -EBADFD; } pr_info("%s --", __func__); snd_pcm_stream_unlock_irq(substream); _unlock: snd_power_unlock(card); return result; } /* * reset ioctl */ static int snd_pcm_pre_reset(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; switch (runtime->status->state) { case SNDRV_PCM_STATE_RUNNING: case SNDRV_PCM_STATE_PREPARED: case SNDRV_PCM_STATE_PAUSED: case SNDRV_PCM_STATE_SUSPENDED: return 0; default: return -EBADFD; } } static int snd_pcm_do_reset(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; int err = substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL); if (err < 0) return err; runtime->hw_ptr_base = 0; runtime->hw_ptr_interrupt = runtime->status->hw_ptr - runtime->status->hw_ptr % runtime->period_size; runtime->silence_start = runtime->status->hw_ptr; runtime->silence_filled = 0; return 0; } static void snd_pcm_post_reset(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; runtime->control->appl_ptr = runtime->status->hw_ptr; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && runtime->silence_size > 0) snd_pcm_playback_silence(substream, ULONG_MAX); } static struct action_ops snd_pcm_action_reset = { .pre_action = snd_pcm_pre_reset, .do_action = snd_pcm_do_reset, .post_action = snd_pcm_post_reset }; static int snd_pcm_reset(struct snd_pcm_substream *substream) { return snd_pcm_action_nonatomic(&snd_pcm_action_reset, substream, 0); } /* * prepare ioctl */ /* we use the second argument for updating f_flags */ static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream, int f_flags) { struct snd_pcm_runtime *runtime = substream->runtime; if (runtime->status->state == SNDRV_PCM_STATE_OPEN || runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED) return -EBADFD; if (snd_pcm_running(substream)) return -EBUSY; substream->f_flags = f_flags; return 0; } static int snd_pcm_do_prepare(struct snd_pcm_substream *substream, int state) { int err; err = substream->ops->prepare(substream); if (err < 0) return err; return snd_pcm_do_reset(substream, 0); } static void snd_pcm_post_prepare(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; runtime->control->appl_ptr = runtime->status->hw_ptr; runtime->status->state = SNDRV_PCM_STATE_PREPARED; } static struct action_ops snd_pcm_action_prepare = { .pre_action = snd_pcm_pre_prepare, .do_action = snd_pcm_do_prepare, .post_action = snd_pcm_post_prepare }; /** * snd_pcm_prepare - prepare the PCM substream to be triggerable * @substream: the PCM substream instance * @file: file to refer f_flags */ static int snd_pcm_prepare(struct snd_pcm_substream *substream, struct file *file) { int res; struct snd_card *card = substream->pcm->card; int f_flags; if (file) f_flags = file->f_flags; else f_flags = substream->f_flags; 
snd_power_lock(card); if ((res = snd_power_wait(card, SNDRV_CTL_POWER_D0)) >= 0) res = snd_pcm_action_nonatomic(&snd_pcm_action_prepare, substream, f_flags); snd_power_unlock(card); return res; } /* * drain ioctl */ static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream, int state) { substream->runtime->trigger_master = substream; return 0; } static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { switch (runtime->status->state) { case SNDRV_PCM_STATE_PREPARED: /* start playback stream if possible */ if (! snd_pcm_playback_empty(substream)) { snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING); snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING); } break; case SNDRV_PCM_STATE_RUNNING: runtime->status->state = SNDRV_PCM_STATE_DRAINING; break; default: break; } } else { /* stop running stream */ if (runtime->status->state == SNDRV_PCM_STATE_RUNNING) { int new_state = snd_pcm_capture_avail(runtime) > 0 ? SNDRV_PCM_STATE_DRAINING : SNDRV_PCM_STATE_SETUP; snd_pcm_do_stop(substream, new_state); snd_pcm_post_stop(substream, new_state); } } return 0; } static void snd_pcm_post_drain_init(struct snd_pcm_substream *substream, int state) { } static struct action_ops snd_pcm_action_drain_init = { .pre_action = snd_pcm_pre_drain_init, .do_action = snd_pcm_do_drain_init, .post_action = snd_pcm_post_drain_init }; static int snd_pcm_drop(struct snd_pcm_substream *substream); /* * Drain the stream(s). * When the substream is linked, sync until the draining of all playback streams * is finished. * After this call, all streams are supposed to be either SETUP or DRAINING * (capture only) state. */ static int snd_pcm_drain(struct snd_pcm_substream *substream, struct file *file) { struct snd_card *card; struct snd_pcm_runtime *runtime; struct snd_pcm_substream *s; wait_queue_t wait; int result = 0; int nonblock = 0; card = substream->pcm->card; runtime = substream->runtime; if (runtime->status->state == SNDRV_PCM_STATE_OPEN) return -EBADFD; snd_power_lock(card); if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) { result = snd_power_wait(card, SNDRV_CTL_POWER_D0); if (result < 0) { snd_power_unlock(card); return result; } } if (file) { if (file->f_flags & O_NONBLOCK) nonblock = 1; } else if (substream->f_flags & O_NONBLOCK) nonblock = 1; down_read(&snd_pcm_link_rwsem); pr_info("%s ++", __func__); snd_pcm_stream_lock_irq(substream); /* resume pause */ if (runtime->status->state == SNDRV_PCM_STATE_PAUSED) snd_pcm_pause(substream, 0); /* pre-start/stop - all running streams are changed to DRAINING state */ result = snd_pcm_action(&snd_pcm_action_drain_init, substream, 0); if (result < 0) goto unlock; /* in non-blocking, we don't wait in ioctl but let caller poll */ if (nonblock) { result = -EAGAIN; goto unlock; } for (;;) { long tout; struct snd_pcm_runtime *to_check; if (signal_pending(current)) { result = -ERESTARTSYS; break; } /* find a substream to drain */ to_check = NULL; snd_pcm_group_for_each_entry(s, substream) { if (s->stream != SNDRV_PCM_STREAM_PLAYBACK) continue; runtime = s->runtime; if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) { to_check = runtime; break; } } if (!to_check) break; /* all drained */ init_waitqueue_entry(&wait, current); add_wait_queue(&to_check->sleep, &wait); pr_info("%s --", __func__); snd_pcm_stream_unlock_irq(substream); up_read(&snd_pcm_link_rwsem); snd_power_unlock(card); if (runtime->no_period_wakeup) tout = 
MAX_SCHEDULE_TIMEOUT; else { tout = 10; if (runtime->rate) { long t = runtime->period_size * 2 / runtime->rate; tout = max(t, tout); } tout = msecs_to_jiffies(tout * 1000); } tout = schedule_timeout_interruptible(tout); snd_power_lock(card); down_read(&snd_pcm_link_rwsem); pr_info("%s ++", __func__); snd_pcm_stream_lock_irq(substream); remove_wait_queue(&to_check->sleep, &wait); if (tout == 0) { if (substream->runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) result = -ESTRPIPE; else { snd_printd("playback drain error (DMA or IRQ trouble?)\n"); snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP); result = -EIO; } break; } } unlock: pr_info("%s --", __func__); snd_pcm_stream_unlock_irq(substream); up_read(&snd_pcm_link_rwsem); snd_power_unlock(card); return result; } static int snd_compressed_ioctl(struct snd_pcm_substream *substream, unsigned int cmd, void __user *arg) { struct snd_pcm_runtime *runtime; int err = 0; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; pr_debug("%s called with cmd = %d\n", __func__, cmd); err = substream->ops->ioctl(substream, cmd, arg); return err; } static int snd_user_ioctl(struct snd_pcm_substream *substream, unsigned int cmd, void __user *arg) { struct snd_pcm_runtime *runtime; int err = 0; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; err = substream->ops->ioctl(substream, cmd, arg); return err; } /* * drop ioctl * * Immediately put all linked substreams into SETUP state. */ static int snd_pcm_drop(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime; int result = 0; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; if (runtime->status->state == SNDRV_PCM_STATE_OPEN || runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED || runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) return -EBADFD; pr_info("%s ++", __func__); snd_pcm_stream_lock_irq(substream); /* resume pause */ if (runtime->status->state == SNDRV_PCM_STATE_PAUSED) snd_pcm_pause(substream, 0); snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP); /* runtime->control->appl_ptr = runtime->status->hw_ptr; */ pr_info("%s --", __func__); snd_pcm_stream_unlock_irq(substream); return result; } /* WARNING: Don't forget to fput back the file */ static struct file *snd_pcm_file_fd(int fd) { struct file *file; struct inode *inode; unsigned int minor; file = fget(fd); if (!file) return NULL; inode = file_inode(file); if (!S_ISCHR(inode->i_mode) || imajor(inode) != snd_major) { fput(file); return NULL; } minor = iminor(inode); if (!snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_PLAYBACK) && !snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_CAPTURE)) { fput(file); return NULL; } return file; } /* * PCM link handling */ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) { int res = 0; struct file *file; struct snd_pcm_file *pcm_file; struct snd_pcm_substream *substream1; struct snd_pcm_group *group; file = snd_pcm_file_fd(fd); if (!file) return -EBADFD; pcm_file = file->private_data; substream1 = pcm_file->substream; group = kmalloc(sizeof(*group), GFP_KERNEL); if (!group) { res = -ENOMEM; goto _nolock; } down_write(&snd_pcm_link_rwsem); write_lock_irq(&snd_pcm_link_rwlock); if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN || substream->runtime->status->state != substream1->runtime->status->state) { res = -EBADFD; goto _end; } if (snd_pcm_stream_linked(substream1)) { res = -EALREADY; goto _end; } if (!snd_pcm_stream_linked(substream)) { substream->group = group; 
spin_lock_init(&substream->group->lock); INIT_LIST_HEAD(&substream->group->substreams); list_add_tail(&substream->link_list, &substream->group->substreams); substream->group->count = 1; } list_add_tail(&substream1->link_list, &substream->group->substreams); substream->group->count++; substream1->group = substream->group; _end: write_unlock_irq(&snd_pcm_link_rwlock); up_write(&snd_pcm_link_rwsem); _nolock: fput(file); if (res < 0) kfree(group); return res; } static void relink_to_local(struct snd_pcm_substream *substream) { substream->group = &substream->self_group; INIT_LIST_HEAD(&substream->self_group.substreams); list_add_tail(&substream->link_list, &substream->self_group.substreams); } static int snd_pcm_unlink(struct snd_pcm_substream *substream) { struct snd_pcm_substream *s; int res = 0; down_write(&snd_pcm_link_rwsem); write_lock_irq(&snd_pcm_link_rwlock); if (!snd_pcm_stream_linked(substream)) { res = -EALREADY; goto _end; } list_del(&substream->link_list); substream->group->count--; if (substream->group->count == 1) { /* detach the last stream, too */ snd_pcm_group_for_each_entry(s, substream) { relink_to_local(s); break; } kfree(substream->group); } relink_to_local(substream); _end: write_unlock_irq(&snd_pcm_link_rwlock); up_write(&snd_pcm_link_rwsem); return res; } /* * hw configurator */ static int snd_pcm_hw_rule_mul(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_interval t; snd_interval_mul(hw_param_interval_c(params, rule->deps[0]), hw_param_interval_c(params, rule->deps[1]), &t); return snd_interval_refine(hw_param_interval(params, rule->var), &t); } static int snd_pcm_hw_rule_div(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_interval t; snd_interval_div(hw_param_interval_c(params, rule->deps[0]), hw_param_interval_c(params, rule->deps[1]), &t); return snd_interval_refine(hw_param_interval(params, rule->var), &t); } static int snd_pcm_hw_rule_muldivk(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_interval t; snd_interval_muldivk(hw_param_interval_c(params, rule->deps[0]), hw_param_interval_c(params, rule->deps[1]), (unsigned long) rule->private, &t); return snd_interval_refine(hw_param_interval(params, rule->var), &t); } static int snd_pcm_hw_rule_mulkdiv(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_interval t; snd_interval_mulkdiv(hw_param_interval_c(params, rule->deps[0]), (unsigned long) rule->private, hw_param_interval_c(params, rule->deps[1]), &t); return snd_interval_refine(hw_param_interval(params, rule->var), &t); } static int snd_pcm_hw_rule_format(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { unsigned int k; struct snd_interval *i = hw_param_interval(params, rule->deps[0]); struct snd_mask m; struct snd_mask *mask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); snd_mask_any(&m); for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) { int bits; if (! snd_mask_test(mask, k)) continue; bits = snd_pcm_format_physical_width(k); if (bits <= 0) continue; /* ignore invalid formats */ if ((unsigned)bits < i->min || (unsigned)bits > i->max) snd_mask_reset(&m, k); } return snd_mask_refine(mask, &m); } static int snd_pcm_hw_rule_sample_bits(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_interval t; unsigned int k; t.min = UINT_MAX; t.max = 0; t.openmin = 0; t.openmax = 0; for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) { int bits; if (! 
snd_mask_test(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), k)) continue; bits = snd_pcm_format_physical_width(k); if (bits <= 0) continue; /* ignore invalid formats */ if (t.min > (unsigned)bits) t.min = bits; if (t.max < (unsigned)bits) t.max = bits; } t.integer = 1; return snd_interval_refine(hw_param_interval(params, rule->var), &t); } #if SNDRV_PCM_RATE_5512 != 1 << 0 || SNDRV_PCM_RATE_192000 != 1 << 12 #error "Change this table" #endif static unsigned int rates[] = { 5512, 8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000, 88200, 96000, 176400, 192000 }; const struct snd_pcm_hw_constraint_list snd_pcm_known_rates = { .count = ARRAY_SIZE(rates), .list = rates, }; static int snd_pcm_hw_rule_rate(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_pcm_hardware *hw = rule->private; return snd_interval_list(hw_param_interval(params, rule->var), snd_pcm_known_rates.count, snd_pcm_known_rates.list, hw->rates); } static int snd_pcm_hw_rule_buffer_bytes_max(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_interval t; struct snd_pcm_substream *substream = rule->private; t.min = 0; t.max = substream->buffer_bytes_max; t.openmin = 0; t.openmax = 0; t.integer = 1; return snd_interval_refine(hw_param_interval(params, rule->var), &t); } int snd_pcm_hw_constraints_init(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints; int k, err; for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) { snd_mask_any(constrs_mask(constrs, k)); } for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) { snd_interval_any(constrs_interval(constrs, k)); } snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_CHANNELS)); snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_SIZE)); snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_BYTES)); snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_SAMPLE_BITS)); snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_FRAME_BITS)); err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT, snd_pcm_hw_rule_format, NULL, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1); if (err < 0) return err; err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, snd_pcm_hw_rule_sample_bits, NULL, SNDRV_PCM_HW_PARAM_FORMAT, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1); if (err < 0) return err; err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, snd_pcm_hw_rule_div, NULL, SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1); if (err < 0) return err; err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS, snd_pcm_hw_rule_mul, NULL, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1); if (err < 0) return err; err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS, snd_pcm_hw_rule_mulkdiv, (void*) 8, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1); if (err < 0) return err; err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS, snd_pcm_hw_rule_mulkdiv, (void*) 8, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, -1); if (err < 0) return err; err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS, snd_pcm_hw_rule_div, NULL, SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1); if (err < 0) return err; err = snd_pcm_hw_rule_add(runtime, 0, 
SNDRV_PCM_HW_PARAM_RATE, snd_pcm_hw_rule_mulkdiv, (void*) 1000000, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_TIME, -1); if (err < 0) return err; err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, snd_pcm_hw_rule_mulkdiv, (void*) 1000000, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_BUFFER_TIME, -1); if (err < 0) return err; err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS, snd_pcm_hw_rule_div, NULL, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1); if (err < 0) return err; err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, snd_pcm_hw_rule_div, NULL, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1); if (err < 0) return err; err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, snd_pcm_hw_rule_mulkdiv, (void*) 8, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1); if (err < 0) return err; err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, snd_pcm_hw_rule_muldivk, (void*) 1000000, SNDRV_PCM_HW_PARAM_PERIOD_TIME, SNDRV_PCM_HW_PARAM_RATE, -1); if (err < 0) return err; err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, snd_pcm_hw_rule_mul, NULL, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1); if (err < 0) return err; err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, snd_pcm_hw_rule_mulkdiv, (void*) 8, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1); if (err < 0) return err; err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, snd_pcm_hw_rule_muldivk, (void*) 1000000, SNDRV_PCM_HW_PARAM_BUFFER_TIME, SNDRV_PCM_HW_PARAM_RATE, -1); if (err < 0) return err; err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, snd_pcm_hw_rule_muldivk, (void*) 8, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1); if (err < 0) return err; err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, snd_pcm_hw_rule_muldivk, (void*) 8, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1); if (err < 0) return err; err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_TIME, snd_pcm_hw_rule_mulkdiv, (void*) 1000000, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1); if (err < 0) return err; err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_TIME, snd_pcm_hw_rule_mulkdiv, (void*) 1000000, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1); if (err < 0) return err; return 0; } int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_pcm_hardware *hw = &runtime->hw; int err; unsigned int mask = 0; if (hw->info & SNDRV_PCM_INFO_INTERLEAVED) mask |= 1 << SNDRV_PCM_ACCESS_RW_INTERLEAVED; if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED) mask |= 1 << SNDRV_PCM_ACCESS_RW_NONINTERLEAVED; if (hw->info & SNDRV_PCM_INFO_MMAP) { if (hw->info & SNDRV_PCM_INFO_INTERLEAVED) mask |= 1 << SNDRV_PCM_ACCESS_MMAP_INTERLEAVED; if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED) mask |= 1 << SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED; if (hw->info & SNDRV_PCM_INFO_COMPLEX) mask |= 1 << SNDRV_PCM_ACCESS_MMAP_COMPLEX; } err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_ACCESS, mask); if (err < 0) return err; err = snd_pcm_hw_constraint_mask64(runtime, SNDRV_PCM_HW_PARAM_FORMAT, hw->formats); if (err < 0) return err; err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_SUBFORMAT, 1 << SNDRV_PCM_SUBFORMAT_STD); if (err < 
0) return err; err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_CHANNELS, hw->channels_min, hw->channels_max); if (err < 0) return err; err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE, hw->rate_min, hw->rate_max); if (err < 0) return err; err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, hw->period_bytes_min, hw->period_bytes_max); if (err < 0) return err; err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIODS, hw->periods_min, hw->periods_max); if (err < 0) return err; err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, hw->period_bytes_min, hw->buffer_bytes_max); if (err < 0) return err; err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, snd_pcm_hw_rule_buffer_bytes_max, substream, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, -1); if (err < 0) return err; /* FIXME: remove */ if (runtime->dma_bytes) { err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 0, runtime->dma_bytes); if (err < 0) return -EINVAL; } if (!(hw->rates & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS))) { err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, snd_pcm_hw_rule_rate, hw, SNDRV_PCM_HW_PARAM_RATE, -1); if (err < 0) return err; } /* FIXME: this belongs to the lowlevel driver */ snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE); return 0; } static void pcm_release_private(struct snd_pcm_substream *substream) { snd_pcm_unlink(substream); } void snd_pcm_release_substream(struct snd_pcm_substream *substream) { substream->ref_count--; if (substream->ref_count > 0) return; snd_pcm_drop(substream); if (substream->hw_opened) { if (substream->ops->hw_free != NULL) substream->ops->hw_free(substream); substream->ops->close(substream); substream->hw_opened = 0; } if (pm_qos_request_active(&substream->latency_pm_qos_req)) pm_qos_remove_request(&substream->latency_pm_qos_req); if (substream->pcm_release) { substream->pcm_release(substream); substream->pcm_release = NULL; } snd_pcm_detach_substream(substream); } EXPORT_SYMBOL(snd_pcm_release_substream); int snd_pcm_open_substream(struct snd_pcm *pcm, int stream, struct file *file, struct snd_pcm_substream **rsubstream) { struct snd_pcm_substream *substream; int err; err = snd_pcm_attach_substream(pcm, stream, file, &substream); if (err < 0) return err; if (substream->ref_count > 1) { *rsubstream = substream; return 0; } err = snd_pcm_hw_constraints_init(substream); if (err < 0) { snd_printd("snd_pcm_hw_constraints_init failed\n"); goto error; } if (substream->ops == NULL) { snd_printd("cannot open back end PCMs directly\n"); err = -ENODEV; goto error; } if ((err = substream->ops->open(substream)) < 0) goto error; substream->hw_opened = 1; err = snd_pcm_hw_constraints_complete(substream); if (err < 0) { snd_printd("snd_pcm_hw_constraints_complete failed\n"); goto error; } *rsubstream = substream; return 0; error: snd_pcm_release_substream(substream); return err; } EXPORT_SYMBOL(snd_pcm_open_substream); static int snd_pcm_open_file(struct file *file, struct snd_pcm *pcm, int stream) { struct snd_pcm_file *pcm_file; struct snd_pcm_substream *substream; int err; err = snd_pcm_open_substream(pcm, stream, file, &substream); if (err < 0) return err; pcm_file = kzalloc(sizeof(*pcm_file), GFP_KERNEL); if (pcm_file == NULL) { snd_pcm_release_substream(substream); return -ENOMEM; } pcm_file->substream = substream; if (substream->ref_count == 1) { substream->file = pcm_file; substream->pcm_release = pcm_release_private; }
file->private_data = pcm_file; return 0; } static int snd_pcm_playback_open(struct inode *inode, struct file *file) { struct snd_pcm *pcm; int err = nonseekable_open(inode, file); if (err < 0) return err; pcm = snd_lookup_minor_data(iminor(inode), SNDRV_DEVICE_TYPE_PCM_PLAYBACK); return snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK); } static int snd_pcm_capture_open(struct inode *inode, struct file *file) { struct snd_pcm *pcm; int err = nonseekable_open(inode, file); if (err < 0) return err; pcm = snd_lookup_minor_data(iminor(inode), SNDRV_DEVICE_TYPE_PCM_CAPTURE); return snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE); } static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream) { int err; wait_queue_t wait; if (pcm == NULL) { err = -ENODEV; goto __error1; } err = snd_card_file_add(pcm->card, file); if (err < 0) goto __error1; if (!try_module_get(pcm->card->module)) { err = -EFAULT; goto __error2; } init_waitqueue_entry(&wait, current); add_wait_queue(&pcm->open_wait, &wait); mutex_lock(&pcm->open_mutex); while (1) { err = snd_pcm_open_file(file, pcm, stream); if (err >= 0) break; if (err == -EAGAIN) { if (file->f_flags & O_NONBLOCK) { err = -EBUSY; break; } } else break; set_current_state(TASK_INTERRUPTIBLE); mutex_unlock(&pcm->open_mutex); schedule(); mutex_lock(&pcm->open_mutex); if (signal_pending(current)) { err = -ERESTARTSYS; break; } } remove_wait_queue(&pcm->open_wait, &wait); mutex_unlock(&pcm->open_mutex); if (err < 0) goto __error; return err; __error: module_put(pcm->card->module); __error2: snd_card_file_remove(pcm->card, file); __error1: return err; } static int snd_pcm_release(struct inode *inode, struct file *file) { struct snd_pcm *pcm; struct snd_pcm_substream *substream; struct snd_pcm_file *pcm_file; pcm_file = file->private_data; substream = pcm_file->substream; if (snd_BUG_ON(!substream)) return -ENXIO; pcm = substream->pcm; mutex_lock(&pcm->open_mutex); snd_pcm_release_substream(substream); kfree(pcm_file); mutex_unlock(&pcm->open_mutex); wake_up(&pcm->open_wait); module_put(pcm->card->module); snd_card_file_remove(pcm->card, file); return 0; } static snd_pcm_sframes_t snd_pcm_playback_rewind(struct snd_pcm_substream *substream, snd_pcm_uframes_t frames) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_sframes_t appl_ptr; snd_pcm_sframes_t ret; snd_pcm_sframes_t hw_avail; if (frames == 0) return 0; pr_info("%s ++", __func__); snd_pcm_stream_lock_irq(substream); switch (runtime->status->state) { case SNDRV_PCM_STATE_PREPARED: break; case SNDRV_PCM_STATE_DRAINING: case SNDRV_PCM_STATE_RUNNING: if (snd_pcm_update_hw_ptr(substream) >= 0) break; /* Fall through */ case SNDRV_PCM_STATE_XRUN: ret = -EPIPE; goto __end; case SNDRV_PCM_STATE_SUSPENDED: ret = -ESTRPIPE; goto __end; default: ret = -EBADFD; goto __end; } hw_avail = snd_pcm_playback_hw_avail(runtime); if (hw_avail <= 0) { ret = 0; goto __end; } if (frames > (snd_pcm_uframes_t)hw_avail) frames = hw_avail; appl_ptr = runtime->control->appl_ptr - frames; if (appl_ptr < 0) appl_ptr += runtime->boundary; runtime->control->appl_ptr = appl_ptr; ret = frames; __end: pr_info("%s --", __func__); snd_pcm_stream_unlock_irq(substream); return ret; } static snd_pcm_sframes_t snd_pcm_capture_rewind(struct snd_pcm_substream *substream, snd_pcm_uframes_t frames) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_sframes_t appl_ptr; snd_pcm_sframes_t ret; snd_pcm_sframes_t hw_avail; if (frames == 0) return 0; pr_info("%s ++", __func__); snd_pcm_stream_lock_irq(substream); 
switch (runtime->status->state) { case SNDRV_PCM_STATE_PREPARED: case SNDRV_PCM_STATE_DRAINING: break; case SNDRV_PCM_STATE_RUNNING: if (snd_pcm_update_hw_ptr(substream) >= 0) break; /* Fall through */ case SNDRV_PCM_STATE_XRUN: ret = -EPIPE; goto __end; case SNDRV_PCM_STATE_SUSPENDED: ret = -ESTRPIPE; goto __end; default: ret = -EBADFD; goto __end; } hw_avail = snd_pcm_capture_hw_avail(runtime); if (hw_avail <= 0) { ret = 0; goto __end; } if (frames > (snd_pcm_uframes_t)hw_avail) frames = hw_avail; appl_ptr = runtime->control->appl_ptr - frames; if (appl_ptr < 0) appl_ptr += runtime->boundary; runtime->control->appl_ptr = appl_ptr; ret = frames; __end: pr_info("%s --", __func__); snd_pcm_stream_unlock_irq(substream); return ret; } static snd_pcm_sframes_t snd_pcm_playback_forward(struct snd_pcm_substream *substream, snd_pcm_uframes_t frames) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_sframes_t appl_ptr; snd_pcm_sframes_t ret; snd_pcm_sframes_t avail; if (frames == 0) return 0; pr_info("%s ++", __func__); snd_pcm_stream_lock_irq(substream); switch (runtime->status->state) { case SNDRV_PCM_STATE_PREPARED: case SNDRV_PCM_STATE_PAUSED: break; case SNDRV_PCM_STATE_DRAINING: case SNDRV_PCM_STATE_RUNNING: if (snd_pcm_update_hw_ptr(substream) >= 0) break; /* Fall through */ case SNDRV_PCM_STATE_XRUN: ret = -EPIPE; goto __end; case SNDRV_PCM_STATE_SUSPENDED: ret = -ESTRPIPE; goto __end; default: ret = -EBADFD; goto __end; } avail = snd_pcm_playback_avail(runtime); if (avail <= 0) { ret = 0; goto __end; } if (frames > (snd_pcm_uframes_t)avail) frames = avail; appl_ptr = runtime->control->appl_ptr + frames; if (appl_ptr >= (snd_pcm_sframes_t)runtime->boundary) appl_ptr -= runtime->boundary; runtime->control->appl_ptr = appl_ptr; ret = frames; __end: pr_info("%s --", __func__); snd_pcm_stream_unlock_irq(substream); return ret; } static snd_pcm_sframes_t snd_pcm_capture_forward(struct snd_pcm_substream *substream, snd_pcm_uframes_t frames) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_sframes_t appl_ptr; snd_pcm_sframes_t ret; snd_pcm_sframes_t avail; if (frames == 0) return 0; pr_info("%s ++", __func__); snd_pcm_stream_lock_irq(substream); switch (runtime->status->state) { case SNDRV_PCM_STATE_PREPARED: case SNDRV_PCM_STATE_DRAINING: case SNDRV_PCM_STATE_PAUSED: break; case SNDRV_PCM_STATE_RUNNING: if (snd_pcm_update_hw_ptr(substream) >= 0) break; /* Fall through */ case SNDRV_PCM_STATE_XRUN: ret = -EPIPE; goto __end; case SNDRV_PCM_STATE_SUSPENDED: ret = -ESTRPIPE; goto __end; default: ret = -EBADFD; goto __end; } avail = snd_pcm_capture_avail(runtime); if (avail <= 0) { ret = 0; goto __end; } if (frames > (snd_pcm_uframes_t)avail) frames = avail; appl_ptr = runtime->control->appl_ptr + frames; if (appl_ptr >= (snd_pcm_sframes_t)runtime->boundary) appl_ptr -= runtime->boundary; runtime->control->appl_ptr = appl_ptr; ret = frames; __end: pr_info("%s --", __func__); snd_pcm_stream_unlock_irq(substream); return ret; } static int snd_pcm_hwsync(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; int err; snd_pcm_stream_lock_irq(substream); switch (runtime->status->state) { case SNDRV_PCM_STATE_DRAINING: if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) goto __badfd; case SNDRV_PCM_STATE_RUNNING: if ((err = snd_pcm_update_hw_ptr(substream)) < 0) break; /* Fall through */ case SNDRV_PCM_STATE_PREPARED: case SNDRV_PCM_STATE_SUSPENDED: err = 0; break; case SNDRV_PCM_STATE_XRUN: err = -EPIPE; break; default: __badfd: err = 
-EBADFD; break; } snd_pcm_stream_unlock_irq(substream); return err; } static int snd_pcm_delay(struct snd_pcm_substream *substream, snd_pcm_sframes_t __user *res) { struct snd_pcm_runtime *runtime = substream->runtime; int err; snd_pcm_sframes_t n = 0; pr_info("%s ++", __func__); snd_pcm_stream_lock_irq(substream); switch (runtime->status->state) { case SNDRV_PCM_STATE_DRAINING: if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) goto __badfd; case SNDRV_PCM_STATE_RUNNING: if ((err = snd_pcm_update_hw_ptr(substream)) < 0) break; /* Fall through */ case SNDRV_PCM_STATE_PREPARED: case SNDRV_PCM_STATE_SUSPENDED: err = 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) n = snd_pcm_playback_hw_avail(runtime); else n = snd_pcm_capture_avail(runtime); n += runtime->delay; break; case SNDRV_PCM_STATE_XRUN: err = -EPIPE; break; default: __badfd: err = -EBADFD; break; } pr_info("%s --", __func__); snd_pcm_stream_unlock_irq(substream); if (!err) if (put_user(n, res)) err = -EFAULT; return err; } static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream, struct snd_pcm_sync_ptr __user *_sync_ptr) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_pcm_sync_ptr sync_ptr; volatile struct snd_pcm_mmap_status *status; volatile struct snd_pcm_mmap_control *control; int err; snd_pcm_uframes_t hw_avail; memset(&sync_ptr, 0, sizeof(sync_ptr)); if (get_user(sync_ptr.flags, (unsigned __user *)&(_sync_ptr->flags))) return -EFAULT; if (copy_from_user(&sync_ptr.c.control, &(_sync_ptr->c.control), sizeof(struct snd_pcm_mmap_control))) return -EFAULT; status = runtime->status; control = runtime->control; if (sync_ptr.flags & SNDRV_PCM_SYNC_PTR_HWSYNC) { err = snd_pcm_hwsync(substream); if (err < 0) return err; } snd_pcm_stream_lock_irq(substream); if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL)) control->appl_ptr = sync_ptr.c.control.appl_ptr; else sync_ptr.c.control.appl_ptr = control->appl_ptr; if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN)) control->avail_min = sync_ptr.c.control.avail_min; else sync_ptr.c.control.avail_min = control->avail_min; if (runtime->render_flag & SNDRV_NON_DMA_MODE) { hw_avail = snd_pcm_playback_hw_avail(runtime); if ((hw_avail >= runtime->start_threshold) && (runtime->render_flag & SNDRV_RENDER_STOPPED)) { if (substream->ops->restart) substream->ops->restart(substream); } } sync_ptr.s.status.state = status->state; sync_ptr.s.status.hw_ptr = status->hw_ptr; sync_ptr.s.status.tstamp = status->tstamp; sync_ptr.s.status.suspended_state = status->suspended_state; snd_pcm_stream_unlock_irq(substream); if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr))) return -EFAULT; return 0; } static int snd_pcm_tstamp(struct snd_pcm_substream *substream, int __user *_arg) { struct snd_pcm_runtime *runtime = substream->runtime; int arg; if (get_user(arg, _arg)) return -EFAULT; if (arg < 0 || arg > SNDRV_PCM_TSTAMP_TYPE_LAST) return -EINVAL; runtime->tstamp_type = SNDRV_PCM_TSTAMP_TYPE_GETTIMEOFDAY; if (arg == SNDRV_PCM_TSTAMP_TYPE_MONOTONIC) runtime->tstamp_type = SNDRV_PCM_TSTAMP_TYPE_MONOTONIC; return 0; } //htc audio ++ static int snd_pcm_enable_effect(struct snd_pcm_substream *substream, int __user *_arg) { /* if substream is NULL, return error. 
*/ if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; pr_info("%s: is called\n", __func__); return substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_ENABLE_EFFECT, _arg); } //htc audio -- static int snd_pcm_common_ioctl1(struct file *file, struct snd_pcm_substream *substream, unsigned int cmd, void __user *arg) { switch (cmd) { case SNDRV_PCM_IOCTL_PVERSION: return put_user(SNDRV_PCM_VERSION, (int __user *)arg) ? -EFAULT : 0; case SNDRV_PCM_IOCTL_INFO: return snd_pcm_info_user(substream, arg); case SNDRV_PCM_IOCTL_TSTAMP: /* just for compatibility */ return 0; case SNDRV_PCM_IOCTL_TTSTAMP: return snd_pcm_tstamp(substream, arg); case SNDRV_PCM_IOCTL_HW_REFINE: return snd_pcm_hw_refine_user(substream, arg); case SNDRV_PCM_IOCTL_HW_PARAMS: return snd_pcm_hw_params_user(substream, arg); case SNDRV_PCM_IOCTL_HW_FREE: return snd_pcm_hw_free(substream); case SNDRV_PCM_IOCTL_SW_PARAMS: return snd_pcm_sw_params_user(substream, arg); case SNDRV_PCM_IOCTL_STATUS: return snd_pcm_status_user(substream, arg); case SNDRV_PCM_IOCTL_CHANNEL_INFO: return snd_pcm_channel_info_user(substream, arg); case SNDRV_PCM_IOCTL_PREPARE: return snd_pcm_prepare(substream, file); case SNDRV_PCM_IOCTL_RESET: return snd_pcm_reset(substream); case SNDRV_PCM_IOCTL_START: return snd_pcm_action_lock_irq(&snd_pcm_action_start, substream, SNDRV_PCM_STATE_RUNNING); case SNDRV_PCM_IOCTL_LINK: return snd_pcm_link(substream, (int)(unsigned long) arg); case SNDRV_PCM_IOCTL_UNLINK: return snd_pcm_unlink(substream); case SNDRV_PCM_IOCTL_RESUME: return snd_pcm_resume(substream); case SNDRV_PCM_IOCTL_XRUN: return snd_pcm_xrun(substream); case SNDRV_PCM_IOCTL_HWSYNC: return snd_pcm_hwsync(substream); case SNDRV_PCM_IOCTL_DELAY: return snd_pcm_delay(substream, arg); case SNDRV_PCM_IOCTL_SYNC_PTR: return snd_pcm_sync_ptr(substream, arg); #ifdef CONFIG_SND_SUPPORT_OLD_API case SNDRV_PCM_IOCTL_HW_REFINE_OLD: return snd_pcm_hw_refine_old_user(substream, arg); case SNDRV_PCM_IOCTL_HW_PARAMS_OLD: return snd_pcm_hw_params_old_user(substream, arg); #endif case SNDRV_PCM_IOCTL_DRAIN: return snd_pcm_drain(substream, file); case SNDRV_PCM_IOCTL_DROP: return snd_pcm_drop(substream); case SNDRV_PCM_IOCTL_PAUSE: { int res; pr_info("%s ++", __func__); snd_pcm_stream_lock_irq(substream); res = snd_pcm_pause(substream, (int)(unsigned long)arg); pr_info("%s --", __func__); snd_pcm_stream_unlock_irq(substream); return res; } //htc audio ++ case SNDRV_PCM_IOCTL_ENABLE_EFFECT: return snd_pcm_enable_effect(substream, arg); //htc audio -- case SNDRV_COMPRESS_GET_CAPS: case SNDRV_COMPRESS_GET_CODEC_CAPS: case SNDRV_COMPRESS_SET_PARAMS: case SNDRV_COMPRESS_GET_PARAMS: case SNDRV_COMPRESS_TSTAMP: case SNDRV_COMPRESS_DRAIN: case SNDRV_COMPRESS_METADATA_MODE: return snd_compressed_ioctl(substream, cmd, arg); default: if (((cmd >> 8) & 0xff) == 'U') return snd_user_ioctl(substream, cmd, arg); } snd_printd("unknown ioctl = 0x%x\n", cmd); return -ENOTTY; } static int snd_pcm_playback_ioctl1(struct file *file, struct snd_pcm_substream *substream, unsigned int cmd, void __user *arg) { if (snd_BUG_ON(!substream)) return -ENXIO; if (snd_BUG_ON(substream->stream != SNDRV_PCM_STREAM_PLAYBACK)) return -EINVAL; switch (cmd) { case SNDRV_PCM_IOCTL_WRITEI_FRAMES: { struct snd_xferi xferi; struct snd_xferi __user *_xferi = arg; struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_sframes_t result; if (runtime->status->state == SNDRV_PCM_STATE_OPEN) return -EBADFD; if (put_user(0, &_xferi->result)) return -EFAULT; if (copy_from_user(&xferi, _xferi, sizeof(xferi))) return 
-EFAULT; result = snd_pcm_lib_write(substream, xferi.buf, xferi.frames); __put_user(result, &_xferi->result); return result < 0 ? result : 0; } case SNDRV_PCM_IOCTL_WRITEN_FRAMES: { struct snd_xfern xfern; struct snd_xfern __user *_xfern = arg; struct snd_pcm_runtime *runtime = substream->runtime; void __user **bufs; snd_pcm_sframes_t result; if (runtime->status->state == SNDRV_PCM_STATE_OPEN) return -EBADFD; if (runtime->channels > 128) return -EINVAL; if (put_user(0, &_xfern->result)) return -EFAULT; if (copy_from_user(&xfern, _xfern, sizeof(xfern))) return -EFAULT; bufs = memdup_user(xfern.bufs, sizeof(void *) * runtime->channels); if (IS_ERR(bufs)) return PTR_ERR(bufs); result = snd_pcm_lib_writev(substream, bufs, xfern.frames); kfree(bufs); __put_user(result, &_xfern->result); return result < 0 ? result : 0; } case SNDRV_PCM_IOCTL_REWIND: { snd_pcm_uframes_t frames; snd_pcm_uframes_t __user *_frames = arg; snd_pcm_sframes_t result; if (get_user(frames, _frames)) return -EFAULT; if (put_user(0, _frames)) return -EFAULT; result = snd_pcm_playback_rewind(substream, frames); __put_user(result, _frames); return result < 0 ? result : 0; } case SNDRV_PCM_IOCTL_FORWARD: { snd_pcm_uframes_t frames; snd_pcm_uframes_t __user *_frames = arg; snd_pcm_sframes_t result; if (get_user(frames, _frames)) return -EFAULT; if (put_user(0, _frames)) return -EFAULT; result = snd_pcm_playback_forward(substream, frames); __put_user(result, _frames); return result < 0 ? result : 0; } } return snd_pcm_common_ioctl1(file, substream, cmd, arg); } static int snd_pcm_capture_ioctl1(struct file *file, struct snd_pcm_substream *substream, unsigned int cmd, void __user *arg) { if (snd_BUG_ON(!substream)) return -ENXIO; if (snd_BUG_ON(substream->stream != SNDRV_PCM_STREAM_CAPTURE)) return -EINVAL; switch (cmd) { case SNDRV_PCM_IOCTL_READI_FRAMES: { struct snd_xferi xferi; struct snd_xferi __user *_xferi = arg; struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_sframes_t result; if (runtime->status->state == SNDRV_PCM_STATE_OPEN) return -EBADFD; if (put_user(0, &_xferi->result)) return -EFAULT; if (copy_from_user(&xferi, _xferi, sizeof(xferi))) return -EFAULT; result = snd_pcm_lib_read(substream, xferi.buf, xferi.frames); __put_user(result, &_xferi->result); return result < 0 ? result : 0; } case SNDRV_PCM_IOCTL_READN_FRAMES: { struct snd_xfern xfern; struct snd_xfern __user *_xfern = arg; struct snd_pcm_runtime *runtime = substream->runtime; void *bufs; snd_pcm_sframes_t result; if (runtime->status->state == SNDRV_PCM_STATE_OPEN) return -EBADFD; if (runtime->channels > 128) return -EINVAL; if (put_user(0, &_xfern->result)) return -EFAULT; if (copy_from_user(&xfern, _xfern, sizeof(xfern))) return -EFAULT; bufs = memdup_user(xfern.bufs, sizeof(void *) * runtime->channels); if (IS_ERR(bufs)) return PTR_ERR(bufs); result = snd_pcm_lib_readv(substream, bufs, xfern.frames); kfree(bufs); __put_user(result, &_xfern->result); return result < 0 ? result : 0; } case SNDRV_PCM_IOCTL_REWIND: { snd_pcm_uframes_t frames; snd_pcm_uframes_t __user *_frames = arg; snd_pcm_sframes_t result; if (get_user(frames, _frames)) return -EFAULT; if (put_user(0, _frames)) return -EFAULT; result = snd_pcm_capture_rewind(substream, frames); __put_user(result, _frames); return result < 0 ? 
result : 0; } case SNDRV_PCM_IOCTL_FORWARD: { snd_pcm_uframes_t frames; snd_pcm_uframes_t __user *_frames = arg; snd_pcm_sframes_t result; if (get_user(frames, _frames)) return -EFAULT; if (put_user(0, _frames)) return -EFAULT; result = snd_pcm_capture_forward(substream, frames); __put_user(result, _frames); return result < 0 ? result : 0; } } return snd_pcm_common_ioctl1(file, substream, cmd, arg); } static long snd_pcm_playback_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct snd_pcm_file *pcm_file; pcm_file = file->private_data; if ((((cmd >> 8) & 0xff) != 'A') && (((cmd >> 8) & 0xff) != 'C')) return -ENOTTY; return snd_pcm_playback_ioctl1(file, pcm_file->substream, cmd, (void __user *)arg); } static long snd_pcm_capture_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct snd_pcm_file *pcm_file; unsigned char ioctl_magic; pcm_file = file->private_data; ioctl_magic = ((cmd >> 8) & 0xff); if (ioctl_magic != 'A' && ioctl_magic != 'C' && ioctl_magic != 'U') return -ENOTTY; return snd_pcm_capture_ioctl1(file, pcm_file->substream, cmd, (void __user *)arg); } int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream, unsigned int cmd, void *arg) { mm_segment_t fs; int result; fs = snd_enter_user(); switch (substream->stream) { case SNDRV_PCM_STREAM_PLAYBACK: result = snd_pcm_playback_ioctl1(NULL, substream, cmd, (void __user *)arg); break; case SNDRV_PCM_STREAM_CAPTURE: result = snd_pcm_capture_ioctl1(NULL, substream, cmd, (void __user *)arg); break; default: result = -EINVAL; break; } snd_leave_user(fs); return result; } EXPORT_SYMBOL(snd_pcm_kernel_ioctl); static ssize_t snd_pcm_read(struct file *file, char __user *buf, size_t count, loff_t * offset) { struct snd_pcm_file *pcm_file; struct snd_pcm_substream *substream; struct snd_pcm_runtime *runtime; snd_pcm_sframes_t result; pcm_file = file->private_data; substream = pcm_file->substream; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; if (runtime->status->state == SNDRV_PCM_STATE_OPEN) return -EBADFD; if (!frame_aligned(runtime, count)) return -EINVAL; count = bytes_to_frames(runtime, count); result = snd_pcm_lib_read(substream, buf, count); if (result > 0) result = frames_to_bytes(runtime, result); return result; } static ssize_t snd_pcm_write(struct file *file, const char __user *buf, size_t count, loff_t * offset) { struct snd_pcm_file *pcm_file; struct snd_pcm_substream *substream; struct snd_pcm_runtime *runtime; snd_pcm_sframes_t result; pcm_file = file->private_data; substream = pcm_file->substream; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; if (runtime->status->state == SNDRV_PCM_STATE_OPEN) return -EBADFD; if (!frame_aligned(runtime, count)) return -EINVAL; count = bytes_to_frames(runtime, count); result = snd_pcm_lib_write(substream, buf, count); if (result > 0) result = frames_to_bytes(runtime, result); return result; } static ssize_t snd_pcm_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct snd_pcm_file *pcm_file; struct snd_pcm_substream *substream; struct snd_pcm_runtime *runtime; snd_pcm_sframes_t result; unsigned long i; void __user **bufs; snd_pcm_uframes_t frames; pcm_file = iocb->ki_filp->private_data; substream = pcm_file->substream; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; if (runtime->status->state == SNDRV_PCM_STATE_OPEN) return -EBADFD; if (nr_segs > 1024 || nr_segs != runtime->channels) return -EINVAL; if 
(!frame_aligned(runtime, iov->iov_len)) return -EINVAL; frames = bytes_to_samples(runtime, iov->iov_len); bufs = kmalloc(sizeof(void *) * nr_segs, GFP_KERNEL); if (bufs == NULL) return -ENOMEM; for (i = 0; i < nr_segs; ++i) bufs[i] = iov[i].iov_base; result = snd_pcm_lib_readv(substream, bufs, frames); if (result > 0) result = frames_to_bytes(runtime, result); kfree(bufs); return result; } static ssize_t snd_pcm_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct snd_pcm_file *pcm_file; struct snd_pcm_substream *substream; struct snd_pcm_runtime *runtime; snd_pcm_sframes_t result; unsigned long i; void __user **bufs; snd_pcm_uframes_t frames; pcm_file = iocb->ki_filp->private_data; substream = pcm_file->substream; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; if (runtime->status->state == SNDRV_PCM_STATE_OPEN) return -EBADFD; if (nr_segs > 128 || nr_segs != runtime->channels || !frame_aligned(runtime, iov->iov_len)) return -EINVAL; frames = bytes_to_samples(runtime, iov->iov_len); bufs = kmalloc(sizeof(void *) * nr_segs, GFP_KERNEL); if (bufs == NULL) return -ENOMEM; for (i = 0; i < nr_segs; ++i) bufs[i] = iov[i].iov_base; result = snd_pcm_lib_writev(substream, bufs, frames); if (result > 0) result = frames_to_bytes(runtime, result); kfree(bufs); return result; } static unsigned int snd_pcm_playback_poll(struct file *file, poll_table * wait) { struct snd_pcm_file *pcm_file; struct snd_pcm_substream *substream; struct snd_pcm_runtime *runtime; unsigned int mask; snd_pcm_uframes_t avail; pcm_file = file->private_data; substream = pcm_file->substream; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; poll_wait(file, &runtime->sleep, wait); snd_pcm_stream_lock_irq(substream); avail = snd_pcm_playback_avail(runtime); switch (runtime->status->state) { case SNDRV_PCM_STATE_RUNNING: case SNDRV_PCM_STATE_PREPARED: case SNDRV_PCM_STATE_PAUSED: if (avail >= runtime->control->avail_min) { mask = POLLOUT | POLLWRNORM; break; } /* Fall through */ case SNDRV_PCM_STATE_DRAINING: mask = 0; break; default: mask = POLLOUT | POLLWRNORM | POLLERR; break; } snd_pcm_stream_unlock_irq(substream); return mask; } static unsigned int snd_pcm_capture_poll(struct file *file, poll_table * wait) { struct snd_pcm_file *pcm_file; struct snd_pcm_substream *substream; struct snd_pcm_runtime *runtime; unsigned int mask; snd_pcm_uframes_t avail; pcm_file = file->private_data; substream = pcm_file->substream; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; poll_wait(file, &runtime->sleep, wait); snd_pcm_stream_lock_irq(substream); avail = snd_pcm_capture_avail(runtime); switch (runtime->status->state) { case SNDRV_PCM_STATE_RUNNING: case SNDRV_PCM_STATE_PREPARED: case SNDRV_PCM_STATE_PAUSED: if (avail >= runtime->control->avail_min) { mask = POLLIN | POLLRDNORM; break; } mask = 0; break; case SNDRV_PCM_STATE_DRAINING: if (avail > 0) { mask = POLLIN | POLLRDNORM; break; } /* Fall through */ default: mask = POLLIN | POLLRDNORM | POLLERR; break; } snd_pcm_stream_unlock_irq(substream); return mask; } /* * mmap support */ /* * Only on coherent architectures can we mmap the status and the control records * for efficient data transfer. On others, we have to use the HWSYNC ioctl...
*/ #if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_ALPHA) /* * mmap status record */ static int snd_pcm_mmap_status_fault(struct vm_area_struct *area, struct vm_fault *vmf) { struct snd_pcm_substream *substream = area->vm_private_data; struct snd_pcm_runtime *runtime; if (substream == NULL) return VM_FAULT_SIGBUS; runtime = substream->runtime; vmf->page = virt_to_page(runtime->status); get_page(vmf->page); return 0; } static const struct vm_operations_struct snd_pcm_vm_ops_status = { .fault = snd_pcm_mmap_status_fault, }; static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area) { long size; if (!(area->vm_flags & VM_READ)) return -EINVAL; size = area->vm_end - area->vm_start; if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status))) return -EINVAL; area->vm_ops = &snd_pcm_vm_ops_status; area->vm_private_data = substream; area->vm_flags |= VM_RESERVED; return 0; } /* * mmap control record */ static int snd_pcm_mmap_control_fault(struct vm_area_struct *area, struct vm_fault *vmf) { struct snd_pcm_substream *substream = area->vm_private_data; struct snd_pcm_runtime *runtime; if (substream == NULL) return VM_FAULT_SIGBUS; runtime = substream->runtime; vmf->page = virt_to_page(runtime->control); get_page(vmf->page); return 0; } static const struct vm_operations_struct snd_pcm_vm_ops_control = { .fault = snd_pcm_mmap_control_fault, }; static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area) { long size; if (!(area->vm_flags & VM_READ)) return -EINVAL; size = area->vm_end - area->vm_start; if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control))) return -EINVAL; area->vm_ops = &snd_pcm_vm_ops_control; area->vm_private_data = substream; area->vm_flags |= VM_RESERVED; return 0; } #else /* ! coherent mmap */ /* * don't support mmap for status and control records. 
*/ static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area) { return -ENXIO; } static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area) { return -ENXIO; } #endif /* coherent mmap */ static inline struct page * snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs) { void *vaddr = substream->runtime->dma_area + ofs; #if defined(CONFIG_MIPS) && defined(CONFIG_DMA_NONCOHERENT) if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV) return virt_to_page(CAC_ADDR(vaddr)); #endif #if defined(CONFIG_PPC32) && defined(CONFIG_NOT_COHERENT_CACHE) if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV) { dma_addr_t addr = substream->runtime->dma_addr + ofs; addr -= get_dma_offset(substream->dma_buffer.dev.dev); /* assume dma_handle set via pfn_to_phys() in * mm/dma-noncoherent.c */ return pfn_to_page(addr >> PAGE_SHIFT); } #endif return virt_to_page(vaddr); } /* * fault callback for mmapping a RAM page */ static int snd_pcm_mmap_data_fault(struct vm_area_struct *area, struct vm_fault *vmf) { struct snd_pcm_substream *substream = area->vm_private_data; struct snd_pcm_runtime *runtime; unsigned long offset; struct page * page; size_t dma_bytes; if (substream == NULL) return VM_FAULT_SIGBUS; runtime = substream->runtime; offset = vmf->pgoff << PAGE_SHIFT; dma_bytes = PAGE_ALIGN(runtime->dma_bytes); if (offset > dma_bytes - PAGE_SIZE) return VM_FAULT_SIGBUS; if (substream->ops->page) page = substream->ops->page(substream, offset); else page = snd_pcm_default_page_ops(substream, offset); if (!page) return VM_FAULT_SIGBUS; get_page(page); vmf->page = page; return 0; } static const struct vm_operations_struct snd_pcm_vm_ops_data = { .open = snd_pcm_mmap_data_open, .close = snd_pcm_mmap_data_close, }; static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = { .open = snd_pcm_mmap_data_open, .close = snd_pcm_mmap_data_close, .fault = snd_pcm_mmap_data_fault, }; #ifndef ARCH_HAS_DMA_MMAP_COHERENT /* This should be defined / handled globally! 
*/ #ifdef CONFIG_ARM #define ARCH_HAS_DMA_MMAP_COHERENT #endif #endif /* * mmap the DMA buffer on RAM */ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *area) { area->vm_flags |= VM_RESERVED; #ifdef ARCH_HAS_DMA_MMAP_COHERENT if (!substream->ops->page && substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV) return dma_mmap_coherent(substream->dma_buffer.dev.dev, area, substream->runtime->dma_area, substream->runtime->dma_addr, area->vm_end - area->vm_start); #elif defined(CONFIG_MIPS) && defined(CONFIG_DMA_NONCOHERENT) if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV && !plat_device_is_coherent(substream->dma_buffer.dev.dev)) area->vm_page_prot = pgprot_noncached(area->vm_page_prot); #endif /* ARCH_HAS_DMA_MMAP_COHERENT */ /* mmap with fault handler */ area->vm_ops = &snd_pcm_vm_ops_data_fault; return 0; } EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap); /* * mmap the DMA buffer on I/O memory area */ #if SNDRV_PCM_INFO_MMAP_IOMEM int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_struct *area) { long size; unsigned long offset; area->vm_page_prot = pgprot_noncached(area->vm_page_prot); area->vm_flags |= VM_IO; size = area->vm_end - area->vm_start; offset = area->vm_pgoff << PAGE_SHIFT; if (io_remap_pfn_range(area, area->vm_start, (substream->runtime->dma_addr + offset) >> PAGE_SHIFT, size, area->vm_page_prot)) return -EAGAIN; return 0; } EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem); #endif /* SNDRV_PCM_INFO_MMAP */ /* * mmap DMA buffer */ int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area) { struct snd_pcm_runtime *runtime; long size; unsigned long offset; size_t dma_bytes; int err; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { if (!(area->vm_flags & (VM_WRITE|VM_READ))) return -EINVAL; } else { if (!(area->vm_flags & VM_READ)) return -EINVAL; } runtime = substream->runtime; if (runtime->status->state == SNDRV_PCM_STATE_OPEN) return -EBADFD; if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) return -ENXIO; if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED || runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) return -EINVAL; size = area->vm_end - area->vm_start; offset = area->vm_pgoff << PAGE_SHIFT; dma_bytes = PAGE_ALIGN(runtime->dma_bytes); if ((size_t)size > dma_bytes) return -EINVAL; if (offset > dma_bytes - size) return -EINVAL; area->vm_ops = &snd_pcm_vm_ops_data; area->vm_private_data = substream; if (substream->ops->mmap) err = substream->ops->mmap(substream, area); else err = snd_pcm_lib_default_mmap(substream, area); if (!err) atomic_inc(&substream->mmap_count); return err; } EXPORT_SYMBOL(snd_pcm_mmap_data); static int snd_pcm_mmap(struct file *file, struct vm_area_struct *area) { struct snd_pcm_file * pcm_file; struct snd_pcm_substream *substream; unsigned long offset; pcm_file = file->private_data; substream = pcm_file->substream; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; offset = area->vm_pgoff << PAGE_SHIFT; switch (offset) { case SNDRV_PCM_MMAP_OFFSET_STATUS: if (pcm_file->no_compat_mmap) return -ENXIO; return snd_pcm_mmap_status(substream, file, area); case SNDRV_PCM_MMAP_OFFSET_CONTROL: if (pcm_file->no_compat_mmap) return -ENXIO; return snd_pcm_mmap_control(substream, file, area); default: return snd_pcm_mmap_data(substream, file, area); } return 0; } static int snd_pcm_fasync(int fd, struct file * file, int on) { struct snd_pcm_file * pcm_file; struct snd_pcm_substream *substream; struct snd_pcm_runtime *runtime; pcm_file = 
file->private_data; substream = pcm_file->substream; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; return fasync_helper(fd, file, on, &runtime->fasync); } /* * ioctl32 compat */ #ifdef CONFIG_COMPAT #include "pcm_compat.c" #else #define snd_pcm_ioctl_compat NULL #endif /* * To be removed helpers to keep binary compatibility */ #ifdef CONFIG_SND_SUPPORT_OLD_API #define __OLD_TO_NEW_MASK(x) ((x&7)|((x&0x07fffff8)<<5)) #define __NEW_TO_OLD_MASK(x) ((x&7)|((x&0xffffff00)>>5)) static void snd_pcm_hw_convert_from_old_params(struct snd_pcm_hw_params *params, struct snd_pcm_hw_params_old *oparams) { unsigned int i; memset(params, 0, sizeof(*params)); params->flags = oparams->flags; for (i = 0; i < ARRAY_SIZE(oparams->masks); i++) params->masks[i].bits[0] = oparams->masks[i]; memcpy(params->intervals, oparams->intervals, sizeof(oparams->intervals)); params->rmask = __OLD_TO_NEW_MASK(oparams->rmask); params->cmask = __OLD_TO_NEW_MASK(oparams->cmask); params->info = oparams->info; params->msbits = oparams->msbits; params->rate_num = oparams->rate_num; params->rate_den = oparams->rate_den; params->fifo_size = oparams->fifo_size; } static void snd_pcm_hw_convert_to_old_params(struct snd_pcm_hw_params_old *oparams, struct snd_pcm_hw_params *params) { unsigned int i; memset(oparams, 0, sizeof(*oparams)); oparams->flags = params->flags; for (i = 0; i < ARRAY_SIZE(oparams->masks); i++) oparams->masks[i] = params->masks[i].bits[0]; memcpy(oparams->intervals, params->intervals, sizeof(oparams->intervals)); oparams->rmask = __NEW_TO_OLD_MASK(params->rmask); oparams->cmask = __NEW_TO_OLD_MASK(params->cmask); oparams->info = params->info; oparams->msbits = params->msbits; oparams->rate_num = params->rate_num; oparams->rate_den = params->rate_den; oparams->fifo_size = params->fifo_size; } static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream, struct snd_pcm_hw_params_old __user * _oparams) { struct snd_pcm_hw_params *params; struct snd_pcm_hw_params_old *oparams = NULL; int err; params = kmalloc(sizeof(*params), GFP_KERNEL); if (!params) return -ENOMEM; oparams = memdup_user(_oparams, sizeof(*oparams)); if (IS_ERR(oparams)) { err = PTR_ERR(oparams); goto out; } snd_pcm_hw_convert_from_old_params(params, oparams); err = snd_pcm_hw_refine(substream, params); snd_pcm_hw_convert_to_old_params(oparams, params); if (copy_to_user(_oparams, oparams, sizeof(*oparams))) { if (!err) err = -EFAULT; } kfree(oparams); out: kfree(params); return err; } static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream, struct snd_pcm_hw_params_old __user * _oparams) { struct snd_pcm_hw_params *params; struct snd_pcm_hw_params_old *oparams = NULL; int err; params = kmalloc(sizeof(*params), GFP_KERNEL); if (!params) return -ENOMEM; oparams = memdup_user(_oparams, sizeof(*oparams)); if (IS_ERR(oparams)) { err = PTR_ERR(oparams); goto out; } snd_pcm_hw_convert_from_old_params(params, oparams); err = snd_pcm_hw_params(substream, params); snd_pcm_hw_convert_to_old_params(oparams, params); if (copy_to_user(_oparams, oparams, sizeof(*oparams))) { if (!err) err = -EFAULT; } kfree(oparams); out: kfree(params); return err; } #endif /* CONFIG_SND_SUPPORT_OLD_API */ #ifndef CONFIG_MMU static unsigned long snd_pcm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct snd_pcm_file *pcm_file = file->private_data; struct snd_pcm_substream *substream = pcm_file->substream; struct snd_pcm_runtime *runtime = 
substream->runtime; unsigned long offset = pgoff << PAGE_SHIFT; switch (offset) { case SNDRV_PCM_MMAP_OFFSET_STATUS: return (unsigned long)runtime->status; case SNDRV_PCM_MMAP_OFFSET_CONTROL: return (unsigned long)runtime->control; default: return (unsigned long)runtime->dma_area + offset; } } #else # define snd_pcm_get_unmapped_area NULL #endif /* * Register section */ const struct file_operations snd_pcm_f_ops[2] = { { .owner = THIS_MODULE, .write = snd_pcm_write, .aio_write = snd_pcm_aio_write, .open = snd_pcm_playback_open, .release = snd_pcm_release, .llseek = no_llseek, .poll = snd_pcm_playback_poll, .unlocked_ioctl = snd_pcm_playback_ioctl, .compat_ioctl = snd_pcm_ioctl_compat, .mmap = snd_pcm_mmap, .fasync = snd_pcm_fasync, .get_unmapped_area = snd_pcm_get_unmapped_area, }, { .owner = THIS_MODULE, .read = snd_pcm_read, .aio_read = snd_pcm_aio_read, .open = snd_pcm_capture_open, .release = snd_pcm_release, .llseek = no_llseek, .poll = snd_pcm_capture_poll, .unlocked_ioctl = snd_pcm_capture_ioctl, .compat_ioctl = snd_pcm_ioctl_compat, .mmap = snd_pcm_mmap, .fasync = snd_pcm_fasync, .get_unmapped_area = snd_pcm_get_unmapped_area, } };
alexey6600/M8_Sense_7.00
sound/core/pcm_native.c
C
gpl-2.0
100,040
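/*
 * A minimal user-space sketch, added for illustration and not part of the
 * original pcm_native.c above: it exercises the drain and drop semantics
 * that snd_pcm_drain() and snd_pcm_drop() implement.  Assumptions: the
 * device node (card/device numbers) is hypothetical, and the substream
 * must already have gone through hw_params/prepare and been started,
 * otherwise these ioctls fail with -EBADFD or have nothing to do.  The
 * SNDRV_PCM_IOCTL_* request macros come from the ALSA UAPI header.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sound/asound.h>

int main(void)
{
	/* hypothetical playback node: card 0, device 0 */
	int fd = open("/dev/snd/pcmC0D0p", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/*
	 * Blocking drain: snd_pcm_drain() above moves running playback
	 * streams to DRAINING and sleeps until the queued frames have
	 * played out, leaving the substream in SETUP.  With O_NONBLOCK
	 * set, it returns -EAGAIN instead of waiting.
	 */
	if (ioctl(fd, SNDRV_PCM_IOCTL_DRAIN) < 0)
		perror("SNDRV_PCM_IOCTL_DRAIN");
	/*
	 * Drop: snd_pcm_drop() abandons any pending frames immediately
	 * and forces the substream to SETUP without waiting.
	 */
	if (ioctl(fd, SNDRV_PCM_IOCTL_DROP) < 0)
		perror("SNDRV_PCM_IOCTL_DROP");
	close(fd);
	return 0;
}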
/***************************************************************************//** * \file cyutils.c * \version 5.40 * * \brief Provides a function to handle 24-bit value writes. * ******************************************************************************** * \copyright * Copyright 2008-2016, Cypress Semiconductor Corporation. All rights reserved. * You may use this file only in accordance with the license, terms, conditions, * disclaimers, and limitations in the end user license agreement accompanying * the software package with which this file was provided. *******************************************************************************/ #include "cytypes.h" #if (!CY_PSOC3) /*************************************************************************** * Function Name: CySetReg24 ************************************************************************//** * * Writes a 24-bit value to the specified register. * * \param addr The address where data must be written. * \param value The data that must be written. * * \reentrant No * ***************************************************************************/ void CySetReg24(uint32 volatile * addr, uint32 value) { uint8 volatile *tmpAddr; tmpAddr = (uint8 volatile *) addr; tmpAddr[0u] = (uint8) value; tmpAddr[1u] = (uint8) (value >> 8u); tmpAddr[2u] = (uint8) (value >> 16u); } #if(CY_PSOC4) /*************************************************************************** * Function Name: CyGetReg24 ************************************************************************//** * * Reads the 24-bit value from the specified register. * * \param addr The address where data must be read. * * \reentrant No * ***************************************************************************/ uint32 CyGetReg24(uint32 const volatile * addr) { uint8 const volatile *tmpAddr; uint32 value; tmpAddr = (uint8 const volatile *) addr; value = (uint32) tmpAddr[0u]; value |= ((uint32) tmpAddr[1u] << 8u ); value |= ((uint32) tmpAddr[2u] << 16u); return(value); } #endif /*(CY_PSOC4)*/ #endif /* (!CY_PSOC3) */ /* [] END OF FILE */
techdude101/code
PSoC BLE/WS_UARTDeepSleepWakeUp/UARTDeepSleepWakeUp.cydsn/codegentemp/cyutils.c
C
gpl-3.0
2,399
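/*
 * A small illustrative sketch, not part of the original cyutils.c above:
 * it shows the little-endian byte layout CySetReg24() produces and the
 * matching read-back.  Assumptions: the prototypes match the definitions
 * above (they normally come from the generated project headers), the
 * build is !CY_PSOC3 so CySetReg24() exists, and 'fakeReg' is a
 * hypothetical stand-in for a real peripheral register.
 */
#include "cytypes.h"

extern void CySetReg24(uint32 volatile * addr, uint32 value);
#if (CY_PSOC4)
extern uint32 CyGetReg24(uint32 const volatile * addr);
#endif

static volatile uint32 fakeReg = 0u;

void CyReg24Demo(void)
{
    CySetReg24(&fakeReg, 0x123456u);
    /* Byte 0 of fakeReg now holds 0x56, byte 1 holds 0x34 and byte 2
     * holds 0x12; the top byte is deliberately left untouched. */
#if (CY_PSOC4)
    {
        uint32 v = CyGetReg24(&fakeReg);  /* v == 0x00123456u */
        (void) v;
    }
#endif
}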
#include <lib/lib.h> int main(int argc, char* argv[]) { return MACRO_IN_LIB; }
marcinkwiatkowski/buck
test/com/facebook/buck/cxx/testdata/reexport_header_deps/bin.c
C
apache-2.0
82
/* GStreamer * Copyright (C) <2013> Wim Taymans <wim.taymans@gmail.com> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, * Boston, MA 02110-1301, USA. */ #include <stdio.h> #include <stdlib.h> #include <gst/gst.h> #include "gst/glib-compat-private.h" #define BUFFER_SIZE (1400) gint main (gint argc, gchar * argv[]) { gint i; GstBuffer *tmp; GstBufferPool *pool; GstClockTime start, end; GstClockTimeDiff dur1, dur2; guint64 nbuffers; GstStructure *conf; gst_init (&argc, &argv); if (argc != 2) { g_print ("usage: %s <nbuffers>\n", argv[0]); exit (-1); } nbuffers = atoi (argv[1]); if (nbuffers <= 0) { g_print ("number of buffers must be greater than 0\n"); exit (-3); } /* Let's just make sure the GstBufferClass is loaded ... */ tmp = gst_buffer_new (); gst_buffer_unref (tmp); pool = gst_buffer_pool_new (); conf = gst_buffer_pool_get_config (pool); gst_buffer_pool_config_set_params (conf, NULL, BUFFER_SIZE, 0, 0); gst_buffer_pool_set_config (pool, conf); gst_buffer_pool_set_active (pool, TRUE); /* allocate buffers directly */ start = gst_util_get_timestamp (); for (i = 0; i < nbuffers; i++) { tmp = gst_buffer_new_allocate (NULL, BUFFER_SIZE, NULL); gst_buffer_unref (tmp); } end = gst_util_get_timestamp (); dur1 = GST_CLOCK_DIFF (start, end); g_print ("*** total %" GST_TIME_FORMAT " - average %" GST_TIME_FORMAT " - Done creating %" G_GUINT64_FORMAT " fresh buffers\n", GST_TIME_ARGS (dur1), GST_TIME_ARGS (dur1 / nbuffers), nbuffers); /* allocate buffers from the pool */ start = gst_util_get_timestamp (); for (i = 0; i < nbuffers; i++) { gst_buffer_pool_acquire_buffer (pool, &tmp, NULL); gst_buffer_unref (tmp); } end = gst_util_get_timestamp (); dur2 = GST_CLOCK_DIFF (start, end); g_print ("*** total %" GST_TIME_FORMAT " - average %" GST_TIME_FORMAT " - Done creating %" G_GUINT64_FORMAT " pooled buffers\n", GST_TIME_ARGS (dur2), GST_TIME_ARGS (dur2 / nbuffers), nbuffers); g_print ("*** speedup %6.4lf\n", ((gdouble) dur1 / (gdouble) dur2)); gst_buffer_pool_set_active (pool, FALSE); gst_object_unref (pool); return 0; }
google/aistreams
third_party/gstreamer/tests/benchmarks/gstpoolstress.c
C
apache-2.0
2,862
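/*
 * A checked variant, added for illustration and not part of the original
 * benchmark above: gstpoolstress.c ignores the GstFlowReturn from
 * gst_buffer_pool_acquire_buffer() because it only measures timing.  This
 * sketch (same GStreamer 1.x API; the buffer size is chosen arbitrarily)
 * shows the acquire/release cycle with error handling.  Note that
 * unreffing a buffer taken from an active pool returns it to the pool
 * rather than to the allocator, which is where the measured speedup
 * comes from.
 */
#include <gst/gst.h>

gint main (gint argc, gchar * argv[])
{
  GstBufferPool *pool;
  GstStructure *conf;
  GstBuffer *buf = NULL;
  GstFlowReturn ret;

  gst_init (&argc, &argv);

  pool = gst_buffer_pool_new ();
  conf = gst_buffer_pool_get_config (pool);
  gst_buffer_pool_config_set_params (conf, NULL, 1400, 0, 0);
  if (!gst_buffer_pool_set_config (pool, conf))
    g_error ("could not set pool config");
  gst_buffer_pool_set_active (pool, TRUE);

  ret = gst_buffer_pool_acquire_buffer (pool, &buf, NULL);
  if (ret == GST_FLOW_OK) {
    /* ... fill and push the buffer here ... */
    gst_buffer_unref (buf);     /* goes back into the pool, not the heap */
  }

  gst_buffer_pool_set_active (pool, FALSE);
  gst_object_unref (pool);
  return 0;
}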
// Inferno utils/5c/sgen.c // http://code.google.com/p/inferno-os/source/browse/utils/5c/sgen.c // // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) // Portions Copyright © 1997-1999 Vita Nuova Limited // Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) // Portions Copyright © 2004,2006 Bruce Ellis // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) // Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others // Portions Copyright © 2009 The Go Authors. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. #include "gc.h" Prog* gtext(Sym *s, int32 stkoff) { int32 a; a = 0; if(!(textflag & NOSPLIT)) a = argsize(); else if(stkoff >= 128) yyerror("stack frame too large for NOSPLIT function"); gpseudo(ATEXT, s, nodconst(stkoff)); p->to.type = D_CONST2; p->to.offset2 = a; return p; } void noretval(int n) { if(n & 1) { gins(ANOP, Z, Z); p->to.type = D_REG; p->to.reg = REGRET; } if(n & 2) { gins(ANOP, Z, Z); p->to.type = D_FREG; p->to.reg = FREGRET; } } /* * calculate addressability as follows * CONST ==> 20 $value * NAME ==> 10 name * REGISTER ==> 11 register * INDREG ==> 12 *[(reg)+offset] * &10 ==> 2 $name * ADD(2, 20) ==> 2 $name+offset * ADD(3, 20) ==> 3 $(reg)+offset * &12 ==> 3 $(reg)+offset * *11 ==> 11 ?? 
* *2 ==> 10 name * *3 ==> 12 *(reg)+offset * calculate complexity (number of registers) */ void xcom(Node *n) { Node *l, *r; int t; if(n == Z) return; l = n->left; r = n->right; n->addable = 0; n->complex = 0; switch(n->op) { case OCONST: n->addable = 20; return; case OREGISTER: n->addable = 11; return; case OINDREG: n->addable = 12; return; case ONAME: n->addable = 10; return; case OADDR: xcom(l); if(l->addable == 10) n->addable = 2; if(l->addable == 12) n->addable = 3; break; case OIND: xcom(l); if(l->addable == 11) n->addable = 12; if(l->addable == 3) n->addable = 12; if(l->addable == 2) n->addable = 10; break; case OADD: xcom(l); xcom(r); if(l->addable == 20) { if(r->addable == 2) n->addable = 2; if(r->addable == 3) n->addable = 3; } if(r->addable == 20) { if(l->addable == 2) n->addable = 2; if(l->addable == 3) n->addable = 3; } break; case OASLMUL: case OASMUL: xcom(l); xcom(r); t = vlog(r); if(t >= 0) { n->op = OASASHL; r->vconst = t; r->type = types[TINT]; } break; case OMUL: case OLMUL: xcom(l); xcom(r); t = vlog(r); if(t >= 0) { n->op = OASHL; r->vconst = t; r->type = types[TINT]; } t = vlog(l); if(t >= 0) { n->op = OASHL; n->left = r; n->right = l; r = l; l = n->left; r->vconst = t; r->type = types[TINT]; } break; case OASLDIV: xcom(l); xcom(r); t = vlog(r); if(t >= 0) { n->op = OASLSHR; r->vconst = t; r->type = types[TINT]; } break; case OLDIV: xcom(l); xcom(r); t = vlog(r); if(t >= 0) { n->op = OLSHR; r->vconst = t; r->type = types[TINT]; } break; case OASLMOD: xcom(l); xcom(r); t = vlog(r); if(t >= 0) { n->op = OASAND; r->vconst--; } break; case OLMOD: xcom(l); xcom(r); t = vlog(r); if(t >= 0) { n->op = OAND; r->vconst--; } break; default: if(l != Z) xcom(l); if(r != Z) xcom(r); break; } if(n->addable >= 10) return; if(l != Z) n->complex = l->complex; if(r != Z) { if(r->complex == n->complex) n->complex = r->complex+1; else if(r->complex > n->complex) n->complex = r->complex; } if(n->complex == 0) n->complex++; if(com64(n)) return; switch(n->op) { case OFUNC: n->complex = FNX; break; case OADD: case OXOR: case OAND: case OOR: case OEQ: case ONE: /* * immediate operators, make const on right */ if(l->op == OCONST) { n->left = r; n->right = l; } break; } }
eternalNight/ucore_app_go
src/cmd/5c/sgen.c
C
bsd-3-clause
5,060
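/*
 * A standalone sketch (not part of 5c) of the strength reduction xcom()
 * applies above whenever vlog() reports a power-of-two operand: multiply
 * becomes a left shift, unsigned divide a right shift, and unsigned modulo
 * an AND with 2^t - 1, which is why the OASLMOD/OLMOD cases decrement
 * vconst. Demo values are arbitrary; the OL* ops imply unsigned operands.
 */
#include <assert.h>

int
main(void)
{
	unsigned x = 1234;

	assert(x * 8 == x << 3);        /* OMUL/OLMUL -> OASHL, t = vlog(8) = 3 */
	assert(x / 8 == x >> 3);        /* OLDIV      -> OLSHR */
	assert(x % 8 == (x & (8 - 1))); /* OLMOD      -> OAND, vconst-- gives 7 */
	return 0;
}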
// SPDX-License-Identifier: GPL-2.0 /* * Workingset detection * * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner */ #include <linux/memcontrol.h> #include <linux/mm_inline.h> #include <linux/writeback.h> #include <linux/shmem_fs.h> #include <linux/pagemap.h> #include <linux/atomic.h> #include <linux/module.h> #include <linux/swap.h> #include <linux/dax.h> #include <linux/fs.h> #include <linux/mm.h> /* * Double CLOCK lists * * Per node, two clock lists are maintained for file pages: the * inactive and the active list. Freshly faulted pages start out at * the head of the inactive list and page reclaim scans pages from the * tail. Pages that are accessed multiple times on the inactive list * are promoted to the active list, to protect them from reclaim, * whereas active pages are demoted to the inactive list when the * active list grows too big. * * fault ------------------------+ * | * +--------------+ | +-------------+ * reclaim <- | inactive | <-+-- demotion | active | <--+ * +--------------+ +-------------+ | * | | * +-------------- promotion ------------------+ * * * Access frequency and refault distance * * A workload is thrashing when its pages are frequently used but they * are evicted from the inactive list every time before another access * would have promoted them to the active list. * * In cases where the average access distance between thrashing pages * is bigger than the size of memory there is nothing that can be * done - the thrashing set could never fit into memory under any * circumstance. * * However, the average access distance could be bigger than the * inactive list, yet smaller than the size of memory. In this case, * the set could fit into memory if it weren't for the currently * active pages - which may be used more, hopefully less frequently: * * +-memory available to cache-+ * | | * +-inactive------+-active----+ * a b | c d e f g h i | J K L M N | * +---------------+-----------+ * * It is prohibitively expensive to accurately track access frequency * of pages. But a reasonable approximation can be made to measure * thrashing on the inactive list, after which refaulting pages can be * activated optimistically to compete with the existing active pages. * * Approximating inactive page access frequency - Observations: * * 1. When a page is accessed for the first time, it is added to the * head of the inactive list, slides every existing inactive page * towards the tail by one slot, and pushes the current tail page * out of memory. * * 2. When a page is accessed for the second time, it is promoted to * the active list, shrinking the inactive list by one slot. This * also slides all inactive pages that were faulted into the cache * more recently than the activated page towards the tail of the * inactive list. * * Thus: * * 1. The sum of evictions and activations between any two points in * time indicate the minimum number of inactive pages accessed in * between. * * 2. Moving one inactive page N page slots towards the tail of the * list requires at least N inactive page accesses. * * Combining these: * * 1. When a page is finally evicted from memory, the number of * inactive pages accessed while the page was in cache is at least * the number of page slots on the inactive list. * * 2. In addition, measuring the sum of evictions and activations (E) * at the time of a page's eviction, and comparing it to another * reading (R) at the time the page faults back into memory tells * the minimum number of accesses while the page was not cached. 
 * This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *   NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * which can be further simplified to
 *
 *   (R - E) <= NR_active
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache). If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead. And on a full system,
 * the only thing eating into inactive list space is active pages.
 *
 *
 * Refaulting inactive pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past. This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) active pages, the refaulting page is activated
 * optimistically in the hope that (R - E) active pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * That means if inactive cache is refaulting with a suitable refault
 * distance, we assume the cache workingset is transitioning and put
 * pressure on the current active list.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 * Refaulting active pages
 *
 * If on the other hand the refaulting pages have recently been
 * deactivated, it means that the active list is no longer protecting
 * actively used cache from reclaim. The cache is NOT transitioning to
 * a different workingset; the existing workingset is thrashing in the
 * space allocated to the page cache.
 *
 *
 * Implementation
 *
 * For each node's LRU lists, a counter for inactive evictions and
 * activations is maintained (node->nonresident_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the node) is stored in the now empty page cache
 * slot of the evicted page. This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

#define EVICTION_SHIFT	((BITS_PER_LONG - BITS_PER_XA_VALUE) +	\
			 1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the xarray
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
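 *
 * For example (hypothetical numbers): with bucket_order == 2, an eviction
 * count of 1027 is stored as 1027 >> 2 == 256 and read back as
 * 256 << 2 == 1024; up to (1 << bucket_order) - 1 counts of precision are
 * traded for four times the representable refault distance.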
*/ static unsigned int bucket_order __read_mostly; static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction, bool workingset) { eviction >>= bucket_order; eviction &= EVICTION_MASK; eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid; eviction = (eviction << NODES_SHIFT) | pgdat->node_id; eviction = (eviction << 1) | workingset; return xa_mk_value(eviction); } static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat, unsigned long *evictionp, bool *workingsetp) { unsigned long entry = xa_to_value(shadow); int memcgid, nid; bool workingset; workingset = entry & 1; entry >>= 1; nid = entry & ((1UL << NODES_SHIFT) - 1); entry >>= NODES_SHIFT; memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1); entry >>= MEM_CGROUP_ID_SHIFT; *memcgidp = memcgid; *pgdat = NODE_DATA(nid); *evictionp = entry << bucket_order; *workingsetp = workingset; } /** * workingset_age_nonresident - age non-resident entries as LRU ages * @lruvec: the lruvec that was aged * @nr_pages: the number of pages to count * * As in-memory pages are aged, non-resident pages need to be aged as * well, in order for the refault distances later on to be comparable * to the in-memory dimensions. This function allows reclaim and LRU * operations to drive the non-resident aging along in parallel. */ void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages) { /* * Reclaiming a cgroup means reclaiming all its children in a * round-robin fashion. That means that each cgroup has an LRU * order that is composed of the LRU orders of its child * cgroups; and every page has an LRU position not just in the * cgroup that owns it, but in all of that group's ancestors. * * So when the physical inactive list of a leaf cgroup ages, * the virtual inactive lists of all its parents, including * the root cgroup's, age as well. */ do { atomic_long_add(nr_pages, &lruvec->nonresident_age); } while ((lruvec = parent_lruvec(lruvec))); } /** * workingset_eviction - note the eviction of a page from memory * @target_memcg: the cgroup that is causing the reclaim * @page: the page being evicted * * Returns a shadow entry to be stored in @page->mapping->i_pages in place * of the evicted @page so that a later refault can be detected. */ void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg) { struct pglist_data *pgdat = page_pgdat(page); unsigned long eviction; struct lruvec *lruvec; int memcgid; /* Page is fully exclusive and pins page's memory cgroup pointer */ VM_BUG_ON_PAGE(PageLRU(page), page); VM_BUG_ON_PAGE(page_count(page), page); VM_BUG_ON_PAGE(!PageLocked(page), page); lruvec = mem_cgroup_lruvec(target_memcg, pgdat); /* XXX: target_memcg can be NULL, go through lruvec */ memcgid = mem_cgroup_id(lruvec_memcg(lruvec)); eviction = atomic_long_read(&lruvec->nonresident_age); workingset_age_nonresident(lruvec, thp_nr_pages(page)); return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page)); } /** * workingset_refault - evaluate the refault of a previously evicted page * @page: the freshly allocated replacement page * @shadow: shadow entry of the evicted page * * Calculates and evaluates the refault distance of the previously * evicted page in the context of the node and the memcg whose memory * pressure caused the eviction. 
*/ void workingset_refault(struct page *page, void *shadow) { bool file = page_is_file_lru(page); struct mem_cgroup *eviction_memcg; struct lruvec *eviction_lruvec; unsigned long refault_distance; unsigned long workingset_size; struct pglist_data *pgdat; struct mem_cgroup *memcg; unsigned long eviction; struct lruvec *lruvec; unsigned long refault; bool workingset; int memcgid; unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset); rcu_read_lock(); /* * Look up the memcg associated with the stored ID. It might * have been deleted since the page's eviction. * * Note that in rare events the ID could have been recycled * for a new cgroup that refaults a shared page. This is * impossible to tell from the available data. However, this * should be a rare and limited disturbance, and activations * are always speculative anyway. Ultimately, it's the aging * algorithm's job to shake out the minimum access frequency * for the active cache. * * XXX: On !CONFIG_MEMCG, this will always return NULL; it * would be better if the root_mem_cgroup existed in all * configurations instead. */ eviction_memcg = mem_cgroup_from_id(memcgid); if (!mem_cgroup_disabled() && !eviction_memcg) goto out; eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat); refault = atomic_long_read(&eviction_lruvec->nonresident_age); /* * Calculate the refault distance * * The unsigned subtraction here gives an accurate distance * across nonresident_age overflows in most cases. There is a * special case: usually, shadow entries have a short lifetime * and are either refaulted or reclaimed along with the inode * before they get too old. But it is not impossible for the * nonresident_age to lap a shadow entry in the field, which * can then result in a false small refault distance, leading * to a false activation should this old entry actually * refault again. However, earlier kernels used to deactivate * unconditionally with *every* reclaim invocation for the * longest time, so the occasional inappropriate activation * leading to pressure on the active list is not a problem. */ refault_distance = (refault - eviction) & EVICTION_MASK; /* * The activation decision for this page is made at the level * where the eviction occurred, as that is where the LRU order * during page reclaim is being determined. * * However, the cgroup that will own the page is the one that * is actually experiencing the refault event. */ memcg = page_memcg(page); lruvec = mem_cgroup_lruvec(memcg, pgdat); inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file); /* * Compare the distance to the existing workingset size. We * don't activate pages that couldn't stay resident even if * all the memory was available to the workingset. Whether * workingset competition needs to consider anon or not depends * on having swap. 
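	 *
	 * For example (hypothetical numbers): on a swapless node with
	 * NR_ACTIVE_FILE == 1000, a refaulting file page with a refault
	 * distance of 800 is activated below, while one with a distance
	 * of 1500 is not, because it could not have stayed resident even
	 * if the whole active list had been given to it.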
*/ workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE); if (!file) { workingset_size += lruvec_page_state(eviction_lruvec, NR_INACTIVE_FILE); } if (mem_cgroup_get_nr_swap_pages(memcg) > 0) { workingset_size += lruvec_page_state(eviction_lruvec, NR_ACTIVE_ANON); if (file) { workingset_size += lruvec_page_state(eviction_lruvec, NR_INACTIVE_ANON); } } if (refault_distance > workingset_size) goto out; SetPageActive(page); workingset_age_nonresident(lruvec, thp_nr_pages(page)); inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file); /* Page was active prior to eviction */ if (workingset) { SetPageWorkingset(page); /* XXX: Move to lru_cache_add() when it supports new vs putback */ lru_note_cost_page(page); inc_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file); } out: rcu_read_unlock(); } /** * workingset_activation - note a page activation * @page: page that is being activated */ void workingset_activation(struct page *page) { struct mem_cgroup *memcg; struct lruvec *lruvec; rcu_read_lock(); /* * Filter non-memcg pages here, e.g. unmap can call * mark_page_accessed() on VDSO pages. * * XXX: See workingset_refault() - this should return * root_mem_cgroup even for !CONFIG_MEMCG. */ memcg = page_memcg_rcu(page); if (!mem_cgroup_disabled() && !memcg) goto out; lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page)); workingset_age_nonresident(lruvec, thp_nr_pages(page)); out: rcu_read_unlock(); } /* * Shadow entries reflect the share of the working set that does not * fit into memory, so their number depends on the access pattern of * the workload. In most cases, they will refault or get reclaimed * along with the inode, but a (malicious) workload that streams * through files with a total size several times that of available * memory, while preventing the inodes from being reclaimed, can * create excessive amounts of shadow nodes. To keep a lid on this, * track shadow nodes and reclaim them when they grow way past the * point where they would still be useful. */ static struct list_lru shadow_nodes; void workingset_update_node(struct xa_node *node) { /* * Track non-empty nodes that contain only shadow entries; * unlink those that contain pages or are being freed. * * Avoid acquiring the list_lru lock when the nodes are * already where they should be. The list_empty() test is safe * as node->private_list is protected by the i_pages lock. */ VM_WARN_ON_ONCE(!irqs_disabled()); /* For __inc_lruvec_page_state */ if (node->count && node->count == node->nr_values) { if (list_empty(&node->private_list)) { list_lru_add(&shadow_nodes, &node->private_list); __inc_lruvec_kmem_state(node, WORKINGSET_NODES); } } else { if (!list_empty(&node->private_list)) { list_lru_del(&shadow_nodes, &node->private_list); __dec_lruvec_kmem_state(node, WORKINGSET_NODES); } } } static unsigned long count_shadow_nodes(struct shrinker *shrinker, struct shrink_control *sc) { unsigned long max_nodes; unsigned long nodes; unsigned long pages; nodes = list_lru_shrink_count(&shadow_nodes, sc); if (!nodes) return SHRINK_EMPTY; /* * Approximate a reasonable limit for the nodes * containing shadow entries. We don't need to keep more * shadow entries than possible pages on the active list, * since refault distances bigger than that are dismissed. * * The size of the active list converges toward 100% of * overall page cache as memory grows, with only a tiny * inactive list. Assume the total cache size for that. * * Nodes might be sparsely populated, with only one shadow * entry in the extreme case. 
Obviously, we cannot keep one * node for every eligible shadow entry, so compromise on a * worst-case density of 1/8th. Below that, not all eligible * refaults can be detected anymore. * * On 64-bit with 7 xa_nodes per page and 64 slots * each, this will reclaim shadow entries when they consume * ~1.8% of available memory: * * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE */ #ifdef CONFIG_MEMCG if (sc->memcg) { struct lruvec *lruvec; int i; lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid)); for (pages = 0, i = 0; i < NR_LRU_LISTS; i++) pages += lruvec_page_state_local(lruvec, NR_LRU_BASE + i); pages += lruvec_page_state_local( lruvec, NR_SLAB_RECLAIMABLE_B) >> PAGE_SHIFT; pages += lruvec_page_state_local( lruvec, NR_SLAB_UNRECLAIMABLE_B) >> PAGE_SHIFT; } else #endif pages = node_present_pages(sc->nid); max_nodes = pages >> (XA_CHUNK_SHIFT - 3); if (nodes <= max_nodes) return 0; return nodes - max_nodes; } static enum lru_status shadow_lru_isolate(struct list_head *item, struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) __must_hold(lru_lock) { struct xa_node *node = container_of(item, struct xa_node, private_list); struct address_space *mapping; int ret; /* * Page cache insertions and deletions synchronously maintain * the shadow node LRU under the i_pages lock and the * lru_lock. Because the page cache tree is emptied before * the inode can be destroyed, holding the lru_lock pins any * address_space that has nodes on the LRU. * * We can then safely transition to the i_pages lock to * pin only the address_space of the particular node we want * to reclaim, take the node off-LRU, and drop the lru_lock. */ mapping = container_of(node->array, struct address_space, i_pages); /* Coming from the list, invert the lock order */ if (!xa_trylock(&mapping->i_pages)) { spin_unlock_irq(lru_lock); ret = LRU_RETRY; goto out; } list_lru_isolate(lru, item); __dec_lruvec_kmem_state(node, WORKINGSET_NODES); spin_unlock(lru_lock); /* * The nodes should only contain one or more shadow entries, * no pages, so we expect to be able to remove them all and * delete and free the empty node afterwards. */ if (WARN_ON_ONCE(!node->nr_values)) goto out_invalid; if (WARN_ON_ONCE(node->count != node->nr_values)) goto out_invalid; mapping->nrexceptional -= node->nr_values; xa_delete_node(node, workingset_update_node); __inc_lruvec_kmem_state(node, WORKINGSET_NODERECLAIM); out_invalid: xa_unlock_irq(&mapping->i_pages); ret = LRU_REMOVED_RETRY; out: cond_resched(); spin_lock_irq(lru_lock); return ret; } static unsigned long scan_shadow_nodes(struct shrinker *shrinker, struct shrink_control *sc) { /* list_lru lock nests inside the IRQ-safe i_pages lock */ return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate, NULL); } static struct shrinker workingset_shadow_shrinker = { .count_objects = count_shadow_nodes, .scan_objects = scan_shadow_nodes, .seeks = 0, /* ->count reports only fully expendable nodes */ .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, }; /* * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe * i_pages lock. */ static struct lock_class_key shadow_nodes_key; static int __init workingset_init(void) { unsigned int timestamp_bits; unsigned int max_order; int ret; BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT); /* * Calculate the eviction bucket size to cover the longest * actionable refault distance, which is currently half of * memory (totalram_pages/2). 
However, memory hotplug may add * some more pages at runtime, so keep working with up to * double the initial memory by using totalram_pages as-is. */ timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT; max_order = fls_long(totalram_pages() - 1); if (max_order > timestamp_bits) bucket_order = max_order - timestamp_bits; pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n", timestamp_bits, max_order, bucket_order); ret = prealloc_shrinker(&workingset_shadow_shrinker); if (ret) goto err; ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key, &workingset_shadow_shrinker); if (ret) goto err_list_lru; register_shrinker_prepared(&workingset_shadow_shrinker); return 0; err_list_lru: free_prealloced_shrinker(&workingset_shadow_shrinker); err: return ret; } module_init(workingset_init);
openwrt-es/linux
mm/workingset.c
C
gpl-2.0
21,841
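/*
 * A self-contained sketch (not kernel code) of the shadow-entry packing
 * done by pack_shadow()/unpack_shadow() above, with made-up field widths;
 * the real widths come from MEM_CGROUP_ID_SHIFT, NODES_SHIFT and
 * BITS_PER_XA_VALUE, and the trailing workingset bit is omitted here for
 * brevity. It also shows why the masked unsigned subtraction in
 * workingset_refault() stays correct across counter wraparound.
 * Assumes a 64-bit unsigned long long.
 */
#include <assert.h>

#define MEMCG_BITS 16                 /* hypothetical */
#define NODE_BITS  6                  /* hypothetical */
#define EV_MASK    ((1ull << 40) - 1) /* hypothetical eviction field */

static unsigned long long pack(unsigned memcg, unsigned node,
			       unsigned long long ev)
{
	unsigned long long e = ev & EV_MASK;

	e = (e << MEMCG_BITS) | memcg;
	e = (e << NODE_BITS) | node;
	return e;
}

static void unpack(unsigned long long e, unsigned *memcg, unsigned *node,
		   unsigned long long *ev)
{
	*node = e & ((1u << NODE_BITS) - 1);
	e >>= NODE_BITS;
	*memcg = e & ((1u << MEMCG_BITS) - 1);
	e >>= MEMCG_BITS;
	*ev = e;
}

int main(void)
{
	unsigned memcg, node;
	unsigned long long ev;

	/* Round trip: every field comes back intact. */
	unpack(pack(42, 3, 123456), &memcg, &node, &ev);
	assert(memcg == 42 && node == 3 && ev == 123456);

	/* Eviction near the top of the field, refault just past the wrap:
	 * the masked unsigned subtraction still yields the distance, 8. */
	assert(((5ull - (EV_MASK - 2)) & EV_MASK) == 8);
	return 0;
}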
// SPDX-License-Identifier: GPL-2.0 /* * Volume Management Device driver * Copyright (c) 2015, Intel Corporation. */ #include <linux/device.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/msi.h> #include <linux/pci.h> #include <linux/srcu.h> #include <linux/rculist.h> #include <linux/rcupdate.h> #include <asm/irqdomain.h> #include <asm/device.h> #include <asm/msi.h> #include <asm/msidef.h> #define VMD_CFGBAR 0 #define VMD_MEMBAR1 2 #define VMD_MEMBAR2 4 #define PCI_REG_VMCAP 0x40 #define BUS_RESTRICT_CAP(vmcap) (vmcap & 0x1) #define PCI_REG_VMCONFIG 0x44 #define BUS_RESTRICT_CFG(vmcfg) ((vmcfg >> 8) & 0x3) #define PCI_REG_VMLOCK 0x70 #define MB2_SHADOW_EN(vmlock) (vmlock & 0x2) #define MB2_SHADOW_OFFSET 0x2000 #define MB2_SHADOW_SIZE 16 enum vmd_features { /* * Device may contain registers which hint the physical location of the * membars, in order to allow proper address translation during * resource assignment to enable guest virtualization */ VMD_FEAT_HAS_MEMBAR_SHADOW = (1 << 0), /* * Device may provide root port configuration information which limits * bus numbering */ VMD_FEAT_HAS_BUS_RESTRICTIONS = (1 << 1), }; /* * Lock for manipulating VMD IRQ lists. */ static DEFINE_RAW_SPINLOCK(list_lock); /** * struct vmd_irq - private data to map driver IRQ to the VMD shared vector * @node: list item for parent traversal. * @irq: back pointer to parent. * @enabled: true if driver enabled IRQ * @virq: the virtual IRQ value provided to the requesting driver. * * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to * a VMD IRQ using this structure. */ struct vmd_irq { struct list_head node; struct vmd_irq_list *irq; bool enabled; unsigned int virq; }; /** * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector * @irq_list: the list of irq's the VMD one demuxes to. * @srcu: SRCU struct for local synchronization. * @count: number of child IRQs assigned to this vector; used to track * sharing. */ struct vmd_irq_list { struct list_head irq_list; struct srcu_struct srcu; unsigned int count; }; struct vmd_dev { struct pci_dev *dev; spinlock_t cfg_lock; char __iomem *cfgbar; int msix_count; struct vmd_irq_list *irqs; struct pci_sysdata sysdata; struct resource resources[3]; struct irq_domain *irq_domain; struct pci_bus *bus; u8 busn_start; struct dma_map_ops dma_ops; struct dma_domain dma_domain; }; static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus) { return container_of(bus->sysdata, struct vmd_dev, sysdata); } static inline unsigned int index_from_irqs(struct vmd_dev *vmd, struct vmd_irq_list *irqs) { return irqs - vmd->irqs; } /* * Drivers managing a device in a VMD domain allocate their own IRQs as before, * but the MSI entry for the hardware it's driving will be programmed with a * destination ID for the VMD MSI-X table. The VMD muxes interrupts in its * domain into one of its own, and the VMD driver de-muxes these for the * handlers sharing that VMD IRQ. The vmd irq_domain provides the operations * and irq_chip to set this up. */ static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct vmd_irq *vmdirq = data->chip_data; struct vmd_irq_list *irq = vmdirq->irq; struct vmd_dev *vmd = irq_data_get_irq_handler_data(data); msg->address_hi = MSI_ADDR_BASE_HI; msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_DEST_ID(index_from_irqs(vmd, irq)); msg->data = 0; } /* * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops. 
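 *
 * Concretely: vmd_compose_msi_msg() above encodes the index of the chosen
 * vmd_irq_list as the MSI destination ID, so a child device's interrupt
 * fires one of the VMD's own MSI-X vectors, and vmd_irq() further down
 * walks that vector's irq_list, calling generic_handle_irq() on every
 * registered virq.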
*/ static void vmd_irq_enable(struct irq_data *data) { struct vmd_irq *vmdirq = data->chip_data; unsigned long flags; raw_spin_lock_irqsave(&list_lock, flags); WARN_ON(vmdirq->enabled); list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list); vmdirq->enabled = true; raw_spin_unlock_irqrestore(&list_lock, flags); data->chip->irq_unmask(data); } static void vmd_irq_disable(struct irq_data *data) { struct vmd_irq *vmdirq = data->chip_data; unsigned long flags; data->chip->irq_mask(data); raw_spin_lock_irqsave(&list_lock, flags); if (vmdirq->enabled) { list_del_rcu(&vmdirq->node); vmdirq->enabled = false; } raw_spin_unlock_irqrestore(&list_lock, flags); } /* * XXX: Stubbed until we develop acceptable way to not create conflicts with * other devices sharing the same vector. */ static int vmd_irq_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force) { return -EINVAL; } static struct irq_chip vmd_msi_controller = { .name = "VMD-MSI", .irq_enable = vmd_irq_enable, .irq_disable = vmd_irq_disable, .irq_compose_msi_msg = vmd_compose_msi_msg, .irq_set_affinity = vmd_irq_set_affinity, }; static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info, msi_alloc_info_t *arg) { return 0; } /* * XXX: We can be even smarter selecting the best IRQ once we solve the * affinity problem. */ static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc) { int i, best = 1; unsigned long flags; if (vmd->msix_count == 1) return &vmd->irqs[0]; /* * White list for fast-interrupt handlers. All others will share the * "slow" interrupt vector. */ switch (msi_desc_to_pci_dev(desc)->class) { case PCI_CLASS_STORAGE_EXPRESS: break; default: return &vmd->irqs[0]; } raw_spin_lock_irqsave(&list_lock, flags); for (i = 1; i < vmd->msix_count; i++) if (vmd->irqs[i].count < vmd->irqs[best].count) best = i; vmd->irqs[best].count++; raw_spin_unlock_irqrestore(&list_lock, flags); return &vmd->irqs[best]; } static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info, unsigned int virq, irq_hw_number_t hwirq, msi_alloc_info_t *arg) { struct msi_desc *desc = arg->desc; struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus); struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL); unsigned int index, vector; if (!vmdirq) return -ENOMEM; INIT_LIST_HEAD(&vmdirq->node); vmdirq->irq = vmd_next_irq(vmd, desc); vmdirq->virq = virq; index = index_from_irqs(vmd, vmdirq->irq); vector = pci_irq_vector(vmd->dev, index); irq_domain_set_info(domain, virq, vector, info->chip, vmdirq, handle_untracked_irq, vmd, NULL); return 0; } static void vmd_msi_free(struct irq_domain *domain, struct msi_domain_info *info, unsigned int virq) { struct vmd_irq *vmdirq = irq_get_chip_data(virq); unsigned long flags; synchronize_srcu(&vmdirq->irq->srcu); /* XXX: Potential optimization to rebalance */ raw_spin_lock_irqsave(&list_lock, flags); vmdirq->irq->count--; raw_spin_unlock_irqrestore(&list_lock, flags); kfree(vmdirq); } static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec, msi_alloc_info_t *arg) { struct pci_dev *pdev = to_pci_dev(dev); struct vmd_dev *vmd = vmd_from_bus(pdev->bus); if (nvec > vmd->msix_count) return vmd->msix_count; memset(arg, 0, sizeof(*arg)); return 0; } static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) { arg->desc = desc; } static struct msi_domain_ops vmd_msi_domain_ops = { .get_hwirq = vmd_get_hwirq, .msi_init = vmd_msi_init, .msi_free = vmd_msi_free, .msi_prepare = vmd_msi_prepare, .set_desc = 
vmd_set_desc, }; static struct msi_domain_info vmd_msi_domain_info = { .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_PCI_MSIX, .ops = &vmd_msi_domain_ops, .chip = &vmd_msi_controller, }; /* * VMD replaces the requester ID with its own. DMA mappings for devices in a * VMD domain need to be mapped for the VMD, not the device requiring * the mapping. */ static struct device *to_vmd_dev(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct vmd_dev *vmd = vmd_from_bus(pdev->bus); return &vmd->dev->dev; } static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr, gfp_t flag, unsigned long attrs) { return dma_alloc_attrs(to_vmd_dev(dev), size, addr, flag, attrs); } static void vmd_free(struct device *dev, size_t size, void *vaddr, dma_addr_t addr, unsigned long attrs) { return dma_free_attrs(to_vmd_dev(dev), size, vaddr, addr, attrs); } static int vmd_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t addr, size_t size, unsigned long attrs) { return dma_mmap_attrs(to_vmd_dev(dev), vma, cpu_addr, addr, size, attrs); } static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t addr, size_t size, unsigned long attrs) { return dma_get_sgtable_attrs(to_vmd_dev(dev), sgt, cpu_addr, addr, size, attrs); } static dma_addr_t vmd_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, unsigned long attrs) { return dma_map_page_attrs(to_vmd_dev(dev), page, offset, size, dir, attrs); } static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { dma_unmap_page_attrs(to_vmd_dev(dev), addr, size, dir, attrs); } static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs) { return dma_map_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs); } static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs) { dma_unmap_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs); } static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { dma_sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir); } static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { dma_sync_single_for_device(to_vmd_dev(dev), addr, size, dir); } static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) { dma_sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir); } static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) { dma_sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir); } static int vmd_dma_supported(struct device *dev, u64 mask) { return dma_supported(to_vmd_dev(dev), mask); } static u64 vmd_get_required_mask(struct device *dev) { return dma_get_required_mask(to_vmd_dev(dev)); } static void vmd_teardown_dma_ops(struct vmd_dev *vmd) { struct dma_domain *domain = &vmd->dma_domain; if (get_dma_ops(&vmd->dev->dev)) del_dma_domain(domain); } #define ASSIGN_VMD_DMA_OPS(source, dest, fn) \ do { \ if (source->fn) \ dest->fn = vmd_##fn; \ } while (0) static void vmd_setup_dma_ops(struct vmd_dev *vmd) { const struct dma_map_ops *source = get_dma_ops(&vmd->dev->dev); struct dma_map_ops *dest = &vmd->dma_ops; struct dma_domain *domain = &vmd->dma_domain; 
domain->domain_nr = vmd->sysdata.domain; domain->dma_ops = dest; if (!source) return; ASSIGN_VMD_DMA_OPS(source, dest, alloc); ASSIGN_VMD_DMA_OPS(source, dest, free); ASSIGN_VMD_DMA_OPS(source, dest, mmap); ASSIGN_VMD_DMA_OPS(source, dest, get_sgtable); ASSIGN_VMD_DMA_OPS(source, dest, map_page); ASSIGN_VMD_DMA_OPS(source, dest, unmap_page); ASSIGN_VMD_DMA_OPS(source, dest, map_sg); ASSIGN_VMD_DMA_OPS(source, dest, unmap_sg); ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_cpu); ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device); ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu); ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device); ASSIGN_VMD_DMA_OPS(source, dest, dma_supported); ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask); add_dma_domain(domain); } #undef ASSIGN_VMD_DMA_OPS static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus, unsigned int devfn, int reg, int len) { char __iomem *addr = vmd->cfgbar + ((bus->number - vmd->busn_start) << 20) + (devfn << 12) + reg; if ((addr - vmd->cfgbar) + len >= resource_size(&vmd->dev->resource[VMD_CFGBAR])) return NULL; return addr; } /* * CPU may deadlock if config space is not serialized on some versions of this * hardware, so all config space access is done under a spinlock. */ static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg, int len, u32 *value) { struct vmd_dev *vmd = vmd_from_bus(bus); char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len); unsigned long flags; int ret = 0; if (!addr) return -EFAULT; spin_lock_irqsave(&vmd->cfg_lock, flags); switch (len) { case 1: *value = readb(addr); break; case 2: *value = readw(addr); break; case 4: *value = readl(addr); break; default: ret = -EINVAL; break; } spin_unlock_irqrestore(&vmd->cfg_lock, flags); return ret; } /* * VMD h/w converts non-posted config writes to posted memory writes. The * read-back in this function forces the completion so it returns only after * the config space was written, as expected. */ static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg, int len, u32 value) { struct vmd_dev *vmd = vmd_from_bus(bus); char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len); unsigned long flags; int ret = 0; if (!addr) return -EFAULT; spin_lock_irqsave(&vmd->cfg_lock, flags); switch (len) { case 1: writeb(value, addr); readb(addr); break; case 2: writew(value, addr); readw(addr); break; case 4: writel(value, addr); readl(addr); break; default: ret = -EINVAL; break; } spin_unlock_irqrestore(&vmd->cfg_lock, flags); return ret; } static struct pci_ops vmd_ops = { .read = vmd_pci_read, .write = vmd_pci_write, }; static void vmd_attach_resources(struct vmd_dev *vmd) { vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1]; vmd->dev->resource[VMD_MEMBAR2].child = &vmd->resources[2]; } static void vmd_detach_resources(struct vmd_dev *vmd) { vmd->dev->resource[VMD_MEMBAR1].child = NULL; vmd->dev->resource[VMD_MEMBAR2].child = NULL; } /* * VMD domains start at 0x10000 to not clash with ACPI _SEG domains. * Per ACPI r6.0, sec 6.5.6, _SEG returns an integer, of which the lower * 16 bits are the PCI Segment Group (domain) number. Other bits are * currently reserved. 
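 *
 * vmd_find_free_domain() below therefore starts its search at 0xffff and
 * returns one past the highest domain number already in use, so the first
 * VMD segment lands at 0x10000, just above the _SEG range.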
*/ static int vmd_find_free_domain(void) { int domain = 0xffff; struct pci_bus *bus = NULL; while ((bus = pci_find_next_bus(bus)) != NULL) domain = max_t(int, domain, pci_domain_nr(bus)); return domain + 1; } static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) { struct pci_sysdata *sd = &vmd->sysdata; struct fwnode_handle *fn; struct resource *res; u32 upper_bits; unsigned long flags; LIST_HEAD(resources); resource_size_t offset[2] = {0}; resource_size_t membar2_offset = 0x2000; struct pci_bus *child; /* * Shadow registers may exist in certain VMD device ids which allow * guests to correctly assign host physical addresses to the root ports * and child devices. These registers will either return the host value * or 0, depending on an enable bit in the VMD device. */ if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) { u32 vmlock; int ret; membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE; ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock); if (ret || vmlock == ~0) return -ENODEV; if (MB2_SHADOW_EN(vmlock)) { void __iomem *membar2; membar2 = pci_iomap(vmd->dev, VMD_MEMBAR2, 0); if (!membar2) return -ENOMEM; offset[0] = vmd->dev->resource[VMD_MEMBAR1].start - readq(membar2 + MB2_SHADOW_OFFSET); offset[1] = vmd->dev->resource[VMD_MEMBAR2].start - readq(membar2 + MB2_SHADOW_OFFSET + 8); pci_iounmap(vmd->dev, membar2); } } /* * Certain VMD devices may have a root port configuration option which * limits the bus range to between 0-127 or 128-255 */ if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) { u32 vmcap, vmconfig; pci_read_config_dword(vmd->dev, PCI_REG_VMCAP, &vmcap); pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig); if (BUS_RESTRICT_CAP(vmcap) && (BUS_RESTRICT_CFG(vmconfig) == 0x1)) vmd->busn_start = 128; } res = &vmd->dev->resource[VMD_CFGBAR]; vmd->resources[0] = (struct resource) { .name = "VMD CFGBAR", .start = vmd->busn_start, .end = vmd->busn_start + (resource_size(res) >> 20) - 1, .flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED, }; /* * If the window is below 4GB, clear IORESOURCE_MEM_64 so we can * put 32-bit resources in the window. * * There's no hardware reason why a 64-bit window *couldn't* * contain a 32-bit resource, but pbus_size_mem() computes the * bridge window size assuming a 64-bit window will contain no * 32-bit resources. __pci_assign_resource() enforces that * artificial restriction to make sure everything will fit. * * The only way we could use a 64-bit non-prefetchable MEMBAR is * if its address is <4GB so that we can convert it to a 32-bit * resource. To be visible to the host OS, all VMD endpoints must * be initially configured by platform BIOS, which includes setting * up these resources. We can assume the device is configured * according to the platform needs. 
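	 *
	 * That is what the upper_32_bits() checks below implement: when a
	 * MEMBAR ends under 4GB, IORESOURCE_MEM_64 is cleared from the
	 * child resource so that 32-bit BARs can be assigned out of that
	 * window.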
*/ res = &vmd->dev->resource[VMD_MEMBAR1]; upper_bits = upper_32_bits(res->end); flags = res->flags & ~IORESOURCE_SIZEALIGN; if (!upper_bits) flags &= ~IORESOURCE_MEM_64; vmd->resources[1] = (struct resource) { .name = "VMD MEMBAR1", .start = res->start, .end = res->end, .flags = flags, .parent = res, }; res = &vmd->dev->resource[VMD_MEMBAR2]; upper_bits = upper_32_bits(res->end); flags = res->flags & ~IORESOURCE_SIZEALIGN; if (!upper_bits) flags &= ~IORESOURCE_MEM_64; vmd->resources[2] = (struct resource) { .name = "VMD MEMBAR2", .start = res->start + membar2_offset, .end = res->end, .flags = flags, .parent = res, }; sd->vmd_domain = true; sd->domain = vmd_find_free_domain(); if (sd->domain < 0) return sd->domain; sd->node = pcibus_to_node(vmd->dev->bus); fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain); if (!fn) return -ENODEV; vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, x86_vector_domain); irq_domain_free_fwnode(fn); if (!vmd->irq_domain) return -ENODEV; pci_add_resource(&resources, &vmd->resources[0]); pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]); pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]); vmd->bus = pci_create_root_bus(&vmd->dev->dev, vmd->busn_start, &vmd_ops, sd, &resources); if (!vmd->bus) { pci_free_resource_list(&resources); irq_domain_remove(vmd->irq_domain); return -ENODEV; } vmd_attach_resources(vmd); vmd_setup_dma_ops(vmd); dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain); pci_scan_child_bus(vmd->bus); pci_assign_unassigned_bus_resources(vmd->bus); /* * VMD root buses are virtual and don't return true on pci_is_pcie() * and will fail pcie_bus_configure_settings() early. It can instead be * run on each of the real root ports. */ list_for_each_entry(child, &vmd->bus->children, node) pcie_bus_configure_settings(child); pci_bus_add_devices(vmd->bus); WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj, "domain"), "Can't create symlink to domain\n"); return 0; } static irqreturn_t vmd_irq(int irq, void *data) { struct vmd_irq_list *irqs = data; struct vmd_irq *vmdirq; int idx; idx = srcu_read_lock(&irqs->srcu); list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node) generic_handle_irq(vmdirq->virq); srcu_read_unlock(&irqs->srcu, idx); return IRQ_HANDLED; } static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct vmd_dev *vmd; int i, err; if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20)) return -ENOMEM; vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL); if (!vmd) return -ENOMEM; vmd->dev = dev; err = pcim_enable_device(dev); if (err < 0) return err; vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0); if (!vmd->cfgbar) return -ENOMEM; pci_set_master(dev); if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) && dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) return -ENODEV; vmd->msix_count = pci_msix_vec_count(dev); if (vmd->msix_count < 0) return -ENODEV; vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count, PCI_IRQ_MSIX); if (vmd->msix_count < 0) return vmd->msix_count; vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs), GFP_KERNEL); if (!vmd->irqs) return -ENOMEM; for (i = 0; i < vmd->msix_count; i++) { err = init_srcu_struct(&vmd->irqs[i].srcu); if (err) return err; INIT_LIST_HEAD(&vmd->irqs[i].irq_list); err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i), vmd_irq, IRQF_NO_THREAD, "vmd", &vmd->irqs[i]); if (err) return err; } spin_lock_init(&vmd->cfg_lock); pci_set_drvdata(dev, vmd); err 
= vmd_enable_domain(vmd, (unsigned long) id->driver_data); if (err) return err; dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n", vmd->sysdata.domain); return 0; } static void vmd_cleanup_srcu(struct vmd_dev *vmd) { int i; for (i = 0; i < vmd->msix_count; i++) cleanup_srcu_struct(&vmd->irqs[i].srcu); } static void vmd_remove(struct pci_dev *dev) { struct vmd_dev *vmd = pci_get_drvdata(dev); sysfs_remove_link(&vmd->dev->dev.kobj, "domain"); pci_stop_root_bus(vmd->bus); pci_remove_root_bus(vmd->bus); vmd_cleanup_srcu(vmd); vmd_teardown_dma_ops(vmd); vmd_detach_resources(vmd); irq_domain_remove(vmd->irq_domain); } #ifdef CONFIG_PM_SLEEP static int vmd_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct vmd_dev *vmd = pci_get_drvdata(pdev); int i; for (i = 0; i < vmd->msix_count; i++) devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]); pci_save_state(pdev); return 0; } static int vmd_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct vmd_dev *vmd = pci_get_drvdata(pdev); int err, i; for (i = 0; i < vmd->msix_count; i++) { err = devm_request_irq(dev, pci_irq_vector(pdev, i), vmd_irq, IRQF_NO_THREAD, "vmd", &vmd->irqs[i]); if (err) return err; } pci_restore_state(pdev); return 0; } #endif static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume); static const struct pci_device_id vmd_ids[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0), .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW | VMD_FEAT_HAS_BUS_RESTRICTIONS,}, {0,} }; MODULE_DEVICE_TABLE(pci, vmd_ids); static struct pci_driver vmd_drv = { .name = "vmd", .id_table = vmd_ids, .probe = vmd_probe, .remove = vmd_remove, .driver = { .pm = &vmd_dev_pm_ops, }, }; module_pci_driver(vmd_drv); MODULE_AUTHOR("Intel Corporation"); MODULE_LICENSE("GPL v2"); MODULE_VERSION("0.6");
BPI-SINOVOIP/BPI-Mainline-kernel
linux-5.4/drivers/pci/controller/vmd.c
C
gpl-2.0
23,241
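/*
 * A minimal user-space sketch (not kernel code) of the read-back-after-
 * write idiom vmd_pci_write() above relies on: re-reading the register
 * forces the posted MMIO write to complete before the function returns.
 * The volatile pointer stands in for a real ioremap()ed register; the
 * helper name and register layout are hypothetical.
 */
#include <stdint.h>

static inline void reg_write_flushed(volatile uint32_t *reg, uint32_t val)
{
	*reg = val;  /* posted: may still sit in a write buffer */
	(void)*reg;  /* read-back flushes and orders the write */
}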
/* sam_header.c -- basic SAM/BAM header API. Copyright (C) 2009-2013 Genome Research Ltd. Author: Petr Danecek <pd3@sanger.ac.uk> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "sam_header.h" #include <stdio.h> #include <string.h> #include <ctype.h> #include <stdlib.h> #include <stdarg.h> #include "htslib/khash.h" KHASH_MAP_INIT_STR(str, const char *) struct _HeaderList { struct _HeaderList *last; // Hack: Used and maintained only by list_append_to_end. Maintained in the root node only. struct _HeaderList *next; void *data; }; typedef struct _HeaderList list_t; typedef list_t HeaderDict; typedef struct { char key[2]; char *value; } HeaderTag; typedef struct { char type[2]; list_t *tags; } HeaderLine; const char *o_hd_tags[] = {"SO","GO",NULL}; const char *r_hd_tags[] = {"VN",NULL}; const char *o_sq_tags[] = {"AS","M5","UR","SP",NULL}; const char *r_sq_tags[] = {"SN","LN",NULL}; const char *u_sq_tags[] = {"SN",NULL}; const char *o_rg_tags[] = {"CN","DS","DT","FO","KS","LB","PG","PI","PL","PU","SM",NULL}; const char *r_rg_tags[] = {"ID",NULL}; const char *u_rg_tags[] = {"ID",NULL}; const char *o_pg_tags[] = {"VN","CL",NULL}; const char *r_pg_tags[] = {"ID",NULL}; const char *types[] = {"HD","SQ","RG","PG","CO",NULL}; const char **optional_tags[] = {o_hd_tags,o_sq_tags,o_rg_tags,o_pg_tags,NULL,NULL}; const char **required_tags[] = {r_hd_tags,r_sq_tags,r_rg_tags,r_pg_tags,NULL,NULL}; const char **unique_tags[] = {NULL, u_sq_tags,u_rg_tags,NULL,NULL,NULL}; static void debug(const char *format, ...) { va_list ap; va_start(ap, format); vfprintf(stderr, format, ap); va_end(ap); } #if 0 // Replaced by list_append_to_end static list_t *list_prepend(list_t *root, void *data) { list_t *l = malloc(sizeof(list_t)); l->next = root; l->data = data; return l; } #endif // Relies on the root->last being correct. Do not use with the other list_* // routines unless they are fixed to modify root->last as well. 
static list_t *list_append_to_end(list_t *root, void *data) { list_t *l = malloc(sizeof(list_t)); l->last = l; l->next = NULL; l->data = data; if ( !root ) return l; root->last->next = l; root->last = l; return root; } static list_t *list_append(list_t *root, void *data) { list_t *l = root; while (l && l->next) l = l->next; if ( l ) { l->next = malloc(sizeof(list_t)); l = l->next; } else { l = malloc(sizeof(list_t)); root = l; } l->data = data; l->next = NULL; return root; } static void list_free(list_t *root) { list_t *l = root; while (root) { l = root; root = root->next; free(l); } } // Look for a tag "XY" in a predefined const char *[] array. static int tag_exists(const char *tag, const char **tags) { int itag=0; if ( !tags ) return -1; while ( tags[itag] ) { if ( tags[itag][0]==tag[0] && tags[itag][1]==tag[1] ) return itag; itag++; } return -1; } // Mimics the behaviour of getline, except it returns pointer to the next chunk of the text // or NULL if everything has been read. The lineptr should be freed by the caller. The // newline character is stripped. static const char *nextline(char **lineptr, size_t *n, const char *text) { int len; const char *to = text; if ( !*to ) return NULL; while ( *to && *to!='\n' && *to!='\r' ) to++; len = to - text + 1; if ( *to ) { // Advance the pointer for the next call if ( *to=='\n' ) to++; else if ( *to=='\r' && *(to+1)=='\n' ) to+=2; } if ( !len ) return to; if ( !*lineptr ) { *lineptr = malloc(len); *n = len; } else if ( *n<len ) { *lineptr = realloc(*lineptr, len); *n = len; } if ( !*lineptr ) { debug("[nextline] Insufficient memory!\n"); return 0; } memcpy(*lineptr,text,len); (*lineptr)[len-1] = 0; return to; } // name points to "XY", value_from points to the first character of the value string and // value_to points to the last character of the value string. static HeaderTag *new_tag(const char *name, const char *value_from, const char *value_to) { HeaderTag *tag = malloc(sizeof(HeaderTag)); int len = value_to-value_from+1; tag->key[0] = name[0]; tag->key[1] = name[1]; tag->value = malloc(len+1); memcpy(tag->value,value_from,len+1); tag->value[len] = 0; return tag; } static HeaderTag *header_line_has_tag(HeaderLine *hline, const char *key) { list_t *tags = hline->tags; while (tags) { HeaderTag *tag = tags->data; if ( tag->key[0]==key[0] && tag->key[1]==key[1] ) return tag; tags = tags->next; } return NULL; } // Return codes: // 0 .. different types or unique tags differ or conflicting tags, cannot be merged // 1 .. all tags identical -> no need to merge, drop one // 2 .. the unique tags match and there are some conflicting tags (same tag, different value) -> error, cannot be merged nor duplicated // 3 .. 
there are some missing complementary tags and no unique conflict -> can be merged into a single line static int sam_header_compare_lines(HeaderLine *hline1, HeaderLine *hline2) { HeaderTag *t1, *t2; if ( hline1->type[0]!=hline2->type[0] || hline1->type[1]!=hline2->type[1] ) return 0; int itype = tag_exists(hline1->type,types); if ( itype==-1 ) { debug("[sam_header_compare_lines] Unknown type [%c%c]\n", hline1->type[0],hline1->type[1]); return -1; // FIXME (lh3): error; I do not know how this will be handled in Petr's code } if ( unique_tags[itype] ) { t1 = header_line_has_tag(hline1,unique_tags[itype][0]); t2 = header_line_has_tag(hline2,unique_tags[itype][0]); if ( !t1 || !t2 ) // this should never happen, the unique tags are required return 2; if ( strcmp(t1->value,t2->value) ) return 0; // the unique tags differ, cannot be merged } if ( !required_tags[itype] && !optional_tags[itype] ) { t1 = hline1->tags->data; t2 = hline2->tags->data; if ( !strcmp(t1->value,t2->value) ) return 1; // identical comments return 0; } int missing=0, itag=0; while ( required_tags[itype] && required_tags[itype][itag] ) { t1 = header_line_has_tag(hline1,required_tags[itype][itag]); t2 = header_line_has_tag(hline2,required_tags[itype][itag]); if ( !t1 && !t2 ) return 2; // this should never happen else if ( !t1 || !t2 ) missing = 1; // there is some tag missing in one of the hlines else if ( strcmp(t1->value,t2->value) ) { if ( unique_tags[itype] ) return 2; // the lines have a matching unique tag but have a conflicting tag return 0; // the lines contain conflicting tags, cannot be merged } itag++; } itag = 0; while ( optional_tags[itype] && optional_tags[itype][itag] ) { t1 = header_line_has_tag(hline1,optional_tags[itype][itag]); t2 = header_line_has_tag(hline2,optional_tags[itype][itag]); if ( !t1 && !t2 ) { itag++; continue; } if ( !t1 || !t2 ) missing = 1; // there is some tag missing in one of the hlines else if ( strcmp(t1->value,t2->value) ) { if ( unique_tags[itype] ) return 2; // the lines have a matching unique tag but have a conflicting tag return 0; // the lines contain conflicting tags, cannot be merged } itag++; } if ( missing ) return 3; // there are some missing complementary tags with no conflicts, can be merged return 1; } static HeaderLine *sam_header_line_clone(const HeaderLine *hline) { list_t *tags; HeaderLine *out = malloc(sizeof(HeaderLine)); out->type[0] = hline->type[0]; out->type[1] = hline->type[1]; out->tags = NULL; tags = hline->tags; while (tags) { HeaderTag *old = tags->data; HeaderTag *new = malloc(sizeof(HeaderTag)); new->key[0] = old->key[0]; new->key[1] = old->key[1]; new->value = strdup(old->value); out->tags = list_append(out->tags, new); tags = tags->next; } return out; } static int sam_header_line_merge_with(HeaderLine *out_hline, const HeaderLine *tmpl_hline) { list_t *tmpl_tags; if ( out_hline->type[0]!=tmpl_hline->type[0] || out_hline->type[1]!=tmpl_hline->type[1] ) return 0; tmpl_tags = tmpl_hline->tags; while (tmpl_tags) { HeaderTag *tmpl_tag = tmpl_tags->data; HeaderTag *out_tag = header_line_has_tag(out_hline, tmpl_tag->key); if ( !out_tag ) { HeaderTag *tag = malloc(sizeof(HeaderTag)); tag->key[0] = tmpl_tag->key[0]; tag->key[1] = tmpl_tag->key[1]; tag->value = strdup(tmpl_tag->value); out_hline->tags = list_append(out_hline->tags,tag); } tmpl_tags = tmpl_tags->next; } return 1; } static HeaderLine *sam_header_line_parse(const char *headerLine) { HeaderLine *hline; HeaderTag *tag; const char *from, *to; from = headerLine; if ( *from != '@' ) { 
debug("[sam_header_line_parse] expected '@', got [%s]\n", headerLine); return 0; } to = ++from; while (*to && *to!='\t') to++; if ( to-from != 2 ) { debug("[sam_header_line_parse] expected '@XY', got [%s]\nHint: The header tags must be tab-separated.\n", headerLine); return 0; } hline = malloc(sizeof(HeaderLine)); hline->type[0] = from[0]; hline->type[1] = from[1]; hline->tags = NULL; int itype = tag_exists(hline->type, types); from = to; while (*to && *to=='\t') to++; if ( to-from != 1 ) { debug("[sam_header_line_parse] multiple tabs on line [%s] (%d)\n", headerLine,(int)(to-from)); free(hline); return 0; } from = to; while (*from) { while (*to && *to!='\t') to++; if ( !required_tags[itype] && !optional_tags[itype] ) { // CO is a special case, it can contain anything, including tabs if ( *to ) { to++; continue; } tag = new_tag(" ",from,to-1); } else tag = new_tag(from,from+3,to-1); if ( header_line_has_tag(hline,tag->key) ) debug("The tag '%c%c' present (at least) twice on line [%s]\n", tag->key[0],tag->key[1], headerLine); hline->tags = list_append(hline->tags, tag); from = to; while (*to && *to=='\t') to++; if ( *to && to-from != 1 ) { debug("[sam_header_line_parse] multiple tabs on line [%s] (%d)\n", headerLine,(int)(to-from)); return 0; } from = to; } return hline; } // Must be of an existing type, all tags must be recognised and all required tags must be present static int sam_header_line_validate(HeaderLine *hline) { list_t *tags; HeaderTag *tag; int itype, itag; // Is the type correct? itype = tag_exists(hline->type, types); if ( itype==-1 ) { debug("The type [%c%c] not recognised.\n", hline->type[0],hline->type[1]); return 0; } // Has all required tags? itag = 0; while ( required_tags[itype] && required_tags[itype][itag] ) { if ( !header_line_has_tag(hline,required_tags[itype][itag]) ) { debug("The tag [%c%c] required for [%c%c] not present.\n", required_tags[itype][itag][0],required_tags[itype][itag][1], hline->type[0],hline->type[1]); return 0; } itag++; } // Are all tags recognised? tags = hline->tags; while ( tags ) { tag = tags->data; if ( !tag_exists(tag->key,required_tags[itype]) && !tag_exists(tag->key,optional_tags[itype]) ) { // Lower case tags are user-defined values. if( !(islower(tag->key[0]) || islower(tag->key[1])) ) { // Neither is lower case, but tag was not recognized. 
debug("Unknown tag [%c%c] for [%c%c].\n", tag->key[0],tag->key[1], hline->type[0],hline->type[1]); // return 0; // Even unknown tags are allowed - for forward compatibility with new attributes } // else - allow user defined tag } tags = tags->next; } return 1; } static void print_header_line(FILE *fp, HeaderLine *hline) { list_t *tags = hline->tags; HeaderTag *tag; fprintf(fp, "@%c%c", hline->type[0],hline->type[1]); while (tags) { tag = tags->data; fprintf(fp, "\t"); if ( tag->key[0]!=' ' || tag->key[1]!=' ' ) fprintf(fp, "%c%c:", tag->key[0],tag->key[1]); fprintf(fp, "%s", tag->value); tags = tags->next; } fprintf(fp,"\n"); } static void sam_header_line_free(HeaderLine *hline) { list_t *tags = hline->tags; while (tags) { HeaderTag *tag = tags->data; free(tag->value); free(tag); tags = tags->next; } list_free(hline->tags); free(hline); } void sam_header_free(void *_header) { HeaderDict *header = (HeaderDict*)_header; list_t *hlines = header; while (hlines) { sam_header_line_free(hlines->data); hlines = hlines->next; } list_free(header); } HeaderDict *sam_header_clone(const HeaderDict *dict) { HeaderDict *out = NULL; while (dict) { HeaderLine *hline = dict->data; out = list_append(out, sam_header_line_clone(hline)); dict = dict->next; } return out; } // Returns a newly allocated string char *sam_header_write(const void *_header) { const HeaderDict *header = (const HeaderDict*)_header; char *out = NULL; int len=0, nout=0; const list_t *hlines; // Calculate the length of the string to allocate hlines = header; while (hlines) { len += 4; // @XY and \n HeaderLine *hline = hlines->data; list_t *tags = hline->tags; while (tags) { HeaderTag *tag = tags->data; len += strlen(tag->value) + 1; // \t if ( tag->key[0]!=' ' || tag->key[1]!=' ' ) len += strlen(tag->value) + 3; // XY: tags = tags->next; } hlines = hlines->next; } nout = 0; out = malloc(len+1); hlines = header; while (hlines) { HeaderLine *hline = hlines->data; nout += sprintf(out+nout,"@%c%c",hline->type[0],hline->type[1]); list_t *tags = hline->tags; while (tags) { HeaderTag *tag = tags->data; nout += sprintf(out+nout,"\t"); if ( tag->key[0]!=' ' || tag->key[1]!=' ' ) nout += sprintf(out+nout,"%c%c:", tag->key[0],tag->key[1]); nout += sprintf(out+nout,"%s", tag->value); tags = tags->next; } hlines = hlines->next; nout += sprintf(out+nout,"\n"); } out[len] = 0; return out; } void *sam_header_parse2(const char *headerText) { list_t *hlines = NULL; HeaderLine *hline; const char *text; char *buf=NULL; size_t nbuf = 0; int tovalidate = 0; if ( !headerText ) return 0; text = headerText; while ( (text=nextline(&buf, &nbuf, text)) ) { hline = sam_header_line_parse(buf); if ( hline && (!tovalidate || sam_header_line_validate(hline)) ) // With too many (~250,000) reference sequences the header parsing was too slow with list_append. 
hlines = list_append_to_end(hlines, hline); else { if (hline) sam_header_line_free(hline); sam_header_free(hlines); if ( buf ) free(buf); return NULL; } } if ( buf ) free(buf); return hlines; } void *sam_header2tbl(const void *_dict, char type[2], char key_tag[2], char value_tag[2]) { const HeaderDict *dict = (const HeaderDict*)_dict; const list_t *l = dict; khash_t(str) *tbl = kh_init(str); khiter_t k; int ret; if (_dict == 0) return tbl; // return an empty (not null) hash table while (l) { HeaderLine *hline = l->data; if ( hline->type[0]!=type[0] || hline->type[1]!=type[1] ) { l = l->next; continue; } HeaderTag *key, *value; key = header_line_has_tag(hline,key_tag); value = header_line_has_tag(hline,value_tag); if ( !key || !value ) { l = l->next; continue; } k = kh_get(str, tbl, key->value); if ( k != kh_end(tbl) ) debug("[sam_header_lookup_table] The key %s is not unique.\n", key->value); k = kh_put(str, tbl, key->value, &ret); kh_value(tbl, k) = value->value; l = l->next; } return tbl; } char **sam_header2list(const void *_dict, char type[2], char key_tag[2], int *_n) { const HeaderDict *dict = (const HeaderDict*)_dict; const list_t *l = dict; int max, n; char **ret; ret = 0; *_n = max = n = 0; while (l) { HeaderLine *hline = l->data; if ( hline->type[0]!=type[0] || hline->type[1]!=type[1] ) { l = l->next; continue; } HeaderTag *key; key = header_line_has_tag(hline,key_tag); if ( !key ) { l = l->next; continue; } if (n == max) { max = max? max<<1 : 4; ret = realloc(ret, max * sizeof(char*)); } ret[n++] = key->value; l = l->next; } *_n = n; return ret; } void *sam_header2key_val(void *iter, const char type[2], const char key_tag[2], const char value_tag[2], const char **_key, const char **_value) { list_t *l = iter; if ( !l ) return NULL; while (l) { HeaderLine *hline = l->data; if ( hline->type[0]!=type[0] || hline->type[1]!=type[1] ) { l = l->next; continue; } HeaderTag *key, *value; key = header_line_has_tag(hline,key_tag); value = header_line_has_tag(hline,value_tag); if ( !key || !value ) { l = l->next; continue; } /* need both tags before dereferencing them */ *_key = key->value; *_value = value->value; return l->next; } return l; } const char *sam_tbl_get(void *h, const char *key) { khash_t(str) *tbl = (khash_t(str)*)h; khint_t k; k = kh_get(str, tbl, key); return k == kh_end(tbl)? 0 : kh_val(tbl, k); } int sam_tbl_size(void *h) { khash_t(str) *tbl = (khash_t(str)*)h; return h?
kh_size(tbl) : 0; } void sam_tbl_destroy(void *h) { khash_t(str) *tbl = (khash_t(str)*)h; kh_destroy(str, tbl); } void *sam_header_merge(int n, const void **_dicts) { const HeaderDict **dicts = (const HeaderDict**)_dicts; HeaderDict *out_dict; int idict, status; if ( n<2 ) return NULL; out_dict = sam_header_clone(dicts[0]); for (idict=1; idict<n; idict++) { const list_t *tmpl_hlines = dicts[idict]; while ( tmpl_hlines ) { list_t *out_hlines = out_dict; int inserted = 0; while ( out_hlines ) { status = sam_header_compare_lines(tmpl_hlines->data, out_hlines->data); if ( status==0 ) { out_hlines = out_hlines->next; continue; } if ( status==2 ) { print_header_line(stderr,tmpl_hlines->data); print_header_line(stderr,out_hlines->data); debug("Conflicting lines, cannot merge the headers.\n"); return 0; } if ( status==3 ) sam_header_line_merge_with(out_hlines->data, tmpl_hlines->data); inserted = 1; break; } if ( !inserted ) out_dict = list_append(out_dict, sam_header_line_clone(tmpl_hlines->data)); tmpl_hlines = tmpl_hlines->next; } } return out_dict; } char **sam_header2tbl_n(const void *dict, const char type[2], const char *tags[], int *n) { int nout = 0; char **out = NULL; *n = 0; list_t *l = (list_t *)dict; if ( !l ) return NULL; int i, ntags = 0; while ( tags[ntags] ) ntags++; while (l) { HeaderLine *hline = l->data; if ( hline->type[0]!=type[0] || hline->type[1]!=type[1] ) { l = l->next; continue; } out = (char**) realloc(out, sizeof(char*)*(nout+1)*ntags); for (i=0; i<ntags; i++) { HeaderTag *key = header_line_has_tag(hline, tags[i]); if ( !key ) { out[nout*ntags+i] = NULL; continue; } out[nout*ntags+i] = key->value; } nout++; l = l->next; } *n = nout; return out; }
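/* A minimal usage sketch (editor's addition, not part of the original
 * samtools source): parse header text, build an SN->LN table from the
 * @SQ lines, look one name up, and free everything.  The header text
 * and the "chr1" name are hypothetical; error checks are elided. */
static void sam_header_usage_sketch(const char *header_text)
{
    void *dict = sam_header_parse2(header_text);          /* HeaderDict (list of lines) */
    void *tbl  = sam_header2tbl(dict, "SQ", "SN", "LN");  /* maps SN -> LN */
    const char *ln = sam_tbl_get(tbl, "chr1");            /* NULL when absent */
    if ( ln ) fprintf(stderr, "chr1 length: %s\n", ln);
    sam_tbl_destroy(tbl);
    sam_header_free(dict);
}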
hanfang/scikit-ribo
tools/samtools-1.2/sam_header.c
C
gpl-2.0
22,559
/* packet-smrse.c * Routines for SMRSE Short Message Relay Service packet dissection * Ronnie Sahlberg 2004 * * Wireshark - Network traffic analyzer * By Gerald Combs <gerald@wireshark.org> * Copyright 1998 Gerald Combs * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "config.h" #include <epan/packet.h> #include <epan/asn1.h> #include "packet-ber.h" #include "packet-smrse.h" #define PNAME "Short Message Relaying Service" #define PSNAME "SMRSE" #define PFNAME "smrse" #define TCP_PORT_SMRSE 4321 void proto_register_smrse(void); void proto_reg_handoff_smrse(void); /* Initialize the protocol and registered fields */ static int proto_smrse = -1; static int hf_smrse_reserved = -1; static int hf_smrse_tag = -1; static int hf_smrse_length = -1; static int hf_smrse_Octet_Format = -1; #include "packet-smrse-hf.c" /* Initialize the subtree pointers */ static gint ett_smrse = -1; #include "packet-smrse-ett.c" #include "packet-smrse-fn.c" static const value_string tag_vals[] = { { 1, "AliveTest" }, { 2, "AliveTestRsp" }, { 3, "Bind" }, { 4, "BindRsp" }, { 5, "BindFail" }, { 6, "Unbind" }, { 7, "MT" }, { 8, "MO" }, { 9, "Ack" }, { 10, "Error" }, { 11, "Alert" }, { 0, NULL } }; static int dissect_smrse(tvbuff_t *tvb, packet_info *pinfo, proto_tree *parent_tree, void *data _U_) { proto_item *item = NULL; proto_tree *tree = NULL; guint8 reserved, tag; int offset=0; asn1_ctx_t asn1_ctx; asn1_ctx_init(&asn1_ctx, ASN1_ENC_BER, TRUE, pinfo); reserved=tvb_get_guint8(tvb, 0); tag=tvb_get_guint8(tvb, 3); if( reserved!= 126 ) return 0; if( (tag<1)||(tag>11) ) return 0; if(parent_tree){ item = proto_tree_add_item(parent_tree, proto_smrse, tvb, 0, -1, ENC_NA); tree = proto_item_add_subtree(item, ett_smrse); } col_set_str(pinfo->cinfo, COL_PROTOCOL, "SMRSE"); col_add_str(pinfo->cinfo, COL_INFO, val_to_str(tag, tag_vals,"Unknown Tag:0x%02x")); proto_tree_add_item(tree, hf_smrse_reserved, tvb, 0, 1, ENC_BIG_ENDIAN); proto_tree_add_item(tree, hf_smrse_length, tvb, 1, 2, ENC_BIG_ENDIAN); proto_tree_add_item(tree, hf_smrse_tag, tvb, 3, 1, ENC_BIG_ENDIAN); switch(tag){ case 1: case 2: offset=4; break; case 3: offset=dissect_smrse_SMR_Bind(FALSE, tvb, 4, &asn1_ctx, tree, -1); break; case 4: offset=dissect_smrse_SMR_Bind_Confirm(FALSE, tvb, 4, &asn1_ctx, tree, -1); break; case 5: offset=dissect_smrse_SMR_Bind_Failure(FALSE, tvb, 4, &asn1_ctx, tree, -1); break; case 6: offset=dissect_smrse_SMR_Unbind(FALSE, tvb, 4, &asn1_ctx, tree, -1); break; case 7: offset=dissect_smrse_RPDataMT(FALSE, tvb, 4, &asn1_ctx, tree, -1); break; case 8: offset=dissect_smrse_RPDataMO(FALSE, tvb, 4, &asn1_ctx, tree, -1); break; case 9: offset=dissect_smrse_RPAck(FALSE, tvb, 4, &asn1_ctx, tree, -1); break; case 10: offset=dissect_smrse_RPError(FALSE, tvb, 4, &asn1_ctx, tree, -1); break; case 11: offset=dissect_smrse_RPAlertSC(FALSE, tvb, 4, &asn1_ctx, tree, -1); break; } return offset; } /*--- 
proto_register_smrse ----------------------------------------------*/ void proto_register_smrse(void) { /* List of fields */ static hf_register_info hf[] = { { &hf_smrse_reserved, { "Reserved", "smrse.reserved", FT_UINT8, BASE_DEC, NULL, 0, "Reserved byte, must be 126", HFILL }}, { &hf_smrse_tag, { "Tag", "smrse.tag", FT_UINT8, BASE_DEC, VALS(tag_vals), 0, NULL, HFILL }}, { &hf_smrse_length, { "Length", "smrse.length", FT_UINT16, BASE_DEC, NULL, 0, "Length of SMRSE PDU", HFILL }}, { &hf_smrse_Octet_Format, { "octet-Format", "smrse.octet_Format", FT_STRING, BASE_NONE, NULL, 0, "SMS-Address/address-value/octet-format", HFILL }}, #include "packet-smrse-hfarr.c" }; /* List of subtrees */ static gint *ett[] = { &ett_smrse, #include "packet-smrse-ettarr.c" }; /* Register protocol */ proto_smrse = proto_register_protocol(PNAME, PSNAME, PFNAME); /* Register fields and subtrees */ proto_register_field_array(proto_smrse, hf, array_length(hf)); proto_register_subtree_array(ett, array_length(ett)); } /*--- proto_reg_handoff_smrse -------------------------------------------*/ void proto_reg_handoff_smrse(void) { dissector_handle_t smrse_handle; smrse_handle = new_create_dissector_handle(dissect_smrse, proto_smrse); dissector_add_uint("tcp.port",TCP_PORT_SMRSE, smrse_handle); }
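/*--- editor's sketch, not part of the original template ------------------
    The fixed 4-octet framing that dissect_smrse() checks before handing
    the body to the BER decoders, restated as a stand-alone predicate. */
static gboolean smrse_header_plausible(tvbuff_t *tvb)
{
	/* octet 0: reserved, always 126; octets 1-2: big-endian PDU length;
	   octet 3: operation tag, 1..11 (see tag_vals above) */
	guint8 tag = tvb_get_guint8(tvb, 3);

	return (tvb_get_guint8(tvb, 0) == 126) && (tag >= 1) && (tag <= 11);
}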
frenos/wireshark
asn1/smrse/packet-smrse-template.c
C
gpl-2.0
5,066
#if !defined(_FXFT_VERSION_) || _FXFT_VERSION_ == 2501 /***************************************************************************/ /* */ /* truetype.c */ /* */ /* FreeType TrueType driver component (body only). */ /* */ /* Copyright 1996-2001, 2004, 2006, 2012 by */ /* David Turner, Robert Wilhelm, and Werner Lemberg. */ /* */ /* This file is part of the FreeType project, and may only be used, */ /* modified, and distributed under the terms of the FreeType project */ /* license, LICENSE.TXT. By continuing to use, modify, or distribute */ /* this file you indicate that you have read the license and */ /* understand and accept it fully. */ /* */ /***************************************************************************/ #define FT_MAKE_OPTION_SINGLE_OBJECT #define FT2_BUILD_LIBRARY #include "../../include/ft2build.h" #include "ttpic.c" #include "ttdriver.c" /* driver interface */ #include "ttpload.c" /* tables loader */ #include "ttgload.c" /* glyph loader */ #include "ttobjs.c" /* object manager */ #ifdef TT_USE_BYTECODE_INTERPRETER #include "ttinterp.c" #include "ttsubpix.c" #endif #ifdef TT_CONFIG_OPTION_GX_VAR_SUPPORT #include "ttgxvar.c" /* gx distortable font */ #endif /* END */ #endif
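/* Editor's note (not part of the pdfium source): the version guard above lets
 * several bundled FreeType snapshots coexist in one tree.  A build selects
 * this 2.5.01 copy with something like the hypothetical invocation
 *
 *     cc -D_FXFT_VERSION_=2501 -c fxft_truetype.c
 *
 * and, per the !defined() test, leaving _FXFT_VERSION_ undefined compiles the
 * body as well, while any other value turns this translation unit into a
 * no-op. */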
s20121035/rk3288_android5.1_repo
external/pdfium/core/src/fxge/fx_freetype/fxft2.5.01/src/truetype/fxft_truetype.c
C
gpl-3.0
1,828
/* eepro.c: Intel EtherExpress Pro/10 device driver for Linux. */ /* Written 1994, 1995,1996 by Bao C. Ha. Copyright (C) 1994, 1995,1996 by Bao C. Ha. This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. The author may be reached at bao.ha@srs.gov or 418 Hastings Place, Martinez, GA 30907. Things remaining to do: Better record keeping of errors. Eliminate transmit interrupt to reduce overhead. Implement "concurrent processing". I won't be doing it! Bugs: If you have a problem of not detecting the 82595 during a reboot (warm reset), disabling the FLASH memory should fix it. This is a compatibility hardware problem. Versions: 0.13b basic ethtool support (aris, 09/13/2004) 0.13a in memory shortage, drop packets also in board (Michael Westermann <mw@microdata-pos.de>, 07/30/2002) 0.13 irq sharing, rewrote probe function, fixed a nasty bug in hardware_send_packet and a major cleanup (aris, 11/08/2001) 0.12d fixing a problem with a single card detected as eight eth devices fixing a problem with sudden drop in card performance (chris (asdn@go2.pl), 10/29/2001) 0.12c fixing some problems with old cards (aris, 01/08/2001) 0.12b misc fixes (aris, 06/26/2000) 0.12a port of version 0.12a of 2.2.x kernels to 2.3.x (aris (aris@conectiva.com.br), 05/19/2000) 0.11e some tweaks about multiple cards support (PdP, jul/aug 1999) 0.11d added __initdata, __init stuff; call spin_lock_init in eepro_probe1. Replaced "eepro" by dev->name. Augmented the code protected by spin_lock in interrupt routine (PdP, 12/12/1998) 0.11c minor cleanup (PdP, RMC, 09/12/1998) 0.11b Pascal Dupuis (dupuis@lei.ucl.ac.be): works as a module under 2.1.xx. Debug messages are flagged as KERN_DEBUG to avoid console flooding. Added locking at critical parts. Now the damn thing is SMP safe. 0.11a Attempt to get 2.1.xx support up (RMC) 0.11 Brian Candler added support for multiple cards. Tested as a module, no idea if it works when compiled into kernel. 0.10e Rick Bressler notified me that ifconfig up;ifconfig down fails because the irq is lost somewhere. Fixed that by moving request_irq and free_irq to eepro_open and eepro_close respectively. 0.10d Ugh! Now Wakeup works. Was seriously broken in my first attempt. I'll need to find a way to specify an ioport other than the default one in the PnP case. PnP definitively sucks. And, yes, this is not the only reason. 0.10c PnP Wakeup Test for 595FX. uncomment #define PnPWakeup; to use. 0.10b Should work now with (some) Pro/10+. At least for me (and my two cards) it does. _No_ guarantee for function with non-Pro/10+ cards! (don't have any) (RMC, 9/11/96) 0.10 Added support for the Etherexpress Pro/10+. The IRQ map was changed significantly from the old pro/10. The new interrupt map was provided by Rainer M. Canavan (Canavan@Zeus.cs.bonn.edu). (BCH, 9/3/96) 0.09 Fixed a race condition in the transmit algorithm, which causes crashes under heavy load with fast pentium computers. The performance should also improve a bit. The size of RX buffer, and hence TX buffer, can also be changed via lilo or insmod. (BCH, 7/31/96) 0.08 Implement 32-bit I/O for the 82595TX and 82595FX based lan cards. Disable full-duplex mode if TPE is not used. (BCH, 4/8/96) 0.07a Fix a stat report which counts every packet as a heart-beat failure. (BCH, 6/3/95) 0.07 Modified to support all other 82595-based lan cards. The IRQ vector of the EtherExpress Pro will be set according to the value saved in the EEPROM.
For other cards, I will do autoirq_request() to grab the next available interrupt vector. (BCH, 3/17/95) 0.06a,b Interim released. Minor changes in the comments and print out format. (BCH, 3/9/95 and 3/14/95) 0.06 First stable release that I am comfortable with. (BCH, 3/2/95) 0.05 Complete testing of multicast. (BCH, 2/23/95) 0.04 Adding multicast support. (BCH, 2/14/95) 0.03 First widely alpha release for public testing. (BCH, 2/14/95) */ static const char version[] = "eepro.c: v0.13b 09/13/2004 aris@cathedrallabs.org\n"; #include <linux/module.h> /* Sources: This driver wouldn't have been written without the availability of the Crynwr's Lan595 driver source code. It helps me to familiarize with the 82595 chipset while waiting for the Intel documentation. I also learned how to detect the 82595 using the packet driver's technique. This driver is written by cutting and pasting the skeleton.c driver provided by Donald Becker. I also borrowed the EEPROM routine from Donald Becker's 82586 driver. Datasheet for the Intel 82595 (including the TX and FX version). It provides just enough info that the casual reader might think that it documents the i82595. The User Manual for the 82595. It provides a lot of the missing information. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/bitops.h> #include <linux/ethtool.h> #include <asm/system.h> #include <asm/io.h> #include <asm/dma.h> #define DRV_NAME "eepro" #define DRV_VERSION "0.13c" #define compat_dev_kfree_skb( skb, mode ) dev_kfree_skb( (skb) ) /* I had reports of looong delays with SLOW_DOWN defined as udelay(2) */ #define SLOW_DOWN inb(0x80) /* udelay(2) */ #define compat_init_data __initdata enum iftype { AUI=0, BNC=1, TPE=2 }; /* First, a few definitions that the brave might change. */ /* A zero-terminated list of I/O addresses to be probed. */ static unsigned int eepro_portlist[] compat_init_data = { 0x300, 0x210, 0x240, 0x280, 0x2C0, 0x200, 0x320, 0x340, 0x360, 0}; /* note: 0x300 is default, the 595FX supports ALL IO Ports from 0x000 to 0x3F0, some of which are reserved in PCs */ /* To try the (not-really PnP Wakeup: */ /* #define PnPWakeup */ /* use 0 for production, 1 for verification, >2 for debug */ #ifndef NET_DEBUG #define NET_DEBUG 0 #endif static unsigned int net_debug = NET_DEBUG; /* The number of low I/O ports used by the ethercard. */ #define EEPRO_IO_EXTENT 16 /* Different 82595 chips */ #define LAN595 0 #define LAN595TX 1 #define LAN595FX 2 #define LAN595FX_10ISA 3 /* Information that need to be kept for each board. */ struct eepro_local { unsigned rx_start; unsigned tx_start; /* start of the transmit chain */ int tx_last; /* pointer to last packet in the transmit chain */ unsigned tx_end; /* end of the transmit chain (plus 1) */ int eepro; /* 1 for the EtherExpress Pro/10, 2 for the EtherExpress Pro/10+, 3 for the EtherExpress 10 (blue cards), 0 for other 82595-based lan cards. */ int version; /* a flag to indicate if this is a TX or FX version of the 82595 chip. 
*/ int stepping; spinlock_t lock; /* Serializing lock */ unsigned rcv_ram; /* pre-calculated space for rx */ unsigned xmt_ram; /* pre-calculated space for tx */ unsigned char xmt_bar; unsigned char xmt_lower_limit_reg; unsigned char xmt_upper_limit_reg; short xmt_lower_limit; short xmt_upper_limit; short rcv_lower_limit; short rcv_upper_limit; unsigned char eeprom_reg; unsigned short word[8]; }; /* The station (ethernet) address prefix, used for IDing the board. */ #define SA_ADDR0 0x00 /* Etherexpress Pro/10 */ #define SA_ADDR1 0xaa #define SA_ADDR2 0x00 #define GetBit(x,y) ((x & (1<<y))>>y) /* EEPROM Word 0: */ #define ee_PnP 0 /* Plug 'n Play enable bit */ #define ee_Word1 1 /* Word 1? */ #define ee_BusWidth 2 /* 8/16 bit */ #define ee_FlashAddr 3 /* Flash Address */ #define ee_FlashMask 0x7 /* Mask */ #define ee_AutoIO 6 /* */ #define ee_reserved0 7 /* =0! */ #define ee_Flash 8 /* Flash there? */ #define ee_AutoNeg 9 /* Auto Negotiation enabled? */ #define ee_IO0 10 /* IO Address LSB */ #define ee_IO0Mask 0x /*...*/ #define ee_IO1 15 /* IO MSB */ /* EEPROM Word 1: */ #define ee_IntSel 0 /* Interrupt */ #define ee_IntMask 0x7 #define ee_LI 3 /* Link Integrity 0= enabled */ #define ee_PC 4 /* Polarity Correction 0= enabled */ #define ee_TPE_AUI 5 /* PortSelection 1=TPE */ #define ee_Jabber 6 /* Jabber prevention 0= enabled */ #define ee_AutoPort 7 /* Auto Port Selection 1= Disabled */ #define ee_SMOUT 8 /* SMout Pin Control 0= Input */ #define ee_PROM 9 /* Flash EPROM / PROM 0=Flash */ #define ee_reserved1 10 /* .. 12 =0! */ #define ee_AltReady 13 /* Alternate Ready, 0=normal */ #define ee_reserved2 14 /* =0! */ #define ee_Duplex 15 /* Word2,3,4: */ #define ee_IA5 0 /*bit start for individual Addr Byte 5 */ #define ee_IA4 8 /*bit start for individual Addr Byte 4 */ #define ee_IA3 0 /*bit start for individual Addr Byte 3 */ #define ee_IA2 8 /*bit start for individual Addr Byte 2 */ #define ee_IA1 0 /*bit start for individual Addr Byte 1 */ #define ee_IA0 8 /*bit start for individual Addr Byte 0 */ /* Word 5: */ #define ee_BNC_TPE 0 /* 0=TPE */ #define ee_BootType 1 /* 00=None, 01=IPX, 10=ODI, 11=NDIS */ #define ee_BootTypeMask 0x3 #define ee_NumConn 3 /* Number of Connections 0= One or Two */ #define ee_FlashSock 4 /* Presence of Flash Socket 0= Present */ #define ee_PortTPE 5 #define ee_PortBNC 6 #define ee_PortAUI 7 #define ee_PowerMgt 10 /* 0= disabled */ #define ee_CP 13 /* Concurrent Processing */ #define ee_CPMask 0x7 /* Word 6: */ #define ee_Stepping 0 /* Stepping info */ #define ee_StepMask 0x0F #define ee_BoardID 4 /* Manufacturer Board ID, reserved */ #define ee_BoardMask 0x0FFF /* Word 7: */ #define ee_INT_TO_IRQ 0 /* int to IRQ Mapping = 0x1EB8 for Pro/10+ */ #define ee_FX_INT2IRQ 0x1EB8 /* the _only_ mapping allowed for FX chips */ /*..*/ #define ee_SIZE 0x40 /* total EEprom Size */ #define ee_Checksum 0xBABA /* initial and final value for adding checksum */ /* Card identification via EEprom: */ #define ee_addr_vendor 0x10 /* Word offset for EISA Vendor ID */ #define ee_addr_id 0x11 /* Word offset for Card ID */ #define ee_addr_SN 0x12 /* Serial Number */ #define ee_addr_CRC_8 0x14 /* CRC over last three Bytes */ #define ee_vendor_intel0 0x25 /* Vendor ID Intel */ #define ee_vendor_intel1 0xD4 #define ee_id_eepro10p0 0x10 /* ID for eepro/10+ */ #define ee_id_eepro10p1 0x31 #define TX_TIMEOUT 40 /* Index to functions, as function prototypes.
*/ static int eepro_probe1(struct net_device *dev, int autoprobe); static int eepro_open(struct net_device *dev); static int eepro_send_packet(struct sk_buff *skb, struct net_device *dev); static irqreturn_t eepro_interrupt(int irq, void *dev_id); static void eepro_rx(struct net_device *dev); static void eepro_transmit_interrupt(struct net_device *dev); static int eepro_close(struct net_device *dev); static void set_multicast_list(struct net_device *dev); static void eepro_tx_timeout (struct net_device *dev); static int read_eeprom(int ioaddr, int location, struct net_device *dev); static int hardware_send_packet(struct net_device *dev, void *buf, short length); static int eepro_grab_irq(struct net_device *dev); /* Details of the i82595. You will need either the datasheet or the user manual to understand what is going on here. The 82595 is very different from the 82586, 82593. The receive algorithm in eepro_rx() is just an implementation of the RCV ring structure that the Intel 82595 imposes at the hardware level. The receive buffer is set at 24K, and the transmit buffer is 8K. I am assuming that the total buffer memory is 32K, which is true for the Intel EtherExpress Pro/10. If it is less than that on a generic card, the driver will be broken. The transmit algorithm in the hardware_send_packet() is similar to the one in the eepro_rx(). The transmit buffer is a ring linked list. I just queue the next available packet to the end of the list. In my system, the 82595 is so fast that the list seems to always contain a single packet. In other systems with faster computers and more congested network traffics, the ring linked list should improve performance by allowing up to 8K worth of packets to be queued. The sizes of the receive and transmit buffers can now be changed via lilo or insmod. Lilo uses the appended line "ether=io,irq,debug,rx-buffer,eth0" where rx-buffer is in KB unit. Modules uses the parameter mem which is also in KB unit, for example "insmod io=io-address irq=0 mem=rx-buffer." The receive buffer has to be more than 3K or less than 29K. Otherwise, it is reset to the default of 24K, and, hence, 8K for the trasnmit buffer (transmit-buffer = 32K - receive-buffer). 
*/ #define RAM_SIZE 0x8000 #define RCV_HEADER 8 #define RCV_DEFAULT_RAM 0x6000 #define XMT_HEADER 8 #define XMT_DEFAULT_RAM (RAM_SIZE - RCV_DEFAULT_RAM) #define XMT_START_PRO RCV_DEFAULT_RAM #define XMT_START_10 0x0000 #define RCV_START_PRO 0x0000 #define RCV_START_10 XMT_DEFAULT_RAM #define RCV_DONE 0x0008 #define RX_OK 0x2000 #define RX_ERROR 0x0d81 #define TX_DONE_BIT 0x0080 #define TX_OK 0x2000 #define CHAIN_BIT 0x8000 #define XMT_STATUS 0x02 #define XMT_CHAIN 0x04 #define XMT_COUNT 0x06 #define BANK0_SELECT 0x00 #define BANK1_SELECT 0x40 #define BANK2_SELECT 0x80 /* Bank 0 registers */ #define COMMAND_REG 0x00 /* Register 0 */ #define MC_SETUP 0x03 #define XMT_CMD 0x04 #define DIAGNOSE_CMD 0x07 #define RCV_ENABLE_CMD 0x08 #define RCV_DISABLE_CMD 0x0a #define STOP_RCV_CMD 0x0b #define RESET_CMD 0x0e #define POWER_DOWN_CMD 0x18 #define RESUME_XMT_CMD 0x1c #define SEL_RESET_CMD 0x1e #define STATUS_REG 0x01 /* Register 1 */ #define RX_INT 0x02 #define TX_INT 0x04 #define EXEC_STATUS 0x30 #define ID_REG 0x02 /* Register 2 */ #define R_ROBIN_BITS 0xc0 /* round robin counter */ #define ID_REG_MASK 0x2c #define ID_REG_SIG 0x24 #define AUTO_ENABLE 0x10 #define INT_MASK_REG 0x03 /* Register 3 */ #define RX_STOP_MASK 0x01 #define RX_MASK 0x02 #define TX_MASK 0x04 #define EXEC_MASK 0x08 #define ALL_MASK 0x0f #define IO_32_BIT 0x10 #define RCV_BAR 0x04 /* The following are word (16-bit) registers */ #define RCV_STOP 0x06 #define XMT_BAR_PRO 0x0a #define XMT_BAR_10 0x0b #define HOST_ADDRESS_REG 0x0c #define IO_PORT 0x0e #define IO_PORT_32_BIT 0x0c /* Bank 1 registers */ #define REG1 0x01 #define WORD_WIDTH 0x02 #define INT_ENABLE 0x80 #define INT_NO_REG 0x02 #define RCV_LOWER_LIMIT_REG 0x08 #define RCV_UPPER_LIMIT_REG 0x09 #define XMT_LOWER_LIMIT_REG_PRO 0x0a #define XMT_UPPER_LIMIT_REG_PRO 0x0b #define XMT_LOWER_LIMIT_REG_10 0x0b #define XMT_UPPER_LIMIT_REG_10 0x0a /* Bank 2 registers */ #define XMT_Chain_Int 0x20 /* Interrupt at the end of the transmit chain */ #define XMT_Chain_ErrStop 0x40 /* Interrupt at the end of the chain even if there are errors */ #define RCV_Discard_BadFrame 0x80 /* Throw bad frames away, and continue to receive others */ #define REG2 0x02 #define PRMSC_Mode 0x01 #define Multi_IA 0x20 #define REG3 0x03 #define TPE_BIT 0x04 #define BNC_BIT 0x20 #define REG13 0x0d #define FDX 0x00 #define A_N_ENABLE 0x02 #define I_ADD_REG0 0x04 #define I_ADD_REG1 0x05 #define I_ADD_REG2 0x06 #define I_ADD_REG3 0x07 #define I_ADD_REG4 0x08 #define I_ADD_REG5 0x09 #define EEPROM_REG_PRO 0x0a #define EEPROM_REG_10 0x0b #define EESK 0x01 #define EECS 0x02 #define EEDI 0x04 #define EEDO 0x08 /* do a full reset */ #define eepro_reset(ioaddr) outb(RESET_CMD, ioaddr) /* do a nice reset */ #define eepro_sel_reset(ioaddr) { \ outb(SEL_RESET_CMD, ioaddr); \ SLOW_DOWN; \ SLOW_DOWN; \ } /* disable all interrupts */ #define eepro_dis_int(ioaddr) outb(ALL_MASK, ioaddr + INT_MASK_REG) /* clear all interrupts */ #define eepro_clear_int(ioaddr) outb(ALL_MASK, ioaddr + STATUS_REG) /* enable tx/rx */ #define eepro_en_int(ioaddr) outb(ALL_MASK & ~(RX_MASK | TX_MASK), \ ioaddr + INT_MASK_REG) /* enable exec event interrupt */ #define eepro_en_intexec(ioaddr) outb(ALL_MASK & ~(EXEC_MASK), ioaddr + INT_MASK_REG) /* enable rx */ #define eepro_en_rx(ioaddr) outb(RCV_ENABLE_CMD, ioaddr) /* disable rx */ #define eepro_dis_rx(ioaddr) outb(RCV_DISABLE_CMD, ioaddr) /* switch bank */ #define eepro_sw2bank0(ioaddr) outb(BANK0_SELECT, ioaddr) #define eepro_sw2bank1(ioaddr) outb(BANK1_SELECT, ioaddr) #define 
eepro_sw2bank2(ioaddr) outb(BANK2_SELECT, ioaddr) /* enable interrupt line */ #define eepro_en_intline(ioaddr) outb(inb(ioaddr + REG1) | INT_ENABLE,\ ioaddr + REG1) /* disable interrupt line */ #define eepro_dis_intline(ioaddr) outb(inb(ioaddr + REG1) & 0x7f, \ ioaddr + REG1); /* set diagnose flag */ #define eepro_diag(ioaddr) outb(DIAGNOSE_CMD, ioaddr) /* ack for rx int */ #define eepro_ack_rx(ioaddr) outb (RX_INT, ioaddr + STATUS_REG) /* ack for tx int */ #define eepro_ack_tx(ioaddr) outb (TX_INT, ioaddr + STATUS_REG) /* a complete sel reset */ #define eepro_complete_selreset(ioaddr) { \ dev->stats.tx_errors++;\ eepro_sel_reset(ioaddr);\ lp->tx_end = \ lp->xmt_lower_limit;\ lp->tx_start = lp->tx_end;\ lp->tx_last = 0;\ dev->trans_start = jiffies;\ netif_wake_queue(dev);\ eepro_en_rx(ioaddr);\ } /* Check for a network adaptor of this type, and return '0' if one exists. If dev->base_addr == 0, probe all likely locations. If dev->base_addr == 1, always return failure. If dev->base_addr == 2, allocate space for the device and return success (detachable devices only). */ static int __init do_eepro_probe(struct net_device *dev) { int i; int base_addr = dev->base_addr; int irq = dev->irq; #ifdef PnPWakeup /* XXXX for multiple cards should this only be run once? */ /* Wakeup: */ #define WakeupPort 0x279 #define WakeupSeq {0x6A, 0xB5, 0xDA, 0xED, 0xF6, 0xFB, 0x7D, 0xBE,\ 0xDF, 0x6F, 0x37, 0x1B, 0x0D, 0x86, 0xC3, 0x61,\ 0xB0, 0x58, 0x2C, 0x16, 0x8B, 0x45, 0xA2, 0xD1,\ 0xE8, 0x74, 0x3A, 0x9D, 0xCE, 0xE7, 0x73, 0x43} { unsigned short int WS[32]=WakeupSeq; if (request_region(WakeupPort, 2, "eepro wakeup")) { if (net_debug>5) printk(KERN_DEBUG "Waking UP\n"); outb_p(0,WakeupPort); outb_p(0,WakeupPort); for (i=0; i<32; i++) { outb_p(WS[i],WakeupPort); if (net_debug>5) printk(KERN_DEBUG ": %#x ",WS[i]); } release_region(WakeupPort, 2); } else printk(KERN_WARNING "PnP wakeup region busy!\n"); } #endif if (base_addr > 0x1ff) /* Check a single specified location. */ return eepro_probe1(dev, 0); else if (base_addr != 0) /* Don't probe at all. 
*/ return -ENXIO; for (i = 0; eepro_portlist[i]; i++) { dev->base_addr = eepro_portlist[i]; dev->irq = irq; if (eepro_probe1(dev, 1) == 0) return 0; } return -ENODEV; } #ifndef MODULE struct net_device * __init eepro_probe(int unit) { struct net_device *dev = alloc_etherdev(sizeof(struct eepro_local)); int err; if (!dev) return ERR_PTR(-ENODEV); sprintf(dev->name, "eth%d", unit); netdev_boot_setup_check(dev); err = do_eepro_probe(dev); if (err) goto out; return dev; out: free_netdev(dev); return ERR_PTR(err); } #endif static void __init printEEPROMInfo(struct net_device *dev) { struct eepro_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; unsigned short Word; int i,j; j = ee_Checksum; for (i = 0; i < 8; i++) j += lp->word[i]; for ( ; i < ee_SIZE; i++) j += read_eeprom(ioaddr, i, dev); printk(KERN_DEBUG "Checksum: %#x\n",j&0xffff); Word = lp->word[0]; printk(KERN_DEBUG "Word0:\n"); printk(KERN_DEBUG " Plug 'n Pray: %d\n",GetBit(Word,ee_PnP)); printk(KERN_DEBUG " Buswidth: %d\n",(GetBit(Word,ee_BusWidth)+1)*8 ); printk(KERN_DEBUG " AutoNegotiation: %d\n",GetBit(Word,ee_AutoNeg)); printk(KERN_DEBUG " IO Address: %#x\n", (Word>>ee_IO0)<<4); if (net_debug>4) { Word = lp->word[1]; printk(KERN_DEBUG "Word1:\n"); printk(KERN_DEBUG " INT: %d\n", Word & ee_IntMask); printk(KERN_DEBUG " LI: %d\n", GetBit(Word,ee_LI)); printk(KERN_DEBUG " PC: %d\n", GetBit(Word,ee_PC)); printk(KERN_DEBUG " TPE/AUI: %d\n", GetBit(Word,ee_TPE_AUI)); printk(KERN_DEBUG " Jabber: %d\n", GetBit(Word,ee_Jabber)); printk(KERN_DEBUG " AutoPort: %d\n", !GetBit(Word,ee_AutoPort)); printk(KERN_DEBUG " Duplex: %d\n", GetBit(Word,ee_Duplex)); } Word = lp->word[5]; printk(KERN_DEBUG "Word5:\n"); printk(KERN_DEBUG " BNC: %d\n",GetBit(Word,ee_BNC_TPE)); printk(KERN_DEBUG " NumConnectors: %d\n",GetBit(Word,ee_NumConn)); printk(KERN_DEBUG " Has "); if (GetBit(Word,ee_PortTPE)) printk(KERN_DEBUG "TPE "); if (GetBit(Word,ee_PortBNC)) printk(KERN_DEBUG "BNC "); if (GetBit(Word,ee_PortAUI)) printk(KERN_DEBUG "AUI "); printk(KERN_DEBUG "port(s) \n"); Word = lp->word[6]; printk(KERN_DEBUG "Word6:\n"); printk(KERN_DEBUG " Stepping: %d\n",Word & ee_StepMask); printk(KERN_DEBUG " BoardID: %d\n",Word>>ee_BoardID); Word = lp->word[7]; printk(KERN_DEBUG "Word7:\n"); printk(KERN_DEBUG " INT to IRQ:\n"); for (i=0, j=0; i<15; i++) if (GetBit(Word,i)) printk(KERN_DEBUG " INT%d -> IRQ %d;",j++,i); printk(KERN_DEBUG "\n"); } /* function to recalculate the limits of buffer based on rcv_ram */ static void eepro_recalc (struct net_device *dev) { struct eepro_local * lp; lp = netdev_priv(dev); lp->xmt_ram = RAM_SIZE - lp->rcv_ram; if (lp->eepro == LAN595FX_10ISA) { lp->xmt_lower_limit = XMT_START_10; lp->xmt_upper_limit = (lp->xmt_ram - 2); lp->rcv_lower_limit = lp->xmt_ram; lp->rcv_upper_limit = (RAM_SIZE - 2); } else { lp->rcv_lower_limit = RCV_START_PRO; lp->rcv_upper_limit = (lp->rcv_ram - 2); lp->xmt_lower_limit = lp->rcv_ram; lp->xmt_upper_limit = (RAM_SIZE - 2); } } /* prints boot-time info */ static void __init eepro_print_info (struct net_device *dev) { struct eepro_local * lp = netdev_priv(dev); int i; const char * ifmap[] = {"AUI", "10Base2", "10BaseT"}; i = inb(dev->base_addr + ID_REG); printk(KERN_DEBUG " id: %#x ",i); printk(" io: %#x ", (unsigned)dev->base_addr); switch (lp->eepro) { case LAN595FX_10ISA: printk("%s: Intel EtherExpress 10 ISA\n at %#x,", dev->name, (unsigned)dev->base_addr); break; case LAN595FX: printk("%s: Intel EtherExpress Pro/10+ ISA\n at %#x,", dev->name, (unsigned)dev->base_addr); break; case LAN595TX: 
printk("%s: Intel EtherExpress Pro/10 ISA at %#x,", dev->name, (unsigned)dev->base_addr); break; case LAN595: printk("%s: Intel 82595-based lan card at %#x,", dev->name, (unsigned)dev->base_addr); break; } printk(" %pM", dev->dev_addr); if (net_debug > 3) printk(KERN_DEBUG ", %dK RCV buffer", (int)(lp->rcv_ram)/1024); if (dev->irq > 2) printk(", IRQ %d, %s.\n", dev->irq, ifmap[dev->if_port]); else printk(", %s.\n", ifmap[dev->if_port]); if (net_debug > 3) { i = lp->word[5]; if (i & 0x2000) /* bit 13 of EEPROM word 5 */ printk(KERN_DEBUG "%s: Concurrent Processing is " "enabled but not used!\n", dev->name); } /* Check the station address for the manufacturer's code */ if (net_debug>3) printEEPROMInfo(dev); } static const struct ethtool_ops eepro_ethtool_ops; static const struct net_device_ops eepro_netdev_ops = { .ndo_open = eepro_open, .ndo_stop = eepro_close, .ndo_start_xmit = eepro_send_packet, .ndo_set_multicast_list = set_multicast_list, .ndo_tx_timeout = eepro_tx_timeout, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; /* This is the real probe routine. Linux has a history of friendly device probes on the ISA bus. A good device probe avoids doing writes, and verifies that the correct device exists and functions. */ static int __init eepro_probe1(struct net_device *dev, int autoprobe) { unsigned short station_addr[3], id, counter; int i; struct eepro_local *lp; int ioaddr = dev->base_addr; int err; /* Grab the region so we can find another board if autoIRQ fails. */ if (!request_region(ioaddr, EEPRO_IO_EXTENT, DRV_NAME)) { if (!autoprobe) printk(KERN_WARNING "EEPRO: io-port 0x%04x in use \n", ioaddr); return -EBUSY; } /* Now, we are going to check for the signature of the ID_REG (register 2 of bank 0) */ id = inb(ioaddr + ID_REG); if ((id & ID_REG_MASK) != ID_REG_SIG) goto exit; /* We seem to have the 82595 signature, let's play with its counter (last 2 bits of register 2 of bank 0) to be sure. */ counter = id & R_ROBIN_BITS; if ((inb(ioaddr + ID_REG) & R_ROBIN_BITS) != (counter + 0x40)) goto exit; lp = netdev_priv(dev); memset(lp, 0, sizeof(struct eepro_local)); lp->xmt_bar = XMT_BAR_PRO; lp->xmt_lower_limit_reg = XMT_LOWER_LIMIT_REG_PRO; lp->xmt_upper_limit_reg = XMT_UPPER_LIMIT_REG_PRO; lp->eeprom_reg = EEPROM_REG_PRO; spin_lock_init(&lp->lock); /* Now, get the ethernet hardware address from the EEPROM */ station_addr[0] = read_eeprom(ioaddr, 2, dev); /* FIXME - find another way to know that we've found * an Etherexpress 10 */ if (station_addr[0] == 0x0000 || station_addr[0] == 0xffff) { lp->eepro = LAN595FX_10ISA; lp->eeprom_reg = EEPROM_REG_10; lp->xmt_lower_limit_reg = XMT_LOWER_LIMIT_REG_10; lp->xmt_upper_limit_reg = XMT_UPPER_LIMIT_REG_10; lp->xmt_bar = XMT_BAR_10; station_addr[0] = read_eeprom(ioaddr, 2, dev); } /* get all words at once. will be used here and for ethtool */ for (i = 0; i < 8; i++) { lp->word[i] = read_eeprom(ioaddr, i, dev); } station_addr[1] = lp->word[3]; station_addr[2] = lp->word[4]; if (!lp->eepro) { if (lp->word[7] == ee_FX_INT2IRQ) lp->eepro = 2; else if (station_addr[2] == SA_ADDR1) lp->eepro = 1; } /* Fill in the 'dev' fields. 
*/ for (i=0; i < 6; i++) dev->dev_addr[i] = ((unsigned char *) station_addr)[5-i]; /* RX buffer must be more than 3K and less than 29K */ if (dev->mem_end < 3072 || dev->mem_end > 29696) lp->rcv_ram = RCV_DEFAULT_RAM; /* calculate {xmt,rcv}_{lower,upper}_limit */ eepro_recalc(dev); if (GetBit(lp->word[5], ee_BNC_TPE)) dev->if_port = BNC; else dev->if_port = TPE; if (dev->irq < 2 && lp->eepro != 0) { /* Mask off INT number */ int count = lp->word[1] & 7; unsigned irqMask = lp->word[7]; while (count--) irqMask &= irqMask - 1; count = ffs(irqMask); if (count) dev->irq = count - 1; if (dev->irq < 2) { printk(KERN_ERR " Duh! illegal interrupt vector stored in EEPROM.\n"); goto exit; } else if (dev->irq == 2) { dev->irq = 9; } } dev->netdev_ops = &eepro_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; dev->ethtool_ops = &eepro_ethtool_ops; /* print boot time info */ eepro_print_info(dev); /* reset 82595 */ eepro_reset(ioaddr); err = register_netdev(dev); if (err) goto err; return 0; exit: err = -ENODEV; err: release_region(dev->base_addr, EEPRO_IO_EXTENT); return err; } /* Open/initialize the board. This is called (in the current kernel) sometime after booting when the 'ifconfig' program is run. This routine should set everything up anew at each open, even registers that "should" only need to be set once at boot, so that there is non-reboot way to recover if something goes wrong. */ static char irqrmap[] = {-1,-1,0,1,-1,2,-1,-1,-1,0,3,4,-1,-1,-1,-1}; static char irqrmap2[] = {-1,-1,4,0,1,2,-1,3,-1,4,5,6,7,-1,-1,-1}; static int eepro_grab_irq(struct net_device *dev) { int irqlist[] = { 3, 4, 5, 7, 9, 10, 11, 12, 0 }; int *irqp = irqlist, temp_reg, ioaddr = dev->base_addr; eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */ /* Enable the interrupt line. */ eepro_en_intline(ioaddr); /* be CAREFUL, BANK 0 now */ eepro_sw2bank0(ioaddr); /* clear all interrupts */ eepro_clear_int(ioaddr); /* Let EXEC event to interrupt */ eepro_en_intexec(ioaddr); do { eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */ temp_reg = inb(ioaddr + INT_NO_REG); outb((temp_reg & 0xf8) | irqrmap[*irqp], ioaddr + INT_NO_REG); eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */ if (request_irq (*irqp, NULL, IRQF_SHARED, "bogus", dev) != EBUSY) { unsigned long irq_mask; /* Twinkle the interrupt, and check if it's seen */ irq_mask = probe_irq_on(); eepro_diag(ioaddr); /* RESET the 82595 */ mdelay(20); if (*irqp == probe_irq_off(irq_mask)) /* It's a good IRQ line */ break; /* clear all interrupts */ eepro_clear_int(ioaddr); } } while (*++irqp); eepro_sw2bank1(ioaddr); /* Switch back to Bank 1 */ /* Disable the physical interrupt line. */ eepro_dis_intline(ioaddr); eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */ /* Mask all the interrupts. 
*/ eepro_dis_int(ioaddr); /* clear all interrupts */ eepro_clear_int(ioaddr); return dev->irq; } static int eepro_open(struct net_device *dev) { unsigned short temp_reg, old8, old9; int irqMask; int i, ioaddr = dev->base_addr; struct eepro_local *lp = netdev_priv(dev); if (net_debug > 3) printk(KERN_DEBUG "%s: entering eepro_open routine.\n", dev->name); irqMask = lp->word[7]; if (lp->eepro == LAN595FX_10ISA) { if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 3;\n"); } else if (irqMask == ee_FX_INT2IRQ) /* INT to IRQ Mask */ { lp->eepro = 2; /* Yes, an Intel EtherExpress Pro/10+ */ if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 2;\n"); } else if ((dev->dev_addr[0] == SA_ADDR0 && dev->dev_addr[1] == SA_ADDR1 && dev->dev_addr[2] == SA_ADDR2)) { lp->eepro = 1; if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 1;\n"); } /* Yes, an Intel EtherExpress Pro/10 */ else lp->eepro = 0; /* No, it is a generic 82585 lan card */ /* Get the interrupt vector for the 82595 */ if (dev->irq < 2 && eepro_grab_irq(dev) == 0) { printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq); return -EAGAIN; } if (request_irq(dev->irq , &eepro_interrupt, 0, dev->name, dev)) { printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq); return -EAGAIN; } /* Initialize the 82595. */ eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ temp_reg = inb(ioaddr + lp->eeprom_reg); lp->stepping = temp_reg >> 5; /* Get the stepping number of the 595 */ if (net_debug > 3) printk(KERN_DEBUG "The stepping of the 82595 is %d\n", lp->stepping); if (temp_reg & 0x10) /* Check the TurnOff Enable bit */ outb(temp_reg & 0xef, ioaddr + lp->eeprom_reg); for (i=0; i < 6; i++) outb(dev->dev_addr[i] , ioaddr + I_ADD_REG0 + i); temp_reg = inb(ioaddr + REG1); /* Setup Transmit Chaining */ outb(temp_reg | XMT_Chain_Int | XMT_Chain_ErrStop /* and discard bad RCV frames */ | RCV_Discard_BadFrame, ioaddr + REG1); temp_reg = inb(ioaddr + REG2); /* Match broadcast */ outb(temp_reg | 0x14, ioaddr + REG2); temp_reg = inb(ioaddr + REG3); outb(temp_reg & 0x3f, ioaddr + REG3); /* clear test mode */ /* Set the receiving mode */ eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */ /* Set the interrupt vector */ temp_reg = inb(ioaddr + INT_NO_REG); if (lp->eepro == LAN595FX || lp->eepro == LAN595FX_10ISA) outb((temp_reg & 0xf8) | irqrmap2[dev->irq], ioaddr + INT_NO_REG); else outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG); temp_reg = inb(ioaddr + INT_NO_REG); if (lp->eepro == LAN595FX || lp->eepro == LAN595FX_10ISA) outb((temp_reg & 0xf0) | irqrmap2[dev->irq] | 0x08,ioaddr+INT_NO_REG); else outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG); if (net_debug > 3) printk(KERN_DEBUG "eepro_open: content of INT Reg is %x\n", temp_reg); /* Initialize the RCV and XMT upper and lower limits */ outb(lp->rcv_lower_limit >> 8, ioaddr + RCV_LOWER_LIMIT_REG); outb(lp->rcv_upper_limit >> 8, ioaddr + RCV_UPPER_LIMIT_REG); outb(lp->xmt_lower_limit >> 8, ioaddr + lp->xmt_lower_limit_reg); outb(lp->xmt_upper_limit >> 8, ioaddr + lp->xmt_upper_limit_reg); /* Enable the interrupt line. 
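(This bank-1 bit only gates the physical INT pin; the per-event RX/TX unmasking happens separately through eepro_en_int() a few lines below.)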
*/ eepro_en_intline(ioaddr); /* Switch back to Bank 0 */ eepro_sw2bank0(ioaddr); /* Let RX and TX events to interrupt */ eepro_en_int(ioaddr); /* clear all interrupts */ eepro_clear_int(ioaddr); /* Initialize RCV */ outw(lp->rcv_lower_limit, ioaddr + RCV_BAR); lp->rx_start = lp->rcv_lower_limit; outw(lp->rcv_upper_limit | 0xfe, ioaddr + RCV_STOP); /* Initialize XMT */ outw(lp->xmt_lower_limit, ioaddr + lp->xmt_bar); lp->tx_start = lp->tx_end = lp->xmt_lower_limit; lp->tx_last = 0; /* Check for the i82595TX and i82595FX */ old8 = inb(ioaddr + 8); outb(~old8, ioaddr + 8); if ((temp_reg = inb(ioaddr + 8)) == old8) { if (net_debug > 3) printk(KERN_DEBUG "i82595 detected!\n"); lp->version = LAN595; } else { lp->version = LAN595TX; outb(old8, ioaddr + 8); old9 = inb(ioaddr + 9); if (irqMask==ee_FX_INT2IRQ) { if (net_debug > 3) { printk(KERN_DEBUG "IrqMask: %#x\n",irqMask); printk(KERN_DEBUG "i82595FX detected!\n"); } lp->version = LAN595FX; outb(old9, ioaddr + 9); if (dev->if_port != TPE) { /* Hopefully, this will fix the problem of using Pentiums and pro/10 w/ BNC. */ eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ temp_reg = inb(ioaddr + REG13); /* disable the full duplex mode since it is not applicable with the 10Base2 cable. */ outb(temp_reg & ~(FDX | A_N_ENABLE), REG13); eepro_sw2bank0(ioaddr); /* be CAREFUL, BANK 0 now */ } } else if (net_debug > 3) { printk(KERN_DEBUG "temp_reg: %#x ~old9: %#x\n",temp_reg,((~old9)&0xff)); printk(KERN_DEBUG "i82595TX detected!\n"); } } eepro_sel_reset(ioaddr); netif_start_queue(dev); if (net_debug > 3) printk(KERN_DEBUG "%s: exiting eepro_open routine.\n", dev->name); /* enabling rx */ eepro_en_rx(ioaddr); return 0; } static void eepro_tx_timeout (struct net_device *dev) { struct eepro_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; /* if (net_debug > 1) */ printk (KERN_ERR "%s: transmit timed out, %s?\n", dev->name, "network cable problem"); /* This is not a duplicate. One message for the console, one for the log file */ printk (KERN_DEBUG "%s: transmit timed out, %s?\n", dev->name, "network cable problem"); eepro_complete_selreset(ioaddr); } static int eepro_send_packet(struct sk_buff *skb, struct net_device *dev) { struct eepro_local *lp = netdev_priv(dev); unsigned long flags; int ioaddr = dev->base_addr; short length = skb->len; if (net_debug > 5) printk(KERN_DEBUG "%s: entering eepro_send_packet routine.\n", dev->name); if (length < ETH_ZLEN) { if (skb_padto(skb, ETH_ZLEN)) return 0; length = ETH_ZLEN; } netif_stop_queue (dev); eepro_dis_int(ioaddr); spin_lock_irqsave(&lp->lock, flags); { unsigned char *buf = skb->data; if (hardware_send_packet(dev, buf, length)) /* we won't wake queue here because we're out of space */ dev->stats.tx_dropped++; else { dev->stats.tx_bytes+=skb->len; dev->trans_start = jiffies; netif_wake_queue(dev); } } dev_kfree_skb (skb); /* You might need to clean up and record Tx statistics here. */ /* dev->stats.tx_aborted_errors++; */ if (net_debug > 5) printk(KERN_DEBUG "%s: exiting eepro_send_packet routine.\n", dev->name); eepro_en_int(ioaddr); spin_unlock_irqrestore(&lp->lock, flags); return 0; } /* The typical workload of the driver: Handle the network interface interrupts. 
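Receive and transmit events share this one handler; it keeps re-reading STATUS_REG and servicing RX_INT/TX_INT until both bits clear or the bogus-count limit trips.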
*/ static irqreturn_t eepro_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct eepro_local *lp; int ioaddr, status, boguscount = 20; int handled = 0; lp = netdev_priv(dev); spin_lock(&lp->lock); if (net_debug > 5) printk(KERN_DEBUG "%s: entering eepro_interrupt routine.\n", dev->name); ioaddr = dev->base_addr; while (((status = inb(ioaddr + STATUS_REG)) & (RX_INT|TX_INT)) && (boguscount--)) { handled = 1; if (status & RX_INT) { if (net_debug > 4) printk(KERN_DEBUG "%s: packet received interrupt.\n", dev->name); eepro_dis_int(ioaddr); /* Get the received packets */ eepro_ack_rx(ioaddr); eepro_rx(dev); eepro_en_int(ioaddr); } if (status & TX_INT) { if (net_debug > 4) printk(KERN_DEBUG "%s: packet transmit interrupt.\n", dev->name); eepro_dis_int(ioaddr); /* Process the status of transmitted packets */ eepro_ack_tx(ioaddr); eepro_transmit_interrupt(dev); eepro_en_int(ioaddr); } } if (net_debug > 5) printk(KERN_DEBUG "%s: exiting eepro_interrupt routine.\n", dev->name); spin_unlock(&lp->lock); return IRQ_RETVAL(handled); } static int eepro_close(struct net_device *dev) { struct eepro_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; short temp_reg; netif_stop_queue(dev); eepro_sw2bank1(ioaddr); /* Switch back to Bank 1 */ /* Disable the physical interrupt line. */ temp_reg = inb(ioaddr + REG1); outb(temp_reg & 0x7f, ioaddr + REG1); eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */ /* Flush the Tx and disable Rx. */ outb(STOP_RCV_CMD, ioaddr); lp->tx_start = lp->tx_end = lp->xmt_lower_limit; lp->tx_last = 0; /* Mask all the interrupts. */ eepro_dis_int(ioaddr); /* clear all interrupts */ eepro_clear_int(ioaddr); /* Reset the 82595 */ eepro_reset(ioaddr); /* release the interrupt */ free_irq(dev->irq, dev); /* Update the statistics here. What statistics? */ return 0; } /* Set or clear the multicast filter for this adaptor. */ static void set_multicast_list(struct net_device *dev) { struct eepro_local *lp = netdev_priv(dev); short ioaddr = dev->base_addr; unsigned short mode; struct dev_mc_list *dmi=dev->mc_list; if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || dev->mc_count > 63) { eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ mode = inb(ioaddr + REG2); outb(mode | PRMSC_Mode, ioaddr + REG2); mode = inb(ioaddr + REG3); outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */ eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */ } else if (dev->mc_count==0 ) { eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ mode = inb(ioaddr + REG2); outb(mode & 0xd6, ioaddr + REG2); /* Turn off Multi-IA and PRMSC_Mode bits */ mode = inb(ioaddr + REG3); outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */ eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */ } else { unsigned short status, *eaddrs; int i, boguscount = 0; /* Disable RX and TX interrupts. Necessary to avoid corruption of the HOST_ADDRESS_REG by interrupt service routines. */ eepro_dis_int(ioaddr); eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ mode = inb(ioaddr + REG2); outb(mode | Multi_IA, ioaddr + REG2); mode = inb(ioaddr + REG3); outb(mode, ioaddr + REG3); /* writing reg. 
3 to complete the update */ eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */ outw(lp->tx_end, ioaddr + HOST_ADDRESS_REG); outw(MC_SETUP, ioaddr + IO_PORT); outw(0, ioaddr + IO_PORT); outw(0, ioaddr + IO_PORT); outw(6*(dev->mc_count + 1), ioaddr + IO_PORT); for (i = 0; i < dev->mc_count; i++) { eaddrs=(unsigned short *)dmi->dmi_addr; dmi=dmi->next; outw(*eaddrs++, ioaddr + IO_PORT); outw(*eaddrs++, ioaddr + IO_PORT); outw(*eaddrs++, ioaddr + IO_PORT); } eaddrs = (unsigned short *) dev->dev_addr; outw(eaddrs[0], ioaddr + IO_PORT); outw(eaddrs[1], ioaddr + IO_PORT); outw(eaddrs[2], ioaddr + IO_PORT); outw(lp->tx_end, ioaddr + lp->xmt_bar); outb(MC_SETUP, ioaddr); /* Update the transmit queue */ i = lp->tx_end + XMT_HEADER + 6*(dev->mc_count + 1); if (lp->tx_start != lp->tx_end) { /* update the next address and the chain bit in the last packet */ outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG); outw(i, ioaddr + IO_PORT); outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG); status = inw(ioaddr + IO_PORT); outw(status | CHAIN_BIT, ioaddr + IO_PORT); lp->tx_end = i ; } else { lp->tx_start = lp->tx_end = i ; } /* Acknowledge that the MC setup is done */ do { /* We should be doing this in the eepro_interrupt()! */ SLOW_DOWN; SLOW_DOWN; if (inb(ioaddr + STATUS_REG) & 0x08) { i = inb(ioaddr); outb(0x08, ioaddr + STATUS_REG); if (i & 0x20) { /* command ABORTed */ printk(KERN_NOTICE "%s: multicast setup failed.\n", dev->name); break; } else if ((i & 0x0f) == 0x03) { /* MC-Done */ printk(KERN_DEBUG "%s: set Rx mode to %d address%s.\n", dev->name, dev->mc_count, dev->mc_count > 1 ? "es":""); break; } } } while (++boguscount < 100); /* Re-enable RX and TX interrupts */ eepro_en_int(ioaddr); } if (lp->eepro == LAN595FX_10ISA) { eepro_complete_selreset(ioaddr); } else eepro_en_rx(ioaddr); } /* The horrible routine to read a word from the serial EEPROM. */ /* IMPORTANT - the 82595 will be set to Bank 0 after the eeprom is read */ /* The delay between EEPROM clock transitions. */ #define eeprom_delay() { udelay(40); } #define EE_READ_CMD (6 << 6) static int read_eeprom(int ioaddr, int location, struct net_device *dev) { int i; unsigned short retval = 0; struct eepro_local *lp = netdev_priv(dev); short ee_addr = ioaddr + lp->eeprom_reg; int read_cmd = location | EE_READ_CMD; short ctrl_val = EECS ; /* XXXX - black magic */ eepro_sw2bank1(ioaddr); outb(0x00, ioaddr + STATUS_REG); /* XXXX - black magic */ eepro_sw2bank2(ioaddr); outb(ctrl_val, ee_addr); /* Shift the read command bits out. */ for (i = 8; i >= 0; i--) { short outval = (read_cmd & (1 << i)) ? ctrl_val | EEDI : ctrl_val; outb(outval, ee_addr); outb(outval | EESK, ee_addr); /* EEPROM clock tick. */ eeprom_delay(); outb(outval, ee_addr); /* Finish EEPROM a clock tick. */ eeprom_delay(); } outb(ctrl_val, ee_addr); for (i = 16; i > 0; i--) { outb(ctrl_val | EESK, ee_addr); eeprom_delay(); retval = (retval << 1) | ((inb(ee_addr) & EEDO) ? 1 : 0); outb(ctrl_val, ee_addr); eeprom_delay(); } /* Terminate the EEPROM access. 
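(Chip select is dropped first, then one last clock is issued with EECS low so the serial EEPROM releases the EEDO data line.)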
*/ ctrl_val &= ~EECS; outb(ctrl_val | EESK, ee_addr); eeprom_delay(); outb(ctrl_val, ee_addr); eeprom_delay(); eepro_sw2bank0(ioaddr); return retval; } static int hardware_send_packet(struct net_device *dev, void *buf, short length) { struct eepro_local *lp = netdev_priv(dev); short ioaddr = dev->base_addr; unsigned status, tx_available, last, end; if (net_debug > 5) printk(KERN_DEBUG "%s: entering hardware_send_packet routine.\n", dev->name); /* determine how much of the transmit buffer space is available */ if (lp->tx_end > lp->tx_start) tx_available = lp->xmt_ram - (lp->tx_end - lp->tx_start); else if (lp->tx_end < lp->tx_start) tx_available = lp->tx_start - lp->tx_end; else tx_available = lp->xmt_ram; if (((((length + 3) >> 1) << 1) + 2*XMT_HEADER) >= tx_available) { /* No space available ??? */ return 1; } last = lp->tx_end; end = last + (((length + 3) >> 1) << 1) + XMT_HEADER; if (end >= lp->xmt_upper_limit + 2) { /* the transmit buffer is wrapped around */ if ((lp->xmt_upper_limit + 2 - last) <= XMT_HEADER) { /* Arrrr!!!, must keep the xmt header together, several days were lost to chase this one down. */ last = lp->xmt_lower_limit; end = last + (((length + 3) >> 1) << 1) + XMT_HEADER; } else end = lp->xmt_lower_limit + (end - lp->xmt_upper_limit + 2); } outw(last, ioaddr + HOST_ADDRESS_REG); outw(XMT_CMD, ioaddr + IO_PORT); outw(0, ioaddr + IO_PORT); outw(end, ioaddr + IO_PORT); outw(length, ioaddr + IO_PORT); if (lp->version == LAN595) outsw(ioaddr + IO_PORT, buf, (length + 3) >> 1); else { /* LAN595TX or LAN595FX, capable of 32-bit I/O processing */ unsigned short temp = inb(ioaddr + INT_MASK_REG); outb(temp | IO_32_BIT, ioaddr + INT_MASK_REG); outsl(ioaddr + IO_PORT_32_BIT, buf, (length + 3) >> 2); outb(temp & ~(IO_32_BIT), ioaddr + INT_MASK_REG); } /* A dummy read to flush the DRAM write pipeline */ status = inw(ioaddr + IO_PORT); if (lp->tx_start == lp->tx_end) { outw(last, ioaddr + lp->xmt_bar); outb(XMT_CMD, ioaddr); lp->tx_start = last; /* I don't like to change tx_start here */ } else { /* update the next address and the chain bit in the last packet */ if (lp->tx_end != last) { outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG); outw(last, ioaddr + IO_PORT); } outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG); status = inw(ioaddr + IO_PORT); outw(status | CHAIN_BIT, ioaddr + IO_PORT); /* Continue the transmit command */ outb(RESUME_XMT_CMD, ioaddr); } lp->tx_last = last; lp->tx_end = end; if (net_debug > 5) printk(KERN_DEBUG "%s: exiting hardware_send_packet routine.\n", dev->name); return 0; } static void eepro_rx(struct net_device *dev) { struct eepro_local *lp = netdev_priv(dev); short ioaddr = dev->base_addr; short boguscount = 20; short rcv_car = lp->rx_start; unsigned rcv_event, rcv_status, rcv_next_frame, rcv_size; if (net_debug > 5) printk(KERN_DEBUG "%s: entering eepro_rx routine.\n", dev->name); /* Set the read pointer to the start of the RCV */ outw(rcv_car, ioaddr + HOST_ADDRESS_REG); rcv_event = inw(ioaddr + IO_PORT); while (rcv_event == RCV_DONE) { rcv_status = inw(ioaddr + IO_PORT); rcv_next_frame = inw(ioaddr + IO_PORT); rcv_size = inw(ioaddr + IO_PORT); if ((rcv_status & (RX_OK | RX_ERROR)) == RX_OK) { /* Malloc up new buffer. 
*/ struct sk_buff *skb; dev->stats.rx_bytes+=rcv_size; rcv_size &= 0x3fff; skb = dev_alloc_skb(rcv_size+5); if (skb == NULL) { printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); dev->stats.rx_dropped++; rcv_car = lp->rx_start + RCV_HEADER + rcv_size; lp->rx_start = rcv_next_frame; outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG); break; } skb_reserve(skb,2); if (lp->version == LAN595) insw(ioaddr+IO_PORT, skb_put(skb,rcv_size), (rcv_size + 3) >> 1); else { /* LAN595TX or LAN595FX, capable of 32-bit I/O processing */ unsigned short temp = inb(ioaddr + INT_MASK_REG); outb(temp | IO_32_BIT, ioaddr + INT_MASK_REG); insl(ioaddr+IO_PORT_32_BIT, skb_put(skb,rcv_size), (rcv_size + 3) >> 2); outb(temp & ~(IO_32_BIT), ioaddr + INT_MASK_REG); } skb->protocol = eth_type_trans(skb,dev); netif_rx(skb); dev->stats.rx_packets++; } else { /* Not sure will ever reach here, I set the 595 to discard bad received frames */ dev->stats.rx_errors++; if (rcv_status & 0x0100) dev->stats.rx_over_errors++; else if (rcv_status & 0x0400) dev->stats.rx_frame_errors++; else if (rcv_status & 0x0800) dev->stats.rx_crc_errors++; printk(KERN_DEBUG "%s: event = %#x, status = %#x, next = %#x, size = %#x\n", dev->name, rcv_event, rcv_status, rcv_next_frame, rcv_size); } if (rcv_status & 0x1000) dev->stats.rx_length_errors++; rcv_car = lp->rx_start + RCV_HEADER + rcv_size; lp->rx_start = rcv_next_frame; if (--boguscount == 0) break; outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG); rcv_event = inw(ioaddr + IO_PORT); } if (rcv_car == 0) rcv_car = lp->rcv_upper_limit | 0xff; outw(rcv_car - 1, ioaddr + RCV_STOP); if (net_debug > 5) printk(KERN_DEBUG "%s: exiting eepro_rx routine.\n", dev->name); } static void eepro_transmit_interrupt(struct net_device *dev) { struct eepro_local *lp = netdev_priv(dev); short ioaddr = dev->base_addr; short boguscount = 25; short xmt_status; while ((lp->tx_start != lp->tx_end) && boguscount--) { outw(lp->tx_start, ioaddr + HOST_ADDRESS_REG); xmt_status = inw(ioaddr+IO_PORT); if (!(xmt_status & TX_DONE_BIT)) break; xmt_status = inw(ioaddr+IO_PORT); lp->tx_start = inw(ioaddr+IO_PORT); netif_wake_queue (dev); if (xmt_status & TX_OK) dev->stats.tx_packets++; else { dev->stats.tx_errors++; if (xmt_status & 0x0400) { dev->stats.tx_carrier_errors++; printk(KERN_DEBUG "%s: carrier error\n", dev->name); printk(KERN_DEBUG "%s: XMT status = %#x\n", dev->name, xmt_status); } else { printk(KERN_DEBUG "%s: XMT status = %#x\n", dev->name, xmt_status); printk(KERN_DEBUG "%s: XMT status = %#x\n", dev->name, xmt_status); } } if (xmt_status & 0x000f) { dev->stats.collisions += (xmt_status & 0x000f); } if ((xmt_status & 0x0040) == 0x0) { dev->stats.tx_heartbeat_errors++; } } } static int eepro_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct eepro_local *lp = netdev_priv(dev); cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_Autoneg; cmd->advertising = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_Autoneg; if (GetBit(lp->word[5], ee_PortTPE)) { cmd->supported |= SUPPORTED_TP; cmd->advertising |= ADVERTISED_TP; } if (GetBit(lp->word[5], ee_PortBNC)) { cmd->supported |= SUPPORTED_BNC; cmd->advertising |= ADVERTISED_BNC; } if (GetBit(lp->word[5], ee_PortAUI)) { cmd->supported |= SUPPORTED_AUI; cmd->advertising |= ADVERTISED_AUI; } cmd->speed = SPEED_10; if (dev->if_port == TPE && lp->word[1] & ee_Duplex) { cmd->duplex = DUPLEX_FULL; } else { cmd->duplex = DUPLEX_HALF; } cmd->port = dev->if_port; cmd->phy_address = dev->base_addr; 
cmd->transceiver = XCVR_INTERNAL; if (lp->word[0] & ee_AutoNeg) { cmd->autoneg = 1; } return 0; } static void eepro_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { strcpy(drvinfo->driver, DRV_NAME); strcpy(drvinfo->version, DRV_VERSION); sprintf(drvinfo->bus_info, "ISA 0x%lx", dev->base_addr); } static const struct ethtool_ops eepro_ethtool_ops = { .get_settings = eepro_ethtool_get_settings, .get_drvinfo = eepro_ethtool_get_drvinfo, }; #ifdef MODULE #define MAX_EEPRO 8 static struct net_device *dev_eepro[MAX_EEPRO]; static int io[MAX_EEPRO] = { [0 ... MAX_EEPRO-1] = -1 }; static int irq[MAX_EEPRO]; static int mem[MAX_EEPRO] = { /* Size of the rx buffer in KB */ [0 ... MAX_EEPRO-1] = RCV_DEFAULT_RAM/1024 }; static int autodetect; static int n_eepro; /* For linux 2.1.xx */ MODULE_AUTHOR("Pascal Dupuis and others"); MODULE_DESCRIPTION("Intel i82595 ISA EtherExpressPro10/10+ driver"); MODULE_LICENSE("GPL"); module_param_array(io, int, NULL, 0); module_param_array(irq, int, NULL, 0); module_param_array(mem, int, NULL, 0); module_param(autodetect, int, 0); MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base address(es)"); MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)"); MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(s) in kB (3-29)"); MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)"); int __init init_module(void) { struct net_device *dev; int i; if (io[0] == -1 && autodetect == 0) { printk(KERN_WARNING "eepro_init_module: Probing is very dangerous on ISA boards!\n"); printk(KERN_WARNING "eepro_init_module: Please add \"autodetect=1\" to force probe\n"); return -ENODEV; } else if (autodetect) { /* if autodetect is set then we must force detection */ for (i = 0; i < MAX_EEPRO; i++) { io[i] = 0; } printk(KERN_INFO "eepro_init_module: Auto-detecting boards (May God protect us...)\n"); } for (i = 0; i < MAX_EEPRO && io[i] != -1; i++) { dev = alloc_etherdev(sizeof(struct eepro_local)); if (!dev) break; dev->mem_end = mem[i]; dev->base_addr = io[i]; dev->irq = irq[i]; if (do_eepro_probe(dev) == 0) { dev_eepro[n_eepro++] = dev; continue; } free_netdev(dev); break; } if (n_eepro) printk(KERN_INFO "%s", version); return n_eepro ? 0 : -ENODEV; } void __exit cleanup_module(void) { int i; for (i=0; i<n_eepro; i++) { struct net_device *dev = dev_eepro[i]; unregister_netdev(dev); release_region(dev->base_addr, EEPRO_IO_EXTENT); free_netdev(dev); } } #endif /* MODULE */
stevelord/PR30
linux-2.6.31/drivers/net/eepro.c
C
gpl-2.0
52,171
/* * chap.c - Challenge Handshake Authentication Protocol. * * Copyright (c) 1993 The Australian National University. * All rights reserved. * * Redistribution and use in source and binary forms are permitted * provided that the above copyright notice and this paragraph are * duplicated in all such forms and that any documentation, * advertising materials, and other materials related to such * distribution and use acknowledge that the software was developed * by the Australian National University. The name of the University * may not be used to endorse or promote products derived from this * software without specific prior written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * Copyright (c) 1991 Gregory M. Christy. * All rights reserved. * * Redistribution and use in source and binary forms are permitted * provided that the above copyright notice and this paragraph are * duplicated in all such forms and that any documentation, * advertising materials, and other materials related to such * distribution and use acknowledge that the software was developed * by Gregory M. Christy. The name of the author may not be used to * endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ #define RCSID "$Id$" /* * TODO: */ #include <stdio.h> #include <string.h> #include <stdlib.h> /* drand48, srand48 */ #include <sys/types.h> #include <sys/time.h> #include "pppd.h" #include "chap.h" #include "md5.h" #ifdef CHAPMS #include "chap_ms.h" #endif static const char rcsid[] = RCSID; /* * Command-line options. */ static option_t chap_option_list[] = { { "chap-restart", o_int, &chap[0].timeouttime, "Set timeout for CHAP", 0, NULL, 0, 0 }, { "chap-max-challenge", o_int, &chap[0].max_transmits, "Set max #xmits for challenge", 0, NULL, 0, 0 }, { "chap-interval", o_int, &chap[0].chal_interval, "Set interval for rechallenge", 0, NULL, 0, 0 }, #ifdef MSLANMAN { "ms-lanman", o_bool, &ms_lanman, "Use LanMan passwd when using MS-CHAP", 1, NULL, 0, 0 }, #endif { NULL, 0, NULL, NULL, 0, NULL, 0, 0 } }; /* * Protocol entry points.
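* On the wire, every CHAP packet begins with a 4-octet header: Code (1 octet), Identifier (1 octet) and Length (2 octets, covering the whole packet); this is the CHAP_HEADERLEN that ChapInput() below validates before dispatching on Code (RFC 1994).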
*/ static void ChapInit(int); static void ChapLowerUp(int); static void ChapLowerDown(int); static void ChapInput(int, u_char *, int); static void ChapProtocolReject(int); static int ChapPrintPkt(u_char *, int, void (*)(void *, char *, ...), void *); struct protent chap_protent = { PPP_CHAP, ChapInit, ChapInput, ChapProtocolReject, ChapLowerUp, ChapLowerDown, NULL, NULL, ChapPrintPkt, NULL, 1, "CHAP", NULL, chap_option_list, NULL, NULL, NULL }; chap_state chap[NUM_PPP]; /* CHAP state; one for each unit */ static void ChapChallengeTimeout(void *); static void ChapResponseTimeout(void *); static void ChapReceiveChallenge(chap_state *, u_char *, int, int); static void ChapRechallenge(void *); static void ChapReceiveResponse(chap_state *, u_char *, int, int); static void ChapReceiveSuccess(chap_state *, u_char *, u_char, int); static void ChapReceiveFailure(chap_state *, u_char *, u_char, int); static void ChapSendStatus(chap_state *, int); static void ChapSendChallenge(chap_state *); static void ChapSendResponse(chap_state *); static void ChapGenChallenge(chap_state *); /* * ChapInit - Initialize a CHAP unit. */ static void ChapInit( int unit) { chap_state *cstate = &chap[unit]; BZERO(cstate, sizeof(*cstate)); cstate->unit = unit; cstate->clientstate = CHAPCS_INITIAL; cstate->serverstate = CHAPSS_INITIAL; cstate->timeouttime = CHAP_DEFTIMEOUT; cstate->max_transmits = CHAP_DEFTRANSMITS; /* random number generator is initialized in magic_init */ } /* * ChapAuthWithPeer - Authenticate us with our peer (start client). * */ void ChapAuthWithPeer( int unit, char *our_name, int digest) { chap_state *cstate = &chap[unit]; cstate->resp_name = our_name; cstate->resp_type = digest; if (cstate->clientstate == CHAPCS_INITIAL || cstate->clientstate == CHAPCS_PENDING) { /* lower layer isn't up - wait until later */ cstate->clientstate = CHAPCS_PENDING; return; } /* * We get here as a result of LCP coming up. * So even if CHAP was open before, we will * have to re-authenticate ourselves. */ cstate->clientstate = CHAPCS_LISTEN; } /* * ChapAuthPeer - Authenticate our peer (start server). */ void ChapAuthPeer( int unit, char *our_name, int digest) { chap_state *cstate = &chap[unit]; cstate->chal_name = our_name; cstate->chal_type = digest; if (cstate->serverstate == CHAPSS_INITIAL || cstate->serverstate == CHAPSS_PENDING) { /* lower layer isn't up - wait until later */ cstate->serverstate = CHAPSS_PENDING; return; } ChapGenChallenge(cstate); ChapSendChallenge(cstate); /* crank it up dude! */ cstate->serverstate = CHAPSS_INITIAL_CHAL; } /* * ChapChallengeTimeout - Timeout expired on sending challenge. */ static void ChapChallengeTimeout( void *arg) { chap_state *cstate = (chap_state *) arg; /* if we aren't sending challenges, don't worry. then again we */ /* probably shouldn't be here either */ if (cstate->serverstate != CHAPSS_INITIAL_CHAL && cstate->serverstate != CHAPSS_RECHALLENGE) return; if (cstate->chal_transmits >= cstate->max_transmits) { /* give up on peer */ error("Peer failed to respond to CHAP challenge"); cstate->serverstate = CHAPSS_BADAUTH; auth_peer_fail(cstate->unit, PPP_CHAP); return; } ChapSendChallenge(cstate); /* Re-send challenge */ } /* * ChapResponseTimeout - Timeout expired on sending response. */ static void ChapResponseTimeout( void *arg) { chap_state *cstate = (chap_state *) arg; /* if we aren't sending a response, don't worry. 
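(a Success or Failure may already have moved us out of CHAPCS_RESPONSE, in which case the state check below turns this timeout into a no-op)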
*/ if (cstate->clientstate != CHAPCS_RESPONSE) return; ChapSendResponse(cstate); /* re-send response */ } /* * ChapRechallenge - Time to challenge the peer again. */ static void ChapRechallenge( void *arg) { chap_state *cstate = (chap_state *) arg; /* if we aren't sending a response, don't worry. */ if (cstate->serverstate != CHAPSS_OPEN) return; ChapGenChallenge(cstate); ChapSendChallenge(cstate); cstate->serverstate = CHAPSS_RECHALLENGE; } /* * ChapLowerUp - The lower layer is up. * * Start up if we have pending requests. */ static void ChapLowerUp( int unit) { chap_state *cstate = &chap[unit]; if (cstate->clientstate == CHAPCS_INITIAL) cstate->clientstate = CHAPCS_CLOSED; else if (cstate->clientstate == CHAPCS_PENDING) cstate->clientstate = CHAPCS_LISTEN; if (cstate->serverstate == CHAPSS_INITIAL) cstate->serverstate = CHAPSS_CLOSED; else if (cstate->serverstate == CHAPSS_PENDING) { ChapGenChallenge(cstate); ChapSendChallenge(cstate); cstate->serverstate = CHAPSS_INITIAL_CHAL; } } /* * ChapLowerDown - The lower layer is down. * * Cancel all timeouts. */ static void ChapLowerDown( int unit) { chap_state *cstate = &chap[unit]; /* Timeout(s) pending? Cancel if so. */ if (cstate->serverstate == CHAPSS_INITIAL_CHAL || cstate->serverstate == CHAPSS_RECHALLENGE) UNTIMEOUT(ChapChallengeTimeout, cstate); else if (cstate->serverstate == CHAPSS_OPEN && cstate->chal_interval != 0) UNTIMEOUT(ChapRechallenge, cstate); if (cstate->clientstate == CHAPCS_RESPONSE) UNTIMEOUT(ChapResponseTimeout, cstate); cstate->clientstate = CHAPCS_INITIAL; cstate->serverstate = CHAPSS_INITIAL; } /* * ChapProtocolReject - Peer doesn't grok CHAP. */ static void ChapProtocolReject( int unit) { chap_state *cstate = &chap[unit]; if (cstate->serverstate != CHAPSS_INITIAL && cstate->serverstate != CHAPSS_CLOSED) auth_peer_fail(unit, PPP_CHAP); if (cstate->clientstate != CHAPCS_INITIAL && cstate->clientstate != CHAPCS_CLOSED) auth_withpeer_fail(unit, PPP_CHAP); ChapLowerDown(unit); /* shutdown chap */ } /* * ChapInput - Input CHAP packet. */ static void ChapInput( int unit, u_char *inpacket, int packet_len) { chap_state *cstate = &chap[unit]; u_char *inp; u_char code, id; int len; /* * Parse header (code, id and length). * If packet too short, drop it. */ inp = inpacket; if (packet_len < CHAP_HEADERLEN) { CHAPDEBUG(("ChapInput: rcvd short header.")); return; } GETCHAR(code, inp); GETCHAR(id, inp); GETSHORT(len, inp); if (len < CHAP_HEADERLEN) { CHAPDEBUG(("ChapInput: rcvd illegal length.")); return; } if (len > packet_len) { CHAPDEBUG(("ChapInput: rcvd short packet.")); return; } len -= CHAP_HEADERLEN; /* * Action depends on code (as in fact it usually does :-). */ switch (code) { case CHAP_CHALLENGE: ChapReceiveChallenge(cstate, inp, id, len); break; case CHAP_RESPONSE: ChapReceiveResponse(cstate, inp, id, len); break; case CHAP_FAILURE: ChapReceiveFailure(cstate, inp, id, len); break; case CHAP_SUCCESS: ChapReceiveSuccess(cstate, inp, id, len); break; default: /* Need code reject? */ warn("Unknown CHAP code (%d) received.", code); break; } } /* * ChapReceiveChallenge - Receive Challenge and send Response. 
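* For the MD5 digest, the response value sent back is MD5(id || secret || challenge): the one-octet identifier, the shared secret and the received challenge hashed in that order, which is exactly the sequence of MD5Update() calls below (RFC 1994).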
*/ static void ChapReceiveChallenge( chap_state *cstate, u_char *inp, int id, int len) { int rchallenge_len; u_char *rchallenge; int secret_len; unsigned char secret[MAXSECRETLEN]; char rhostname[256]; MD5_CTX mdContext; u_char hash[MD5_SIGNATURE_SIZE]; if (cstate->clientstate == CHAPCS_CLOSED || cstate->clientstate == CHAPCS_PENDING) { CHAPDEBUG(("ChapReceiveChallenge: in state %d", cstate->clientstate)); return; } if (len < 2) { CHAPDEBUG(("ChapReceiveChallenge: rcvd short packet.")); return; } GETCHAR(rchallenge_len, inp); len -= sizeof (u_char) + rchallenge_len; /* now name field length */ if (len < 0) { CHAPDEBUG(("ChapReceiveChallenge: rcvd short packet.")); return; } rchallenge = inp; INCPTR(rchallenge_len, inp); if (len >= sizeof(rhostname)) len = sizeof(rhostname) - 1; BCOPY(inp, rhostname, len); rhostname[len] = '\000'; /* Microsoft doesn't send their name back in the PPP packet */ if (explicit_remote || (remote_name[0] != 0 && rhostname[0] == 0)) { strlcpy(rhostname, remote_name, sizeof(rhostname)); CHAPDEBUG(("ChapReceiveChallenge: using '%q' as remote name", rhostname)); } /* get secret for authenticating ourselves with the specified host */ if (!get_secret(cstate->unit, cstate->resp_name, rhostname, secret, &secret_len, 0)) { secret_len = 0; /* assume null secret if can't find one */ warn("No CHAP secret found for authenticating us to %q", rhostname); } /* cancel response send timeout if necessary */ if (cstate->clientstate == CHAPCS_RESPONSE) UNTIMEOUT(ChapResponseTimeout, cstate); cstate->resp_id = id; cstate->resp_transmits = 0; /* generate MD based on negotiated type */ switch (cstate->resp_type) { case CHAP_DIGEST_MD5: MD5Init(&mdContext); MD5Update(&mdContext, &cstate->resp_id, 1); MD5Update(&mdContext, secret, secret_len); MD5Update(&mdContext, rchallenge, rchallenge_len); MD5Final(hash, &mdContext); BCOPY(hash, cstate->response, MD5_SIGNATURE_SIZE); cstate->resp_length = MD5_SIGNATURE_SIZE; break; #ifdef CHAPMS case CHAP_MICROSOFT: ChapMS(cstate, rchallenge, rchallenge_len, secret, secret_len); break; #endif default: CHAPDEBUG(("unknown digest type %d", cstate->resp_type)); return; } BZERO(secret, sizeof(secret)); ChapSendResponse(cstate); } /* * ChapReceiveResponse - Receive and process response. */ static void ChapReceiveResponse( chap_state *cstate, u_char *inp, int id, int len) { u_char *remmd, remmd_len; int secret_len, old_state; int code; char rhostname[256]; MD5_CTX mdContext; unsigned char secret[MAXSECRETLEN]; u_char hash[MD5_SIGNATURE_SIZE]; if (cstate->serverstate == CHAPSS_CLOSED || cstate->serverstate == CHAPSS_PENDING) { CHAPDEBUG(("ChapReceiveResponse: in state %d", cstate->serverstate)); return; } if (id != cstate->chal_id) return; /* doesn't match ID of last challenge */ /* * If we have received a duplicate or bogus Response, * we have to send the same answer (Success/Failure) * as we did for the first Response we saw. 
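* For example, if our earlier Success was lost, the peer retransmits the same Response with the same id; answering Success again resynchronizes the peer without recomputing or re-verifying the hash.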
*/ if (cstate->serverstate == CHAPSS_OPEN) { ChapSendStatus(cstate, CHAP_SUCCESS); return; } if (cstate->serverstate == CHAPSS_BADAUTH) { ChapSendStatus(cstate, CHAP_FAILURE); return; } if (len < 2) { CHAPDEBUG(("ChapReceiveResponse: rcvd short packet.")); return; } GETCHAR(remmd_len, inp); /* get length of MD */ remmd = inp; /* get pointer to MD */ INCPTR(remmd_len, inp); len -= sizeof (u_char) + remmd_len; if (len < 0) { CHAPDEBUG(("ChapReceiveResponse: rcvd short packet.")); return; } UNTIMEOUT(ChapChallengeTimeout, cstate); if (len >= sizeof(rhostname)) len = sizeof(rhostname) - 1; BCOPY(inp, rhostname, len); rhostname[len] = '\000'; /* * Get secret for authenticating them with us, * do the hash ourselves, and compare the result. */ code = CHAP_FAILURE; if (!get_secret(cstate->unit, (explicit_remote? remote_name: rhostname), cstate->chal_name, secret, &secret_len, 1)) { warn("No CHAP secret found for authenticating %q", rhostname); } else { /* generate MD based on negotiated type */ switch (cstate->chal_type) { case CHAP_DIGEST_MD5: /* only MD5 is defined for now */ if (remmd_len != MD5_SIGNATURE_SIZE) break; /* it's not even the right length */ MD5Init(&mdContext); MD5Update(&mdContext, &cstate->chal_id, 1); MD5Update(&mdContext, secret, secret_len); MD5Update(&mdContext, cstate->challenge, cstate->chal_len); MD5Final(hash, &mdContext); /* compare local and remote MDs and send the appropriate status */ if (memcmp (hash, remmd, MD5_SIGNATURE_SIZE) == 0) code = CHAP_SUCCESS; /* they are the same! */ break; default: CHAPDEBUG(("unknown digest type %d", cstate->chal_type)); } } BZERO(secret, sizeof(secret)); ChapSendStatus(cstate, code); if (code == CHAP_SUCCESS) { old_state = cstate->serverstate; cstate->serverstate = CHAPSS_OPEN; if (old_state == CHAPSS_INITIAL_CHAL) { auth_peer_success(cstate->unit, PPP_CHAP, rhostname, len); } if (cstate->chal_interval != 0) TIMEOUT(ChapRechallenge, cstate, cstate->chal_interval); notice("CHAP peer authentication succeeded for %q", rhostname); } else { error("CHAP peer authentication failed for remote host %q", rhostname); cstate->serverstate = CHAPSS_BADAUTH; auth_peer_fail(cstate->unit, PPP_CHAP); } } /* * ChapReceiveSuccess - Receive Success */ static void ChapReceiveSuccess( chap_state *cstate, u_char *inp, u_char id, int len) { if (cstate->clientstate == CHAPCS_OPEN) /* presumably an answer to a duplicate response */ return; if (cstate->clientstate != CHAPCS_RESPONSE) { /* don't know what this is */ CHAPDEBUG(("ChapReceiveSuccess: in state %d\n", cstate->clientstate)); return; } UNTIMEOUT(ChapResponseTimeout, cstate); /* * Print message. */ if (len > 0) PRINTMSG(inp, len); cstate->clientstate = CHAPCS_OPEN; auth_withpeer_success(cstate->unit, PPP_CHAP); } /* * ChapReceiveFailure - Receive failure. */ static void ChapReceiveFailure( chap_state *cstate, u_char *inp, u_char id, int len) { if (cstate->clientstate != CHAPCS_RESPONSE) { /* don't know what this is */ CHAPDEBUG(("ChapReceiveFailure: in state %d\n", cstate->clientstate)); return; } UNTIMEOUT(ChapResponseTimeout, cstate); /* * Print message. */ if (len > 0) PRINTMSG(inp, len); error("CHAP authentication failed"); auth_withpeer_fail(cstate->unit, PPP_CHAP); } /* * ChapSendChallenge - Send an Authenticate challenge. 
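* The packet built below is: CHAP header, one octet giving the challenge length, the challenge value itself, then our name with no terminating NUL; the header's Length field covers all of it.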
*/ static void ChapSendChallenge( chap_state *cstate) { u_char *outp; int chal_len, name_len; int outlen; chal_len = cstate->chal_len; name_len = strlen(cstate->chal_name); outlen = CHAP_HEADERLEN + sizeof (u_char) + chal_len + name_len; outp = outpacket_buf; MAKEHEADER(outp, PPP_CHAP); /* paste in a CHAP header */ PUTCHAR(CHAP_CHALLENGE, outp); PUTCHAR(cstate->chal_id, outp); PUTSHORT(outlen, outp); PUTCHAR(chal_len, outp); /* put length of challenge */ BCOPY(cstate->challenge, outp, chal_len); INCPTR(chal_len, outp); BCOPY(cstate->chal_name, outp, name_len); /* append hostname */ output(cstate->unit, outpacket_buf, outlen + PPP_HDRLEN); TIMEOUT(ChapChallengeTimeout, cstate, cstate->timeouttime); ++cstate->chal_transmits; } /* * ChapSendStatus - Send a status response (ack or nak). */ static void ChapSendStatus( chap_state *cstate, int code) { u_char *outp; int outlen, msglen; char msg[256]; if (code == CHAP_SUCCESS) slprintf(msg, sizeof(msg), "Welcome to %s.", hostname); else slprintf(msg, sizeof(msg), "I don't like you. Go 'way."); msglen = strlen(msg); outlen = CHAP_HEADERLEN + msglen; outp = outpacket_buf; MAKEHEADER(outp, PPP_CHAP); /* paste in a header */ PUTCHAR(code, outp); PUTCHAR(cstate->chal_id, outp); PUTSHORT(outlen, outp); BCOPY(msg, outp, msglen); output(cstate->unit, outpacket_buf, outlen + PPP_HDRLEN); } /* * ChapGenChallenge is used to generate a pseudo-random challenge string of * a pseudo-random length between min_len and max_len. The challenge * string and its length are stored in *cstate, and various other fields of * *cstate are initialized. */ static void ChapGenChallenge( chap_state *cstate) { int chal_len; u_char *ptr = cstate->challenge; int i; /* pick a random challenge length between MIN_CHALLENGE_LENGTH and MAX_CHALLENGE_LENGTH */ chal_len = (unsigned) ((drand48() * (MAX_CHALLENGE_LENGTH - MIN_CHALLENGE_LENGTH)) + MIN_CHALLENGE_LENGTH); cstate->chal_len = chal_len; cstate->chal_id = ++cstate->id; cstate->chal_transmits = 0; /* generate a random string */ for (i = 0; i < chal_len; i++) *ptr++ = (char) (drand48() * 0xff); } /* * ChapSendResponse - send a response packet with values as specified * in *cstate. */ /* ARGSUSED */ static void ChapSendResponse( chap_state *cstate) { u_char *outp; int outlen, md_len, name_len; md_len = cstate->resp_length; name_len = strlen(cstate->resp_name); outlen = CHAP_HEADERLEN + sizeof (u_char) + md_len + name_len; outp = outpacket_buf; MAKEHEADER(outp, PPP_CHAP); PUTCHAR(CHAP_RESPONSE, outp); /* we are a response */ PUTCHAR(cstate->resp_id, outp); /* copy id from challenge packet */ PUTSHORT(outlen, outp); /* packet length */ PUTCHAR(md_len, outp); /* length of MD */ BCOPY(cstate->response, outp, md_len); /* copy MD to buffer */ INCPTR(md_len, outp); BCOPY(cstate->resp_name, outp, name_len); /* append our name */ /* send the packet */ output(cstate->unit, outpacket_buf, outlen + PPP_HDRLEN); cstate->clientstate = CHAPCS_RESPONSE; TIMEOUT(ChapResponseTimeout, cstate, cstate->timeouttime); ++cstate->resp_transmits; } /* * ChapPrintPkt - print the contents of a CHAP packet. 
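* Example with hypothetical values: a Challenge packet prints as " Challenge id=0x1 <4f3a2b90>, name = " followed by the printed peer name.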
*/ static char *ChapCodenames[] = { "Challenge", "Response", "Success", "Failure" }; static int ChapPrintPkt( u_char *p, int plen, void (*printer)(void *, char *, ...), void *arg) { int code, id, len; int clen, nlen; u_char x; if (plen < CHAP_HEADERLEN) return 0; GETCHAR(code, p); GETCHAR(id, p); GETSHORT(len, p); if (len < CHAP_HEADERLEN || len > plen) return 0; if (code >= 1 && code <= sizeof(ChapCodenames) / sizeof(char *)) printer(arg, " %s", ChapCodenames[code-1]); else printer(arg, " code=0x%x", code); printer(arg, " id=0x%x", id); len -= CHAP_HEADERLEN; switch (code) { case CHAP_CHALLENGE: case CHAP_RESPONSE: if (len < 1) break; clen = p[0]; if (len < clen + 1) break; ++p; nlen = len - clen - 1; printer(arg, " <"); for (; clen > 0; --clen) { GETCHAR(x, p); printer(arg, "%.2x", x); } printer(arg, ">, name = "); print_string((char *)p, nlen, printer, arg); break; case CHAP_FAILURE: case CHAP_SUCCESS: printer(arg, " "); print_string((char *)p, len, printer, arg); break; default: for (clen = len; clen > 0; --clen) { GETCHAR(x, p); printer(arg, " %.2x", x); } } return len + CHAP_HEADERLEN; }
lizhuobin1981/rtems_test
cpukit/pppd/chap.c
C
gpl-2.0
21,275
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * ROUTE - implementation of the IP router. * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Alan Cox, <gw4pts@gw4pts.ampr.org> * Linus Torvalds, <Linus.Torvalds@helsinki.fi> * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * * Fixes: * Alan Cox : Verify area fixes. * Alan Cox : cli() protects routing changes * Rui Oliveira : ICMP routing table updates * (rco@di.uminho.pt) Routing table insertion and update * Linus Torvalds : Rewrote bits to be sensible * Alan Cox : Added BSD route gw semantics * Alan Cox : Super /proc >4K * Alan Cox : MTU in route table * Alan Cox : MSS actually. Also added the window * clamper. * Sam Lantinga : Fixed route matching in rt_del() * Alan Cox : Routing cache support. * Alan Cox : Removed compatibility cruft. * Alan Cox : RTF_REJECT support. * Alan Cox : TCP irtt support. * Jonathan Naylor : Added Metric support. * Miquel van Smoorenburg : BSD API fixes. * Miquel van Smoorenburg : Metrics. * Alan Cox : Use __u32 properly * Alan Cox : Aligned routing errors more closely with BSD * our system is still very different. * Alan Cox : Faster /proc handling * Alexey Kuznetsov : Massive rework to support tree based routing, * routing caches and better behaviour. * * Olaf Erb : irtt wasn't being copied right. * Bjorn Ekwall : Kerneld route support. * Alan Cox : Multicast fixed (I hope) * Pavel Krauz : Limited broadcast fixed * Mike McLagan : Routing by source * Alexey Kuznetsov : End of old history. Split to fib.c and * route.c and rewritten from scratch. * Andi Kleen : Load-limit warning messages. * Vitaly E. Lavrov : Transparent proxy revived after year coma. * Vitaly E. Lavrov : Race condition in ip_route_input_slow. * Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow. * Vladimir V. Ivanov : IP rule info (flowid) is really useful. * Marc Boucher : routing by fwmark * Robert Olsson : Added rt_cache statistics * Arnaldo C. Melo : Convert proc stuff to seq_file * Eric Dumazet : hashed spinlocks and rt_check_expire() fixes. * Ilia Sotnikov : Ignore TOS on PMTUD and Redirect * Ilia Sotnikov : Removed TOS from hash calculations * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
*/ #define pr_fmt(fmt) "IPv4: " fmt #include <linux/module.h> #include <asm/uaccess.h> #include <linux/bitops.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/errno.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/proc_fs.h> #include <linux/init.h> #include <linux/skbuff.h> #include <linux/inetdevice.h> #include <linux/igmp.h> #include <linux/pkt_sched.h> #include <linux/mroute.h> #include <linux/netfilter_ipv4.h> #include <linux/random.h> #include <linux/rcupdate.h> #include <linux/times.h> #include <linux/slab.h> #include <linux/jhash.h> #include <net/dst.h> #include <net/net_namespace.h> #include <net/protocol.h> #include <net/ip.h> #include <net/route.h> #include <net/inetpeer.h> #include <net/sock.h> #include <net/ip_fib.h> #include <net/arp.h> #include <net/tcp.h> #include <net/icmp.h> #include <net/xfrm.h> #include <net/netevent.h> #include <net/rtnetlink.h> #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> #include <linux/kmemleak.h> #endif #include <net/secure_seq.h> #define RT_FL_TOS(oldflp4) \ ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)) #define RT_GC_TIMEOUT (300*HZ) static int ip_rt_max_size; static int ip_rt_redirect_number __read_mostly = 9; static int ip_rt_redirect_load __read_mostly = HZ / 50; static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1)); static int ip_rt_error_cost __read_mostly = HZ; static int ip_rt_error_burst __read_mostly = 5 * HZ; static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ; static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; static int ip_rt_min_advmss __read_mostly = 256; /* * Interface to generic destination cache. 
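* The forward declarations below are the hooks this file plugs into struct dst_ops: validity checking (->check), advmss and MTU queries, PMTU updates, redirect handling and neighbour lookup.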
*/ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie); static unsigned int ipv4_default_advmss(const struct dst_entry *dst); static unsigned int ipv4_mtu(const struct dst_entry *dst); static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst); static void ipv4_link_failure(struct sk_buff *skb); static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, u32 mtu); static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb); static void ipv4_dst_destroy(struct dst_entry *dst); static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old) { WARN_ON(1); return NULL; } static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, struct sk_buff *skb, const void *daddr); static struct dst_ops ipv4_dst_ops = { .family = AF_INET, .protocol = cpu_to_be16(ETH_P_IP), .check = ipv4_dst_check, .default_advmss = ipv4_default_advmss, .mtu = ipv4_mtu, .cow_metrics = ipv4_cow_metrics, .destroy = ipv4_dst_destroy, .negative_advice = ipv4_negative_advice, .link_failure = ipv4_link_failure, .update_pmtu = ip_rt_update_pmtu, .redirect = ip_do_redirect, .local_out = __ip_local_out, .neigh_lookup = ipv4_neigh_lookup, }; #define ECN_OR_COST(class) TC_PRIO_##class const __u8 ip_tos2prio[16] = { TC_PRIO_BESTEFFORT, ECN_OR_COST(BESTEFFORT), TC_PRIO_BESTEFFORT, ECN_OR_COST(BESTEFFORT), TC_PRIO_BULK, ECN_OR_COST(BULK), TC_PRIO_BULK, ECN_OR_COST(BULK), TC_PRIO_INTERACTIVE, ECN_OR_COST(INTERACTIVE), TC_PRIO_INTERACTIVE, ECN_OR_COST(INTERACTIVE), TC_PRIO_INTERACTIVE_BULK, ECN_OR_COST(INTERACTIVE_BULK), TC_PRIO_INTERACTIVE_BULK, ECN_OR_COST(INTERACTIVE_BULK) }; EXPORT_SYMBOL(ip_tos2prio); static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); #define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field) #ifdef CONFIG_PROC_FS static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos) { if (*pos) return NULL; return SEQ_START_TOKEN; } static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return NULL; } static void rt_cache_seq_stop(struct seq_file *seq, void *v) { } static int rt_cache_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t" "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t" "HHUptod\tSpecDst"); return 0; } static const struct seq_operations rt_cache_seq_ops = { .start = rt_cache_seq_start, .next = rt_cache_seq_next, .stop = rt_cache_seq_stop, .show = rt_cache_seq_show, }; static int rt_cache_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &rt_cache_seq_ops); } static const struct file_operations rt_cache_seq_fops = { .owner = THIS_MODULE, .open = rt_cache_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos) { int cpu; if (*pos == 0) return SEQ_START_TOKEN; for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { if (!cpu_possible(cpu)) continue; *pos = cpu+1; return &per_cpu(rt_cache_stat, cpu); } return NULL; } static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) { int cpu; for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { if (!cpu_possible(cpu)) continue; *pos = cpu+1; return &per_cpu(rt_cache_stat, cpu); } return NULL; } static void rt_cpu_seq_stop(struct seq_file *seq, void *v) { } static int rt_cpu_seq_show(struct seq_file *seq, void *v) { struct rt_cache_stat *st = v; if (v == SEQ_START_TOKEN) { seq_printf(seq, "entries in_hit 
in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n"); return 0; } seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x " " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n", dst_entries_get_slow(&ipv4_dst_ops), 0, /* st->in_hit */ st->in_slow_tot, st->in_slow_mc, st->in_no_route, st->in_brd, st->in_martian_dst, st->in_martian_src, 0, /* st->out_hit */ st->out_slow_tot, st->out_slow_mc, 0, /* st->gc_total */ 0, /* st->gc_ignored */ 0, /* st->gc_goal_miss */ 0, /* st->gc_dst_overflow */ 0, /* st->in_hlist_search */ 0 /* st->out_hlist_search */ ); return 0; } static const struct seq_operations rt_cpu_seq_ops = { .start = rt_cpu_seq_start, .next = rt_cpu_seq_next, .stop = rt_cpu_seq_stop, .show = rt_cpu_seq_show, }; static int rt_cpu_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &rt_cpu_seq_ops); } static const struct file_operations rt_cpu_seq_fops = { .owner = THIS_MODULE, .open = rt_cpu_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #ifdef CONFIG_IP_ROUTE_CLASSID static int rt_acct_proc_show(struct seq_file *m, void *v) { struct ip_rt_acct *dst, *src; unsigned int i, j; dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL); if (!dst) return -ENOMEM; for_each_possible_cpu(i) { src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i); for (j = 0; j < 256; j++) { dst[j].o_bytes += src[j].o_bytes; dst[j].o_packets += src[j].o_packets; dst[j].i_bytes += src[j].i_bytes; dst[j].i_packets += src[j].i_packets; } } seq_write(m, dst, 256 * sizeof(struct ip_rt_acct)); kfree(dst); return 0; } static int rt_acct_proc_open(struct inode *inode, struct file *file) { return single_open(file, rt_acct_proc_show, NULL); } static const struct file_operations rt_acct_proc_fops = { .owner = THIS_MODULE, .open = rt_acct_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; #endif static int __net_init ip_rt_do_proc_init(struct net *net) { struct proc_dir_entry *pde; pde = proc_create("rt_cache", S_IRUGO, net->proc_net, &rt_cache_seq_fops); if (!pde) goto err1; pde = proc_create("rt_cache", S_IRUGO, net->proc_net_stat, &rt_cpu_seq_fops); if (!pde) goto err2; #ifdef CONFIG_IP_ROUTE_CLASSID pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops); if (!pde) goto err3; #endif return 0; #ifdef CONFIG_IP_ROUTE_CLASSID err3: remove_proc_entry("rt_cache", net->proc_net_stat); #endif err2: remove_proc_entry("rt_cache", net->proc_net); err1: return -ENOMEM; } static void __net_exit ip_rt_do_proc_exit(struct net *net) { remove_proc_entry("rt_cache", net->proc_net_stat); remove_proc_entry("rt_cache", net->proc_net); #ifdef CONFIG_IP_ROUTE_CLASSID remove_proc_entry("rt_acct", net->proc_net); #endif } static struct pernet_operations ip_rt_proc_ops __net_initdata = { .init = ip_rt_do_proc_init, .exit = ip_rt_do_proc_exit, }; static int __init ip_rt_proc_init(void) { return register_pernet_subsys(&ip_rt_proc_ops); } #else static inline int ip_rt_proc_init(void) { return 0; } #endif /* CONFIG_PROC_FS */ static inline bool rt_is_expired(const struct rtable *rth) { return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev)); } void rt_cache_flush(struct net *net) { rt_genid_bump_ipv4(net); } static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, struct sk_buff *skb, const void *daddr) { struct net_device *dev = dst->dev; const __be32 *pkey = daddr; const struct rtable *rt; struct 
neighbour *n; rt = (const struct rtable *) dst; if (rt->rt_gateway) pkey = (const __be32 *) &rt->rt_gateway; else if (skb) pkey = &ip_hdr(skb)->daddr; n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey); if (n) return n; return neigh_create(&arp_tbl, pkey, dev); } #define IP_IDENTS_SZ 2048u struct ip_ident_bucket { atomic_t id; u32 stamp32; }; static struct ip_ident_bucket *ip_idents __read_mostly; /* In order to protect privacy, we add a perturbation to identifiers * if one generator is seldom used. This makes it hard for an attacker * to infer how many packets were sent between two points in time. */ u32 ip_idents_reserve(u32 hash, int segs) { struct ip_ident_bucket *bucket = ip_idents + hash % IP_IDENTS_SZ; u32 old = ACCESS_ONCE(bucket->stamp32); u32 now = (u32)jiffies; u32 delta = 0; if (old != now && cmpxchg(&bucket->stamp32, old, now) == old) delta = prandom_u32_max(now - old); return atomic_add_return(segs + delta, &bucket->id) - segs; } EXPORT_SYMBOL(ip_idents_reserve); void __ip_select_ident(struct iphdr *iph, int segs) { static u32 ip_idents_hashrnd __read_mostly; u32 hash, id; net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd)); hash = jhash_3words((__force u32)iph->daddr, (__force u32)iph->saddr, iph->protocol, ip_idents_hashrnd); id = ip_idents_reserve(hash, segs); iph->id = htons(id); } EXPORT_SYMBOL(__ip_select_ident); static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk, const struct iphdr *iph, int oif, u8 tos, u8 prot, u32 mark, int flow_flags) { if (sk) { const struct inet_sock *inet = inet_sk(sk); oif = sk->sk_bound_dev_if; mark = sk->sk_mark; tos = RT_CONN_FLAGS(sk); prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol; } flowi4_init_output(fl4, oif, mark, tos, RT_SCOPE_UNIVERSE, prot, flow_flags, iph->daddr, iph->saddr, 0, 0); } static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb, const struct sock *sk) { const struct iphdr *iph = ip_hdr(skb); int oif = skb->dev->ifindex; u8 tos = RT_TOS(iph->tos); u8 prot = iph->protocol; u32 mark = skb->mark; __build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0); } static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk) { const struct inet_sock *inet = inet_sk(sk); const struct ip_options_rcu *inet_opt; __be32 daddr = inet->inet_daddr; rcu_read_lock(); inet_opt = rcu_dereference(inet->inet_opt); if (inet_opt && inet_opt->opt.srr) daddr = inet_opt->opt.faddr; flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark, RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, inet->hdrincl ?
IPPROTO_RAW : sk->sk_protocol, inet_sk_flowi_flags(sk), daddr, inet->inet_saddr, 0, 0); rcu_read_unlock(); } static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk, const struct sk_buff *skb) { if (skb) build_skb_flow_key(fl4, skb, sk); else build_sk_flow_key(fl4, sk); } static inline void rt_free(struct rtable *rt) { call_rcu(&rt->dst.rcu_head, dst_rcu_free); } static DEFINE_SPINLOCK(fnhe_lock); static void fnhe_flush_routes(struct fib_nh_exception *fnhe) { struct rtable *rt; rt = rcu_dereference(fnhe->fnhe_rth_input); if (rt) { RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL); rt_free(rt); } rt = rcu_dereference(fnhe->fnhe_rth_output); if (rt) { RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL); rt_free(rt); } } static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash) { struct fib_nh_exception *fnhe, *oldest; oldest = rcu_dereference(hash->chain); for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe; fnhe = rcu_dereference(fnhe->fnhe_next)) { if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp)) oldest = fnhe; } fnhe_flush_routes(oldest); return oldest; } static inline u32 fnhe_hashfun(__be32 daddr) { static u32 fnhe_hashrnd __read_mostly; u32 hval; net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd)); hval = jhash_1word((__force u32) daddr, fnhe_hashrnd); return hash_32(hval, FNHE_HASH_SHIFT); } static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe) { rt->rt_pmtu = fnhe->fnhe_pmtu; rt->dst.expires = fnhe->fnhe_expires; if (fnhe->fnhe_gw) { rt->rt_flags |= RTCF_REDIRECTED; rt->rt_gateway = fnhe->fnhe_gw; rt->rt_uses_gateway = 1; } } static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, u32 pmtu, unsigned long expires) { struct fnhe_hash_bucket *hash; struct fib_nh_exception *fnhe; struct rtable *rt; unsigned int i; int depth; u32 hval = fnhe_hashfun(daddr); spin_lock_bh(&fnhe_lock); hash = rcu_dereference(nh->nh_exceptions); if (!hash) { hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC); if (!hash) goto out_unlock; rcu_assign_pointer(nh->nh_exceptions, hash); } hash += hval; depth = 0; for (fnhe = rcu_dereference(hash->chain); fnhe; fnhe = rcu_dereference(fnhe->fnhe_next)) { if (fnhe->fnhe_daddr == daddr) break; depth++; } if (fnhe) { if (gw) fnhe->fnhe_gw = gw; if (pmtu) { fnhe->fnhe_pmtu = pmtu; fnhe->fnhe_expires = max(1UL, expires); } /* Update all cached dsts too */ rt = rcu_dereference(fnhe->fnhe_rth_input); if (rt) fill_route_from_fnhe(rt, fnhe); rt = rcu_dereference(fnhe->fnhe_rth_output); if (rt) fill_route_from_fnhe(rt, fnhe); } else { if (depth > FNHE_RECLAIM_DEPTH) fnhe = fnhe_oldest(hash); else { fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC); if (!fnhe) goto out_unlock; fnhe->fnhe_next = hash->chain; rcu_assign_pointer(hash->chain, fnhe); } fnhe->fnhe_genid = fnhe_genid(dev_net(nh->nh_dev)); fnhe->fnhe_daddr = daddr; fnhe->fnhe_gw = gw; fnhe->fnhe_pmtu = pmtu; fnhe->fnhe_expires = expires; /* Exception created; mark the cached routes for the nexthop * stale, so anyone caching it rechecks if this exception * applies to them. 
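* (Staleness is flagged by setting dst.obsolete to DST_OBSOLETE_KILL below; ipv4_dst_check() then fails and forces callers to perform a fresh lookup.)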
*/ rt = rcu_dereference(nh->nh_rth_input); if (rt) rt->dst.obsolete = DST_OBSOLETE_KILL; for_each_possible_cpu(i) { struct rtable __rcu **prt; prt = per_cpu_ptr(nh->nh_pcpu_rth_output, i); rt = rcu_dereference(*prt); if (rt) rt->dst.obsolete = DST_OBSOLETE_KILL; } } fnhe->fnhe_stamp = jiffies; out_unlock: spin_unlock_bh(&fnhe_lock); } static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4, bool kill_route) { __be32 new_gw = icmp_hdr(skb)->un.gateway; __be32 old_gw = ip_hdr(skb)->saddr; struct net_device *dev = skb->dev; struct in_device *in_dev; struct fib_result res; struct neighbour *n; struct net *net; switch (icmp_hdr(skb)->code & 7) { case ICMP_REDIR_NET: case ICMP_REDIR_NETTOS: case ICMP_REDIR_HOST: case ICMP_REDIR_HOSTTOS: break; default: return; } if (rt->rt_gateway != old_gw) return; in_dev = __in_dev_get_rcu(dev); if (!in_dev) return; net = dev_net(dev); if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) || ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) || ipv4_is_zeronet(new_gw)) goto reject_redirect; if (!IN_DEV_SHARED_MEDIA(in_dev)) { if (!inet_addr_onlink(in_dev, new_gw, old_gw)) goto reject_redirect; if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev)) goto reject_redirect; } else { if (inet_addr_type(net, new_gw) != RTN_UNICAST) goto reject_redirect; } n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw); if (!IS_ERR(n)) { if (!(n->nud_state & NUD_VALID)) { neigh_event_send(n, NULL); } else { if (fib_lookup(net, fl4, &res) == 0) { struct fib_nh *nh = &FIB_RES_NH(res); update_or_create_fnhe(nh, fl4->daddr, new_gw, 0, 0); } if (kill_route) rt->dst.obsolete = DST_OBSOLETE_KILL; call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n); } neigh_release(n); } return; reject_redirect: #ifdef CONFIG_IP_ROUTE_VERBOSE if (IN_DEV_LOG_MARTIANS(in_dev)) { const struct iphdr *iph = (const struct iphdr *) skb->data; __be32 daddr = iph->daddr; __be32 saddr = iph->saddr; net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n" " Advised path = %pI4 -> %pI4\n", &old_gw, dev->name, &new_gw, &saddr, &daddr); } #endif ; } static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb) { struct rtable *rt; struct flowi4 fl4; const struct iphdr *iph = (const struct iphdr *) skb->data; int oif = skb->dev->ifindex; u8 tos = RT_TOS(iph->tos); u8 prot = iph->protocol; u32 mark = skb->mark; rt = (struct rtable *) dst; __build_flow_key(&fl4, sk, iph, oif, tos, prot, mark, 0); __ip_do_redirect(rt, skb, &fl4, true); } static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) { struct rtable *rt = (struct rtable *)dst; struct dst_entry *ret = dst; if (rt) { if (dst->obsolete > 0) { ip_rt_put(rt); ret = NULL; } else if ((rt->rt_flags & RTCF_REDIRECTED) || rt->dst.expires) { ip_rt_put(rt); ret = NULL; } } return ret; } /* * Algorithm: * 1. The first ip_rt_redirect_number redirects are sent * with exponential backoff, then we stop sending them at all, * assuming that the host ignores our redirects. * 2. If we did not see packets requiring redirects * during ip_rt_redirect_silence, we assume that the host * forgot redirected route and start to send redirects again. * * This algorithm is much cheaper and more intelligent than dumb load limiting * in icmp.c. * * NOTE. Do not forget to inhibit load limiting for redirects (redundant) * and "frag. need" (breaks PMTU discovery) in icmp.c. 
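* Worked example with the defaults above (ip_rt_redirect_number = 9, ip_rt_redirect_load = HZ/50): after the k-th redirect the next one may be sent only (HZ/50) << k jiffies later, i.e. roughly 40 ms, 80 ms, 160 ms, ...; after nine redirects nothing is sent until ip_rt_redirect_silence (about 20 seconds) passes without further redirected packets.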
*/ void ip_rt_send_redirect(struct sk_buff *skb) { struct rtable *rt = skb_rtable(skb); struct in_device *in_dev; struct inet_peer *peer; struct net *net; int log_martians; rcu_read_lock(); in_dev = __in_dev_get_rcu(rt->dst.dev); if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) { rcu_read_unlock(); return; } log_martians = IN_DEV_LOG_MARTIANS(in_dev); rcu_read_unlock(); net = dev_net(rt->dst.dev); peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1); if (!peer) { icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt_nexthop(rt, ip_hdr(skb)->daddr)); return; } /* No redirected packets during ip_rt_redirect_silence; * reset the algorithm. */ if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) peer->rate_tokens = 0; /* Too many ignored redirects; do not send anything; * set peer->rate_last to the last seen redirected packet. */ if (peer->rate_tokens >= ip_rt_redirect_number) { peer->rate_last = jiffies; goto out_put_peer; } /* Check for load limit; set rate_last to the latest sent * redirect. */ if (peer->rate_tokens == 0 || time_after(jiffies, (peer->rate_last + (ip_rt_redirect_load << peer->rate_tokens)))) { __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr); icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw); peer->rate_last = jiffies; ++peer->rate_tokens; #ifdef CONFIG_IP_ROUTE_VERBOSE if (log_martians && peer->rate_tokens == ip_rt_redirect_number) net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n", &ip_hdr(skb)->saddr, inet_iif(skb), &ip_hdr(skb)->daddr, &gw); #endif } out_put_peer: inet_putpeer(peer); } static int ip_error(struct sk_buff *skb) { struct in_device *in_dev = __in_dev_get_rcu(skb->dev); struct rtable *rt = skb_rtable(skb); struct inet_peer *peer; unsigned long now; struct net *net; bool send; int code; net = dev_net(rt->dst.dev); if (!IN_DEV_FORWARD(in_dev)) { switch (rt->dst.error) { case EHOSTUNREACH: IP_INC_STATS_BH(net, IPSTATS_MIB_INADDRERRORS); break; case ENETUNREACH: IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES); break; } goto out; } switch (rt->dst.error) { case EINVAL: default: goto out; case EHOSTUNREACH: code = ICMP_HOST_UNREACH; break; case ENETUNREACH: code = ICMP_NET_UNREACH; IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES); break; case EACCES: code = ICMP_PKT_FILTERED; break; } peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1); send = true; if (peer) { now = jiffies; peer->rate_tokens += now - peer->rate_last; if (peer->rate_tokens > ip_rt_error_burst) peer->rate_tokens = ip_rt_error_burst; peer->rate_last = now; if (peer->rate_tokens >= ip_rt_error_cost) peer->rate_tokens -= ip_rt_error_cost; else send = false; inet_putpeer(peer); } if (send) icmp_send(skb, ICMP_DEST_UNREACH, code, 0); out: kfree_skb(skb); return 0; } static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) { struct dst_entry *dst = &rt->dst; struct fib_result res; if (dst_metric_locked(dst, RTAX_MTU)) return; if (dst->dev->mtu < mtu) return; if (mtu < ip_rt_min_pmtu) mtu = ip_rt_min_pmtu; if (rt->rt_pmtu == mtu && time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2)) return; rcu_read_lock(); if (fib_lookup(dev_net(dst->dev), fl4, &res) == 0) { struct fib_nh *nh = &FIB_RES_NH(res); update_or_create_fnhe(nh, fl4->daddr, 0, mtu, jiffies + ip_rt_mtu_expires); } rcu_read_unlock(); } static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, u32 mtu) { struct rtable *rt = (struct rtable *) dst; struct flowi4 fl4; ip_rt_build_flow_key(&fl4, sk, skb); __ip_rt_update_pmtu(rt, &fl4, mtu); } void
ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, int oif, u32 mark, u8 protocol, int flow_flags) { const struct iphdr *iph = (const struct iphdr *) skb->data; struct flowi4 fl4; struct rtable *rt; if (!mark) mark = IP4_REPLY_MARK(net, skb->mark); __build_flow_key(&fl4, NULL, iph, oif, RT_TOS(iph->tos), protocol, mark, flow_flags); rt = __ip_route_output_key(net, &fl4); if (!IS_ERR(rt)) { __ip_rt_update_pmtu(rt, &fl4, mtu); ip_rt_put(rt); } } EXPORT_SYMBOL_GPL(ipv4_update_pmtu); static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu) { const struct iphdr *iph = (const struct iphdr *) skb->data; struct flowi4 fl4; struct rtable *rt; __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0); if (!fl4.flowi4_mark) fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark); rt = __ip_route_output_key(sock_net(sk), &fl4); if (!IS_ERR(rt)) { __ip_rt_update_pmtu(rt, &fl4, mtu); ip_rt_put(rt); } } void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu) { const struct iphdr *iph = (const struct iphdr *) skb->data; struct flowi4 fl4; struct rtable *rt; struct dst_entry *odst = NULL; bool new = false; bh_lock_sock(sk); if (!ip_sk_accept_pmtu(sk)) goto out; odst = sk_dst_get(sk); if (sock_owned_by_user(sk) || !odst) { __ipv4_sk_update_pmtu(skb, sk, mtu); goto out; } __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0); rt = (struct rtable *)odst; if (odst->obsolete && odst->ops->check(odst, 0) == NULL) { rt = ip_route_output_flow(sock_net(sk), &fl4, sk); if (IS_ERR(rt)) goto out; new = true; } __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu); if (!dst_check(&rt->dst, 0)) { if (new) dst_release(&rt->dst); rt = ip_route_output_flow(sock_net(sk), &fl4, sk); if (IS_ERR(rt)) goto out; new = true; } if (new) sk_dst_set(sk, &rt->dst); out: bh_unlock_sock(sk); dst_release(odst); } EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu); void ipv4_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark, u8 protocol, int flow_flags) { const struct iphdr *iph = (const struct iphdr *) skb->data; struct flowi4 fl4; struct rtable *rt; __build_flow_key(&fl4, NULL, iph, oif, RT_TOS(iph->tos), protocol, mark, flow_flags); rt = __ip_route_output_key(net, &fl4); if (!IS_ERR(rt)) { __ip_do_redirect(rt, skb, &fl4, false); ip_rt_put(rt); } } EXPORT_SYMBOL_GPL(ipv4_redirect); void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk) { const struct iphdr *iph = (const struct iphdr *) skb->data; struct flowi4 fl4; struct rtable *rt; __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0); rt = __ip_route_output_key(sock_net(sk), &fl4); if (!IS_ERR(rt)) { __ip_do_redirect(rt, skb, &fl4, false); ip_rt_put(rt); } } EXPORT_SYMBOL_GPL(ipv4_sk_redirect); static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie) { struct rtable *rt = (struct rtable *) dst; /* All IPV4 dsts are created with ->obsolete set to the value * DST_OBSOLETE_FORCE_CHK which forces validation calls down * into this function always. * * When a PMTU/redirect information update invalidates a route, * this is indicated by setting obsolete to DST_OBSOLETE_KILL or * DST_OBSOLETE_DEAD by dst_free(). 
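* (rt_is_expired() below additionally rejects routes whose generation id no longer matches the namespace's, e.g. after rt_cache_flush() bumped it.)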
*/ if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt)) return NULL; return dst; } static void ipv4_link_failure(struct sk_buff *skb) { struct rtable *rt; icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); rt = skb_rtable(skb); if (rt) dst_set_expires(&rt->dst, 0); } static int ip_rt_bug(struct sock *sk, struct sk_buff *skb) { pr_debug("%s: %pI4 -> %pI4, %s\n", __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, skb->dev ? skb->dev->name : "?"); kfree_skb(skb); WARN_ON(1); return 0; } /* We do not cache the source address of the outgoing interface, because it is used only by IP RR, TS and SRR options, so that it is out of the fast path. BTW remember: "addr" is allowed to be unaligned in IP options! */ void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt) { __be32 src; if (rt_is_output_route(rt)) src = ip_hdr(skb)->saddr; else { struct fib_result res; struct flowi4 fl4; struct iphdr *iph; iph = ip_hdr(skb); memset(&fl4, 0, sizeof(fl4)); fl4.daddr = iph->daddr; fl4.saddr = iph->saddr; fl4.flowi4_tos = RT_TOS(iph->tos); fl4.flowi4_oif = rt->dst.dev->ifindex; fl4.flowi4_iif = skb->dev->ifindex; fl4.flowi4_mark = skb->mark; rcu_read_lock(); if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0) src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res); else src = inet_select_addr(rt->dst.dev, rt_nexthop(rt, iph->daddr), RT_SCOPE_UNIVERSE); rcu_read_unlock(); } memcpy(addr, &src, 4); } #ifdef CONFIG_IP_ROUTE_CLASSID static void set_class_tag(struct rtable *rt, u32 tag) { if (!(rt->dst.tclassid & 0xFFFF)) rt->dst.tclassid |= tag & 0xFFFF; if (!(rt->dst.tclassid & 0xFFFF0000)) rt->dst.tclassid |= tag & 0xFFFF0000; } #endif static unsigned int ipv4_default_advmss(const struct dst_entry *dst) { unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS); if (advmss == 0) { advmss = max_t(unsigned int, dst->dev->mtu - 40, ip_rt_min_advmss); if (advmss > 65535 - 40) advmss = 65535 - 40; } return advmss; } static unsigned int ipv4_mtu(const struct dst_entry *dst) { const struct rtable *rt = (const struct rtable *) dst; unsigned int mtu = rt->rt_pmtu; if (!mtu || time_after_eq(jiffies, rt->dst.expires)) mtu = dst_metric_raw(dst, RTAX_MTU); if (mtu) return mtu; mtu = dst->dev->mtu; if (unlikely(dst_metric_locked(dst, RTAX_MTU))) { if (rt->rt_uses_gateway && mtu > 576) mtu = 576; } return min_t(unsigned int, mtu, IP_MAX_MTU); } static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr) { struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions); struct fib_nh_exception *fnhe; u32 hval; if (!hash) return NULL; hval = fnhe_hashfun(daddr); for (fnhe = rcu_dereference(hash[hval].chain); fnhe; fnhe = rcu_dereference(fnhe->fnhe_next)) { if (fnhe->fnhe_daddr == daddr) return fnhe; } return NULL; } static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe, __be32 daddr) { bool ret = false; spin_lock_bh(&fnhe_lock); if (daddr == fnhe->fnhe_daddr) { struct rtable __rcu **porig; struct rtable *orig; int genid = fnhe_genid(dev_net(rt->dst.dev)); if (rt_is_input_route(rt)) porig = &fnhe->fnhe_rth_input; else porig = &fnhe->fnhe_rth_output; orig = rcu_dereference(*porig); if (fnhe->fnhe_genid != genid) { fnhe->fnhe_genid = genid; fnhe->fnhe_gw = 0; fnhe->fnhe_pmtu = 0; fnhe->fnhe_expires = 0; fnhe_flush_routes(fnhe); orig = NULL; } fill_route_from_fnhe(rt, fnhe); if (!rt->rt_gateway) rt->rt_gateway = daddr; if (!(rt->dst.flags & DST_NOCACHE)) { rcu_assign_pointer(*porig, rt); if (orig) rt_free(orig); ret = true; } fnhe->fnhe_stamp = jiffies; }
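/* unlock on every path; a false return makes rt_set_nexthop() fall back to marking the route DST_NOCACHE */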
spin_unlock_bh(&fnhe_lock); return ret; } static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt) { struct rtable *orig, *prev, **p; bool ret = true; if (rt_is_input_route(rt)) { p = (struct rtable **)&nh->nh_rth_input; } else { p = (struct rtable **)raw_cpu_ptr(nh->nh_pcpu_rth_output); } orig = *p; prev = cmpxchg(p, orig, rt); if (prev == orig) { if (orig) rt_free(orig); } else ret = false; return ret; } static DEFINE_SPINLOCK(rt_uncached_lock); static LIST_HEAD(rt_uncached_list); static void rt_add_uncached_list(struct rtable *rt) { spin_lock_bh(&rt_uncached_lock); list_add_tail(&rt->rt_uncached, &rt_uncached_list); spin_unlock_bh(&rt_uncached_lock); } static void ipv4_dst_destroy(struct dst_entry *dst) { struct rtable *rt = (struct rtable *) dst; if (!list_empty(&rt->rt_uncached)) { spin_lock_bh(&rt_uncached_lock); list_del(&rt->rt_uncached); spin_unlock_bh(&rt_uncached_lock); } } void rt_flush_dev(struct net_device *dev) { if (!list_empty(&rt_uncached_list)) { struct net *net = dev_net(dev); struct rtable *rt; spin_lock_bh(&rt_uncached_lock); list_for_each_entry(rt, &rt_uncached_list, rt_uncached) { if (rt->dst.dev != dev) continue; rt->dst.dev = net->loopback_dev; dev_hold(rt->dst.dev); dev_put(dev); } spin_unlock_bh(&rt_uncached_lock); } } static bool rt_cache_valid(const struct rtable *rt) { return rt && rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK && !rt_is_expired(rt); } static void rt_set_nexthop(struct rtable *rt, __be32 daddr, const struct fib_result *res, struct fib_nh_exception *fnhe, struct fib_info *fi, u16 type, u32 itag) { bool cached = false; if (fi) { struct fib_nh *nh = &FIB_RES_NH(*res); if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) { rt->rt_gateway = nh->nh_gw; rt->rt_uses_gateway = 1; } dst_init_metrics(&rt->dst, fi->fib_metrics, true); #ifdef CONFIG_IP_ROUTE_CLASSID rt->dst.tclassid = nh->nh_tclassid; #endif if (unlikely(fnhe)) cached = rt_bind_exception(rt, fnhe, daddr); else if (!(rt->dst.flags & DST_NOCACHE)) cached = rt_cache_route(nh, rt); if (unlikely(!cached)) { /* Routes we intend to cache in nexthop exception or * FIB nexthop have the DST_NOCACHE bit clear. * However, if we are unsuccessful at storing this * route into the cache we really need to set it. */ rt->dst.flags |= DST_NOCACHE; if (!rt->rt_gateway) rt->rt_gateway = daddr; rt_add_uncached_list(rt); } } else rt_add_uncached_list(rt); #ifdef CONFIG_IP_ROUTE_CLASSID #ifdef CONFIG_IP_MULTIPLE_TABLES set_class_tag(rt, res->tclassid); #endif set_class_tag(rt, itag); #endif } static struct rtable *rt_dst_alloc(struct net_device *dev, bool nopolicy, bool noxfrm, bool will_cache) { return dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK, (will_cache ? 0 : (DST_HOST | DST_NOCACHE)) | (nopolicy ? DST_NOPOLICY : 0) | (noxfrm ? DST_NOXFRM : 0)); } /* called in rcu_read_lock() section */ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, u8 tos, struct net_device *dev, int our) { struct rtable *rth; struct in_device *in_dev = __in_dev_get_rcu(dev); u32 itag = 0; int err; /* Primary sanity checks. 
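* (the source must be a plausible unicast address and the frame must carry an IPv4 ethertype; violations are answered with -EINVAL below)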
*/ if (in_dev == NULL) return -EINVAL; if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) || skb->protocol != htons(ETH_P_IP)) goto e_inval; if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev))) if (ipv4_is_loopback(saddr)) goto e_inval; if (ipv4_is_zeronet(saddr)) { if (!ipv4_is_local_multicast(daddr)) goto e_inval; } else { err = fib_validate_source(skb, saddr, 0, tos, 0, dev, in_dev, &itag); if (err < 0) goto e_err; } rth = rt_dst_alloc(dev_net(dev)->loopback_dev, IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false); if (!rth) goto e_nobufs; #ifdef CONFIG_IP_ROUTE_CLASSID rth->dst.tclassid = itag; #endif rth->dst.output = ip_rt_bug; rth->rt_genid = rt_genid_ipv4(dev_net(dev)); rth->rt_flags = RTCF_MULTICAST; rth->rt_type = RTN_MULTICAST; rth->rt_is_input= 1; rth->rt_iif = 0; rth->rt_pmtu = 0; rth->rt_gateway = 0; rth->rt_uses_gateway = 0; INIT_LIST_HEAD(&rth->rt_uncached); if (our) { rth->dst.input= ip_local_deliver; rth->rt_flags |= RTCF_LOCAL; } #ifdef CONFIG_IP_MROUTE if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev)) rth->dst.input = ip_mr_input; #endif RT_CACHE_STAT_INC(in_slow_mc); skb_dst_set(skb, &rth->dst); return 0; e_nobufs: return -ENOBUFS; e_inval: return -EINVAL; e_err: return err; } static void ip_handle_martian_source(struct net_device *dev, struct in_device *in_dev, struct sk_buff *skb, __be32 daddr, __be32 saddr) { RT_CACHE_STAT_INC(in_martian_src); #ifdef CONFIG_IP_ROUTE_VERBOSE if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) { /* * RFC1812 recommendation: if the source is martian, * the only hint is the MAC header. */ pr_warn("martian source %pI4 from %pI4, on dev %s\n", &daddr, &saddr, dev->name); if (dev->hard_header_len && skb_mac_header_was_set(skb)) { print_hex_dump(KERN_WARNING, "ll header: ", DUMP_PREFIX_OFFSET, 16, 1, skb_mac_header(skb), dev->hard_header_len, true); } } #endif } /* called in rcu_read_lock() section */ static int __mkroute_input(struct sk_buff *skb, const struct fib_result *res, struct in_device *in_dev, __be32 daddr, __be32 saddr, u32 tos) { struct fib_nh_exception *fnhe; struct rtable *rth; int err; struct in_device *out_dev; unsigned int flags = 0; bool do_cache; u32 itag = 0; /* get a working reference to the output device */ out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res)); if (out_dev == NULL) { net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n"); return -EINVAL; } err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res), in_dev->dev, in_dev, &itag); if (err < 0) { ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr, saddr); goto cleanup; } do_cache = res->fi && !itag; if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) && (IN_DEV_SHARED_MEDIA(out_dev) || inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) { flags |= RTCF_DOREDIRECT; do_cache = false; } if (skb->protocol != htons(ETH_P_IP)) { /* Not IP (i.e. ARP). Do not create a route if it is * invalid for proxy arp. DNAT routes are always valid. * * The proxy arp feature has been extended to allow ARP * replies back to the same interface, to support * Private VLAN switch technologies. See arp.c.
*/ if (out_dev == in_dev && IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) { err = -EINVAL; goto cleanup; } } fnhe = find_exception(&FIB_RES_NH(*res), daddr); if (do_cache) { if (fnhe != NULL) rth = rcu_dereference(fnhe->fnhe_rth_input); else rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); if (rt_cache_valid(rth)) { skb_dst_set_noref(skb, &rth->dst); goto out; } } rth = rt_dst_alloc(out_dev->dev, IN_DEV_CONF_GET(in_dev, NOPOLICY), IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache); if (!rth) { err = -ENOBUFS; goto cleanup; } rth->rt_genid = rt_genid_ipv4(dev_net(rth->dst.dev)); rth->rt_flags = flags; rth->rt_type = res->type; rth->rt_is_input = 1; rth->rt_iif = 0; rth->rt_pmtu = 0; rth->rt_gateway = 0; rth->rt_uses_gateway = 0; INIT_LIST_HEAD(&rth->rt_uncached); RT_CACHE_STAT_INC(in_slow_tot); rth->dst.input = ip_forward; rth->dst.output = ip_output; rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag); skb_dst_set(skb, &rth->dst); out: err = 0; cleanup: return err; } static int ip_mkroute_input(struct sk_buff *skb, struct fib_result *res, const struct flowi4 *fl4, struct in_device *in_dev, __be32 daddr, __be32 saddr, u32 tos) { #ifdef CONFIG_IP_ROUTE_MULTIPATH if (res->fi && res->fi->fib_nhs > 1) fib_select_multipath(res); #endif /* create a routing cache entry */ return __mkroute_input(skb, res, in_dev, daddr, saddr, tos); } /* * NOTE. We drop all packets that have a local source * address, because every properly looped-back packet * must have the correct destination already attached by the output routine. * * This approach solves two big problems: * 1. Non-simplex devices are handled properly. * 2. IP spoofing attempts are filtered with a 100% guarantee. * called with rcu_read_lock() */ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, u8 tos, struct net_device *dev) { struct fib_result res; struct in_device *in_dev = __in_dev_get_rcu(dev); struct flowi4 fl4; unsigned int flags = 0; u32 itag = 0; struct rtable *rth; int err = -EINVAL; struct net *net = dev_net(dev); bool do_cache; /* IP on this device is disabled. */ if (!in_dev) goto out; /* Check for the weirdest martians, which cannot be detected by fib_lookup. */ if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr)) goto martian_source; res.fi = NULL; if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0)) goto brd_input; /* Accept zero addresses only to limited broadcast; * I do not even know whether to fix this or not. Waiting for complaints :-) */ if (ipv4_is_zeronet(saddr)) goto martian_source; if (ipv4_is_zeronet(daddr)) goto martian_destination; /* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET() twice, * calling it at most once when daddr and/or saddr is a loopback address */ if (ipv4_is_loopback(daddr)) { if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) goto martian_destination; } else if (ipv4_is_loopback(saddr)) { if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) goto martian_source; } /* * Now we are ready to route the packet.
*/ fl4.flowi4_oif = 0; fl4.flowi4_iif = dev->ifindex; fl4.flowi4_mark = skb->mark; fl4.flowi4_tos = tos; fl4.flowi4_scope = RT_SCOPE_UNIVERSE; fl4.daddr = daddr; fl4.saddr = saddr; err = fib_lookup(net, &fl4, &res); if (err != 0) { if (!IN_DEV_FORWARD(in_dev)) err = -EHOSTUNREACH; goto no_route; } if (res.type == RTN_BROADCAST) goto brd_input; if (res.type == RTN_LOCAL) { err = fib_validate_source(skb, saddr, daddr, tos, 0, dev, in_dev, &itag); if (err < 0) goto martian_source_keep_err; goto local_input; } if (!IN_DEV_FORWARD(in_dev)) { err = -EHOSTUNREACH; goto no_route; } if (res.type != RTN_UNICAST) goto martian_destination; err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos); out: return err; brd_input: if (skb->protocol != htons(ETH_P_IP)) goto e_inval; if (!ipv4_is_zeronet(saddr)) { err = fib_validate_source(skb, saddr, 0, tos, 0, dev, in_dev, &itag); if (err < 0) goto martian_source_keep_err; } flags |= RTCF_BROADCAST; res.type = RTN_BROADCAST; RT_CACHE_STAT_INC(in_brd); local_input: do_cache = false; if (res.fi) { if (!itag) { rth = rcu_dereference(FIB_RES_NH(res).nh_rth_input); if (rt_cache_valid(rth)) { skb_dst_set_noref(skb, &rth->dst); err = 0; goto out; } do_cache = true; } } rth = rt_dst_alloc(net->loopback_dev, IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache); if (!rth) goto e_nobufs; rth->dst.input = ip_local_deliver; rth->dst.output = ip_rt_bug; #ifdef CONFIG_IP_ROUTE_CLASSID rth->dst.tclassid = itag; #endif rth->rt_genid = rt_genid_ipv4(net); rth->rt_flags = flags | RTCF_LOCAL; rth->rt_type = res.type; rth->rt_is_input = 1; rth->rt_iif = 0; rth->rt_pmtu = 0; rth->rt_gateway = 0; rth->rt_uses_gateway = 0; INIT_LIST_HEAD(&rth->rt_uncached); RT_CACHE_STAT_INC(in_slow_tot); if (res.type == RTN_UNREACHABLE) { rth->dst.input = ip_error; rth->dst.error = -err; rth->rt_flags &= ~RTCF_LOCAL; } if (do_cache) { if (unlikely(!rt_cache_route(&FIB_RES_NH(res), rth))) { rth->dst.flags |= DST_NOCACHE; rt_add_uncached_list(rth); } } skb_dst_set(skb, &rth->dst); err = 0; goto out; no_route: RT_CACHE_STAT_INC(in_no_route); res.type = RTN_UNREACHABLE; goto local_input; /* * Do not cache martian addresses: they should be logged (RFC1812) */ martian_destination: RT_CACHE_STAT_INC(in_martian_dst); #ifdef CONFIG_IP_ROUTE_VERBOSE if (IN_DEV_LOG_MARTIANS(in_dev)) net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n", &daddr, &saddr, dev->name); #endif e_inval: err = -EINVAL; goto out; e_nobufs: err = -ENOBUFS; goto out; martian_source: err = -EINVAL; martian_source_keep_err: ip_handle_martian_source(dev, in_dev, skb, daddr, saddr); goto out; } int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr, u8 tos, struct net_device *dev) { int res; rcu_read_lock(); /* Multicast recognition logic was moved from the route cache to here. The problem was that too many Ethernet cards have broken/missing hardware multicast filters :-( As a result, a host on a multicast network acquires a lot of useless route cache entries, sort of SDR messages from all over the world. Now we try to get rid of them. Really, provided the software IP multicast filter is organized reasonably (at least, hashed), it does not result in a slowdown compared with route cache reject entries. Note that multicast routers are not affected, because a route cache entry is created eventually.
*/ if (ipv4_is_multicast(daddr)) { struct in_device *in_dev = __in_dev_get_rcu(dev); if (in_dev) { int our = ip_check_mc_rcu(in_dev, daddr, saddr, ip_hdr(skb)->protocol); if (our #ifdef CONFIG_IP_MROUTE || (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev)) #endif ) { int res = ip_route_input_mc(skb, daddr, saddr, tos, dev, our); rcu_read_unlock(); return res; } } rcu_read_unlock(); return -EINVAL; } res = ip_route_input_slow(skb, daddr, saddr, tos, dev); rcu_read_unlock(); return res; } EXPORT_SYMBOL(ip_route_input_noref); /* called with rcu_read_lock() */ static struct rtable *__mkroute_output(const struct fib_result *res, const struct flowi4 *fl4, int orig_oif, struct net_device *dev_out, unsigned int flags) { struct fib_info *fi = res->fi; struct fib_nh_exception *fnhe; struct in_device *in_dev; u16 type = res->type; struct rtable *rth; bool do_cache; in_dev = __in_dev_get_rcu(dev_out); if (!in_dev) return ERR_PTR(-EINVAL); if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev))) if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK)) return ERR_PTR(-EINVAL); if (ipv4_is_lbcast(fl4->daddr)) type = RTN_BROADCAST; else if (ipv4_is_multicast(fl4->daddr)) type = RTN_MULTICAST; else if (ipv4_is_zeronet(fl4->daddr)) return ERR_PTR(-EINVAL); if (dev_out->flags & IFF_LOOPBACK) flags |= RTCF_LOCAL; do_cache = true; if (type == RTN_BROADCAST) { flags |= RTCF_BROADCAST | RTCF_LOCAL; fi = NULL; } else if (type == RTN_MULTICAST) { flags |= RTCF_MULTICAST | RTCF_LOCAL; if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr, fl4->flowi4_proto)) flags &= ~RTCF_LOCAL; else do_cache = false; /* If a multicast route does not exist, use * the default one, but do not gateway in this case. * Yes, it is a hack. */ if (fi && res->prefixlen < 4) fi = NULL; } fnhe = NULL; do_cache &= fi != NULL; if (do_cache) { struct rtable __rcu **prth; struct fib_nh *nh = &FIB_RES_NH(*res); fnhe = find_exception(nh, fl4->daddr); if (fnhe) prth = &fnhe->fnhe_rth_output; else { if (unlikely(fl4->flowi4_flags & FLOWI_FLAG_KNOWN_NH && !(nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK))) { do_cache = false; goto add; } prth = raw_cpu_ptr(nh->nh_pcpu_rth_output); } rth = rcu_dereference(*prth); if (rt_cache_valid(rth)) { dst_hold(&rth->dst); return rth; } } add: rth = rt_dst_alloc(dev_out, IN_DEV_CONF_GET(in_dev, NOPOLICY), IN_DEV_CONF_GET(in_dev, NOXFRM), do_cache); if (!rth) return ERR_PTR(-ENOBUFS); rth->dst.output = ip_output; rth->rt_genid = rt_genid_ipv4(dev_net(dev_out)); rth->rt_flags = flags; rth->rt_type = type; rth->rt_is_input = 0; rth->rt_iif = orig_oif ? : 0; rth->rt_pmtu = 0; rth->rt_gateway = 0; rth->rt_uses_gateway = 0; INIT_LIST_HEAD(&rth->rt_uncached); RT_CACHE_STAT_INC(out_slow_tot); if (flags & RTCF_LOCAL) rth->dst.input = ip_local_deliver; if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) { if (flags & RTCF_LOCAL && !(dev_out->flags & IFF_LOOPBACK)) { rth->dst.output = ip_mc_output; RT_CACHE_STAT_INC(out_slow_mc); } #ifdef CONFIG_IP_MROUTE if (type == RTN_MULTICAST) { if (IN_DEV_MFORWARD(in_dev) && !ipv4_is_local_multicast(fl4->daddr)) { rth->dst.input = ip_mr_input; rth->dst.output = ip_mc_output; } } #endif } rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0); return rth; } /* * Major route resolver routine.
*/ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4) { struct net_device *dev_out = NULL; __u8 tos = RT_FL_TOS(fl4); unsigned int flags = 0; struct fib_result res; struct rtable *rth; int orig_oif; res.tclassid = 0; res.fi = NULL; res.table = NULL; orig_oif = fl4->flowi4_oif; fl4->flowi4_iif = LOOPBACK_IFINDEX; fl4->flowi4_tos = tos & IPTOS_RT_MASK; fl4->flowi4_scope = ((tos & RTO_ONLINK) ? RT_SCOPE_LINK : RT_SCOPE_UNIVERSE); rcu_read_lock(); if (fl4->saddr) { rth = ERR_PTR(-EINVAL); if (ipv4_is_multicast(fl4->saddr) || ipv4_is_lbcast(fl4->saddr) || ipv4_is_zeronet(fl4->saddr)) goto out; /* I removed the check for oif == dev_out->oif here. It was wrong for two reasons: 1. ip_dev_find(net, saddr) can return the wrong iface if saddr is assigned to multiple interfaces. 2. Moreover, we are allowed to send packets with the saddr of another iface. --ANK */ if (fl4->flowi4_oif == 0 && (ipv4_is_multicast(fl4->daddr) || ipv4_is_lbcast(fl4->daddr))) { /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */ dev_out = __ip_dev_find(net, fl4->saddr, false); if (dev_out == NULL) goto out; /* Special hack: the user can direct multicasts and limited broadcast via the necessary interface without fiddling with IP_MULTICAST_IF or IP_PKTINFO. This hack is not just for fun, it allows vic, vat and friends to work. They bind the socket to loopback, set ttl to zero and expect that it will work. From the viewpoint of the routing cache they are broken, because we are not allowed to build a multicast path with a loopback source addr (look, the routing cache cannot know that ttl is zero, so that the packet will not leave this host and the route is valid). Luckily, this hack is a good workaround. */ fl4->flowi4_oif = dev_out->ifindex; goto make_route; } if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) { /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */ if (!__ip_dev_find(net, fl4->saddr, false)) goto out; } } if (fl4->flowi4_oif) { dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif); rth = ERR_PTR(-ENODEV); if (dev_out == NULL) goto out; /* RACE: Check the return value of inet_select_addr instead. */ if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) { rth = ERR_PTR(-ENETUNREACH); goto out; } if (ipv4_is_local_multicast(fl4->daddr) || ipv4_is_lbcast(fl4->daddr)) { if (!fl4->saddr) fl4->saddr = inet_select_addr(dev_out, 0, RT_SCOPE_LINK); goto make_route; } if (!fl4->saddr) { if (ipv4_is_multicast(fl4->daddr)) fl4->saddr = inet_select_addr(dev_out, 0, fl4->flowi4_scope); else if (!fl4->daddr) fl4->saddr = inet_select_addr(dev_out, 0, RT_SCOPE_HOST); } } if (!fl4->daddr) { fl4->daddr = fl4->saddr; if (!fl4->daddr) fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK); dev_out = net->loopback_dev; fl4->flowi4_oif = LOOPBACK_IFINDEX; res.type = RTN_LOCAL; flags |= RTCF_LOCAL; goto make_route; } if (fib_lookup(net, fl4, &res)) { res.fi = NULL; res.table = NULL; if (fl4->flowi4_oif) { /* Apparently, the routing tables are wrong. Assume that the destination is on-link. WHY? DW. Because we are allowed to send to an iface even if it has NO routes and NO assigned addresses. When oif is specified, the routing tables are looked up with only one purpose: to catch if the destination is gatewayed, rather than direct. Moreover, if MSG_DONTROUTE is set, we send the packet, ignoring both the routing tables and the ifaddr state. --ANK We could make it even if oif is unknown, likely IPv6, but we do not.
*/ if (fl4->saddr == 0) fl4->saddr = inet_select_addr(dev_out, 0, RT_SCOPE_LINK); res.type = RTN_UNICAST; goto make_route; } rth = ERR_PTR(-ENETUNREACH); goto out; } if (res.type == RTN_LOCAL) { if (!fl4->saddr) { if (res.fi->fib_prefsrc) fl4->saddr = res.fi->fib_prefsrc; else fl4->saddr = fl4->daddr; } dev_out = net->loopback_dev; fl4->flowi4_oif = dev_out->ifindex; flags |= RTCF_LOCAL; goto make_route; } #ifdef CONFIG_IP_ROUTE_MULTIPATH if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0) fib_select_multipath(&res); else #endif if (!res.prefixlen && res.table->tb_num_default > 1 && res.type == RTN_UNICAST && !fl4->flowi4_oif) fib_select_default(&res); if (!fl4->saddr) fl4->saddr = FIB_RES_PREFSRC(net, res); dev_out = FIB_RES_DEV(res); fl4->flowi4_oif = dev_out->ifindex; make_route: rth = __mkroute_output(&res, fl4, orig_oif, dev_out, flags); out: rcu_read_unlock(); return rth; } EXPORT_SYMBOL_GPL(__ip_route_output_key); static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie) { return NULL; } static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst) { unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); return mtu ? : dst->dev->mtu; } static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, u32 mtu) { } static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb) { } static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old) { return NULL; } static struct dst_ops ipv4_dst_blackhole_ops = { .family = AF_INET, .protocol = cpu_to_be16(ETH_P_IP), .check = ipv4_blackhole_dst_check, .mtu = ipv4_blackhole_mtu, .default_advmss = ipv4_default_advmss, .update_pmtu = ipv4_rt_blackhole_update_pmtu, .redirect = ipv4_rt_blackhole_redirect, .cow_metrics = ipv4_rt_blackhole_cow_metrics, .neigh_lookup = ipv4_neigh_lookup, }; struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig) { struct rtable *ort = (struct rtable *) dst_orig; struct rtable *rt; rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0); if (rt) { struct dst_entry *new = &rt->dst; new->__use = 1; new->input = dst_discard; new->output = dst_discard_sk; new->dev = ort->dst.dev; if (new->dev) dev_hold(new->dev); rt->rt_is_input = ort->rt_is_input; rt->rt_iif = ort->rt_iif; rt->rt_pmtu = ort->rt_pmtu; rt->rt_genid = rt_genid_ipv4(net); rt->rt_flags = ort->rt_flags; rt->rt_type = ort->rt_type; rt->rt_gateway = ort->rt_gateway; rt->rt_uses_gateway = ort->rt_uses_gateway; INIT_LIST_HEAD(&rt->rt_uncached); dst_free(new); } dst_release(dst_orig); return rt ? 
&rt->dst : ERR_PTR(-ENOMEM); } struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4, struct sock *sk) { struct rtable *rt = __ip_route_output_key(net, flp4); if (IS_ERR(rt)) return rt; if (flp4->flowi4_proto) rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst, flowi4_to_flowi(flp4), sk, 0); return rt; } EXPORT_SYMBOL_GPL(ip_route_output_flow); static int rt_fill_info(struct net *net, __be32 dst, __be32 src, struct flowi4 *fl4, struct sk_buff *skb, u32 portid, u32 seq, int event, int nowait, unsigned int flags) { struct rtable *rt = skb_rtable(skb); struct rtmsg *r; struct nlmsghdr *nlh; unsigned long expires = 0; u32 error; u32 metrics[RTAX_MAX]; nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags); if (nlh == NULL) return -EMSGSIZE; r = nlmsg_data(nlh); r->rtm_family = AF_INET; r->rtm_dst_len = 32; r->rtm_src_len = 0; r->rtm_tos = fl4->flowi4_tos; r->rtm_table = RT_TABLE_MAIN; if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN)) goto nla_put_failure; r->rtm_type = rt->rt_type; r->rtm_scope = RT_SCOPE_UNIVERSE; r->rtm_protocol = RTPROT_UNSPEC; r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED; if (rt->rt_flags & RTCF_NOTIFY) r->rtm_flags |= RTM_F_NOTIFY; if (nla_put_be32(skb, RTA_DST, dst)) goto nla_put_failure; if (src) { r->rtm_src_len = 32; if (nla_put_be32(skb, RTA_SRC, src)) goto nla_put_failure; } if (rt->dst.dev && nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex)) goto nla_put_failure; #ifdef CONFIG_IP_ROUTE_CLASSID if (rt->dst.tclassid && nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid)) goto nla_put_failure; #endif if (!rt_is_input_route(rt) && fl4->saddr != src) { if (nla_put_be32(skb, RTA_PREFSRC, fl4->saddr)) goto nla_put_failure; } if (rt->rt_uses_gateway && nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway)) goto nla_put_failure; expires = rt->dst.expires; if (expires) { unsigned long now = jiffies; if (time_before(now, expires)) expires -= now; else expires = 0; } memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics)); if (rt->rt_pmtu && expires) metrics[RTAX_MTU - 1] = rt->rt_pmtu; if (rtnetlink_put_metrics(skb, metrics) < 0) goto nla_put_failure; if (fl4->flowi4_mark && nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark)) goto nla_put_failure; error = rt->dst.error; if (rt_is_input_route(rt)) { #ifdef CONFIG_IP_MROUTE if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) && IPV4_DEVCONF_ALL(net, MC_FORWARDING)) { int err = ipmr_get_route(net, skb, fl4->saddr, fl4->daddr, r, nowait); if (err <= 0) { if (!nowait) { if (err == 0) return 0; goto nla_put_failure; } else { if (err == -EMSGSIZE) goto nla_put_failure; error = err; } } } else #endif if (nla_put_u32(skb, RTA_IIF, skb->dev->ifindex)) goto nla_put_failure; } if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0) goto nla_put_failure; return nlmsg_end(skb, nlh); nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) { struct net *net = sock_net(in_skb->sk); struct rtmsg *rtm; struct nlattr *tb[RTA_MAX+1]; struct rtable *rt = NULL; struct flowi4 fl4; __be32 dst = 0; __be32 src = 0; u32 iif; int err; int mark; struct sk_buff *skb; err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy); if (err < 0) goto errout; rtm = nlmsg_data(nlh); skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (skb == NULL) { err = -ENOBUFS; goto errout; } /* Reserve room for dummy headers, this skb can pass through good chunk of routing engine. 
*/ skb_reset_mac_header(skb); skb_reset_network_header(skb); /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */ ip_hdr(skb)->protocol = IPPROTO_ICMP; skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr)); src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0; dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0; iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0; mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0; memset(&fl4, 0, sizeof(fl4)); fl4.daddr = dst; fl4.saddr = src; fl4.flowi4_tos = rtm->rtm_tos; fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0; fl4.flowi4_mark = mark; if (iif) { struct net_device *dev; dev = __dev_get_by_index(net, iif); if (dev == NULL) { err = -ENODEV; goto errout_free; } skb->protocol = htons(ETH_P_IP); skb->dev = dev; skb->mark = mark; local_bh_disable(); err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev); local_bh_enable(); rt = skb_rtable(skb); if (err == 0 && rt->dst.error) err = -rt->dst.error; } else { rt = ip_route_output_key(net, &fl4); err = 0; if (IS_ERR(rt)) err = PTR_ERR(rt); } if (err) goto errout_free; skb_dst_set(skb, &rt->dst); if (rtm->rtm_flags & RTM_F_NOTIFY) rt->rt_flags |= RTCF_NOTIFY; err = rt_fill_info(net, dst, src, &fl4, skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0); if (err <= 0) goto errout_free; err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); errout: return err; errout_free: kfree_skb(skb); goto errout; } void ip_rt_multicast_event(struct in_device *in_dev) { rt_cache_flush(dev_net(in_dev->dev)); } #ifdef CONFIG_SYSCTL static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT; static int ip_rt_gc_interval __read_mostly = 60 * HZ; static int ip_rt_gc_min_interval __read_mostly = HZ / 2; static int ip_rt_gc_elasticity __read_mostly = 8; static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct net *net = (struct net *)__ctl->extra1; if (write) { rt_cache_flush(net); fnhe_genid_bump(net); return 0; } return -EINVAL; } static struct ctl_table ipv4_route_table[] = { { .procname = "gc_thresh", .data = &ipv4_dst_ops.gc_thresh, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "max_size", .data = &ip_rt_max_size, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { /* Deprecated. 
Use gc_min_interval_ms */ .procname = "gc_min_interval", .data = &ip_rt_gc_min_interval, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "gc_min_interval_ms", .data = &ip_rt_gc_min_interval, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_ms_jiffies, }, { .procname = "gc_timeout", .data = &ip_rt_gc_timeout, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "gc_interval", .data = &ip_rt_gc_interval, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "redirect_load", .data = &ip_rt_redirect_load, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "redirect_number", .data = &ip_rt_redirect_number, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "redirect_silence", .data = &ip_rt_redirect_silence, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "error_cost", .data = &ip_rt_error_cost, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "error_burst", .data = &ip_rt_error_burst, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "gc_elasticity", .data = &ip_rt_gc_elasticity, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "mtu_expires", .data = &ip_rt_mtu_expires, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "min_pmtu", .data = &ip_rt_min_pmtu, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "min_adv_mss", .data = &ip_rt_min_advmss, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { } }; static struct ctl_table ipv4_route_flush_table[] = { { .procname = "flush", .maxlen = sizeof(int), .mode = 0200, .proc_handler = ipv4_sysctl_rtcache_flush, }, { }, }; static __net_init int sysctl_route_net_init(struct net *net) { struct ctl_table *tbl; tbl = ipv4_route_flush_table; if (!net_eq(net, &init_net)) { tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL); if (tbl == NULL) goto err_dup; /* Don't export sysctls to unprivileged users */ if (net->user_ns != &init_user_ns) tbl[0].procname = NULL; } tbl[0].extra1 = net; net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl); if (net->ipv4.route_hdr == NULL) goto err_reg; return 0; err_reg: if (tbl != ipv4_route_flush_table) kfree(tbl); err_dup: return -ENOMEM; } static __net_exit void sysctl_route_net_exit(struct net *net) { struct ctl_table *tbl; tbl = net->ipv4.route_hdr->ctl_table_arg; unregister_net_sysctl_table(net->ipv4.route_hdr); BUG_ON(tbl == ipv4_route_flush_table); kfree(tbl); } static __net_initdata struct pernet_operations sysctl_route_ops = { .init = sysctl_route_net_init, .exit = sysctl_route_net_exit, }; #endif static __net_init int rt_genid_init(struct net *net) { atomic_set(&net->ipv4.rt_genid, 0); atomic_set(&net->fnhe_genid, 0); get_random_bytes(&net->ipv4.dev_addr_genid, sizeof(net->ipv4.dev_addr_genid)); return 0; } static __net_initdata struct pernet_operations rt_genid_ops = { .init = rt_genid_init, }; static int __net_init ipv4_inetpeer_init(struct net *net) { struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL); if (!bp) return -ENOMEM; inet_peer_base_init(bp); net->ipv4.peers = bp; return 0; } static void __net_exit ipv4_inetpeer_exit(struct net *net) { struct inet_peer_base *bp = net->ipv4.peers; net->ipv4.peers = NULL; 
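/* The base pointer is unpublished above; now discard the cached peers and free the base. */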
inetpeer_invalidate_tree(bp); kfree(bp); } static __net_initdata struct pernet_operations ipv4_inetpeer_ops = { .init = ipv4_inetpeer_init, .exit = ipv4_inetpeer_exit, }; #ifdef CONFIG_IP_ROUTE_CLASSID struct ip_rt_acct __percpu *ip_rt_acct __read_mostly; #endif /* CONFIG_IP_ROUTE_CLASSID */ int __init ip_rt_init(void) { int rc = 0; ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL); if (!ip_idents) panic("IP: failed to allocate ip_idents\n"); prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents)); #ifdef CONFIG_IP_ROUTE_CLASSID ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct)); if (!ip_rt_acct) panic("IP: failed to allocate ip_rt_acct\n"); #endif ipv4_dst_ops.kmem_cachep = kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep; if (dst_entries_init(&ipv4_dst_ops) < 0) panic("IP: failed to allocate ipv4_dst_ops counter\n"); if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0) panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n"); ipv4_dst_ops.gc_thresh = ~0; ip_rt_max_size = INT_MAX; devinet_init(); ip_fib_init(); if (ip_rt_proc_init()) pr_err("Unable to create route proc files\n"); #ifdef CONFIG_XFRM xfrm_init(); xfrm4_init(); #endif rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL); #ifdef CONFIG_SYSCTL register_pernet_subsys(&sysctl_route_ops); #endif register_pernet_subsys(&rt_genid_ops); register_pernet_subsys(&ipv4_inetpeer_ops); return rc; } #ifdef CONFIG_SYSCTL /* * We really need to sanitize the damn ipv4 init order, then all * this nonsense will go away. */ void __init ip_static_sysctl_init(void) { register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table); } #endif
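/* Editor's note: a minimal, hypothetical usage sketch (not part of the original file) showing how a caller typically drives the output resolver above. The function name example_output_lookup and its arguments are invented for illustration; the flowi4 setup and the IS_ERR()/ip_rt_put() handling follow the conventions of __ip_route_output_key(). Guarded by #if 0 so it cannot affect the build; it relies only on headers this file already includes. */ #if 0 static int example_output_lookup(struct net *net, __be32 daddr) { struct flowi4 fl4; struct rtable *rt; memset(&fl4, 0, sizeof(fl4)); fl4.daddr = daddr; /* destination to resolve */ fl4.flowi4_oif = 0; /* let the resolver pick the output device */ rt = __ip_route_output_key(net, &fl4); if (IS_ERR(rt)) return PTR_ERR(rt); /* e.g. -ENETUNREACH */ /* on success, fl4.saddr holds the source address the resolver selected */ ip_rt_put(rt); return 0; } #endif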
Andiry/prd
net/ipv4/route.c
C
gpl-2.0
68,241
#include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/device.h> #include <linux/kdev_t.h> #include <linux/fs.h> #include <linux/cdev.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/mm_types.h> #include <linux/mm.h> #include <linux/jiffies.h> #include <linux/sched.h> #include <asm/uaccess.h> #include <asm/page.h> #include <linux/vmalloc.h> #include <linux/interrupt.h> #include <mach/irqs.h> //#include <mach/x_define_irq.h> #include <linux/wait.h> #include <linux/proc_fs.h> #include <linux/semaphore.h> #include <mach/dma.h> #include <linux/delay.h> #include "mach/sync_write.h" //#include "mach/mt_reg_base.h" #if !defined(CONFIG_MTK_LEGACY) #include <linux/clk.h> #else /* defined(CONFIG_MTK_LEGACY) */ #include "mach/mt_clkmgr.h" #endif /* !defined(CONFIG_MTK_LEGACY) */ #ifdef CONFIG_MTK_HIBERNATION #include "mach/mtk_hibernate_dpm.h" #endif #include "videocodec_kernel_driver.h" #include <asm/cacheflush.h> #include <asm/io.h> #include <asm/sizes.h> #include "val_types_private.h" #include "hal_types_private.h" #include "val_api_private.h" #include "val_log.h" #include "drv_api.h" #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #if IS_ENABLED(CONFIG_COMPAT) #include <linux/uaccess.h> #include <linux/compat.h> #endif #define ENABLE_MMDVFS_VDEC #ifdef ENABLE_MMDVFS_VDEC // <--- MM DVFS related #include <mt_smi.h> #define DROP_PERCENTAGE 50 #define RAISE_PERCENTAGE 90 #define MONITOR_DURATION_MS 4000 #define DVFS_LOW MMDVFS_VOLTAGE_LOW #define DVFS_HIGH MMDVFS_VOLTAGE_HIGH #define DVFS_DEFAULT MMDVFS_VOLTAGE_HIGH #define MONITOR_START_MINUS_1 0 #define SW_OVERHEAD_MS 1 static VAL_BOOL_T gMMDFVFSMonitorStarts = VAL_FALSE; static VAL_BOOL_T gFirstDvfsLock = VAL_FALSE; static VAL_UINT32_T gMMDFVFSMonitorCounts = 0; static VAL_TIME_T gMMDFVFSMonitorStartTime; static VAL_TIME_T gMMDFVFSLastLockTime; static VAL_TIME_T gMMDFVFSMonitorEndTime; static VAL_UINT32_T gHWLockInterval = 0; static VAL_INT32_T gHWLockMaxDuration = 0; VAL_UINT32_T TimeDiffMs(VAL_TIME_T timeOld, VAL_TIME_T timeNew) { //MFV_LOGE ("@@ timeOld(%d, %d), timeNew(%d, %d)", timeOld.u4Sec, timeOld.u4uSec, timeNew.u4Sec, timeNew.u4uSec); return (((((timeNew.u4Sec - timeOld.u4Sec) * 1000000) + timeNew.u4uSec) - timeOld.u4uSec) / 1000); } // raise/drop voltage void SendDvfsRequest(int level) { int ret = 0; if (level == MMDVFS_VOLTAGE_LOW) { MFV_LOGE("[VCODEC][MMDVFS_VDEC] SendDvfsRequest(MMDVFS_VOLTAGE_LOW)"); #if defined(CONFIG_MTK_LEGACY) clkmux_sel(MT_MUX_VDEC, 3, "MMDVFS_VOLTAGE_LOW"); // 136.5MHz #else // #if !defined(CONFIG_MTK_LEGACY) // [ToDo] #endif // #if defined(CONFIG_MTK_LEGACY) ret = mmdvfs_set_step(SMI_BWC_SCEN_VP, MMDVFS_VOLTAGE_LOW); } else if (level == MMDVFS_VOLTAGE_HIGH) { MFV_LOGE("[VCODEC][MMDVFS_VDEC] SendDvfsRequest(MMDVFS_VOLTAGE_HIGH)"); ret = mmdvfs_set_step(SMI_BWC_SCEN_VP, MMDVFS_VOLTAGE_HIGH); #if defined(CONFIG_MTK_LEGACY) clkmux_sel(MT_MUX_VDEC, 1, "MMDVFS_VOLTAGE_HIGH"); // 273MHz #else // #if !defined(CONFIG_MTK_LEGACY) // [ToDo] #endif // #if defined(CONFIG_MTK_LEGACY) } else { MFV_LOGE("[VCODEC][MMDVFS_VDEC] @@ OOPS: level = %d\n", level); } if (0 != ret) { MFV_LOGE("[VCODEC][MMDVFS_VDEC] @@ OOPS: mmdvfs_set_step error!"); } } void VdecDvfsBegin(void) { gMMDFVFSMonitorStarts = VAL_TRUE; gMMDFVFSMonitorCounts = 0; gHWLockInterval = 0; gFirstDvfsLock = VAL_TRUE; gHWLockMaxDuration = 0; MFV_LOGE("[VCODEC][MMDVFS_VDEC] @@ VdecDvfsBegin"); 
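/* Note: only the monitor counters are reset here; the start timestamp (gMMDFVFSMonitorStartTime) is taken lazily on the first HW lock after this call, gated by gFirstDvfsLock in the VCODEC_LOCKHW path. */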
//eVideoGetTimeOfDay(&gMMDFVFSMonitorStartTime, sizeof(VAL_TIME_T)); } VAL_UINT32_T VdecDvfsGetMonitorDuration(void) { eVideoGetTimeOfDay(&gMMDFVFSMonitorEndTime, sizeof(VAL_TIME_T)); return TimeDiffMs(gMMDFVFSMonitorStartTime, gMMDFVFSMonitorEndTime); } void VdecDvfsEnd(int level) { MFV_LOGE("[VCODEC][MMDVFS_VDEC] @@ VdecDVFS monitor %dms, decoded %d frames, total time %d, max duration %d, target lv %d", MONITOR_DURATION_MS, gMMDFVFSMonitorCounts, gHWLockInterval, gHWLockMaxDuration, level); gMMDFVFSMonitorStarts = VAL_FALSE; gMMDFVFSMonitorCounts = 0; gHWLockInterval = 0; gHWLockMaxDuration = 0; } VAL_UINT32_T VdecDvfsStep(void) { VAL_TIME_T _now; VAL_UINT32_T _diff = 0; eVideoGetTimeOfDay(&_now, sizeof(VAL_TIME_T)); _diff = TimeDiffMs(gMMDFVFSLastLockTime, _now); if (_diff > gHWLockMaxDuration) { gHWLockMaxDuration = _diff; } gHWLockInterval += (_diff + SW_OVERHEAD_MS); return _diff; } // ---> #endif #define VDO_HW_WRITE(ptr,data) mt_reg_sync_writel(data,ptr) #define VDO_HW_READ(ptr) (*((volatile unsigned int * const)(ptr))) #define VCODEC_DEVNAME "Vcodec" #define VCODEC_DEV_MAJOR_NUMBER 160 //189 //#define VENC_USE_L2C static dev_t vcodec_devno = MKDEV(VCODEC_DEV_MAJOR_NUMBER, 0); static struct cdev *vcodec_cdev; static struct class *vcodec_class = NULL; static struct device *vcodec_device = NULL; #if !defined(CONFIG_MTK_LEGACY) static struct clk *clk_MT_CG_DISP0_SMI_COMMON; /* MM_DISP0_SMI_COMMON */ static struct clk *clk_MT_CG_VDEC0_VDEC; /* VDEC0_VDEC */ static struct clk *clk_MT_CG_VDEC1_LARB; /* VDEC1_LARB */ static struct clk *clk_MT_CG_VENC_VENC; /* VENC_VENC */ static struct clk *clk_MT_CG_VENC_LARB; /* VENC_LARB */ #endif /* !defined(CONFIG_MTK_LEGACY) */ static DEFINE_MUTEX(IsOpenedLock); static DEFINE_MUTEX(PWRLock); static DEFINE_MUTEX(VdecHWLock); static DEFINE_MUTEX(VencHWLock); static DEFINE_MUTEX(EncEMILock); static DEFINE_MUTEX(L2CLock); static DEFINE_MUTEX(DecEMILock); static DEFINE_MUTEX(DriverOpenCountLock); static DEFINE_MUTEX(DecHWLockEventTimeoutLock); static DEFINE_MUTEX(EncHWLockEventTimeoutLock); static DEFINE_MUTEX(VdecPWRLock); static DEFINE_MUTEX(VencPWRLock); static DEFINE_SPINLOCK(DecIsrLock); static DEFINE_SPINLOCK(EncIsrLock); static DEFINE_SPINLOCK(LockDecHWCountLock); static DEFINE_SPINLOCK(LockEncHWCountLock); static DEFINE_SPINLOCK(DecISRCountLock); static DEFINE_SPINLOCK(EncISRCountLock); static VAL_EVENT_T DecHWLockEvent; //mutex : HWLockEventTimeoutLock static VAL_EVENT_T EncHWLockEvent; //mutex : HWLockEventTimeoutLock static VAL_EVENT_T DecIsrEvent; //mutex : HWLockEventTimeoutLock static VAL_EVENT_T EncIsrEvent; //mutex : HWLockEventTimeoutLock static VAL_INT32_T Driver_Open_Count; //mutex : DriverOpenCountLock static VAL_UINT32_T gu4PWRCounter = 0; //mutex : PWRLock static VAL_UINT32_T gu4EncEMICounter = 0; //mutex : EncEMILock static VAL_UINT32_T gu4DecEMICounter = 0; //mutex : DecEMILock static VAL_UINT32_T gu4L2CCounter = 0; //mutex : L2CLock static VAL_BOOL_T bIsOpened = VAL_FALSE; //mutex : IsOpenedLock static VAL_UINT32_T gu4HwVencIrqStatus = 0; //hardware VENC IRQ status (VP8/H264) static VAL_UINT32_T gu4VdecPWRCounter = 0; //mutex : VdecPWRLock static VAL_UINT32_T gu4VencPWRCounter = 0; //mutex : VencPWRLock static VAL_UINT32_T gLockTimeOutCount = 0; static VAL_UINT32_T gu4VdecLockThreadId = 0; //#define VCODEC_DEBUG #ifdef VCODEC_DEBUG #undef VCODEC_DEBUG #define VCODEC_DEBUG MFV_LOGE #undef MFV_LOGD #define MFV_LOGD MFV_LOGE #else #define VCODEC_DEBUG(...) #undef MFV_LOGD #define MFV_LOGD(...) 
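/* With VCODEC_DEBUG defined, VCODEC_DEBUG() and MFV_LOGD() are both promoted to MFV_LOGE() so they always reach the kernel log; in this default branch both macros compile away to nothing. */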
#endif // VENC physical base address #undef VENC_BASE #define VENC_BASE 0x17002000 #define VENC_REGION 0x1000 // VDEC virtual base address #define VDEC_BASE_PHY 0x16000000 #define VDEC_REGION 0x29000 #define HW_BASE 0x7FFF000 #define HW_REGION 0x2000 #define INFO_BASE 0x10000000 #define INFO_REGION 0x1000 #if 0 #define VENC_IRQ_STATUS_addr VENC_BASE + 0x05C #define VENC_IRQ_ACK_addr VENC_BASE + 0x060 #define VENC_MP4_IRQ_ACK_addr VENC_BASE + 0x678 #define VENC_MP4_IRQ_STATUS_addr VENC_BASE + 0x67C #define VENC_ZERO_COEF_COUNT_addr VENC_BASE + 0x688 #define VENC_BYTE_COUNT_addr VENC_BASE + 0x680 #define VENC_MP4_IRQ_ENABLE_addr VENC_BASE + 0x668 #define VENC_MP4_STATUS_addr VENC_BASE + 0x664 #define VENC_MP4_MVQP_STATUS_addr VENC_BASE + 0x6E4 #endif #define VENC_IRQ_STATUS_SPS 0x1 #define VENC_IRQ_STATUS_PPS 0x2 #define VENC_IRQ_STATUS_FRM 0x4 #define VENC_IRQ_STATUS_DRAM 0x8 #define VENC_IRQ_STATUS_PAUSE 0x10 #define VENC_IRQ_STATUS_SWITCH 0x20 //#define VENC_PWR_FPGA // Cheng-Jung 20120621 VENC power physical base address (FPGA only, should use API) [ #ifdef VENC_PWR_FPGA #define CLK_CFG_0_addr 0x10000140 #define CLK_CFG_4_addr 0x10000150 #define VENC_PWR_addr 0x10006230 #define VENCSYS_CG_SET_addr 0x15000004 #define PWR_ONS_1_D 3 #define PWR_CKD_1_D 4 #define PWR_ONN_1_D 2 #define PWR_ISO_1_D 1 #define PWR_RST_0_D 0 #define PWR_ON_SEQ_0 ((0x1 << PWR_ONS_1_D) | (0x1 << PWR_CKD_1_D) | (0x1 << PWR_ONN_1_D) | (0x1 << PWR_ISO_1_D) | (0x0 << PWR_RST_0_D)) #define PWR_ON_SEQ_1 ((0x1 << PWR_ONS_1_D) | (0x0 << PWR_CKD_1_D) | (0x1 << PWR_ONN_1_D) | (0x1 << PWR_ISO_1_D) | (0x0 << PWR_RST_0_D)) #define PWR_ON_SEQ_2 ((0x1 << PWR_ONS_1_D) | (0x0 << PWR_CKD_1_D) | (0x1 << PWR_ONN_1_D) | (0x0 << PWR_ISO_1_D) | (0x0 << PWR_RST_0_D)) #define PWR_ON_SEQ_3 ((0x1 << PWR_ONS_1_D) | (0x0 << PWR_CKD_1_D) | (0x1 << PWR_ONN_1_D) | (0x0 << PWR_ISO_1_D) | (0x1 << PWR_RST_0_D)) // ] #endif #if 0 // VDEC virtual base address #define VDEC_MISC_BASE VDEC_BASE + 0x0000 #define VDEC_VLD_BASE VDEC_BASE + 0x1000 #endif VAL_ULONG_T KVA_VENC_IRQ_ACK_ADDR, KVA_VENC_IRQ_STATUS_ADDR, KVA_VENC_BASE; VAL_ULONG_T KVA_VDEC_MISC_BASE, KVA_VDEC_VLD_BASE, KVA_VDEC_BASE, KVA_VDEC_GCON_BASE; VAL_UINT32_T VENC_IRQ_ID, VDEC_IRQ_ID; #ifdef VENC_PWR_FPGA // Cheng-Jung 20120621 VENC power physical base address (FPGA only, should use API) [ VAL_ULONG_T KVA_VENC_CLK_CFG_0_ADDR, KVA_VENC_CLK_CFG_4_ADDR, KVA_VENC_PWR_ADDR, KVA_VENCSYS_CG_SET_ADDR; // ] #endif extern unsigned long pmem_user_v2p_video(unsigned long va); #if defined(VENC_USE_L2C) extern int config_L2(int option); #endif void vdec_power_on(void) { int ret; mutex_lock(&VdecPWRLock); gu4VdecPWRCounter++; mutex_unlock(&VdecPWRLock); #if defined(CONFIG_MTK_LEGACY) // Central power on enable_clock(MT_CG_DISP0_SMI_COMMON, "VDEC"); enable_clock(MT_CG_VDEC0_VDEC, "VDEC"); enable_clock(MT_CG_VDEC1_LARB, "VDEC"); #ifdef VDEC_USE_L2C //enable_clock(MT_CG_INFRA_L2C_SRAM, "VDEC"); #endif #else //#if !defined(CONFIG_MTK_LEGACY) ret = clk_prepare_enable(clk_MT_CG_DISP0_SMI_COMMON); if (ret) { // print error log & error handling MFV_LOGE("[VCODEC][ERROR][vdec_power_on] clk_MT_CG_DISP0_SMI_COMMON is not enabled, ret = %d\n", ret); } ret = clk_prepare_enable(clk_MT_CG_VDEC0_VDEC); if (ret) { // print error log & error handling MFV_LOGE("[VCODEC][ERROR][vdec_power_on] clk_MT_CG_VDEC0_VDEC is not enabled, ret = %d\n", ret); } ret = clk_prepare_enable(clk_MT_CG_VDEC1_LARB); if (ret) { // print error log & error handling MFV_LOGE("[VCODEC][ERROR][vdec_power_on] clk_MT_CG_VDEC1_LARB is not enabled, ret = 
%d\n", ret); } #endif //#if defined(CONFIG_MTK_LEGACY) } void vdec_power_off(void) { mutex_lock(&VdecPWRLock); if (gu4VdecPWRCounter != 0) { gu4VdecPWRCounter--; #if defined(CONFIG_MTK_LEGACY) // Central power off disable_clock(MT_CG_VDEC0_VDEC, "VDEC"); disable_clock(MT_CG_VDEC1_LARB, "VDEC"); disable_clock(MT_CG_DISP0_SMI_COMMON, "VDEC"); #ifdef VDEC_USE_L2C //disable_clock(MT_CG_INFRA_L2C_SRAM, "VDEC"); #endif #else //#if !defined(CONFIG_MTK_LEGACY) clk_disable_unprepare(clk_MT_CG_VDEC1_LARB); clk_disable_unprepare(clk_MT_CG_VDEC0_VDEC); clk_disable_unprepare(clk_MT_CG_DISP0_SMI_COMMON); #endif //#if defined(CONFIG_MTK_LEGACY) } mutex_unlock(&VdecPWRLock); } void venc_power_on(void) { int ret; mutex_lock(&VencPWRLock); gu4VencPWRCounter++; mutex_unlock(&VencPWRLock); MFV_LOGD("[VCODEC] venc_power_on +\n"); #if defined(CONFIG_MTK_LEGACY) enable_clock(MT_CG_DISP0_SMI_COMMON, "VENC"); enable_clock(MT_CG_VENC_VENC, "VENC"); enable_clock(MT_CG_VENC_LARB, "VENC"); #ifdef VENC_USE_L2C enable_clock(MT_CG_INFRA_L2C_SRAM, "VENC"); #endif #else //#if !defined(CONFIG_MTK_LEGACY) ret = clk_prepare_enable(clk_MT_CG_DISP0_SMI_COMMON); if (ret) { // print error log & error handling MFV_LOGE("[VCODEC][ERROR][venc_power_on] clk_MT_CG_DISP0_SMI_COMMON is not enabled, ret = %d\n", ret); } ret = clk_prepare_enable(clk_MT_CG_VENC_VENC); if (ret) { // print error log & error handling MFV_LOGE("[VCODEC][ERROR][venc_power_on] clk_MT_CG_VENC_VENC is not enabled, ret = %d\n", ret); } ret = clk_prepare_enable(clk_MT_CG_VENC_LARB); if (ret) { // print error log & error handling MFV_LOGE("[VCODEC][ERROR][venc_power_on] clk_MT_CG_VENC_LARB is not enabled, ret = %d\n", ret); } #endif //#if defined(CONFIG_MTK_LEGACY) MFV_LOGD("[VCODEC] venc_power_on -\n"); } void venc_power_off(void) { mutex_lock(&VencPWRLock); if (gu4VencPWRCounter != 0) { gu4VencPWRCounter--; MFV_LOGD("[VCODEC] venc_power_off +\n"); #if defined(CONFIG_MTK_LEGACY) disable_clock(MT_CG_VENC_VENC, "VENC"); disable_clock(MT_CG_VENC_LARB, "VENC"); disable_clock(MT_CG_DISP0_SMI_COMMON, "VENC"); #ifdef VENC_USE_L2C disable_clock(MT_CG_INFRA_L2C_SRAM, "VENC"); #endif #else //#if !defined(CONFIG_MTK_LEGACY) clk_disable_unprepare(clk_MT_CG_VENC_LARB); clk_disable_unprepare(clk_MT_CG_VENC_VENC); clk_disable_unprepare(clk_MT_CG_DISP0_SMI_COMMON); #endif //#if defined(CONFIG_MTK_LEGACY) MFV_LOGD("[VCODEC] venc_power_off -\n"); } mutex_unlock(&VencPWRLock); } void dec_isr(void) { VAL_RESULT_T eValRet; VAL_ULONG_T ulFlags, ulFlagsISR, ulFlagsLockHW; VAL_UINT32_T u4TempDecISRCount = 0; VAL_UINT32_T u4TempLockDecHWCount = 0; VAL_UINT32_T u4CgStatus = 0; VAL_UINT32_T u4DecDoneStatus = 0; u4CgStatus = VDO_HW_READ(KVA_VDEC_GCON_BASE); if ((u4CgStatus & 0x10) != 0) { MFV_LOGE("[VCODEC][ERROR] DEC ISR, VDEC active is not 0x0 (0x%08x)", u4CgStatus); return; } u4DecDoneStatus = VDO_HW_READ(KVA_VDEC_BASE + 0xA4); if ((u4DecDoneStatus & (0x1 << 16)) != 0x10000) { MFV_LOGE("[VCODEC][ERROR] DEC ISR, Decode done status is not 0x1 (0x%08x)", u4DecDoneStatus); return; } spin_lock_irqsave(&DecISRCountLock, ulFlagsISR); gu4DecISRCount++; u4TempDecISRCount = gu4DecISRCount; spin_unlock_irqrestore(&DecISRCountLock, ulFlagsISR); spin_lock_irqsave(&LockDecHWCountLock, ulFlagsLockHW); u4TempLockDecHWCount = gu4LockDecHWCount; spin_unlock_irqrestore(&LockDecHWCountLock, ulFlagsLockHW); if (u4TempDecISRCount != u4TempLockDecHWCount) { //MFV_LOGE("[INFO] Dec ISRCount: 0x%x, LockHWCount:0x%x\n", u4TempDecISRCount, u4TempLockDecHWCount); } // Clear interrupt
VDO_HW_WRITE(KVA_VDEC_MISC_BASE + 41 * 4, VDO_HW_READ(KVA_VDEC_MISC_BASE + 41 * 4) | 0x11); VDO_HW_WRITE(KVA_VDEC_MISC_BASE + 41 * 4, VDO_HW_READ(KVA_VDEC_MISC_BASE + 41 * 4) & ~0x10); spin_lock_irqsave(&DecIsrLock, ulFlags); eValRet = eVideoSetEvent(&DecIsrEvent, sizeof(VAL_EVENT_T)); if (VAL_RESULT_NO_ERROR != eValRet) { MFV_LOGE("[VCODEC][ERROR] ISR set DecIsrEvent error\n"); } spin_unlock_irqrestore(&DecIsrLock, ulFlags); return; } void enc_isr(void) { VAL_RESULT_T eValRet; VAL_ULONG_T ulFlagsISR, ulFlagsLockHW; VAL_UINT32_T u4TempEncISRCount = 0; VAL_UINT32_T u4TempLockEncHWCount = 0; //---------------------- spin_lock_irqsave(&EncISRCountLock, ulFlagsISR); gu4EncISRCount++; u4TempEncISRCount = gu4EncISRCount; spin_unlock_irqrestore(&EncISRCountLock, ulFlagsISR); spin_lock_irqsave(&LockEncHWCountLock, ulFlagsLockHW); u4TempLockEncHWCount = gu4LockEncHWCount; spin_unlock_irqrestore(&LockEncHWCountLock, ulFlagsLockHW); if (u4TempEncISRCount != u4TempLockEncHWCount) { //MFV_LOGE("[INFO] Enc ISRCount: 0x%x, LockHWCount:0x%x\n", u4TempEncISRCount, u4TempLockEncHWCount); } if (grVcodecEncHWLock.pvHandle == 0) { MFV_LOGE("[VCODEC][ERROR] NO one Lock Enc HW, please check!!\n"); // Clear all status //VDO_HW_WRITE(KVA_VENC_MP4_IRQ_ACK_ADDR, 1); VDO_HW_WRITE(KVA_VENC_IRQ_ACK_ADDR, VENC_IRQ_STATUS_PAUSE); //VDO_HW_WRITE(KVA_VENC_IRQ_ACK_ADDR, VENC_IRQ_STATUS_DRAM_VP8); VDO_HW_WRITE(KVA_VENC_IRQ_ACK_ADDR, VENC_IRQ_STATUS_SWITCH); VDO_HW_WRITE(KVA_VENC_IRQ_ACK_ADDR, VENC_IRQ_STATUS_DRAM); VDO_HW_WRITE(KVA_VENC_IRQ_ACK_ADDR, VENC_IRQ_STATUS_SPS); VDO_HW_WRITE(KVA_VENC_IRQ_ACK_ADDR, VENC_IRQ_STATUS_PPS); VDO_HW_WRITE(KVA_VENC_IRQ_ACK_ADDR, VENC_IRQ_STATUS_FRM); return; } if (grVcodecEncHWLock.eDriverType == VAL_DRIVER_TYPE_H264_ENC) // hardwire { gu4HwVencIrqStatus = VDO_HW_READ(KVA_VENC_IRQ_STATUS_ADDR); if (gu4HwVencIrqStatus & VENC_IRQ_STATUS_PAUSE) { VDO_HW_WRITE(KVA_VENC_IRQ_ACK_ADDR, VENC_IRQ_STATUS_PAUSE); } if (gu4HwVencIrqStatus & VENC_IRQ_STATUS_SWITCH) { VDO_HW_WRITE(KVA_VENC_IRQ_ACK_ADDR, VENC_IRQ_STATUS_SWITCH); } if (gu4HwVencIrqStatus & VENC_IRQ_STATUS_DRAM) { VDO_HW_WRITE(KVA_VENC_IRQ_ACK_ADDR, VENC_IRQ_STATUS_DRAM); } if (gu4HwVencIrqStatus & VENC_IRQ_STATUS_SPS) { VDO_HW_WRITE(KVA_VENC_IRQ_ACK_ADDR, VENC_IRQ_STATUS_SPS); } if (gu4HwVencIrqStatus & VENC_IRQ_STATUS_PPS) { VDO_HW_WRITE(KVA_VENC_IRQ_ACK_ADDR, VENC_IRQ_STATUS_PPS); } if (gu4HwVencIrqStatus & VENC_IRQ_STATUS_FRM) { VDO_HW_WRITE(KVA_VENC_IRQ_ACK_ADDR, VENC_IRQ_STATUS_FRM); } } else if (grVcodecEncHWLock.eDriverType == VAL_DRIVER_TYPE_HEVC_ENC) // hardwire { MFV_LOGE("[VCODEC][enc_isr] VAL_DRIVER_TYPE_HEVC_ENC!!\n"); } else { MFV_LOGE("[VCODEC][ERROR] Invalid lock holder driver type = %d\n", grVcodecEncHWLock.eDriverType); } eValRet = eVideoSetEvent(&EncIsrEvent, sizeof(VAL_EVENT_T)); if (VAL_RESULT_NO_ERROR != eValRet) { MFV_LOGE("[VCODEC][ERROR] ISR set EncIsrEvent error\n"); } } static irqreturn_t video_intr_dlr(int irq, void *dev_id) { dec_isr(); return IRQ_HANDLED; } static irqreturn_t video_intr_dlr2(int irq, void *dev_id) { enc_isr(); return IRQ_HANDLED; } static long vcodec_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { VAL_LONG_T ret; VAL_UINT8_T *user_data_addr; VAL_RESULT_T eValRet; VAL_ULONG_T ulFlags, ulFlagsLockHW; VAL_HW_LOCK_T rHWLock; VAL_BOOL_T bLockedHW = VAL_FALSE; VAL_UINT32_T FirstUseDecHW = 0; VAL_UINT32_T FirstUseEncHW = 0; VAL_TIME_T rCurTime; VAL_UINT32_T u4TimeInterval; VAL_ISR_T val_isr; VAL_VCODEC_CORE_LOADING_T rTempCoreLoading; VAL_VCODEC_CPU_OPP_LIMIT_T rCpuOppLimit; 
VAL_INT32_T temp_nr_cpu_ids; VAL_POWER_T rPowerParam; VAL_MEMORY_T rTempMem; #ifdef ENABLE_MMDVFS_VDEC VAL_UINT32_T _monitor_duration = 0; VAL_UINT32_T _diff = 0; VAL_UINT32_T _perc = 0; #endif #if 0 VCODEC_DRV_CMD_QUEUE_T rDrvCmdQueue; P_VCODEC_DRV_CMD_T cmd_queue = VAL_NULL; VAL_UINT32_T u4Size, uValue, nCount; #endif switch (cmd) { case VCODEC_SET_THREAD_ID: { MFV_LOGE("VCODEC_SET_THREAD_ID [EMPTY] + tid = %d\n", current->pid); MFV_LOGE("VCODEC_SET_THREAD_ID [EMPTY] - tid = %d\n", current->pid); } break; case VCODEC_ALLOC_NON_CACHE_BUFFER: { MFV_LOGE("VCODEC_ALLOC_NON_CACHE_BUFFER + tid = %d\n", current->pid); user_data_addr = (VAL_UINT8_T *)arg; ret = copy_from_user(&rTempMem, user_data_addr, sizeof(VAL_MEMORY_T)); if (ret) { MFV_LOGE("[ERROR] VCODEC_ALLOC_NON_CACHE_BUFFER, copy_from_user failed: %lu\n", ret); return -EFAULT; } rTempMem.u4ReservedSize /*kernel va*/ = (VAL_ULONG_T)dma_alloc_coherent(0, rTempMem.u4MemSize, (dma_addr_t *)&rTempMem.pvMemPa, GFP_KERNEL); if ((0 == rTempMem.u4ReservedSize) || (0 == rTempMem.pvMemPa)) { MFV_LOGE("[ERROR] dma_alloc_coherent fail in VCODEC_ALLOC_NON_CACHE_BUFFER\n"); return -EFAULT; } MFV_LOGD("[VCODEC] kernel va = 0x%lx, kernel pa = 0x%lx, memory size = %lu\n", (VAL_ULONG_T)rTempMem.u4ReservedSize, (VAL_ULONG_T)rTempMem.pvMemPa, (VAL_ULONG_T)rTempMem.u4MemSize); //mutex_lock(&NonCacheMemoryListLock); //Add_NonCacheMemoryList(rTempMem.u4ReservedSize, (VAL_UINT32_T)rTempMem.pvMemPa, (VAL_UINT32_T)rTempMem.u4MemSize, 0, 0); //mutex_unlock(&NonCacheMemoryListLock); ret = copy_to_user(user_data_addr, &rTempMem, sizeof(VAL_MEMORY_T)); if (ret) { MFV_LOGE("[ERROR] VCODEC_ALLOC_NON_CACHE_BUFFER, copy_to_user failed: %lu\n", ret); return -EFAULT; } MFV_LOGE("VCODEC_ALLOC_NON_CACHE_BUFFER - tid = %d\n", current->pid); } break; case VCODEC_FREE_NON_CACHE_BUFFER: { MFV_LOGE("VCODEC_FREE_NON_CACHE_BUFFER + tid = %d\n", current->pid); user_data_addr = (VAL_UINT8_T *)arg; ret = copy_from_user(&rTempMem, user_data_addr, sizeof(VAL_MEMORY_T)); if (ret) { MFV_LOGE("[ERROR] VCODEC_FREE_NON_CACHE_BUFFER, copy_from_user failed: %lu\n", ret); return -EFAULT; } dma_free_coherent(0, rTempMem.u4MemSize, (void *)rTempMem.u4ReservedSize, (dma_addr_t)rTempMem.pvMemPa); //mutex_lock(&NonCacheMemoryListLock); //Free_NonCacheMemoryList(rTempMem.u4ReservedSize, (VAL_UINT32_T)rTempMem.pvMemPa); //mutex_unlock(&NonCacheMemoryListLock); rTempMem.u4ReservedSize = 0; rTempMem.pvMemPa = NULL; ret = copy_to_user(user_data_addr, &rTempMem, sizeof(VAL_MEMORY_T)); if (ret) { MFV_LOGE("[ERROR] VCODEC_FREE_NON_CACHE_BUFFER, copy_to_user failed: %lu\n", ret); return -EFAULT; } MFV_LOGE("VCODEC_FREE_NON_CACHE_BUFFER - tid = %d\n", current->pid); } break; case VCODEC_INC_DEC_EMI_USER: { MFV_LOGD("VCODEC_INC_DEC_EMI_USER + tid = %d\n", current->pid); mutex_lock(&DecEMILock); gu4DecEMICounter++; MFV_LOGE("[VCODEC] DEC_EMI_USER = %d\n", gu4DecEMICounter); user_data_addr = (VAL_UINT8_T *)arg; ret = copy_to_user(user_data_addr, &gu4DecEMICounter, sizeof(VAL_UINT32_T)); if (ret) { MFV_LOGE("[ERROR] VCODEC_INC_DEC_EMI_USER, copy_to_user failed: %lu\n", ret); mutex_unlock(&DecEMILock); return -EFAULT; } mutex_unlock(&DecEMILock); #ifdef ENABLE_MMDVFS_VDEC // MM DVFS related MFV_LOGE("[VCODEC][MMDVFS_VDEC] @@ INC_DEC_EMI MM DVFS init"); // raise voltage SendDvfsRequest(DVFS_DEFAULT); VdecDvfsBegin(); #endif MFV_LOGD("VCODEC_INC_DEC_EMI_USER - tid = %d\n", current->pid); } break; case VCODEC_DEC_DEC_EMI_USER: { MFV_LOGD("VCODEC_DEC_DEC_EMI_USER + tid = %d\n", current->pid); mutex_lock(&DecEMILock); 
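/* gu4DecEMICounter is protected by DecEMILock, as noted at its declaration. */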
gu4DecEMICounter--; MFV_LOGE("[VCODEC] DEC_EMI_USER = %d\n", gu4DecEMICounter); user_data_addr = (VAL_UINT8_T *)arg; ret = copy_to_user(user_data_addr, &gu4DecEMICounter, sizeof(VAL_UINT32_T)); if (ret) { MFV_LOGE("[ERROR] VCODEC_DEC_DEC_EMI_USER, copy_to_user failed: %lu\n", ret); mutex_unlock(&DecEMILock); return -EFAULT; } mutex_unlock(&DecEMILock); MFV_LOGD("VCODEC_DEC_DEC_EMI_USER - tid = %d\n", current->pid); } break; case VCODEC_INC_ENC_EMI_USER: { MFV_LOGD("VCODEC_INC_ENC_EMI_USER + tid = %d\n", current->pid); mutex_lock(&EncEMILock); gu4EncEMICounter++; MFV_LOGE("[VCODEC] ENC_EMI_USER = %d\n", gu4EncEMICounter); user_data_addr = (VAL_UINT8_T *)arg; ret = copy_to_user(user_data_addr, &gu4EncEMICounter, sizeof(VAL_UINT32_T)); if (ret) { MFV_LOGE("[ERROR] VCODEC_INC_ENC_EMI_USER, copy_to_user failed: %lu\n", ret); mutex_unlock(&EncEMILock); return -EFAULT; } mutex_unlock(&EncEMILock); MFV_LOGD("VCODEC_INC_ENC_EMI_USER - tid = %d\n", current->pid); } break; case VCODEC_DEC_ENC_EMI_USER: { MFV_LOGD("VCODEC_DEC_ENC_EMI_USER + tid = %d\n", current->pid); mutex_lock(&EncEMILock); gu4EncEMICounter--; MFV_LOGE("[VCODEC] ENC_EMI_USER = %d\n", gu4EncEMICounter); user_data_addr = (VAL_UINT8_T *)arg; ret = copy_to_user(user_data_addr, &gu4EncEMICounter, sizeof(VAL_UINT32_T)); if (ret) { MFV_LOGE("[ERROR] VCODEC_DEC_ENC_EMI_USER, copy_to_user failed: %lu\n", ret); mutex_unlock(&EncEMILock); return -EFAULT; } mutex_unlock(&EncEMILock); MFV_LOGD("VCODEC_DEC_ENC_EMI_USER - tid = %d\n", current->pid); } break; case VCODEC_LOCKHW: { MFV_LOGD("VCODEC_LOCKHW + tid = %d\n", current->pid); user_data_addr = (VAL_UINT8_T *)arg; ret = copy_from_user(&rHWLock, user_data_addr, sizeof(VAL_HW_LOCK_T)); if (ret) { MFV_LOGE("[ERROR] VCODEC_LOCKHW, copy_from_user failed: %lu\n", ret); return -EFAULT; } MFV_LOGD("[VCODEC] LOCKHW eDriverType = %d\n", rHWLock.eDriverType); eValRet = VAL_RESULT_INVALID_ISR; if (rHWLock.eDriverType == VAL_DRIVER_TYPE_MP4_DEC || rHWLock.eDriverType == VAL_DRIVER_TYPE_HEVC_DEC || rHWLock.eDriverType == VAL_DRIVER_TYPE_H264_DEC || rHWLock.eDriverType == VAL_DRIVER_TYPE_MP1_MP2_DEC || rHWLock.eDriverType == VAL_DRIVER_TYPE_VC1_DEC || rHWLock.eDriverType == VAL_DRIVER_TYPE_VC1_ADV_DEC || rHWLock.eDriverType == VAL_DRIVER_TYPE_VP8_DEC) { while (bLockedHW == VAL_FALSE) { mutex_lock(&DecHWLockEventTimeoutLock); if (DecHWLockEvent.u4TimeoutMs == 1) { MFV_LOGE("VCODEC_LOCKHW, First Use Dec HW!!\n"); FirstUseDecHW = 1; } else { FirstUseDecHW = 0; } mutex_unlock(&DecHWLockEventTimeoutLock); if (FirstUseDecHW == 1) { eValRet = eVideoWaitEvent(&DecHWLockEvent, sizeof(VAL_EVENT_T)); } mutex_lock(&DecHWLockEventTimeoutLock); if (DecHWLockEvent.u4TimeoutMs != 1000) { DecHWLockEvent.u4TimeoutMs = 1000; FirstUseDecHW = 1; } else { FirstUseDecHW = 0; } mutex_unlock(&DecHWLockEventTimeoutLock); mutex_lock(&VdecHWLock); // one process try to lock twice if (grVcodecDecHWLock.pvHandle == (VAL_VOID_T *)pmem_user_v2p_video((VAL_ULONG_T)rHWLock.pvHandle)) { MFV_LOGE("[WARNING] VCODEC_LOCKHW, one decoder instance try to lock twice, may cause lock HW timeout!! 
instance = 0x%lx, CurrentTID = %d\n", (VAL_ULONG_T)grVcodecDecHWLock.pvHandle, current->pid); } mutex_unlock(&VdecHWLock); if (FirstUseDecHW == 0) { MFV_LOGD("VCODEC_LOCKHW, Not first time use HW, timeout = %d\n", DecHWLockEvent.u4TimeoutMs); eValRet = eVideoWaitEvent(&DecHWLockEvent, sizeof(VAL_EVENT_T)); } if (VAL_RESULT_INVALID_ISR == eValRet) { MFV_LOGE("[ERROR] VCODEC_LOCKHW, DecHWLockEvent TimeOut, CurrentTID = %d\n", current->pid); if (FirstUseDecHW != 1) { mutex_lock(&VdecHWLock); if (grVcodecDecHWLock.pvHandle == 0) { MFV_LOGE("[WARNING] VCODEC_LOCKHW, maybe mediaserver restart before, please check!!\n"); } else { MFV_LOGE("[WARNING] VCODEC_LOCKHW, someone use HW, and check timeout value!!\n"); } mutex_unlock(&VdecHWLock); } } else if (VAL_RESULT_RESTARTSYS == eValRet) { MFV_LOGE("[WARNING] VCODEC_LOCKHW, VAL_RESULT_RESTARTSYS return when HWLock!!\n"); return -ERESTARTSYS; } mutex_lock(&VdecHWLock); if (grVcodecDecHWLock.pvHandle == 0) // No one holds dec hw lock now { gu4VdecLockThreadId = current->pid; grVcodecDecHWLock.pvHandle = (VAL_VOID_T *)pmem_user_v2p_video((VAL_ULONG_T)rHWLock.pvHandle); grVcodecDecHWLock.eDriverType = rHWLock.eDriverType; eVideoGetTimeOfDay(&grVcodecDecHWLock.rLockedTime, sizeof(VAL_TIME_T)); MFV_LOGD("VCODEC_LOCKHW, No process use dec HW, so current process can use HW\n"); MFV_LOGD("VCODEC_LOCKHW, LockInstance = 0x%lx CurrentTID = %d, rLockedTime(s, us) = %d, %d\n", (VAL_ULONG_T)grVcodecDecHWLock.pvHandle, current->pid, grVcodecDecHWLock.rLockedTime.u4Sec, grVcodecDecHWLock.rLockedTime.u4uSec); bLockedHW = VAL_TRUE; if (VAL_RESULT_INVALID_ISR == eValRet && FirstUseDecHW != 1) { MFV_LOGE("[WARNING] VCODEC_LOCKHW, reset power/irq when HWLock!!\n"); vdec_power_off(); disable_irq(VDEC_IRQ_ID); } vdec_power_on(); if (rHWLock.bSecureInst == VAL_FALSE) { enable_irq(VDEC_IRQ_ID); } #ifdef ENABLE_MMDVFS_VDEC // MM DVFS related if (VAL_FALSE == gMMDFVFSMonitorStarts) { // Continous monitoring VdecDvfsBegin(); } if (VAL_TRUE == gMMDFVFSMonitorStarts) { MFV_LOGD("[VCODEC][MMDVFS_VDEC] @@ LOCK 1"); if (gMMDFVFSMonitorCounts > MONITOR_START_MINUS_1) { if (VAL_TRUE == gFirstDvfsLock) { gFirstDvfsLock = VAL_FALSE; MFV_LOGE("[VCODEC][MMDVFS_VDEC] @@ LOCK 1 start monitor"); eVideoGetTimeOfDay(&gMMDFVFSMonitorStartTime, sizeof(VAL_TIME_T)); } eVideoGetTimeOfDay(&gMMDFVFSLastLockTime, sizeof(VAL_TIME_T)); } } #endif } else // Another one holding dec hw now { MFV_LOGE("VCODEC_LOCKHW E\n"); eVideoGetTimeOfDay(&rCurTime, sizeof(VAL_TIME_T)); u4TimeInterval = (((((rCurTime.u4Sec - grVcodecDecHWLock.rLockedTime.u4Sec) * 1000000) + rCurTime.u4uSec) - grVcodecDecHWLock.rLockedTime.u4uSec) / 1000); MFV_LOGD("VCODEC_LOCKHW, someone use dec HW, and check timeout value\n"); MFV_LOGD("VCODEC_LOCKHW, Instance = 0x%lx CurrentTID = %d, TimeInterval(ms) = %d, TimeOutValue(ms)) = %d\n", (VAL_ULONG_T)grVcodecDecHWLock.pvHandle, current->pid, u4TimeInterval, rHWLock.u4TimeoutMs); MFV_LOGE("VCODEC_LOCKHW Lock Instance = 0x%lx, Lock TID = %d, CurrentTID = %d, rLockedTime(%d s, %d us), rCurTime(%d s, %d us)\n", (VAL_ULONG_T)grVcodecDecHWLock.pvHandle, gu4VdecLockThreadId, current->pid, grVcodecDecHWLock.rLockedTime.u4Sec, grVcodecDecHWLock.rLockedTime.u4uSec, rCurTime.u4Sec, rCurTime.u4uSec ); // 2012/12/16. 
Cheng-Jung Never steal hardware lock if (0) //if (u4TimeInterval >= rHWLock.u4TimeoutMs) { grVcodecDecHWLock.pvHandle = (VAL_VOID_T *)pmem_user_v2p_video((VAL_ULONG_T)rHWLock.pvHandle); grVcodecDecHWLock.eDriverType = rHWLock.eDriverType; eVideoGetTimeOfDay(&grVcodecDecHWLock.rLockedTime, sizeof(VAL_TIME_T)); bLockedHW = VAL_TRUE; vdec_power_on(); // TODO: Error handling, VDEC break, reset? } } mutex_unlock(&VdecHWLock); spin_lock_irqsave(&LockDecHWCountLock, ulFlagsLockHW); gu4LockDecHWCount++; spin_unlock_irqrestore(&LockDecHWCountLock, ulFlagsLockHW); } } else if (rHWLock.eDriverType == VAL_DRIVER_TYPE_H264_ENC || rHWLock.eDriverType == VAL_DRIVER_TYPE_HEVC_ENC || rHWLock.eDriverType == VAL_DRIVER_TYPE_JPEG_ENC) { while (bLockedHW == VAL_FALSE) { // Early break for JPEG VENC if (rHWLock.u4TimeoutMs == 0) { if (grVcodecEncHWLock.pvHandle != 0) { break; } } // Wait to acquire Enc HW lock mutex_lock(&EncHWLockEventTimeoutLock); if (EncHWLockEvent.u4TimeoutMs == 1) { MFV_LOGE("VCODEC_LOCKHW, First Use Enc HW %d!!\n", rHWLock.eDriverType); FirstUseEncHW = 1; } else { FirstUseEncHW = 0; } mutex_unlock(&EncHWLockEventTimeoutLock); if (FirstUseEncHW == 1) { eValRet = eVideoWaitEvent(&EncHWLockEvent, sizeof(VAL_EVENT_T)); } mutex_lock(&EncHWLockEventTimeoutLock); if (EncHWLockEvent.u4TimeoutMs == 1) { EncHWLockEvent.u4TimeoutMs = 1000; FirstUseEncHW = 1; } else { FirstUseEncHW = 0; if (rHWLock.u4TimeoutMs == 0) { EncHWLockEvent.u4TimeoutMs = 0; // No wait } else { EncHWLockEvent.u4TimeoutMs = 1000; // Wait indefinitely } } mutex_unlock(&EncHWLockEventTimeoutLock); mutex_lock(&VencHWLock); // one process try to lock twice if (grVcodecEncHWLock.pvHandle == (VAL_VOID_T *)pmem_user_v2p_video((VAL_ULONG_T)rHWLock.pvHandle)) { MFV_LOGE("[WARNING] VCODEC_LOCKHW, one encoder instance try to lock twice, may cause lock HW timeout!! instance = 0x%lx, CurrentTID = %d, type:%d\n", (VAL_ULONG_T)grVcodecEncHWLock.pvHandle, current->pid, rHWLock.eDriverType); } mutex_unlock(&VencHWLock); if (FirstUseEncHW == 0) { eValRet = eVideoWaitEvent(&EncHWLockEvent, sizeof(VAL_EVENT_T)); } if (VAL_RESULT_INVALID_ISR == eValRet) { MFV_LOGE("[ERROR] VCODEC_LOCKHW EncHWLockEvent TimeOut, CurrentTID = %d\n", current->pid); if (FirstUseEncHW != 1) { mutex_lock(&VencHWLock); if (grVcodecEncHWLock.pvHandle == 0) { MFV_LOGE("[WARNING] VCODEC_LOCKHW, maybe mediaserver restart before, please check!!\n"); } else { MFV_LOGE("[WARNING] VCODEC_LOCKHW, someone use HW, and check timeout value!! 
%d\n", gLockTimeOutCount); ++gLockTimeOutCount; if (gLockTimeOutCount > 30) { MFV_LOGE("[ERROR] VCODEC_LOCKHW - ID %d fail, someone locked HW time out more than 30 times 0x%lx, %lx, 0x%lx, type:%d\n", current->pid, (VAL_ULONG_T)grVcodecEncHWLock.pvHandle, pmem_user_v2p_video((VAL_ULONG_T)rHWLock.pvHandle), (VAL_ULONG_T)rHWLock.pvHandle, rHWLock.eDriverType); gLockTimeOutCount = 0; mutex_unlock(&VencHWLock); return -EFAULT; } if (rHWLock.u4TimeoutMs == 0) { MFV_LOGE("[ERROR] VCODEC_LOCKHW - ID %d fail, someone locked HW already 0x%lx, %lx, 0x%lx, type:%d\n", current->pid, (VAL_ULONG_T)grVcodecEncHWLock.pvHandle, pmem_user_v2p_video((VAL_ULONG_T)rHWLock.pvHandle), (VAL_ULONG_T)rHWLock.pvHandle, rHWLock.eDriverType); gLockTimeOutCount = 0; mutex_unlock(&VencHWLock); return -EFAULT; } } mutex_unlock(&VencHWLock); } } else if (VAL_RESULT_RESTARTSYS == eValRet) { return -ERESTARTSYS; } mutex_lock(&VencHWLock); if (grVcodecEncHWLock.pvHandle == 0) //No process use HW, so current process can use HW { if (rHWLock.eDriverType == VAL_DRIVER_TYPE_H264_ENC || rHWLock.eDriverType == VAL_DRIVER_TYPE_HEVC_ENC || rHWLock.eDriverType == VAL_DRIVER_TYPE_JPEG_ENC) { grVcodecEncHWLock.pvHandle = (VAL_VOID_T *)pmem_user_v2p_video((VAL_ULONG_T)rHWLock.pvHandle); MFV_LOGD("VCODEC_LOCKHW, No process use HW, so current process can use HW, handle = 0x%lx\n", (VAL_ULONG_T)grVcodecEncHWLock.pvHandle); grVcodecEncHWLock.eDriverType = rHWLock.eDriverType; eVideoGetTimeOfDay(&grVcodecEncHWLock.rLockedTime, sizeof(VAL_TIME_T)); MFV_LOGD("VCODEC_LOCKHW, No process use HW, so current process can use HW\n"); MFV_LOGD("VCODEC_LOCKHW, LockInstance = 0x%lx CurrentTID = %d, rLockedTime(s, us) = %d, %d\n", (VAL_ULONG_T)grVcodecEncHWLock.pvHandle, current->pid, grVcodecEncHWLock.rLockedTime.u4Sec, grVcodecEncHWLock.rLockedTime.u4uSec); bLockedHW = VAL_TRUE; if (rHWLock.eDriverType == VAL_DRIVER_TYPE_H264_ENC || rHWLock.eDriverType == VAL_DRIVER_TYPE_HEVC_ENC) { venc_power_on(); //enable_irq(MT_VENC_IRQ_ID); enable_irq(VENC_IRQ_ID); } } } else //someone use HW, and check timeout value { if (rHWLock.u4TimeoutMs == 0) { bLockedHW = VAL_FALSE; mutex_unlock(&VencHWLock); break; } eVideoGetTimeOfDay(&rCurTime, sizeof(VAL_TIME_T)); u4TimeInterval = (((((rCurTime.u4Sec - grVcodecEncHWLock.rLockedTime.u4Sec) * 1000000) + rCurTime.u4uSec) - grVcodecEncHWLock.rLockedTime.u4uSec) / 1000); MFV_LOGD("VCODEC_LOCKHW, someone use enc HW, and check timeout value\n"); MFV_LOGD("VCODEC_LOCKHW, LockInstance = 0x%lx, CurrentInstance = 0x%lx, CurrentTID = %d, TimeInterval(ms) = %d, TimeOutValue(ms)) = %d\n", (VAL_ULONG_T)grVcodecEncHWLock.pvHandle, pmem_user_v2p_video((VAL_ULONG_T)rHWLock.pvHandle), current->pid, u4TimeInterval, rHWLock.u4TimeoutMs); MFV_LOGD("VCODEC_LOCKHW, LockInstance = 0x%lx, CurrentInstance = 0x%lx, CurrentTID = %d, rLockedTime(s, us) = %d, %d, rCurTime(s, us) = %d, %d\n", (VAL_ULONG_T)grVcodecEncHWLock.pvHandle, pmem_user_v2p_video((VAL_ULONG_T)rHWLock.pvHandle), current->pid, grVcodecEncHWLock.rLockedTime.u4Sec, grVcodecEncHWLock.rLockedTime.u4uSec, rCurTime.u4Sec, rCurTime.u4uSec ); ++gLockTimeOutCount; if (gLockTimeOutCount > 30) { MFV_LOGE("[ERROR] VCODEC_LOCKHW - ID %d fail, someone locked HW over 30 times without timeout 0x%lx, %lx, 0x%lx, type:%d\n", current->pid, (VAL_ULONG_T)grVcodecEncHWLock.pvHandle, pmem_user_v2p_video((VAL_ULONG_T)rHWLock.pvHandle), (VAL_ULONG_T)rHWLock.pvHandle, rHWLock.eDriverType); gLockTimeOutCount = 0; mutex_unlock(&VencHWLock); return -EFAULT; } // 2013/04/10. 
Cheng-Jung Never steal hardware lock if (0) //if (u4TimeInterval >= rHWLock.u4TimeoutMs) { if (rHWLock.eDriverType == VAL_DRIVER_TYPE_H264_ENC || rHWLock.eDriverType == VAL_DRIVER_TYPE_HEVC_ENC || rHWLock.eDriverType == VAL_DRIVER_TYPE_JPEG_ENC) { grVcodecEncHWLock.pvHandle = (VAL_VOID_T *)pmem_user_v2p_video((VAL_ULONG_T)rHWLock.pvHandle); grVcodecEncHWLock.eDriverType = rHWLock.eDriverType; eVideoGetTimeOfDay(&grVcodecEncHWLock.rLockedTime, sizeof(VAL_TIME_T)); MFV_LOGD("VCODEC_LOCKHW, LockInstance = 0x%lx, CurrentTID = %d, rLockedTime(s, us) = %d, %d\n", (VAL_ULONG_T)grVcodecEncHWLock.pvHandle, current->pid, grVcodecEncHWLock.rLockedTime.u4Sec, grVcodecEncHWLock.rLockedTime.u4uSec); bLockedHW = VAL_TRUE; if (rHWLock.eDriverType == VAL_DRIVER_TYPE_H264_ENC || rHWLock.eDriverType == VAL_DRIVER_TYPE_HEVC_ENC) { venc_power_on(); } } } } if (VAL_TRUE == bLockedHW) { MFV_LOGD("VCODEC_LOCKHW, Lock ok grVcodecEncHWLock.pvHandle = 0x%lx, va:%lx, type:%d", (VAL_ULONG_T)grVcodecEncHWLock.pvHandle, (VAL_ULONG_T)rHWLock.pvHandle, rHWLock.eDriverType); gLockTimeOutCount = 0; } mutex_unlock(&VencHWLock); } if (VAL_FALSE == bLockedHW) { MFV_LOGE("[ERROR] VCODEC_LOCKHW - ID %d fail, someone locked HW already , 0x%lx, %lx, 0x%lx, type:%d\n", current->pid, (VAL_ULONG_T)grVcodecEncHWLock.pvHandle, pmem_user_v2p_video((VAL_ULONG_T)rHWLock.pvHandle), (VAL_ULONG_T)rHWLock.pvHandle, rHWLock.eDriverType); gLockTimeOutCount = 0; return -EFAULT; } spin_lock_irqsave(&LockEncHWCountLock, ulFlagsLockHW); gu4LockEncHWCount++; spin_unlock_irqrestore(&LockEncHWCountLock, ulFlagsLockHW); MFV_LOGD("VCODEC_LOCKHW, get locked - ObjId =%d\n", current->pid); MFV_LOGD("VCODEC_LOCKHWed - tid = %d\n", current->pid); } else { MFV_LOGE("[WARNING] VCODEC_LOCKHW Unknown instance\n"); return -EFAULT; } MFV_LOGD("VCODEC_LOCKHW - tid = %d\n", current->pid); } break; case VCODEC_UNLOCKHW: { MFV_LOGD("VCODEC_UNLOCKHW + tid = %d\n", current->pid); user_data_addr = (VAL_UINT8_T *)arg; ret = copy_from_user(&rHWLock, user_data_addr, sizeof(VAL_HW_LOCK_T)); if (ret) { MFV_LOGE("[ERROR] VCODEC_UNLOCKHW, copy_from_user failed: %lu\n", ret); return -EFAULT; } MFV_LOGD("VCODEC_UNLOCKHW eDriverType = %d\n", rHWLock.eDriverType); eValRet = VAL_RESULT_INVALID_ISR; if (rHWLock.eDriverType == VAL_DRIVER_TYPE_MP4_DEC || rHWLock.eDriverType == VAL_DRIVER_TYPE_HEVC_DEC || rHWLock.eDriverType == VAL_DRIVER_TYPE_H264_DEC || rHWLock.eDriverType == VAL_DRIVER_TYPE_MP1_MP2_DEC || rHWLock.eDriverType == VAL_DRIVER_TYPE_VC1_DEC || rHWLock.eDriverType == VAL_DRIVER_TYPE_VC1_ADV_DEC || rHWLock.eDriverType == VAL_DRIVER_TYPE_VP8_DEC) { mutex_lock(&VdecHWLock); if (grVcodecDecHWLock.pvHandle == (VAL_VOID_T *)pmem_user_v2p_video((VAL_ULONG_T)rHWLock.pvHandle)) // Current owner give up hw lock { grVcodecDecHWLock.pvHandle = 0; grVcodecDecHWLock.eDriverType = VAL_DRIVER_TYPE_NONE; if (rHWLock.bSecureInst == VAL_FALSE) { disable_irq(VDEC_IRQ_ID); } // TODO: check if turning power off is ok vdec_power_off(); #ifdef ENABLE_MMDVFS_VDEC // MM DVFS related if (VAL_TRUE == gMMDFVFSMonitorStarts && gMMDFVFSMonitorCounts > MONITOR_START_MINUS_1) { _monitor_duration = VdecDvfsGetMonitorDuration(); if (_monitor_duration < MONITOR_DURATION_MS) { _diff = VdecDvfsStep(); MFV_LOGD("[VCODEC][MMDVFS_VDEC] @@ UNLOCK - lock time(%d ms, %d ms), cnt=%d, _monitor_duration=%d", _diff, gHWLockInterval, gMMDFVFSMonitorCounts, _monitor_duration); } else { VdecDvfsStep(); _perc = (VAL_UINT32_T)(100 * gHWLockInterval / _monitor_duration); MFV_LOGE("[VCODEC][MMDVFS_VDEC] @@ UNLOCK - reset 
monitor duration (%d ms), percent: %d, (DROP_PERCENTAGE = %d, RAISE_PERCENTAGE = %d)", _monitor_duration, _perc, DROP_PERCENTAGE, RAISE_PERCENTAGE); if (_perc < DROP_PERCENTAGE) { SendDvfsRequest(DVFS_LOW); VdecDvfsEnd(DVFS_LOW); } else if (_perc > RAISE_PERCENTAGE) { SendDvfsRequest(DVFS_HIGH); VdecDvfsEnd(DVFS_HIGH); } else { VdecDvfsEnd(-1); } } } gMMDFVFSMonitorCounts ++; #endif } else // Not current owner { MFV_LOGE("[ERROR] VCODEC_UNLOCKHW, Not owner trying to unlock dec hardware 0x%lx\n", pmem_user_v2p_video((VAL_ULONG_T)rHWLock.pvHandle)); mutex_unlock(&VdecHWLock); return -EFAULT; } mutex_unlock(&VdecHWLock); eValRet = eVideoSetEvent(&DecHWLockEvent, sizeof(VAL_EVENT_T)); } else if (rHWLock.eDriverType == VAL_DRIVER_TYPE_H264_ENC || rHWLock.eDriverType == VAL_DRIVER_TYPE_HEVC_ENC || rHWLock.eDriverType == VAL_DRIVER_TYPE_JPEG_ENC) { mutex_lock(&VencHWLock); if (grVcodecEncHWLock.pvHandle == (VAL_VOID_T *)pmem_user_v2p_video((VAL_ULONG_T)rHWLock.pvHandle)) // Current owner give up hw lock { grVcodecEncHWLock.pvHandle = 0; grVcodecEncHWLock.eDriverType = VAL_DRIVER_TYPE_NONE; if (rHWLock.eDriverType == VAL_DRIVER_TYPE_H264_ENC || rHWLock.eDriverType == VAL_DRIVER_TYPE_HEVC_ENC) { //disable_irq(MT_VENC_IRQ_ID); disable_irq(VENC_IRQ_ID); // turn venc power off venc_power_off(); } } else // Not current owner { // [TODO] error handling MFV_LOGE("[ERROR] VCODEC_UNLOCKHW, Not owner trying to unlock enc hardware 0x%lx, pa:%lx, va:%lx type:%d\n", (VAL_ULONG_T)grVcodecEncHWLock.pvHandle, pmem_user_v2p_video((VAL_ULONG_T)rHWLock.pvHandle), (VAL_ULONG_T)rHWLock.pvHandle, rHWLock.eDriverType); mutex_unlock(&VencHWLock); return -EFAULT; } mutex_unlock(&VencHWLock); eValRet = eVideoSetEvent(&EncHWLockEvent, sizeof(VAL_EVENT_T)); } else { MFV_LOGE("[WARNING] VCODEC_UNLOCKHW Unknown instance\n"); return -EFAULT; } MFV_LOGD("VCODEC_UNLOCKHW - tid = %d\n", current->pid); } break; case VCODEC_INC_PWR_USER: { MFV_LOGD("VCODEC_INC_PWR_USER + tid = %d\n", current->pid); user_data_addr = (VAL_UINT8_T *)arg; ret = copy_from_user(&rPowerParam, user_data_addr, sizeof(VAL_POWER_T)); if (ret) { MFV_LOGE("[ERROR] VCODEC_INC_PWR_USER, copy_from_user failed: %lu\n", ret); return -EFAULT; } MFV_LOGD("[VCODEC] INC_PWR_USER eDriverType = %d\n", rPowerParam.eDriverType); mutex_lock(&L2CLock); #ifdef VENC_USE_L2C if (rPowerParam.eDriverType == VAL_DRIVER_TYPE_H264_ENC) { gu4L2CCounter++; MFV_LOGD("[VCODEC] INC_PWR_USER L2C counter = %d\n", gu4L2CCounter); if (1 == gu4L2CCounter) { if (config_L2(0)) { MFV_LOGE("[VCODEC][ERROR] Switch L2C size to 512K failed\n"); mutex_unlock(&L2CLock); return -EFAULT; } else { MFV_LOGE("[VCODEC] Switch L2C size to 512K successful\n"); } } } #endif mutex_unlock(&L2CLock); MFV_LOGD("VCODEC_INC_PWR_USER - tid = %d\n", current->pid); } break; case VCODEC_DEC_PWR_USER: { MFV_LOGD("VCODEC_DEC_PWR_USER + tid = %d\n", current->pid); user_data_addr = (VAL_UINT8_T *)arg; ret = copy_from_user(&rPowerParam, user_data_addr, sizeof(VAL_POWER_T)); if (ret) { MFV_LOGE("[ERROR] VCODEC_DEC_PWR_USER, copy_from_user failed: %lu\n", ret); return -EFAULT; } MFV_LOGD("[VCODEC] DEC_PWR_USER eDriverType = %d\n", rPowerParam.eDriverType); mutex_lock(&L2CLock); #ifdef VENC_USE_L2C if (rPowerParam.eDriverType == VAL_DRIVER_TYPE_H264_ENC) { gu4L2CCounter--; MFV_LOGD("[VCODEC] DEC_PWR_USER L2C counter = %d\n", gu4L2CCounter); if (0 == gu4L2CCounter) { if (config_L2(1)) { MFV_LOGE("[VCODEC][ERROR] Switch L2C size to 0K failed\n"); mutex_unlock(&L2CLock); return -EFAULT; } else { MFV_LOGE("[VCODEC] Switch L2C size to 
0K successful\n"); } } } #endif mutex_unlock(&L2CLock); MFV_LOGD("VCODEC_DEC_PWR_USER - tid = %d\n", current->pid); } break; case VCODEC_WAITISR: { MFV_LOGD("VCODEC_WAITISR + tid = %d\n", current->pid); user_data_addr = (VAL_UINT8_T *)arg; ret = copy_from_user(&val_isr, user_data_addr, sizeof(VAL_ISR_T)); if (ret) { MFV_LOGE("[ERROR] VCODEC_WAITISR, copy_from_user failed: %lu\n", ret); return -EFAULT; } if (val_isr.eDriverType == VAL_DRIVER_TYPE_MP4_DEC || val_isr.eDriverType == VAL_DRIVER_TYPE_HEVC_DEC || val_isr.eDriverType == VAL_DRIVER_TYPE_H264_DEC || val_isr.eDriverType == VAL_DRIVER_TYPE_MP1_MP2_DEC || val_isr.eDriverType == VAL_DRIVER_TYPE_VC1_DEC || val_isr.eDriverType == VAL_DRIVER_TYPE_VC1_ADV_DEC || val_isr.eDriverType == VAL_DRIVER_TYPE_VP8_DEC) { mutex_lock(&VdecHWLock); if (grVcodecDecHWLock.pvHandle == (VAL_VOID_T *)pmem_user_v2p_video((VAL_ULONG_T)val_isr.pvHandle)) { bLockedHW = VAL_TRUE; } else { } mutex_unlock(&VdecHWLock); if (bLockedHW == VAL_FALSE) { MFV_LOGE("[ERROR] VCODEC_WAITISR, DO NOT have HWLock, so return fail\n"); break; } spin_lock_irqsave(&DecIsrLock, ulFlags); DecIsrEvent.u4TimeoutMs = val_isr.u4TimeoutMs; spin_unlock_irqrestore(&DecIsrLock, ulFlags); eValRet = eVideoWaitEvent(&DecIsrEvent, sizeof(VAL_EVENT_T)); if (VAL_RESULT_INVALID_ISR == eValRet) { return -2; } else if (VAL_RESULT_RESTARTSYS == eValRet) { MFV_LOGE("[WARNING] VCODEC_WAITISR, VAL_RESULT_RESTARTSYS return when WAITISR!!\n"); return -ERESTARTSYS; } } else if (val_isr.eDriverType == VAL_DRIVER_TYPE_H264_ENC || val_isr.eDriverType == VAL_DRIVER_TYPE_HEVC_ENC) { mutex_lock(&VencHWLock); if (grVcodecEncHWLock.pvHandle == (VAL_VOID_T *)pmem_user_v2p_video((VAL_ULONG_T)val_isr.pvHandle)) { bLockedHW = VAL_TRUE; } else { } mutex_unlock(&VencHWLock); if (bLockedHW == VAL_FALSE) { MFV_LOGE("[ERROR] VCODEC_WAITISR, DO NOT have enc HWLock, so return fail pa:%lx, va:%lx\n", pmem_user_v2p_video((VAL_ULONG_T)val_isr.pvHandle), (VAL_ULONG_T)val_isr.pvHandle); break; } spin_lock_irqsave(&EncIsrLock, ulFlags); EncIsrEvent.u4TimeoutMs = val_isr.u4TimeoutMs; spin_unlock_irqrestore(&EncIsrLock, ulFlags); eValRet = eVideoWaitEvent(&EncIsrEvent, sizeof(VAL_EVENT_T)); if (VAL_RESULT_INVALID_ISR == eValRet) { return -2; } else if (VAL_RESULT_RESTARTSYS == eValRet) { MFV_LOGE("[WARNING] VCODEC_WAITISR, VAL_RESULT_RESTARTSYS return when WAITISR!!\n"); return -ERESTARTSYS; } if (val_isr.u4IrqStatusNum > 0) { val_isr.u4IrqStatus[0] = gu4HwVencIrqStatus; ret = copy_to_user(user_data_addr, &val_isr, sizeof(VAL_ISR_T)); if (ret) { MFV_LOGE("[ERROR] VCODEC_WAITISR, copy_to_user failed: %lu\n", ret); return -EFAULT; } } } else { MFV_LOGE("[WARNING] VCODEC_WAITISR Unknown instance\n"); return -EFAULT; } MFV_LOGD("VCODEC_WAITISR - tid = %d\n", current->pid); } break; case VCODEC_INITHWLOCK: { MFV_LOGE("VCODEC_INITHWLOCK [EMPTY] + - tid = %d\n", current->pid); MFV_LOGE("VCODEC_INITHWLOCK [EMPTY] - - tid = %d\n", current->pid); } break; case VCODEC_DEINITHWLOCK: { MFV_LOGE("VCODEC_DEINITHWLOCK [EMPTY] + - tid = %d\n", current->pid); MFV_LOGE("VCODEC_DEINITHWLOCK [EMPTY] - - tid = %d\n", current->pid); } break; case VCODEC_GET_CPU_LOADING_INFO: { VAL_UINT8_T *user_data_addr; VAL_VCODEC_CPU_LOADING_INFO_T _temp; MFV_LOGD("VCODEC_GET_CPU_LOADING_INFO +\n"); user_data_addr = (VAL_UINT8_T *)arg; // TODO: #if 0 // Morris Yang 20120112 mark temporarily _temp._cpu_idle_time = mt_get_cpu_idle(0); _temp._thread_cpu_time = mt_get_thread_cputime(0); spin_lock_irqsave(&OalHWContextLock, ulFlags); _temp._inst_count = 
getCurInstanceCount(); spin_unlock_irqrestore(&OalHWContextLock, ulFlags); _temp._sched_clock = mt_sched_clock(); #endif ret = copy_to_user(user_data_addr, &_temp, sizeof(VAL_VCODEC_CPU_LOADING_INFO_T)); if (ret) { MFV_LOGE("[ERROR] VCODEC_GET_CPU_LOADING_INFO, copy_to_user failed: %lu\n", ret); return -EFAULT; } MFV_LOGD("VCODEC_GET_CPU_LOADING_INFO -\n"); } break; case VCODEC_GET_CORE_LOADING: { MFV_LOGD("VCODEC_GET_CORE_LOADING + - tid = %d\n", current->pid); user_data_addr = (VAL_UINT8_T *)arg; ret = copy_from_user(&rTempCoreLoading, user_data_addr, sizeof(VAL_VCODEC_CORE_LOADING_T)); if (ret) { MFV_LOGE("[ERROR] VCODEC_GET_CORE_LOADING, copy_from_user failed: %lu\n", ret); return -EFAULT; } rTempCoreLoading.Loading = get_cpu_load(rTempCoreLoading.CPUid); ret = copy_to_user(user_data_addr, &rTempCoreLoading, sizeof(VAL_VCODEC_CORE_LOADING_T)); if (ret) { MFV_LOGE("[ERROR] VCODEC_GET_CORE_LOADING, copy_to_user failed: %lu\n", ret); return -EFAULT; } MFV_LOGD("VCODEC_GET_CORE_LOADING - - tid = %d\n", current->pid); } break; case VCODEC_GET_CORE_NUMBER: { MFV_LOGD("VCODEC_GET_CORE_NUMBER + - tid = %d\n", current->pid); user_data_addr = (VAL_UINT8_T *)arg; temp_nr_cpu_ids = nr_cpu_ids; ret = copy_to_user(user_data_addr, &temp_nr_cpu_ids, sizeof(int)); if (ret) { MFV_LOGE("[ERROR] VCODEC_GET_CORE_NUMBER, copy_to_user failed: %lu\n", ret); return -EFAULT; } MFV_LOGD("VCODEC_GET_CORE_NUMBER - - tid = %d\n", current->pid); } break; case VCODEC_SET_CPU_OPP_LIMIT: { MFV_LOGE("VCODEC_SET_CPU_OPP_LIMIT [EMPTY] + - tid = %d\n", current->pid); user_data_addr = (VAL_UINT8_T *)arg; ret = copy_from_user(&rCpuOppLimit, user_data_addr, sizeof(VAL_VCODEC_CPU_OPP_LIMIT_T)); if (ret) { MFV_LOGE("[ERROR] VCODEC_SET_CPU_OPP_LIMIT, copy_from_user failed: %lu\n", ret); return -EFAULT; } MFV_LOGE("+VCODEC_SET_CPU_OPP_LIMIT (%d, %d, %d), tid = %d\n", rCpuOppLimit.limited_freq, rCpuOppLimit.limited_cpu, rCpuOppLimit.enable, current->pid); // TODO: Check if cpu_opp_limit is available //ret = cpu_opp_limit(EVENT_VIDEO, rCpuOppLimit.limited_freq, rCpuOppLimit.limited_cpu, rCpuOppLimit.enable); // 0: PASS, other: FAIL if (ret) { MFV_LOGE("[VCODEC][ERROR] cpu_opp_limit failed: %lu\n", ret); return -EFAULT; } MFV_LOGE("-VCODEC_SET_CPU_OPP_LIMIT tid = %d, ret = %lu\n", current->pid, ret); MFV_LOGE("VCODEC_SET_CPU_OPP_LIMIT [EMPTY] - - tid = %d\n", current->pid); } break; case VCODEC_MB: { mb(); } break; #if 0 case MFV_SET_CMD_CMD: MFV_LOGD("[MFV] MFV_SET_CMD_CMD\n"); MFV_LOGD("[MFV] Arg = %x\n", arg); user_data_addr = (VAL_UINT8_T *)arg; ret = copy_from_user(&rDrvCmdQueue, user_data_addr, sizeof(VCODEC_DRV_CMD_QUEUE_T)); MFV_LOGD("[MFV] CmdNum = %d\n", rDrvCmdQueue.CmdNum); u4Size = (rDrvCmdQueue.CmdNum) * sizeof(VCODEC_DRV_CMD_T); cmd_queue = (P_VCODEC_DRV_CMD_T)kmalloc(u4Size, GFP_ATOMIC); if (cmd_queue != VAL_NULL && rDrvCmdQueue.pCmd != VAL_NULL) { ret = copy_from_user(cmd_queue, rDrvCmdQueue.pCmd, u4Size); while (cmd_queue->type != END_CMD) { switch (cmd_queue->type) { case ENABLE_HW_CMD: break; case DISABLE_HW_CMD: break; case WRITE_REG_CMD: VDO_HW_WRITE(cmd_queue->address + cmd_queue->offset, cmd_queue->value); break; case READ_REG_CMD: uValue = VDO_HW_READ(cmd_queue->address + cmd_queue->offset); copy_to_user((void *)cmd_queue->value, &uValue, sizeof(VAL_UINT32_T)); break; case WRITE_SYSRAM_CMD: VDO_HW_WRITE(cmd_queue->address + cmd_queue->offset, cmd_queue->value); break; case READ_SYSRAM_CMD: uValue = VDO_HW_READ(cmd_queue->address + cmd_queue->offset); copy_to_user((void *)cmd_queue->value, &uValue, 
sizeof(VAL_UINT32_T)); break; case MASTER_WRITE_CMD: uValue = VDO_HW_READ(cmd_queue->address + cmd_queue->offset); VDO_HW_WRITE(cmd_queue->address + cmd_queue->offset, cmd_queue->value | (uValue & cmd_queue->mask)); break; case SETUP_ISR_CMD: break; case WAIT_ISR_CMD: MFV_LOGD("HAL_CMD_SET_CMD_QUEUE: WAIT_ISR_CMD+\n"); MFV_LOGD("HAL_CMD_SET_CMD_QUEUE: WAIT_ISR_CMD-\n"); break; case TIMEOUT_CMD: break; case WRITE_SYSRAM_RANGE_CMD: break; case READ_SYSRAM_RANGE_CMD: break; case POLL_REG_STATUS_CMD: uValue = VDO_HW_READ(cmd_queue->address + cmd_queue->offset); nCount = 0; while ((uValue & cmd_queue->mask) != 0) { nCount++; if (nCount > 1000) { break; } uValue = VDO_HW_READ(cmd_queue->address + cmd_queue->offset); } break; default: break; } cmd_queue++; } } break; #endif default: { MFV_LOGE("========[ERROR] vcodec_ioctl default case======== %u\n", cmd); } break; } return 0xFF; } #if IS_ENABLED(CONFIG_COMPAT) typedef enum { VAL_HW_LOCK_TYPE = 0, VAL_POWER_TYPE, VAL_ISR_TYPE, VAL_MEMORY_TYPE } STRUCT_TYPE; typedef enum { COPY_FROM_USER = 0, COPY_TO_USER, } COPY_DIRECTION; typedef struct COMPAT_VAL_HW_LOCK { compat_uptr_t pvHandle; ///< [IN] The video codec driver handle compat_uint_t u4HandleSize; ///< [IN] The size of video codec driver handle compat_uptr_t pvLock; ///< [IN/OUT] The Lock discriptor compat_uint_t u4TimeoutMs; ///< [IN] The timeout ms compat_uptr_t pvReserved; ///< [IN/OUT] The reserved parameter compat_uint_t u4ReservedSize; ///< [IN] The size of reserved parameter structure compat_uint_t eDriverType; ///< [IN] The driver type char bSecureInst; ///< [IN] True if this is a secure instance // MTK_SEC_VIDEO_PATH_SUPPORT } COMPAT_VAL_HW_LOCK_T; typedef struct COMPAT_VAL_POWER { compat_uptr_t pvHandle; ///< [IN] The video codec driver handle compat_uint_t u4HandleSize; ///< [IN] The size of video codec driver handle compat_uint_t eDriverType; ///< [IN] The driver type char fgEnable; ///< [IN] Enable or not. 
compat_uptr_t pvReserved; ///< [IN/OUT] The reserved parameter compat_uint_t u4ReservedSize; ///< [IN] The size of reserved parameter structure //VAL_UINT32_T u4L2CUser; ///< [OUT] The number of power user right now } COMPAT_VAL_POWER_T; typedef struct COMPAT_VAL_ISR { compat_uptr_t pvHandle; ///< [IN] The video codec driver handle compat_uint_t u4HandleSize; ///< [IN] The size of video codec driver handle compat_uint_t eDriverType; ///< [IN] The driver type compat_uptr_t pvIsrFunction; ///< [IN] The isr function compat_uptr_t pvReserved; ///< [IN/OUT] The reserved parameter compat_uint_t u4ReservedSize; ///< [IN] The size of reserved parameter structure compat_uint_t u4TimeoutMs; ///< [IN] The timeout in ms compat_uint_t u4IrqStatusNum; ///< [IN] The num of return registers when HW done compat_uint_t u4IrqStatus[IRQ_STATUS_MAX_NUM]; ///< [IN/OUT] The value of return registers when HW done } COMPAT_VAL_ISR_T; typedef struct COMPAT_VAL_MEMORY { compat_uint_t eMemType; ///< [IN] The allocation memory type compat_ulong_t u4MemSize; ///< [IN] The size of memory allocation compat_uptr_t pvMemVa; ///< [IN/OUT] The memory virtual address compat_uptr_t pvMemPa; ///< [IN/OUT] The memory physical address compat_uint_t eAlignment; ///< [IN] The memory byte alignment setting compat_uptr_t pvAlignMemVa; ///< [IN/OUT] The align memory virtual address compat_uptr_t pvAlignMemPa; ///< [IN/OUT] The align memory physical address compat_uint_t eMemCodec; ///< [IN] The memory codec for VENC or VDEC compat_uint_t i4IonShareFd; compat_uptr_t pIonBufhandle; compat_uptr_t pvReserved; ///< [IN/OUT] The reserved parameter compat_ulong_t u4ReservedSize; ///< [IN] The size of reserved parameter structure } COMPAT_VAL_MEMORY_T; static int compat_copy_struct( STRUCT_TYPE eType, COPY_DIRECTION eDirection, void __user *data32, void __user *data) { compat_uint_t u; compat_ulong_t l; compat_uptr_t p; char c; int err = 0; switch (eType) { case VAL_HW_LOCK_TYPE: { if (eDirection == COPY_FROM_USER) { COMPAT_VAL_HW_LOCK_T __user *from32 = (COMPAT_VAL_HW_LOCK_T *)data32; VAL_HW_LOCK_T __user *to = (VAL_HW_LOCK_T *)data; err = get_user(p, &(from32->pvHandle)); err |= put_user(p, &(to->pvHandle)); err |= get_user(u, &(from32->u4HandleSize)); err |= put_user(u, &(to->u4HandleSize)); err |= get_user(p, &(from32->pvLock)); err |= put_user(p, &(to->pvLock)); err |= get_user(u, &(from32->u4TimeoutMs)); err |= put_user(u, &(to->u4TimeoutMs)); err |= get_user(p, &(from32->pvReserved)); err |= put_user(p, &(to->pvReserved)); err |= get_user(u, &(from32->u4ReservedSize)); err |= put_user(u, &(to->u4ReservedSize)); err |= get_user(u, &(from32->eDriverType)); err |= put_user(u, &(to->eDriverType)); err |= get_user(c, &(from32->bSecureInst)); err |= put_user(c, &(to->bSecureInst)); } else { COMPAT_VAL_HW_LOCK_T __user *to32 = (COMPAT_VAL_HW_LOCK_T *)data32; VAL_HW_LOCK_T __user *from = (VAL_HW_LOCK_T *)data; err = get_user(p, &(from->pvHandle)); err |= put_user(p, &(to32->pvHandle)); err |= get_user(u, &(from->u4HandleSize)); err |= put_user(u, &(to32->u4HandleSize)); err |= get_user(p, &(from->pvLock)); err |= put_user(p, &(to32->pvLock)); err |= get_user(u, &(from->u4TimeoutMs)); err |= put_user(u, &(to32->u4TimeoutMs)); err |= get_user(p, &(from->pvReserved)); err |= put_user(p, &(to32->pvReserved)); err |= get_user(u, &(from->u4ReservedSize)); err |= put_user(u, &(to32->u4ReservedSize)); err |= get_user(u, &(from->eDriverType)); err |= put_user(u, &(to32->eDriverType)); err |= get_user(c, &(from->bSecureInst)); err |= put_user(c, 
&(to32->bSecureInst)); } } break; case VAL_POWER_TYPE: { if (eDirection == COPY_FROM_USER) { COMPAT_VAL_POWER_T __user *from32 = (COMPAT_VAL_POWER_T *)data32; VAL_POWER_T __user *to = (VAL_POWER_T *)data; err = get_user(p, &(from32->pvHandle)); err |= put_user(p, &(to->pvHandle)); err |= get_user(u, &(from32->u4HandleSize)); err |= put_user(u, &(to->u4HandleSize)); err |= get_user(u, &(from32->eDriverType)); err |= put_user(u, &(to->eDriverType)); err |= get_user(c, &(from32->fgEnable)); err |= put_user(c, &(to->fgEnable)); err |= get_user(p, &(from32->pvReserved)); err |= put_user(p, &(to->pvReserved)); err |= get_user(u, &(from32->u4ReservedSize)); err |= put_user(u, &(to->u4ReservedSize)); } else { COMPAT_VAL_POWER_T __user *to32 = (COMPAT_VAL_POWER_T *)data32; VAL_POWER_T __user *from = (VAL_POWER_T *)data; err = get_user(p, &(from->pvHandle)); err |= put_user(p, &(to32->pvHandle)); err |= get_user(u, &(from->u4HandleSize)); err |= put_user(u, &(to32->u4HandleSize)); err |= get_user(u, &(from->eDriverType)); err |= put_user(u, &(to32->eDriverType)); err |= get_user(c, &(from->fgEnable)); err |= put_user(c, &(to32->fgEnable)); err |= get_user(p, &(from->pvReserved)); err |= put_user(p, &(to32->pvReserved)); err |= get_user(u, &(from->u4ReservedSize)); err |= put_user(u, &(to32->u4ReservedSize)); } } break; case VAL_ISR_TYPE: { int i = 0; if (eDirection == COPY_FROM_USER) { COMPAT_VAL_ISR_T __user *from32 = (COMPAT_VAL_ISR_T *)data32; VAL_ISR_T __user *to = (VAL_ISR_T *)data; err = get_user(p, &(from32->pvHandle)); err |= put_user(p, &(to->pvHandle)); err |= get_user(u, &(from32->u4HandleSize)); err |= put_user(u, &(to->u4HandleSize)); err |= get_user(u, &(from32->eDriverType)); err |= put_user(u, &(to->eDriverType)); err |= get_user(p, &(from32->pvIsrFunction)); err |= put_user(p, &(to->pvIsrFunction)); err |= get_user(p, &(from32->pvReserved)); err |= put_user(p, &(to->pvReserved)); err |= get_user(u, &(from32->u4ReservedSize)); err |= put_user(u, &(to->u4ReservedSize)); err |= get_user(u, &(from32->u4TimeoutMs)); err |= put_user(u, &(to->u4TimeoutMs)); err |= get_user(u, &(from32->u4IrqStatusNum)); err |= put_user(u, &(to->u4IrqStatusNum)); for (; i < IRQ_STATUS_MAX_NUM; i++) { err |= get_user(u, &(from32->u4IrqStatus[i])); err |= put_user(u, &(to->u4IrqStatus[i])); } return err; } else { COMPAT_VAL_ISR_T __user *to32 = (COMPAT_VAL_ISR_T *)data32; VAL_ISR_T __user *from = (VAL_ISR_T *)data; err = get_user(p, &(from->pvHandle)); err |= put_user(p, &(to32->pvHandle)); err |= get_user(u, &(from->u4HandleSize)); err |= put_user(u, &(to32->u4HandleSize)); err |= get_user(u, &(from->eDriverType)); err |= put_user(u, &(to32->eDriverType)); err |= get_user(p, &(from->pvIsrFunction)); err |= put_user(p, &(to32->pvIsrFunction)); err |= get_user(p, &(from->pvReserved)); err |= put_user(p, &(to32->pvReserved)); err |= get_user(u, &(from->u4ReservedSize)); err |= put_user(u, &(to32->u4ReservedSize)); err |= get_user(u, &(from->u4TimeoutMs)); err |= put_user(u, &(to32->u4TimeoutMs)); err |= get_user(u, &(from->u4IrqStatusNum)); err |= put_user(u, &(to32->u4IrqStatusNum)); for (; i < IRQ_STATUS_MAX_NUM; i++) { err |= get_user(u, &(from->u4IrqStatus[i])); err |= put_user(u, &(to32->u4IrqStatus[i])); } } } break; case VAL_MEMORY_TYPE: { if (eDirection == COPY_FROM_USER) { COMPAT_VAL_MEMORY_T __user *from32 = (COMPAT_VAL_MEMORY_T *)data32; VAL_MEMORY_T __user *to = (VAL_MEMORY_T *)data; err = get_user(u, &(from32->eMemType)); err |= put_user(u, &(to->eMemType)); err |= get_user(l, 
&(from32->u4MemSize)); err |= put_user(l, &(to->u4MemSize)); err |= get_user(p, &(from32->pvMemVa)); err |= put_user(p, &(to->pvMemVa)); err |= get_user(p, &(from32->pvMemPa)); err |= put_user(p, &(to->pvMemPa)); err |= get_user(u, &(from32->eAlignment)); err |= put_user(u, &(to->eAlignment)); err |= get_user(p, &(from32->pvAlignMemVa)); err |= put_user(p, &(to->pvAlignMemVa)); err |= get_user(p, &(from32->pvAlignMemPa)); err |= put_user(p, &(to->pvAlignMemPa)); err |= get_user(u, &(from32->eMemCodec)); err |= put_user(u, &(to->eMemCodec)); err |= get_user(u, &(from32->i4IonShareFd)); err |= put_user(u, &(to->i4IonShareFd)); err |= get_user(p, &(from32->pIonBufhandle)); err |= put_user(p, &(to->pIonBufhandle)); err |= get_user(p, &(from32->pvReserved)); err |= put_user(p, &(to->pvReserved)); err |= get_user(l, &(from32->u4ReservedSize)); err |= put_user(l, &(to->u4ReservedSize)); return err; } else { COMPAT_VAL_MEMORY_T __user *to32 = (COMPAT_VAL_MEMORY_T *)data32; VAL_MEMORY_T __user *from = (VAL_MEMORY_T *)data; err = get_user(u, &(from->eMemType)); err |= put_user(u, &(to32->eMemType)); err |= get_user(l, &(from->u4MemSize)); err |= put_user(l, &(to32->u4MemSize)); err |= get_user(p, &(from->pvMemVa)); err |= put_user(p, &(to32->pvMemVa)); err |= get_user(p, &(from->pvMemPa)); err |= put_user(p, &(to32->pvMemPa)); err |= get_user(u, &(from->eAlignment)); err |= put_user(u, &(to32->eAlignment)); err |= get_user(p, &(from->pvAlignMemVa)); err |= put_user(p, &(to32->pvAlignMemVa)); err |= get_user(p, &(from->pvAlignMemPa)); err |= put_user(p, &(to32->pvAlignMemPa)); err |= get_user(u, &(from->eMemCodec)); err |= put_user(u, &(to32->eMemCodec)); err |= get_user(u, &(from->i4IonShareFd)); err |= put_user(u, &(to32->i4IonShareFd)); err |= get_user(p, &(from->pIonBufhandle)); err |= put_user(p, &(to32->pIonBufhandle)); err |= get_user(p, &(from->pvReserved)); err |= put_user(p, &(to32->pvReserved)); err |= get_user(l, &(from->u4ReservedSize)); err |= put_user(l, &(to32->u4ReservedSize)); } } break; default: break; } return err; } static long vcodec_unlocked_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret = 0; MFV_LOGD("vcodec_unlocked_compat_ioctl: 0x%x\n", cmd); switch (cmd) { case VCODEC_ALLOC_NON_CACHE_BUFFER: case VCODEC_FREE_NON_CACHE_BUFFER: { COMPAT_VAL_MEMORY_T __user *data32; VAL_MEMORY_T __user *data; int err; data32 = compat_ptr(arg); data = compat_alloc_user_space(sizeof(VAL_MEMORY_T)); if (data == NULL) { return -EFAULT; } err = compat_copy_struct(VAL_MEMORY_TYPE, COPY_FROM_USER, (void *)data32, (void *)data); if (err) { return err; } ret = file->f_op->unlocked_ioctl(file, cmd, (unsigned long)data); err = compat_copy_struct(VAL_MEMORY_TYPE, COPY_TO_USER, (void *)data32, (void *)data); if (err) { return err; } return ret; } break; case VCODEC_LOCKHW: case VCODEC_UNLOCKHW: { COMPAT_VAL_HW_LOCK_T __user *data32; VAL_HW_LOCK_T __user *data; int err; data32 = compat_ptr(arg); data = compat_alloc_user_space(sizeof(VAL_HW_LOCK_T)); if (data == NULL) { return -EFAULT; } err = compat_copy_struct(VAL_HW_LOCK_TYPE, COPY_FROM_USER, (void *)data32, (void *)data); if (err) { return err; } ret = file->f_op->unlocked_ioctl(file, cmd, (unsigned long)data); err = compat_copy_struct(VAL_HW_LOCK_TYPE, COPY_TO_USER, (void *)data32, (void *)data); if (err) { return err; } return ret; } break; case VCODEC_INC_PWR_USER: case VCODEC_DEC_PWR_USER: { COMPAT_VAL_POWER_T __user *data32; VAL_POWER_T __user *data; int err; data32 = compat_ptr(arg); data = 
compat_alloc_user_space(sizeof(VAL_POWER_T)); if (data == NULL) { return -EFAULT; } err = compat_copy_struct(VAL_POWER_TYPE, COPY_FROM_USER, (void *)data32, (void *)data); if (err) { return err; } ret = file->f_op->unlocked_ioctl(file, cmd, (unsigned long)data); err = compat_copy_struct(VAL_POWER_TYPE, COPY_TO_USER, (void *)data32, (void *)data); if (err) { return err; } return ret; } break; case VCODEC_WAITISR: { COMPAT_VAL_ISR_T __user *data32; VAL_ISR_T __user *data; int err; data32 = compat_ptr(arg); data = compat_alloc_user_space(sizeof(VAL_ISR_T)); if (data == NULL) { return -EFAULT; } err = compat_copy_struct(VAL_ISR_TYPE, COPY_FROM_USER, (void *)data32, (void *)data); if (err) { return err; } ret = file->f_op->unlocked_ioctl(file, VCODEC_WAITISR, (unsigned long)data); err = compat_copy_struct(VAL_ISR_TYPE, COPY_TO_USER, (void *)data32, (void *)data); if (err) { return err; } return ret; } break; default: { return vcodec_unlocked_ioctl(file, cmd, arg); } break; } return 0; } #else #define vcodec_unlocked_compat_ioctl NULL #endif static int vcodec_open(struct inode *inode, struct file *file) { MFV_LOGD("vcodec_open\n"); mutex_lock(&DriverOpenCountLock); Driver_Open_Count++; MFV_LOGE("vcodec_open pid = %d, Driver_Open_Count %d\n", current->pid, Driver_Open_Count); mutex_unlock(&DriverOpenCountLock); // TODO: Check upper limit of concurrent users? return 0; } static int vcodec_flush(struct file *file, fl_owner_t id) { MFV_LOGD("vcodec_flush, curr_tid =%d\n", current->pid); MFV_LOGE("vcodec_flush pid = %d, Driver_Open_Count %d\n", current->pid, Driver_Open_Count); return 0; } static int vcodec_release(struct inode *inode, struct file *file) { VAL_ULONG_T ulFlagsLockHW, ulFlagsISR; //dump_stack(); MFV_LOGD("vcodec_release, curr_tid =%d\n", current->pid); mutex_lock(&DriverOpenCountLock); MFV_LOGE("vcodec_release pid = %d, Driver_Open_Count %d\n", current->pid, Driver_Open_Count); Driver_Open_Count--; if (Driver_Open_Count == 0) { mutex_lock(&VdecHWLock); gu4VdecLockThreadId = 0; grVcodecDecHWLock.pvHandle = 0; grVcodecDecHWLock.eDriverType = VAL_DRIVER_TYPE_NONE; grVcodecDecHWLock.rLockedTime.u4Sec = 0; grVcodecDecHWLock.rLockedTime.u4uSec = 0; mutex_unlock(&VdecHWLock); mutex_lock(&VencHWLock); grVcodecEncHWLock.pvHandle = 0; grVcodecEncHWLock.eDriverType = VAL_DRIVER_TYPE_NONE; grVcodecEncHWLock.rLockedTime.u4Sec = 0; grVcodecEncHWLock.rLockedTime.u4uSec = 0; mutex_unlock(&VencHWLock); mutex_lock(&DecEMILock); gu4DecEMICounter = 0; mutex_unlock(&DecEMILock); mutex_lock(&EncEMILock); gu4EncEMICounter = 0; mutex_unlock(&EncEMILock); mutex_lock(&PWRLock); gu4PWRCounter = 0; mutex_unlock(&PWRLock); #if defined(VENC_USE_L2C) mutex_lock(&L2CLock); if (gu4L2CCounter != 0) { MFV_LOGE("vcodec_flush pid = %d, L2 user = %d, force restore L2 settings\n", current->pid, gu4L2CCounter); if (config_L2(1)) { MFV_LOGE("[VCODEC][ERROR] restore L2 settings failed\n"); } } gu4L2CCounter = 0; mutex_unlock(&L2CLock); #endif spin_lock_irqsave(&LockDecHWCountLock, ulFlagsLockHW); gu4LockDecHWCount = 0; spin_unlock_irqrestore(&LockDecHWCountLock, ulFlagsLockHW); spin_lock_irqsave(&LockEncHWCountLock, ulFlagsLockHW); gu4LockEncHWCount = 0; spin_unlock_irqrestore(&LockEncHWCountLock, ulFlagsLockHW); spin_lock_irqsave(&DecISRCountLock, ulFlagsISR); gu4DecISRCount = 0; spin_unlock_irqrestore(&DecISRCountLock, ulFlagsISR); spin_lock_irqsave(&EncISRCountLock, ulFlagsISR); gu4EncISRCount = 0; spin_unlock_irqrestore(&EncISRCountLock, ulFlagsISR); #ifdef ENABLE_MMDVFS_VDEC if (VAL_TRUE == gMMDFVFSMonitorStarts) { 
gMMDFVFSMonitorStarts = VAL_FALSE; gMMDFVFSMonitorCounts = 0; gHWLockInterval = 0; gHWLockMaxDuration = 0; SendDvfsRequest(DVFS_LOW); } #endif } #ifdef ENABLE_MMDVFS_VDEC mutex_lock(&DecEMILock); if (VAL_TRUE == gMMDFVFSMonitorStarts && 0 == gu4DecEMICounter) { gMMDFVFSMonitorStarts = VAL_FALSE; gMMDFVFSMonitorCounts = 0; gHWLockInterval = 0; gHWLockMaxDuration = 0; SendDvfsRequest(DVFS_LOW); } mutex_unlock(&DecEMILock); #endif mutex_unlock(&DriverOpenCountLock); return 0; } void vcodec_vma_open(struct vm_area_struct *vma) { MFV_LOGD("vcodec VMA open, virt %lx, phys %lx\n", vma->vm_start, vma->vm_pgoff << PAGE_SHIFT); } void vcodec_vma_close(struct vm_area_struct *vma) { MFV_LOGD("vcodec VMA close, virt %lx, phys %lx\n", vma->vm_start, vma->vm_pgoff << PAGE_SHIFT); } static struct vm_operations_struct vcodec_remap_vm_ops = { .open = vcodec_vma_open, .close = vcodec_vma_close, }; static int vcodec_mmap(struct file *file, struct vm_area_struct *vma) { #if 1 VAL_UINT32_T u4I = 0; VAL_ULONG_T length; VAL_ULONG_T pfn; length = vma->vm_end - vma->vm_start; pfn = vma->vm_pgoff << PAGE_SHIFT; if (((length > VENC_REGION) || (pfn < VENC_BASE) || (pfn > VENC_BASE + VENC_REGION)) && ((length > VDEC_REGION) || (pfn < VDEC_BASE_PHY) || (pfn > VDEC_BASE_PHY + VDEC_REGION)) && ((length > HW_REGION) || (pfn < HW_BASE) || (pfn > HW_BASE + HW_REGION)) && ((length > INFO_REGION) || (pfn < INFO_BASE) || (pfn > INFO_BASE + INFO_REGION)) ) { VAL_ULONG_T ulAddr, ulSize; for (u4I = 0; u4I < VCODEC_MULTIPLE_INSTANCE_NUM_x_10; u4I++) { if ((grNonCacheMemoryList[u4I].ulKVA != -1L) && (grNonCacheMemoryList[u4I].ulKPA != -1L)) { ulAddr = grNonCacheMemoryList[u4I].ulKPA; ulSize = (grNonCacheMemoryList[u4I].ulSize + 0x1000 - 1) & ~(0x1000 - 1); if ((length == ulSize) && (pfn == ulAddr)) { MFV_LOGD("[VCODEC] cache idx %d \n", u4I); break; } } } if (u4I == VCODEC_MULTIPLE_INSTANCE_NUM_x_10) { MFV_LOGE("[VCODEC][ERROR] mmap region error: Length(0x%lx), pfn(0x%lx)\n", (VAL_ULONG_T)length, pfn); return -EAGAIN; } } #endif vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); MFV_LOGE("[VCODEC][mmap] vma->start 0x%lx, vma->end 0x%lx, vma->pgoff 0x%lx\n", (VAL_ULONG_T)vma->vm_start, (VAL_ULONG_T)vma->vm_end, (VAL_ULONG_T)vma->vm_pgoff); if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, vma->vm_end - vma->vm_start, vma->vm_page_prot)) { return -EAGAIN; } vma->vm_ops = &vcodec_remap_vm_ops; vcodec_vma_open(vma); return 0; } static struct file_operations vcodec_fops = { .owner = THIS_MODULE, .unlocked_ioctl = vcodec_unlocked_ioctl, .open = vcodec_open, .flush = vcodec_flush, .release = vcodec_release, .mmap = vcodec_mmap, #if IS_ENABLED(CONFIG_COMPAT) .compat_ioctl = vcodec_unlocked_compat_ioctl, #endif }; static int vcodec_probe(struct platform_device *dev) { int ret; MFV_LOGD("+vcodec_probe\n"); mutex_lock(&DecEMILock); gu4DecEMICounter = 0; mutex_unlock(&DecEMILock); mutex_lock(&EncEMILock); gu4EncEMICounter = 0; mutex_unlock(&EncEMILock); mutex_lock(&PWRLock); gu4PWRCounter = 0; mutex_unlock(&PWRLock); mutex_lock(&L2CLock); gu4L2CCounter = 0; mutex_unlock(&L2CLock); ret = register_chrdev_region(vcodec_devno, 1, VCODEC_DEVNAME); if (ret) { MFV_LOGE("[ERROR] Can't Get Major number for VCodec Device\n"); } vcodec_cdev = cdev_alloc(); vcodec_cdev->owner = THIS_MODULE; vcodec_cdev->ops = &vcodec_fops; ret = cdev_add(vcodec_cdev, vcodec_devno, 1); if (ret) { MFV_LOGE("[ERROR] Can't add Vcodec Device\n"); } vcodec_class = class_create(THIS_MODULE, VCODEC_DEVNAME); if (IS_ERR(vcodec_class)) { ret = PTR_ERR(vcodec_class); 
MFV_LOGE("[VCODEC][ERROR] Unable to create class, err = %d", ret); return ret; } vcodec_device = device_create(vcodec_class, NULL, vcodec_devno, NULL, VCODEC_DEVNAME); //if (request_irq(MT_VDEC_IRQ_ID , (irq_handler_t)video_intr_dlr, IRQF_TRIGGER_LOW, VCODEC_DEVNAME, NULL) < 0) if (request_irq(VDEC_IRQ_ID , (irq_handler_t)video_intr_dlr, IRQF_TRIGGER_LOW, VCODEC_DEVNAME, NULL) < 0) { MFV_LOGE("[VCODEC][ERROR] error to request dec irq\n"); } else { MFV_LOGD("[VCODEC] success to request dec irq: %d\n", VDEC_IRQ_ID); } //if (request_irq(MT_VENC_IRQ_ID , (irq_handler_t)video_intr_dlr2, IRQF_TRIGGER_LOW, VCODEC_DEVNAME, NULL) < 0) if (request_irq(VENC_IRQ_ID , (irq_handler_t)video_intr_dlr2, IRQF_TRIGGER_LOW, VCODEC_DEVNAME, NULL) < 0) { MFV_LOGD("[VCODEC][ERROR] error to request enc irq\n"); } else { MFV_LOGD("[VCODEC] success to request enc irq: %d\n", VENC_IRQ_ID); } //disable_irq(MT_VDEC_IRQ_ID); disable_irq(VDEC_IRQ_ID); //disable_irq(MT_VENC_IRQ_ID); disable_irq(VENC_IRQ_ID); #if !defined(CONFIG_MTK_LEGACY) clk_MT_CG_DISP0_SMI_COMMON = devm_clk_get(&dev->dev, "MT_CG_DISP0_SMI_COMMON"); if (IS_ERR(clk_MT_CG_DISP0_SMI_COMMON)) { MFV_LOGE("[VCODEC][ERROR] Unable to devm_clk_get MT_CG_DISP0_SMI_COMMON\n"); return PTR_ERR(clk_MT_CG_DISP0_SMI_COMMON); } clk_MT_CG_VDEC0_VDEC = devm_clk_get(&dev->dev, "MT_CG_VDEC0_VDEC"); if (IS_ERR(clk_MT_CG_VDEC0_VDEC)) { MFV_LOGE("[VCODEC][ERROR] Unable to devm_clk_get MT_CG_VDEC0_VDEC\n"); return PTR_ERR(clk_MT_CG_VDEC0_VDEC); } clk_MT_CG_VDEC1_LARB = devm_clk_get(&dev->dev, "MT_CG_VDEC1_LARB"); if (IS_ERR(clk_MT_CG_VDEC1_LARB)) { MFV_LOGE("[VCODEC][ERROR] Unable to devm_clk_get MT_CG_VDEC1_LARB\n"); return PTR_ERR(clk_MT_CG_VDEC1_LARB); } clk_MT_CG_VENC_VENC = devm_clk_get(&dev->dev, "MT_CG_VENC_VENC"); if (IS_ERR(clk_MT_CG_VENC_VENC)) { MFV_LOGE("[VCODEC][ERROR] Unable to devm_clk_get MT_CG_VENC_VENC\n"); return PTR_ERR(clk_MT_CG_VENC_VENC); } clk_MT_CG_VENC_LARB = devm_clk_get(&dev->dev, "MT_CG_VENC_LARB"); if (IS_ERR(clk_MT_CG_VENC_LARB)) { MFV_LOGE("[VCODEC][ERROR] Unable to devm_clk_get MT_CG_VENC_LARB\n"); return PTR_ERR(clk_MT_CG_VENC_LARB); } #endif /* !defined(CONFIG_MTK_LEGACY) */ MFV_LOGD("vcodec_probe Done\n"); return 0; } static int vcodec_remove(struct platform_device *pDev) { MFV_LOGD("vcodec_remove\n"); return 0; } #ifdef CONFIG_MTK_HIBERNATION extern void mt_irq_set_sens(unsigned int irq, unsigned int sens); extern void mt_irq_set_polarity(unsigned int irq, unsigned int polarity); static int vcodec_pm_restore_noirq(struct device *device) { // vdec: IRQF_TRIGGER_LOW mt_irq_set_sens(VDEC_IRQ_ID, MT_LEVEL_SENSITIVE); mt_irq_set_polarity(VDEC_IRQ_ID, MT_POLARITY_LOW); // venc: IRQF_TRIGGER_LOW mt_irq_set_sens(VENC_IRQ_ID, MT_LEVEL_SENSITIVE); mt_irq_set_polarity(VENC_IRQ_ID, MT_POLARITY_LOW); return 0; } #endif static const struct of_device_id vcodec_of_match[] = { { .compatible = "mediatek,VDEC_GCON", }, {/* sentinel */} }; MODULE_DEVICE_TABLE(of, vcodec_of_match); static struct platform_driver vcodec_driver = { .probe = vcodec_probe, .remove = vcodec_remove, /* .suspend = vcodec_suspend, .resume = vcodec_resume, */ .driver = { .name = VCODEC_DEVNAME, .owner = THIS_MODULE, .of_match_table = vcodec_of_match, }, }; static int __init vcodec_driver_init(void) { VAL_RESULT_T eValHWLockRet; VAL_ULONG_T ulFlags, ulFlagsLockHW, ulFlagsISR; MFV_LOGD("+vcodec_driver_init !!\n"); mutex_lock(&DriverOpenCountLock); Driver_Open_Count = 0; mutex_unlock(&DriverOpenCountLock); { struct device_node *node = NULL; node = of_find_compatible_node(NULL, NULL, 
"mediatek,VENC"); KVA_VENC_BASE = (VAL_ULONG_T)of_iomap(node, 0); VENC_IRQ_ID = irq_of_parse_and_map(node, 0); KVA_VENC_IRQ_STATUS_ADDR = KVA_VENC_BASE + 0x05C; KVA_VENC_IRQ_ACK_ADDR = KVA_VENC_BASE + 0x060; } { struct device_node *node = NULL; node = of_find_compatible_node(NULL, NULL, "mediatek,VDEC_FULL_TOP"); KVA_VDEC_BASE = (VAL_ULONG_T)of_iomap(node, 0); VDEC_IRQ_ID = irq_of_parse_and_map(node, 0); KVA_VDEC_MISC_BASE = KVA_VDEC_BASE + 0x0000; KVA_VDEC_VLD_BASE = KVA_VDEC_BASE + 0x1000; } { struct device_node *node = NULL; node = of_find_compatible_node(NULL, NULL, "mediatek,VDEC_GCON"); KVA_VDEC_GCON_BASE = (VAL_ULONG_T)of_iomap(node, 0); MFV_LOGD("[VCODEC][DeviceTree] KVA_VENC_BASE(0x%lx), KVA_VDEC_BASE(0x%lx), KVA_VDEC_GCON_BASE(0x%lx)", KVA_VENC_BASE, KVA_VDEC_BASE, KVA_VDEC_GCON_BASE); MFV_LOGD("[VCODEC][DeviceTree] VDEC_IRQ_ID(%d), VENC_IRQ_ID(%d)", VDEC_IRQ_ID, VENC_IRQ_ID); } // KVA_VENC_IRQ_STATUS_ADDR = (VAL_ULONG_T)ioremap(VENC_IRQ_STATUS_addr, 4); // KVA_VENC_IRQ_ACK_ADDR = (VAL_ULONG_T)ioremap(VENC_IRQ_ACK_addr, 4); #ifdef VENC_PWR_FPGA // useless 2014_3_4 KVA_VENC_CLK_CFG_0_ADDR = (VAL_ULONG_T)ioremap(CLK_CFG_0_addr, 4); KVA_VENC_CLK_CFG_4_ADDR = (VAL_ULONG_T)ioremap(CLK_CFG_4_addr, 4); KVA_VENC_PWR_ADDR = (VAL_ULONG_T)ioremap(VENC_PWR_addr, 4); KVA_VENCSYS_CG_SET_ADDR = (VAL_ULONG_T)ioremap(VENCSYS_CG_SET_addr, 4); #endif spin_lock_irqsave(&LockDecHWCountLock, ulFlagsLockHW); gu4LockDecHWCount = 0; spin_unlock_irqrestore(&LockDecHWCountLock, ulFlagsLockHW); spin_lock_irqsave(&LockEncHWCountLock, ulFlagsLockHW); gu4LockEncHWCount = 0; spin_unlock_irqrestore(&LockEncHWCountLock, ulFlagsLockHW); spin_lock_irqsave(&DecISRCountLock, ulFlagsISR); gu4DecISRCount = 0; spin_unlock_irqrestore(&DecISRCountLock, ulFlagsISR); spin_lock_irqsave(&EncISRCountLock, ulFlagsISR); gu4EncISRCount = 0; spin_unlock_irqrestore(&EncISRCountLock, ulFlagsISR); mutex_lock(&VdecPWRLock); gu4VdecPWRCounter = 0; mutex_unlock(&VdecPWRLock); mutex_lock(&VencPWRLock); gu4VencPWRCounter = 0; mutex_unlock(&VencPWRLock); mutex_lock(&IsOpenedLock); if (VAL_FALSE == bIsOpened) { bIsOpened = VAL_TRUE; //vcodec_probe(NULL); } mutex_unlock(&IsOpenedLock); mutex_lock(&VdecHWLock); gu4VdecLockThreadId = 0; grVcodecDecHWLock.pvHandle = 0; grVcodecDecHWLock.eDriverType = VAL_DRIVER_TYPE_NONE; grVcodecDecHWLock.rLockedTime.u4Sec = 0; grVcodecDecHWLock.rLockedTime.u4uSec = 0; mutex_unlock(&VdecHWLock); mutex_lock(&VencHWLock); grVcodecEncHWLock.pvHandle = 0; grVcodecEncHWLock.eDriverType = VAL_DRIVER_TYPE_NONE; grVcodecEncHWLock.rLockedTime.u4Sec = 0; grVcodecEncHWLock.rLockedTime.u4uSec = 0; mutex_unlock(&VencHWLock); //HWLockEvent part mutex_lock(&DecHWLockEventTimeoutLock); DecHWLockEvent.pvHandle = "DECHWLOCK_EVENT"; DecHWLockEvent.u4HandleSize = sizeof("DECHWLOCK_EVENT") + 1; DecHWLockEvent.u4TimeoutMs = 1; mutex_unlock(&DecHWLockEventTimeoutLock); eValHWLockRet = eVideoCreateEvent(&DecHWLockEvent, sizeof(VAL_EVENT_T)); if (VAL_RESULT_NO_ERROR != eValHWLockRet) { MFV_LOGE("[VCODEC][ERROR] create dec hwlock event error\n"); } mutex_lock(&EncHWLockEventTimeoutLock); EncHWLockEvent.pvHandle = "ENCHWLOCK_EVENT"; EncHWLockEvent.u4HandleSize = sizeof("ENCHWLOCK_EVENT") + 1; EncHWLockEvent.u4TimeoutMs = 1; mutex_unlock(&EncHWLockEventTimeoutLock); eValHWLockRet = eVideoCreateEvent(&EncHWLockEvent, sizeof(VAL_EVENT_T)); if (VAL_RESULT_NO_ERROR != eValHWLockRet) { MFV_LOGE("[VCODEC][ERROR] create enc hwlock event error\n"); } //IsrEvent part spin_lock_irqsave(&DecIsrLock, ulFlags); DecIsrEvent.pvHandle = "DECISR_EVENT"; 
DecIsrEvent.u4HandleSize = sizeof("DECISR_EVENT") + 1; DecIsrEvent.u4TimeoutMs = 1; spin_unlock_irqrestore(&DecIsrLock, ulFlags); eValHWLockRet = eVideoCreateEvent(&DecIsrEvent, sizeof(VAL_EVENT_T)); if (VAL_RESULT_NO_ERROR != eValHWLockRet) { MFV_LOGE("[VCODEC][ERROR] create dec isr event error\n"); } spin_lock_irqsave(&EncIsrLock, ulFlags); EncIsrEvent.pvHandle = "ENCISR_EVENT"; EncIsrEvent.u4HandleSize = sizeof("ENCISR_EVENT") + 1; EncIsrEvent.u4TimeoutMs = 1; spin_unlock_irqrestore(&EncIsrLock, ulFlags); eValHWLockRet = eVideoCreateEvent(&EncIsrEvent, sizeof(VAL_EVENT_T)); if (VAL_RESULT_NO_ERROR != eValHWLockRet) { MFV_LOGE("[VCODEC][ERROR] create enc isr event error\n"); } MFV_LOGD("vcodec_driver_init Done\n"); #ifdef CONFIG_MTK_HIBERNATION register_swsusp_restore_noirq_func(ID_M_VCODEC, vcodec_pm_restore_noirq, NULL); #endif return platform_driver_register(&vcodec_driver); } static void __exit vcodec_driver_exit(void) { VAL_RESULT_T eValHWLockRet; MFV_LOGD("vcodec_driver_exit\n"); mutex_lock(&IsOpenedLock); if (VAL_TRUE == bIsOpened) { bIsOpened = VAL_FALSE; } mutex_unlock(&IsOpenedLock); cdev_del(vcodec_cdev); unregister_chrdev_region(vcodec_devno, 1); // [TODO] iounmap the following? #if 0 iounmap((void *)KVA_VENC_IRQ_STATUS_ADDR); iounmap((void *)KVA_VENC_IRQ_ACK_ADDR); #endif #ifdef VENC_PWR_FPGA iounmap((void *)KVA_VENC_CLK_CFG_0_ADDR); iounmap((void *)KVA_VENC_CLK_CFG_4_ADDR); iounmap((void *)KVA_VENC_PWR_ADDR); iounmap((void *)KVA_VENCSYS_CG_SET_ADDR); #endif // [TODO] free IRQ here //free_irq(MT_VENC_IRQ_ID, NULL); free_irq(VENC_IRQ_ID, NULL); //free_irq(MT_VDEC_IRQ_ID, NULL); free_irq(VDEC_IRQ_ID, NULL); //MT6589_HWLockEvent part eValHWLockRet = eVideoCloseEvent(&DecHWLockEvent, sizeof(VAL_EVENT_T)); if (VAL_RESULT_NO_ERROR != eValHWLockRet) { MFV_LOGE("[VCODEC][ERROR] close dec hwlock event error\n"); } eValHWLockRet = eVideoCloseEvent(&EncHWLockEvent, sizeof(VAL_EVENT_T)); if (VAL_RESULT_NO_ERROR != eValHWLockRet) { MFV_LOGE("[VCODEC][ERROR] close enc hwlock event error\n"); } //MT6589_IsrEvent part eValHWLockRet = eVideoCloseEvent(&DecIsrEvent, sizeof(VAL_EVENT_T)); if (VAL_RESULT_NO_ERROR != eValHWLockRet) { MFV_LOGE("[VCODEC][ERROR] close dec isr event error\n"); } eValHWLockRet = eVideoCloseEvent(&EncIsrEvent, sizeof(VAL_EVENT_T)); if (VAL_RESULT_NO_ERROR != eValHWLockRet) { MFV_LOGE("[VCODEC][ERROR] close enc isr event error\n"); } #ifdef CONFIG_MTK_HIBERNATION unregister_swsusp_restore_noirq_func(ID_M_VCODEC); #endif platform_driver_unregister(&vcodec_driver); } module_init(vcodec_driver_init); module_exit(vcodec_driver_exit); MODULE_AUTHOR("Legis, Lu <legis.lu@mediatek.com>"); MODULE_DESCRIPTION("Denali-1 Vcodec Driver"); MODULE_LICENSE("GPL");
valascus/android_p8000_kernel_nougat
drivers/misc/mediatek/videocodec/mt6735/videocodec_kernel_driver_D1.c
C
gpl-3.0
104,623
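/*
 * A minimal sketch (hypothetical struct and function names, not from the
 * driver above) of the compat-ioctl marshaling pattern that the driver's
 * compat_copy_struct() applies field by field: each member of the 32-bit
 * userspace layout is fetched with get_user(), written into the native
 * layout with put_user(), and the error results are OR-ed together so a
 * single check at the end catches any faulting access.
 */
#include <linux/compat.h>
#include <linux/uaccess.h>

struct example_req {                      /* native layout seen by the driver */
	void __user *handle;
	unsigned int timeout_ms;
};

struct compat_example_req {               /* layout used by a 32-bit caller */
	compat_uptr_t handle;
	compat_uint_t timeout_ms;
};

static int example_from_user32(struct compat_example_req __user *from32,
			       struct example_req __user *to)
{
	compat_uptr_t p;
	compat_uint_t u;
	int err;

	err  = get_user(p, &from32->handle);
	err |= put_user(compat_ptr(p), &to->handle);   /* widen 32-bit uptr */
	err |= get_user(u, &from32->timeout_ms);
	err |= put_user(u, &to->timeout_ms);

	return err;	/* non-zero if any userspace access faulted */
}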
/* * linux/ipc/namespace.c * Copyright (C) 2006 Pavel Emelyanov <xemul@openvz.org> OpenVZ, SWsoft Inc. */ #include <linux/ipc.h> #include <linux/msg.h> #include <linux/ipc_namespace.h> #include <linux/rcupdate.h> #include <linux/nsproxy.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/mount.h> #include <linux/user_namespace.h> #include <linux/proc_fs.h> #include "util.h" static struct ipc_namespace *create_ipc_ns(void) { struct ipc_namespace *ns; int err; ns = kmalloc(sizeof(struct ipc_namespace), GFP_KERNEL); if (ns == NULL) return ERR_PTR(-ENOMEM); err = proc_alloc_inum(&ns->proc_inum); if (err) { kfree(ns); return ERR_PTR(err); } atomic_set(&ns->count, 1); err = mq_init_ns(ns); if (err) { proc_free_inum(ns->proc_inum); kfree(ns); return ERR_PTR(err); } atomic_inc(&nr_ipc_ns); sem_init_ns(ns); msg_init_ns(ns); shm_init_ns(ns); /* * msgmni has already been computed for the new ipc ns. * Thus, do the ipcns creation notification before registering that * new ipcns in the chain. */ ipcns_notify(IPCNS_CREATED); register_ipcns_notifier(ns); return ns; } struct ipc_namespace *copy_ipcs(unsigned long flags, struct ipc_namespace *ns) { if (!(flags & CLONE_NEWIPC)) return get_ipc_ns(ns); return create_ipc_ns(); } /* * free_ipcs - free all ipcs of one type * @ns: the namespace to remove the ipcs from * @ids: the table of ipcs to free * @free: the function called to free each individual ipc * * Called for each kind of ipc when an ipc_namespace exits. */ void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids, void (*free)(struct ipc_namespace *, struct kern_ipc_perm *)) { struct kern_ipc_perm *perm; int next_id; int total, in_use; down_write(&ids->rw_mutex); in_use = ids->in_use; for (total = 0, next_id = 0; total < in_use; next_id++) { perm = idr_find(&ids->ipcs_idr, next_id); if (perm == NULL) continue; ipc_lock_by_ptr(perm); free(ns, perm); total++; } up_write(&ids->rw_mutex); } static void free_ipc_ns(struct ipc_namespace *ns) { /* * Unregistering the hotplug notifier at the beginning guarantees * that the ipc namespace won't be freed while we are inside the * callback routine. Since the blocking_notifier_chain_XXX routines * hold a rw lock on the notifier list, unregister_ipcns_notifier() * won't take the rw lock before blocking_notifier_call_chain() has * released the rd lock. */ unregister_ipcns_notifier(ns); sem_exit_ns(ns); msg_exit_ns(ns); shm_exit_ns(ns); proc_free_inum(ns->proc_inum); kfree(ns); atomic_dec(&nr_ipc_ns); /* * Do the ipcns removal notification after decrementing nr_ipc_ns in * order to have a correct value when recomputing msgmni. */ ipcns_notify(IPCNS_REMOVED); } /* * put_ipc_ns - drop a reference to an ipc namespace. * @ns: the namespace to put * * If this is the last task in the namespace exiting, and * it is dropping the refcount to 0, then it can race with * a task in another ipc namespace but in a mounts namespace * which has this ipcns's mqueuefs mounted, doing some action * with one of the mqueuefs files. That can raise the refcount. * So dropping the refcount, and raising the refcount when * accessing it through the VFS, are protected with mq_lock. * * (Clearly, a task raising the refcount on its own ipc_ns * needn't take mq_lock since it can't race with the last task * in the ipcns exiting). 
*/ void put_ipc_ns(struct ipc_namespace *ns) { if (atomic_dec_and_lock(&ns->count, &mq_lock)) { mq_clear_sbinfo(ns); spin_unlock(&mq_lock); mq_put_mnt(ns); free_ipc_ns(ns); } } static void *ipcns_get(struct task_struct *task) { struct ipc_namespace *ns = NULL; struct nsproxy *nsproxy; rcu_read_lock(); nsproxy = task_nsproxy(task); if (nsproxy) ns = get_ipc_ns(nsproxy->ipc_ns); rcu_read_unlock(); return ns; } static void ipcns_put(void *ns) { return put_ipc_ns(ns); } static int ipcns_install(struct nsproxy *nsproxy, void *ns) { /* Ditch state from the old ipc namespace */ exit_sem(current); put_ipc_ns(nsproxy->ipc_ns); nsproxy->ipc_ns = get_ipc_ns(ns); return 0; } static unsigned int ipcns_inum(void *vp) { struct ipc_namespace *ns = vp; return ns->proc_inum; } const struct proc_ns_operations ipcns_operations = { .name = "ipc", .type = CLONE_NEWIPC, .get = ipcns_get, .put = ipcns_put, .install = ipcns_install, .inum = ipcns_inum, };
augustayu/fastsocket
kernel/ipc/namespace.c
C
gpl-2.0
4,437
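/*
 * A minimal sketch (hypothetical names) of the teardown idiom used by
 * put_ipc_ns() above: atomic_dec_and_lock() takes the spinlock only when
 * the reference count actually drops to zero, so a lookup path that bumps
 * the count under the same lock can never race with the final free.
 */
#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

static DEFINE_SPINLOCK(obj_lock);

struct obj {
	atomic_t count;
	/* ... payload ... */
};

static void obj_put(struct obj *o)
{
	if (atomic_dec_and_lock(&o->count, &obj_lock)) {
		/* count hit zero with obj_lock held: unpublish, then free */
		spin_unlock(&obj_lock);
		kfree(o);
	}
}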
/* * CHRP pci routines. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/init.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/irq.h> #include <asm/hydra.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/sections.h> #include <asm/pci-bridge.h> #include <asm/grackle.h> #include <asm/rtas.h> #include "chrp.h" #include "gg2.h" /* LongTrail */ void __iomem *gg2_pci_config_base; /* * The VLSI Golden Gate II has only 512K of PCI configuration space, so we * limit the bus number to 3 bits */ int gg2_read_config(struct pci_bus *bus, unsigned int devfn, int off, int len, u32 *val) { volatile void __iomem *cfg_data; struct pci_controller *hose = pci_bus_to_host(bus); if (bus->number > 7) return PCIBIOS_DEVICE_NOT_FOUND; /* * Note: the caller has already checked that off is * suitably aligned and that len is 1, 2 or 4. */ cfg_data = hose->cfg_data + ((bus->number<<16) | (devfn<<8) | off); switch (len) { case 1: *val = in_8(cfg_data); break; case 2: *val = in_le16(cfg_data); break; default: *val = in_le32(cfg_data); break; } return PCIBIOS_SUCCESSFUL; } int gg2_write_config(struct pci_bus *bus, unsigned int devfn, int off, int len, u32 val) { volatile void __iomem *cfg_data; struct pci_controller *hose = pci_bus_to_host(bus); if (bus->number > 7) return PCIBIOS_DEVICE_NOT_FOUND; /* * Note: the caller has already checked that off is * suitably aligned and that len is 1, 2 or 4. */ cfg_data = hose->cfg_data + ((bus->number<<16) | (devfn<<8) | off); switch (len) { case 1: out_8(cfg_data, val); break; case 2: out_le16(cfg_data, val); break; default: out_le32(cfg_data, val); break; } return PCIBIOS_SUCCESSFUL; } static struct pci_ops gg2_pci_ops = { .read = gg2_read_config, .write = gg2_write_config, }; /* * Access functions for PCI config space using RTAS calls. */ int rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset, int len, u32 *val) { struct pci_controller *hose = pci_bus_to_host(bus); unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8) | (((bus->number - hose->first_busno) & 0xff) << 16) | (hose->global_number << 24); int ret = -1; int rval; rval = rtas_call(rtas_token("read-pci-config"), 2, 2, &ret, addr, len); *val = ret; return rval? PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL; } int rtas_write_config(struct pci_bus *bus, unsigned int devfn, int offset, int len, u32 val) { struct pci_controller *hose = pci_bus_to_host(bus); unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8) | (((bus->number - hose->first_busno) & 0xff) << 16) | (hose->global_number << 24); int rval; rval = rtas_call(rtas_token("write-pci-config"), 3, 1, NULL, addr, len, val); return rval? 
PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL; } static struct pci_ops rtas_pci_ops = { .read = rtas_read_config, .write = rtas_write_config, }; volatile struct Hydra __iomem *Hydra = NULL; int __init hydra_init(void) { struct device_node *np; struct resource r; np = of_find_node_by_name(NULL, "mac-io"); if (np == NULL || of_address_to_resource(np, 0, &r)) { of_node_put(np); return 0; } of_node_put(np); Hydra = ioremap(r.start, r.end-r.start); printk("Hydra Mac I/O at %llx\n", (unsigned long long)r.start); printk("Hydra Feature_Control was %x", in_le32(&Hydra->Feature_Control)); out_le32(&Hydra->Feature_Control, (HYDRA_FC_SCC_CELL_EN | HYDRA_FC_SCSI_CELL_EN | HYDRA_FC_SCCA_ENABLE | HYDRA_FC_SCCB_ENABLE | HYDRA_FC_ARB_BYPASS | HYDRA_FC_MPIC_ENABLE | HYDRA_FC_SLOW_SCC_PCLK | HYDRA_FC_MPIC_IS_MASTER)); printk(", now %x\n", in_le32(&Hydra->Feature_Control)); return 1; } #define PRG_CL_RESET_VALID 0x00010000 static void __init setup_python(struct pci_controller *hose, struct device_node *dev) { u32 __iomem *reg; u32 val; struct resource r; if (of_address_to_resource(dev, 0, &r)) { printk(KERN_ERR "No address for Python PCI controller\n"); return; } /* Clear the magic go-slow bit */ reg = ioremap(r.start + 0xf6000, 0x40); BUG_ON(!reg); val = in_be32(&reg[12]); if (val & PRG_CL_RESET_VALID) { out_be32(&reg[12], val & ~PRG_CL_RESET_VALID); in_be32(&reg[12]); } iounmap(reg); setup_indirect_pci(hose, r.start + 0xf8000, r.start + 0xf8010, 0); } /* Marvell Discovery II based Pegasos 2 */ static void __init setup_peg2(struct pci_controller *hose, struct device_node *dev) { struct device_node *root = of_find_node_by_path("/"); struct device_node *rtas; rtas = of_find_node_by_name (root, "rtas"); if (rtas) { hose->ops = &rtas_pci_ops; of_node_put(rtas); } else { printk ("RTAS supporting Pegasos OF not found, please upgrade" " your firmware\n"); } pci_add_flags(PCI_REASSIGN_ALL_BUS); /* keep the reference to the root node */ } void __init chrp_find_bridges(void) { struct device_node *dev; const int *bus_range; int len, index = -1; struct pci_controller *hose; const unsigned int *dma; const char *model, *machine; int is_longtrail = 0, is_mot = 0, is_pegasos = 0; struct device_node *root = of_find_node_by_path("/"); struct resource r; /* * The PCI host bridge nodes on some machines don't have * properties to adequately identify them, so we have to * look at what sort of machine this is as well. 
*/ machine = of_get_property(root, "model", NULL); if (machine != NULL) { is_longtrail = strncmp(machine, "IBM,LongTrail", 13) == 0; is_mot = strncmp(machine, "MOT", 3) == 0; if (strncmp(machine, "Pegasos2", 8) == 0) is_pegasos = 2; else if (strncmp(machine, "Pegasos", 7) == 0) is_pegasos = 1; } for (dev = root->child; dev != NULL; dev = dev->sibling) { if (dev->type == NULL || strcmp(dev->type, "pci") != 0) continue; ++index; /* The GG2 bridge on the LongTrail doesn't have an address */ if (of_address_to_resource(dev, 0, &r) && !is_longtrail) { printk(KERN_WARNING "Can't use %s: no address\n", dev->full_name); continue; } bus_range = of_get_property(dev, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) { printk(KERN_WARNING "Can't get bus-range for %s\n", dev->full_name); continue; } if (bus_range[1] == bus_range[0]) printk(KERN_INFO "PCI bus %d", bus_range[0]); else printk(KERN_INFO "PCI buses %d..%d", bus_range[0], bus_range[1]); printk(" controlled by %s", dev->full_name); if (!is_longtrail) printk(" at %llx", (unsigned long long)r.start); printk("\n"); hose = pcibios_alloc_controller(dev); if (!hose) { printk("Can't allocate PCI controller structure for %s\n", dev->full_name); continue; } hose->first_busno = hose->self_busno = bus_range[0]; hose->last_busno = bus_range[1]; model = of_get_property(dev, "model", NULL); if (model == NULL) model = "<none>"; if (strncmp(model, "IBM, Python", 11) == 0) { setup_python(hose, dev); } else if (is_mot || strncmp(model, "Motorola, Grackle", 17) == 0) { setup_grackle(hose); } else if (is_longtrail) { void __iomem *p = ioremap(GG2_PCI_CONFIG_BASE, 0x80000); hose->ops = &gg2_pci_ops; hose->cfg_data = p; gg2_pci_config_base = p; } else if (is_pegasos == 1) { setup_indirect_pci(hose, 0xfec00cf8, 0xfee00cfc, 0); } else if (is_pegasos == 2) { setup_peg2(hose, dev); } else if (!strncmp(model, "IBM,CPC710", 10)) { setup_indirect_pci(hose, r.start + 0x000f8000, r.start + 0x000f8010, 0); if (index == 0) { dma = of_get_property(dev, "system-dma-base", &len); if (dma && len >= sizeof(*dma)) { dma = (unsigned int *) (((unsigned long)dma) + len - sizeof(*dma)); pci_dram_offset = *dma; } } } else { printk("No methods for %s (model %s), using RTAS\n", dev->full_name, model); hose->ops = &rtas_pci_ops; } pci_process_bridge_OF_ranges(hose, dev, index == 0); /* check the first bridge for a property that we can use to set pci_dram_offset */ dma = of_get_property(dev, "ibm,dma-ranges", &len); if (index == 0 && dma != NULL && len >= 6 * sizeof(*dma)) { pci_dram_offset = dma[2] - dma[3]; printk("pci_dram_offset = %lx\n", pci_dram_offset); } } of_node_put(root); } /* SL82C105 IDE Control/Status Register */ #define SL82C105_IDECSR 0x40 /* Fixup for Winbond ATA quirk, required for briq mostly because the * 8259 is configured for level sensitive IRQ 14 and so wants the * ATA controller to be set to fully native mode or bad things * will happen. 
*/ static void __devinit chrp_pci_fixup_winbond_ata(struct pci_dev *sl82c105) { u8 progif; /* If non-briq machines need that fixup too, please speak up */ if (!machine_is(chrp) || _chrp_type != _CHRP_briq) return; if ((sl82c105->class & 5) != 5) { printk("W83C553: Switching SL82C105 IDE to PCI native mode\n"); /* Enable SL82C105 PCI native IDE mode */ pci_read_config_byte(sl82c105, PCI_CLASS_PROG, &progif); pci_write_config_byte(sl82c105, PCI_CLASS_PROG, progif | 0x05); sl82c105->class |= 0x05; /* Disable SL82C105 second port */ pci_write_config_word(sl82c105, SL82C105_IDECSR, 0x0003); /* Clear IO BARs, they will be reassigned */ pci_write_config_dword(sl82c105, PCI_BASE_ADDRESS_0, 0); pci_write_config_dword(sl82c105, PCI_BASE_ADDRESS_1, 0); pci_write_config_dword(sl82c105, PCI_BASE_ADDRESS_2, 0); pci_write_config_dword(sl82c105, PCI_BASE_ADDRESS_3, 0); } } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105, chrp_pci_fixup_winbond_ata); /* Pegasos2 firmware version 20040810 configures the built-in IDE controller * in legacy mode, but sets the PCI registers to PCI native mode. * The chip can only operate in legacy mode, so force the PCI class into legacy * mode as well. The same fixup must be done to the class-code property in * the IDE node /pci@80000000/ide@C,1 */ static void chrp_pci_fixup_vt8231_ata(struct pci_dev *viaide) { u8 progif; struct pci_dev *viaisa; if (!machine_is(chrp) || _chrp_type != _CHRP_Pegasos) return; if (viaide->irq != 14) return; viaisa = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL); if (!viaisa) return; dev_info(&viaide->dev, "Fixing VIA IDE, force legacy mode on\n"); pci_read_config_byte(viaide, PCI_CLASS_PROG, &progif); pci_write_config_byte(viaide, PCI_CLASS_PROG, progif & ~0x5); viaide->class &= ~0x5; pci_dev_put(viaisa); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1, chrp_pci_fixup_vt8231_ata);
augustayu/fastsocket
kernel/arch/powerpc/platforms/chrp/pci.c
C
gpl-2.0
10,671
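Both IDE fixups above work by rewriting the low bits of the PCI programming-interface byte (PCI_CLASS_PROG): bit 0 selects native mode for the primary IDE channel and bit 2 for the secondary, so the Winbond fixup ORs in 0x05 to force both channels native while the VIA fixup masks the same bits off to force legacy mode. A minimal userspace sketch of that bit manipulation (hypothetical helper, not part of the kernel file above):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical illustration of the IDE progif bits toggled by the fixups:
 * bit 0 = primary channel native, bit 2 = secondary channel native. */
static uint8_t ide_progif_set_native(uint8_t progif, int native)
{
	return native ? (uint8_t)(progif | 0x05)    /* both channels native */
		      : (uint8_t)(progif & ~0x05);  /* both channels legacy */
}

int main(void)
{
	uint8_t progif = 0x8a;	/* example value: bus-master IDE, legacy mode */
	printf("forced native: 0x%02x\n", ide_progif_set_native(progif, 1)); /* 0x8f */
	printf("forced legacy: 0x%02x\n", ide_progif_set_native(progif, 0)); /* 0x8a */
	return 0;
}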
/* * Implementation of the userspace SID hashtable. * * Author : Eamon Walsh, <ewalsh@epoch.ncsc.mil> */ #include <errno.h> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include "selinux_internal.h" #include <selinux/avc.h> #include "avc_sidtab.h" #include "avc_internal.h" static inline unsigned sidtab_hash(const char * key) { char *p, *keyp; unsigned int size; unsigned int val; val = 0; keyp = (char *)key; size = strlen(keyp); for (p = keyp; (unsigned int)(p - keyp) < size; p++) val = (val << 4 | (val >> (8 * sizeof(unsigned int) - 4))) ^ (*p); return val & (SIDTAB_SIZE - 1); } int sidtab_init(struct sidtab *s) { int i, rc = 0; s->htable = (struct sidtab_node **)avc_malloc (sizeof(struct sidtab_node *) * SIDTAB_SIZE); if (!s->htable) { rc = -1; goto out; } for (i = 0; i < SIDTAB_SIZE; i++) s->htable[i] = NULL; s->nel = 0; out: return rc; } int sidtab_insert(struct sidtab *s, const char * ctx) { int hvalue, rc = 0; struct sidtab_node *newnode; char * newctx; newnode = (struct sidtab_node *)avc_malloc(sizeof(*newnode)); if (!newnode) { rc = -1; goto out; } newctx = (char *) strdup(ctx); if (!newctx) { rc = -1; avc_free(newnode); goto out; } hvalue = sidtab_hash(newctx); newnode->next = s->htable[hvalue]; newnode->sid_s.ctx = newctx; newnode->sid_s.refcnt = 1; /* unused */ s->htable[hvalue] = newnode; s->nel++; out: return rc; } int sidtab_context_to_sid(struct sidtab *s, const char * ctx, security_id_t * sid) { int hvalue, rc = 0; struct sidtab_node *cur; *sid = NULL; hvalue = sidtab_hash(ctx); loop: cur = s->htable[hvalue]; while (cur != NULL && strcmp(cur->sid_s.ctx, ctx)) cur = cur->next; if (cur == NULL) { /* need to make a new entry */ rc = sidtab_insert(s, ctx); if (rc) goto out; goto loop; /* find the newly inserted node */ } *sid = &cur->sid_s; out: return rc; } void sidtab_sid_stats(struct sidtab *h, char *buf, int buflen) { int i, chain_len, slots_used, max_chain_len; struct sidtab_node *cur; slots_used = 0; max_chain_len = 0; for (i = 0; i < SIDTAB_SIZE; i++) { cur = h->htable[i]; if (cur) { slots_used++; chain_len = 0; while (cur) { chain_len++; cur = cur->next; } if (chain_len > max_chain_len) max_chain_len = chain_len; } } snprintf(buf, buflen, "%s: %d SID entries and %d/%d buckets used, longest " "chain length %d\n", avc_prefix, h->nel, slots_used, SIDTAB_SIZE, max_chain_len); } void sidtab_destroy(struct sidtab *s) { int i; struct sidtab_node *cur, *temp; if (!s) return; for (i = 0; i < SIDTAB_SIZE; i++) { cur = s->htable[i]; while (cur != NULL) { temp = cur; cur = cur->next; freecon(temp->sid_s.ctx); avc_free(temp); } s->htable[i] = NULL; } avc_free(s->htable); s->htable = NULL; }
KubaKaszycki/kubux
libselinux/src/avc_sidtab.c
C
gpl-3.0
2,894
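sidtab_hash() above is a rotate-left-by-4 XOR hash over the context string, masked down to a bucket index; the mask is only a valid modulo because SIDTAB_SIZE is a power of two. A standalone sketch of the same bucket selection (SIDTAB_SIZE is assumed to be 128 here; the real value is defined in avc_sidtab.h):

#include <stdio.h>

#define SIDTAB_SIZE 128	/* assumed power of two for this sketch */

/* Same rotate-and-XOR scheme as sidtab_hash() above. */
static unsigned ctx_hash(const char *key)
{
	unsigned val = 0;
	const char *p;
	for (p = key; *p != '\0'; p++)
		val = (val << 4 | (val >> (8 * sizeof(unsigned) - 4))) ^ (unsigned char)*p;
	return val & (SIDTAB_SIZE - 1);	/* power-of-two mask replaces '% SIDTAB_SIZE' */
}

int main(void)
{
	printf("%u\n", ctx_hash("system_u:object_r:etc_t:s0"));
	printf("%u\n", ctx_hash("system_u:object_r:bin_t:s0"));
	return 0;
}

Note also how sidtab_context_to_sid() resolves a miss: it inserts the context and then jumps back (goto loop) to re-walk the same bucket, so the returned security_id_t always points at a node owned by the table.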
/* * jsimd_i386.c * * Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB * Copyright 2009-2011, 2013-2014 D. R. Commander * * Based on the x86 SIMD extension for IJG JPEG library, * Copyright (C) 1999-2006, MIYASAKA Masaru. * For conditions of distribution and use, see copyright notice in jsimdext.inc * * This file contains the interface between the "normal" portions * of the library and the SIMD implementations when running on a * 32-bit x86 architecture. */ #define JPEG_INTERNALS #include "../jinclude.h" #include "../jpeglib.h" #include "../jsimd.h" #include "../jdct.h" #include "../jsimddct.h" #include "jsimd.h" /* * In the PIC cases, we have no guarantee that constants will keep * their alignment. This macro allows us to verify it at runtime. */ #define IS_ALIGNED(ptr, order) (((unsigned)ptr & ((1 << order) - 1)) == 0) #define IS_ALIGNED_SSE(ptr) (IS_ALIGNED(ptr, 4)) /* 16 byte alignment */ static unsigned int simd_support = ~0; /* * Check what SIMD accelerations are supported. * * FIXME: This code is racy under a multi-threaded environment. */ LOCAL(void) init_simd (void) { char *env = NULL; if (simd_support != ~0U) return; simd_support = jpeg_simd_cpu_support(); /* Force different settings through environment variables */ env = getenv("JSIMD_FORCEMMX"); if ((env != NULL) && (strcmp(env, "1") == 0)) simd_support &= JSIMD_MMX; env = getenv("JSIMD_FORCE3DNOW"); if ((env != NULL) && (strcmp(env, "1") == 0)) simd_support &= JSIMD_3DNOW|JSIMD_MMX; env = getenv("JSIMD_FORCESSE"); if ((env != NULL) && (strcmp(env, "1") == 0)) simd_support &= JSIMD_SSE|JSIMD_MMX; env = getenv("JSIMD_FORCESSE2"); if ((env != NULL) && (strcmp(env, "1") == 0)) simd_support &= JSIMD_SSE2; env = getenv("JSIMD_FORCENONE"); if ((env != NULL) && (strcmp(env, "1") == 0)) simd_support = 0; } GLOBAL(int) jsimd_can_rgb_ycc (void) { init_simd(); /* The code is optimised for these values only */ if (BITS_IN_JSAMPLE != 8) return 0; if (sizeof(JDIMENSION) != 4) return 0; if ((RGB_PIXELSIZE != 3) && (RGB_PIXELSIZE != 4)) return 0; if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_rgb_ycc_convert_sse2)) return 1; if (simd_support & JSIMD_MMX) return 1; return 0; } GLOBAL(int) jsimd_can_rgb_gray (void) { init_simd(); /* The code is optimised for these values only */ if (BITS_IN_JSAMPLE != 8) return 0; if (sizeof(JDIMENSION) != 4) return 0; if ((RGB_PIXELSIZE != 3) && (RGB_PIXELSIZE != 4)) return 0; if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_rgb_gray_convert_sse2)) return 1; if (simd_support & JSIMD_MMX) return 1; return 0; } GLOBAL(int) jsimd_can_ycc_rgb (void) { init_simd(); /* The code is optimised for these values only */ if (BITS_IN_JSAMPLE != 8) return 0; if (sizeof(JDIMENSION) != 4) return 0; if ((RGB_PIXELSIZE != 3) && (RGB_PIXELSIZE != 4)) return 0; if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_ycc_rgb_convert_sse2)) return 1; if (simd_support & JSIMD_MMX) return 1; return 0; } GLOBAL(int) jsimd_can_ycc_rgb565 (void) { return 0; } GLOBAL(void) jsimd_rgb_ycc_convert (j_compress_ptr cinfo, JSAMPARRAY input_buf, JSAMPIMAGE output_buf, JDIMENSION output_row, int num_rows) { void (*sse2fct)(JDIMENSION, JSAMPARRAY, JSAMPIMAGE, JDIMENSION, int); void (*mmxfct)(JDIMENSION, JSAMPARRAY, JSAMPIMAGE, JDIMENSION, int); switch(cinfo->in_color_space) { case JCS_EXT_RGB: sse2fct=jsimd_extrgb_ycc_convert_sse2; mmxfct=jsimd_extrgb_ycc_convert_mmx; break; case JCS_EXT_RGBX: case JCS_EXT_RGBA: sse2fct=jsimd_extrgbx_ycc_convert_sse2; mmxfct=jsimd_extrgbx_ycc_convert_mmx; break; case JCS_EXT_BGR: 
sse2fct=jsimd_extbgr_ycc_convert_sse2; mmxfct=jsimd_extbgr_ycc_convert_mmx; break; case JCS_EXT_BGRX: case JCS_EXT_BGRA: sse2fct=jsimd_extbgrx_ycc_convert_sse2; mmxfct=jsimd_extbgrx_ycc_convert_mmx; break; case JCS_EXT_XBGR: case JCS_EXT_ABGR: sse2fct=jsimd_extxbgr_ycc_convert_sse2; mmxfct=jsimd_extxbgr_ycc_convert_mmx; break; case JCS_EXT_XRGB: case JCS_EXT_ARGB: sse2fct=jsimd_extxrgb_ycc_convert_sse2; mmxfct=jsimd_extxrgb_ycc_convert_mmx; break; default: sse2fct=jsimd_rgb_ycc_convert_sse2; mmxfct=jsimd_rgb_ycc_convert_mmx; break; } if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_rgb_ycc_convert_sse2)) sse2fct(cinfo->image_width, input_buf, output_buf, output_row, num_rows); else if (simd_support & JSIMD_MMX) mmxfct(cinfo->image_width, input_buf, output_buf, output_row, num_rows); } GLOBAL(void) jsimd_rgb_gray_convert (j_compress_ptr cinfo, JSAMPARRAY input_buf, JSAMPIMAGE output_buf, JDIMENSION output_row, int num_rows) { void (*sse2fct)(JDIMENSION, JSAMPARRAY, JSAMPIMAGE, JDIMENSION, int); void (*mmxfct)(JDIMENSION, JSAMPARRAY, JSAMPIMAGE, JDIMENSION, int); switch(cinfo->in_color_space) { case JCS_EXT_RGB: sse2fct=jsimd_extrgb_gray_convert_sse2; mmxfct=jsimd_extrgb_gray_convert_mmx; break; case JCS_EXT_RGBX: case JCS_EXT_RGBA: sse2fct=jsimd_extrgbx_gray_convert_sse2; mmxfct=jsimd_extrgbx_gray_convert_mmx; break; case JCS_EXT_BGR: sse2fct=jsimd_extbgr_gray_convert_sse2; mmxfct=jsimd_extbgr_gray_convert_mmx; break; case JCS_EXT_BGRX: case JCS_EXT_BGRA: sse2fct=jsimd_extbgrx_gray_convert_sse2; mmxfct=jsimd_extbgrx_gray_convert_mmx; break; case JCS_EXT_XBGR: case JCS_EXT_ABGR: sse2fct=jsimd_extxbgr_gray_convert_sse2; mmxfct=jsimd_extxbgr_gray_convert_mmx; break; case JCS_EXT_XRGB: case JCS_EXT_ARGB: sse2fct=jsimd_extxrgb_gray_convert_sse2; mmxfct=jsimd_extxrgb_gray_convert_mmx; break; default: sse2fct=jsimd_rgb_gray_convert_sse2; mmxfct=jsimd_rgb_gray_convert_mmx; break; } if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_rgb_gray_convert_sse2)) sse2fct(cinfo->image_width, input_buf, output_buf, output_row, num_rows); else if (simd_support & JSIMD_MMX) mmxfct(cinfo->image_width, input_buf, output_buf, output_row, num_rows); } GLOBAL(void) jsimd_ycc_rgb_convert (j_decompress_ptr cinfo, JSAMPIMAGE input_buf, JDIMENSION input_row, JSAMPARRAY output_buf, int num_rows) { void (*sse2fct)(JDIMENSION, JSAMPIMAGE, JDIMENSION, JSAMPARRAY, int); void (*mmxfct)(JDIMENSION, JSAMPIMAGE, JDIMENSION, JSAMPARRAY, int); switch(cinfo->out_color_space) { case JCS_EXT_RGB: sse2fct=jsimd_ycc_extrgb_convert_sse2; mmxfct=jsimd_ycc_extrgb_convert_mmx; break; case JCS_EXT_RGBX: case JCS_EXT_RGBA: sse2fct=jsimd_ycc_extrgbx_convert_sse2; mmxfct=jsimd_ycc_extrgbx_convert_mmx; break; case JCS_EXT_BGR: sse2fct=jsimd_ycc_extbgr_convert_sse2; mmxfct=jsimd_ycc_extbgr_convert_mmx; break; case JCS_EXT_BGRX: case JCS_EXT_BGRA: sse2fct=jsimd_ycc_extbgrx_convert_sse2; mmxfct=jsimd_ycc_extbgrx_convert_mmx; break; case JCS_EXT_XBGR: case JCS_EXT_ABGR: sse2fct=jsimd_ycc_extxbgr_convert_sse2; mmxfct=jsimd_ycc_extxbgr_convert_mmx; break; case JCS_EXT_XRGB: case JCS_EXT_ARGB: sse2fct=jsimd_ycc_extxrgb_convert_sse2; mmxfct=jsimd_ycc_extxrgb_convert_mmx; break; default: sse2fct=jsimd_ycc_rgb_convert_sse2; mmxfct=jsimd_ycc_rgb_convert_mmx; break; } if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_ycc_rgb_convert_sse2)) sse2fct(cinfo->output_width, input_buf, input_row, output_buf, num_rows); else if (simd_support & JSIMD_MMX) mmxfct(cinfo->output_width, input_buf, input_row, output_buf, num_rows); } GLOBAL(void) 
jsimd_ycc_rgb565_convert (j_decompress_ptr cinfo, JSAMPIMAGE input_buf, JDIMENSION input_row, JSAMPARRAY output_buf, int num_rows) { } GLOBAL(int) jsimd_can_h2v2_downsample (void) { init_simd(); /* The code is optimised for these values only */ if (BITS_IN_JSAMPLE != 8) return 0; if (sizeof(JDIMENSION) != 4) return 0; if (simd_support & JSIMD_SSE2) return 1; if (simd_support & JSIMD_MMX) return 1; return 0; } GLOBAL(int) jsimd_can_h2v1_downsample (void) { init_simd(); /* The code is optimised for these values only */ if (BITS_IN_JSAMPLE != 8) return 0; if (sizeof(JDIMENSION) != 4) return 0; if (simd_support & JSIMD_SSE2) return 1; if (simd_support & JSIMD_MMX) return 1; return 0; } GLOBAL(void) jsimd_h2v2_downsample (j_compress_ptr cinfo, jpeg_component_info * compptr, JSAMPARRAY input_data, JSAMPARRAY output_data) { if (simd_support & JSIMD_SSE2) jsimd_h2v2_downsample_sse2(cinfo->image_width, cinfo->max_v_samp_factor, compptr->v_samp_factor, compptr->width_in_blocks, input_data, output_data); else if (simd_support & JSIMD_MMX) jsimd_h2v2_downsample_mmx(cinfo->image_width, cinfo->max_v_samp_factor, compptr->v_samp_factor, compptr->width_in_blocks, input_data, output_data); } GLOBAL(void) jsimd_h2v1_downsample (j_compress_ptr cinfo, jpeg_component_info * compptr, JSAMPARRAY input_data, JSAMPARRAY output_data) { if (simd_support & JSIMD_SSE2) jsimd_h2v1_downsample_sse2(cinfo->image_width, cinfo->max_v_samp_factor, compptr->v_samp_factor, compptr->width_in_blocks, input_data, output_data); else if (simd_support & JSIMD_MMX) jsimd_h2v1_downsample_mmx(cinfo->image_width, cinfo->max_v_samp_factor, compptr->v_samp_factor, compptr->width_in_blocks, input_data, output_data); } GLOBAL(int) jsimd_can_h2v2_upsample (void) { init_simd(); /* The code is optimised for these values only */ if (BITS_IN_JSAMPLE != 8) return 0; if (sizeof(JDIMENSION) != 4) return 0; if (simd_support & JSIMD_SSE2) return 1; if (simd_support & JSIMD_MMX) return 1; return 0; } GLOBAL(int) jsimd_can_h2v1_upsample (void) { init_simd(); /* The code is optimised for these values only */ if (BITS_IN_JSAMPLE != 8) return 0; if (sizeof(JDIMENSION) != 4) return 0; if (simd_support & JSIMD_SSE2) return 1; if (simd_support & JSIMD_MMX) return 1; return 0; } GLOBAL(void) jsimd_h2v2_upsample (j_decompress_ptr cinfo, jpeg_component_info * compptr, JSAMPARRAY input_data, JSAMPARRAY * output_data_ptr) { if (simd_support & JSIMD_SSE2) jsimd_h2v2_upsample_sse2(cinfo->max_v_samp_factor, cinfo->output_width, input_data, output_data_ptr); else if (simd_support & JSIMD_MMX) jsimd_h2v2_upsample_mmx(cinfo->max_v_samp_factor, cinfo->output_width, input_data, output_data_ptr); } GLOBAL(void) jsimd_h2v1_upsample (j_decompress_ptr cinfo, jpeg_component_info * compptr, JSAMPARRAY input_data, JSAMPARRAY * output_data_ptr) { if (simd_support & JSIMD_SSE2) jsimd_h2v1_upsample_sse2(cinfo->max_v_samp_factor, cinfo->output_width, input_data, output_data_ptr); else if (simd_support & JSIMD_MMX) jsimd_h2v1_upsample_mmx(cinfo->max_v_samp_factor, cinfo->output_width, input_data, output_data_ptr); } GLOBAL(int) jsimd_can_h2v2_fancy_upsample (void) { init_simd(); /* The code is optimised for these values only */ if (BITS_IN_JSAMPLE != 8) return 0; if (sizeof(JDIMENSION) != 4) return 0; if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_fancy_upsample_sse2)) return 1; if (simd_support & JSIMD_MMX) return 1; return 0; } GLOBAL(int) jsimd_can_h2v1_fancy_upsample (void) { init_simd(); /* The code is optimised for these values only */ if (BITS_IN_JSAMPLE != 8) 
return 0; if (sizeof(JDIMENSION) != 4) return 0; if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_fancy_upsample_sse2)) return 1; if (simd_support & JSIMD_MMX) return 1; return 0; } GLOBAL(void) jsimd_h2v2_fancy_upsample (j_decompress_ptr cinfo, jpeg_component_info * compptr, JSAMPARRAY input_data, JSAMPARRAY * output_data_ptr) { if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_fancy_upsample_sse2)) jsimd_h2v2_fancy_upsample_sse2(cinfo->max_v_samp_factor, compptr->downsampled_width, input_data, output_data_ptr); else if (simd_support & JSIMD_MMX) jsimd_h2v2_fancy_upsample_mmx(cinfo->max_v_samp_factor, compptr->downsampled_width, input_data, output_data_ptr); } GLOBAL(void) jsimd_h2v1_fancy_upsample (j_decompress_ptr cinfo, jpeg_component_info * compptr, JSAMPARRAY input_data, JSAMPARRAY * output_data_ptr) { if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_fancy_upsample_sse2)) jsimd_h2v1_fancy_upsample_sse2(cinfo->max_v_samp_factor, compptr->downsampled_width, input_data, output_data_ptr); else if (simd_support & JSIMD_MMX) jsimd_h2v1_fancy_upsample_mmx(cinfo->max_v_samp_factor, compptr->downsampled_width, input_data, output_data_ptr); } GLOBAL(int) jsimd_can_h2v2_merged_upsample (void) { init_simd(); /* The code is optimised for these values only */ if (BITS_IN_JSAMPLE != 8) return 0; if (sizeof(JDIMENSION) != 4) return 0; if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_merged_upsample_sse2)) return 1; if (simd_support & JSIMD_MMX) return 1; return 0; } GLOBAL(int) jsimd_can_h2v1_merged_upsample (void) { init_simd(); /* The code is optimised for these values only */ if (BITS_IN_JSAMPLE != 8) return 0; if (sizeof(JDIMENSION) != 4) return 0; if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_merged_upsample_sse2)) return 1; if (simd_support & JSIMD_MMX) return 1; return 0; } GLOBAL(void) jsimd_h2v2_merged_upsample (j_decompress_ptr cinfo, JSAMPIMAGE input_buf, JDIMENSION in_row_group_ctr, JSAMPARRAY output_buf) { void (*sse2fct)(JDIMENSION, JSAMPIMAGE, JDIMENSION, JSAMPARRAY); void (*mmxfct)(JDIMENSION, JSAMPIMAGE, JDIMENSION, JSAMPARRAY); switch(cinfo->out_color_space) { case JCS_EXT_RGB: sse2fct=jsimd_h2v2_extrgb_merged_upsample_sse2; mmxfct=jsimd_h2v2_extrgb_merged_upsample_mmx; break; case JCS_EXT_RGBX: case JCS_EXT_RGBA: sse2fct=jsimd_h2v2_extrgbx_merged_upsample_sse2; mmxfct=jsimd_h2v2_extrgbx_merged_upsample_mmx; break; case JCS_EXT_BGR: sse2fct=jsimd_h2v2_extbgr_merged_upsample_sse2; mmxfct=jsimd_h2v2_extbgr_merged_upsample_mmx; break; case JCS_EXT_BGRX: case JCS_EXT_BGRA: sse2fct=jsimd_h2v2_extbgrx_merged_upsample_sse2; mmxfct=jsimd_h2v2_extbgrx_merged_upsample_mmx; break; case JCS_EXT_XBGR: case JCS_EXT_ABGR: sse2fct=jsimd_h2v2_extxbgr_merged_upsample_sse2; mmxfct=jsimd_h2v2_extxbgr_merged_upsample_mmx; break; case JCS_EXT_XRGB: case JCS_EXT_ARGB: sse2fct=jsimd_h2v2_extxrgb_merged_upsample_sse2; mmxfct=jsimd_h2v2_extxrgb_merged_upsample_mmx; break; default: sse2fct=jsimd_h2v2_merged_upsample_sse2; mmxfct=jsimd_h2v2_merged_upsample_mmx; break; } if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_merged_upsample_sse2)) sse2fct(cinfo->output_width, input_buf, in_row_group_ctr, output_buf); else if (simd_support & JSIMD_MMX) mmxfct(cinfo->output_width, input_buf, in_row_group_ctr, output_buf); } GLOBAL(void) jsimd_h2v1_merged_upsample (j_decompress_ptr cinfo, JSAMPIMAGE input_buf, JDIMENSION in_row_group_ctr, JSAMPARRAY output_buf) { void (*sse2fct)(JDIMENSION, JSAMPIMAGE, JDIMENSION, JSAMPARRAY); void (*mmxfct)(JDIMENSION, JSAMPIMAGE, 
JDIMENSION, JSAMPARRAY); switch(cinfo->out_color_space) { case JCS_EXT_RGB: sse2fct=jsimd_h2v1_extrgb_merged_upsample_sse2; mmxfct=jsimd_h2v1_extrgb_merged_upsample_mmx; break; case JCS_EXT_RGBX: case JCS_EXT_RGBA: sse2fct=jsimd_h2v1_extrgbx_merged_upsample_sse2; mmxfct=jsimd_h2v1_extrgbx_merged_upsample_mmx; break; case JCS_EXT_BGR: sse2fct=jsimd_h2v1_extbgr_merged_upsample_sse2; mmxfct=jsimd_h2v1_extbgr_merged_upsample_mmx; break; case JCS_EXT_BGRX: case JCS_EXT_BGRA: sse2fct=jsimd_h2v1_extbgrx_merged_upsample_sse2; mmxfct=jsimd_h2v1_extbgrx_merged_upsample_mmx; break; case JCS_EXT_XBGR: case JCS_EXT_ABGR: sse2fct=jsimd_h2v1_extxbgr_merged_upsample_sse2; mmxfct=jsimd_h2v1_extxbgr_merged_upsample_mmx; break; case JCS_EXT_XRGB: case JCS_EXT_ARGB: sse2fct=jsimd_h2v1_extxrgb_merged_upsample_sse2; mmxfct=jsimd_h2v1_extxrgb_merged_upsample_mmx; break; default: sse2fct=jsimd_h2v1_merged_upsample_sse2; mmxfct=jsimd_h2v1_merged_upsample_mmx; break; } if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_merged_upsample_sse2)) sse2fct(cinfo->output_width, input_buf, in_row_group_ctr, output_buf); else if (simd_support & JSIMD_MMX) mmxfct(cinfo->output_width, input_buf, in_row_group_ctr, output_buf); } GLOBAL(int) jsimd_can_convsamp (void) { init_simd(); /* The code is optimised for these values only */ if (DCTSIZE != 8) return 0; if (BITS_IN_JSAMPLE != 8) return 0; if (sizeof(JDIMENSION) != 4) return 0; if (sizeof(DCTELEM) != 2) return 0; if (simd_support & JSIMD_SSE2) return 1; if (simd_support & JSIMD_MMX) return 1; return 0; } GLOBAL(int) jsimd_can_convsamp_float (void) { init_simd(); /* The code is optimised for these values only */ if (DCTSIZE != 8) return 0; if (BITS_IN_JSAMPLE != 8) return 0; if (sizeof(JDIMENSION) != 4) return 0; if (sizeof(FAST_FLOAT) != 4) return 0; if (simd_support & JSIMD_SSE2) return 1; if (simd_support & JSIMD_SSE) return 1; if (simd_support & JSIMD_3DNOW) return 1; return 0; } GLOBAL(void) jsimd_convsamp (JSAMPARRAY sample_data, JDIMENSION start_col, DCTELEM * workspace) { if (simd_support & JSIMD_SSE2) jsimd_convsamp_sse2(sample_data, start_col, workspace); else if (simd_support & JSIMD_MMX) jsimd_convsamp_mmx(sample_data, start_col, workspace); } GLOBAL(void) jsimd_convsamp_float (JSAMPARRAY sample_data, JDIMENSION start_col, FAST_FLOAT * workspace) { if (simd_support & JSIMD_SSE2) jsimd_convsamp_float_sse2(sample_data, start_col, workspace); else if (simd_support & JSIMD_SSE) jsimd_convsamp_float_sse(sample_data, start_col, workspace); else if (simd_support & JSIMD_3DNOW) jsimd_convsamp_float_3dnow(sample_data, start_col, workspace); } GLOBAL(int) jsimd_can_fdct_islow (void) { init_simd(); /* The code is optimised for these values only */ if (DCTSIZE != 8) return 0; if (sizeof(DCTELEM) != 2) return 0; if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_fdct_islow_sse2)) return 1; if (simd_support & JSIMD_MMX) return 1; return 0; } GLOBAL(int) jsimd_can_fdct_ifast (void) { init_simd(); /* The code is optimised for these values only */ if (DCTSIZE != 8) return 0; if (sizeof(DCTELEM) != 2) return 0; if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_fdct_ifast_sse2)) return 1; if (simd_support & JSIMD_MMX) return 1; return 0; } GLOBAL(int) jsimd_can_fdct_float (void) { init_simd(); /* The code is optimised for these values only */ if (DCTSIZE != 8) return 0; if (sizeof(FAST_FLOAT) != 4) return 0; if ((simd_support & JSIMD_SSE) && IS_ALIGNED_SSE(jconst_fdct_float_sse)) return 1; if (simd_support & JSIMD_3DNOW) return 1; return 0; } GLOBAL(void) 
jsimd_fdct_islow (DCTELEM * data) { if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_fdct_islow_sse2)) jsimd_fdct_islow_sse2(data); else if (simd_support & JSIMD_MMX) jsimd_fdct_islow_mmx(data); } GLOBAL(void) jsimd_fdct_ifast (DCTELEM * data) { if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_fdct_ifast_sse2)) jsimd_fdct_ifast_sse2(data); else if (simd_support & JSIMD_MMX) jsimd_fdct_ifast_mmx(data); } GLOBAL(void) jsimd_fdct_float (FAST_FLOAT * data) { if ((simd_support & JSIMD_SSE) && IS_ALIGNED_SSE(jconst_fdct_float_sse)) jsimd_fdct_float_sse(data); else if (simd_support & JSIMD_3DNOW) jsimd_fdct_float_3dnow(data); } GLOBAL(int) jsimd_can_quantize (void) { init_simd(); /* The code is optimised for these values only */ if (DCTSIZE != 8) return 0; if (sizeof(JCOEF) != 2) return 0; if (sizeof(DCTELEM) != 2) return 0; if (simd_support & JSIMD_SSE2) return 1; if (simd_support & JSIMD_MMX) return 1; return 0; } GLOBAL(int) jsimd_can_quantize_float (void) { init_simd(); /* The code is optimised for these values only */ if (DCTSIZE != 8) return 0; if (sizeof(JCOEF) != 2) return 0; if (sizeof(FAST_FLOAT) != 4) return 0; if (simd_support & JSIMD_SSE2) return 1; if (simd_support & JSIMD_SSE) return 1; if (simd_support & JSIMD_3DNOW) return 1; return 0; } GLOBAL(void) jsimd_quantize (JCOEFPTR coef_block, DCTELEM * divisors, DCTELEM * workspace) { if (simd_support & JSIMD_SSE2) jsimd_quantize_sse2(coef_block, divisors, workspace); else if (simd_support & JSIMD_MMX) jsimd_quantize_mmx(coef_block, divisors, workspace); } GLOBAL(void) jsimd_quantize_float (JCOEFPTR coef_block, FAST_FLOAT * divisors, FAST_FLOAT * workspace) { if (simd_support & JSIMD_SSE2) jsimd_quantize_float_sse2(coef_block, divisors, workspace); else if (simd_support & JSIMD_SSE) jsimd_quantize_float_sse(coef_block, divisors, workspace); else if (simd_support & JSIMD_3DNOW) jsimd_quantize_float_3dnow(coef_block, divisors, workspace); } GLOBAL(int) jsimd_can_idct_2x2 (void) { init_simd(); /* The code is optimised for these values only */ if (DCTSIZE != 8) return 0; if (sizeof(JCOEF) != 2) return 0; if (BITS_IN_JSAMPLE != 8) return 0; if (sizeof(JDIMENSION) != 4) return 0; if (sizeof(ISLOW_MULT_TYPE) != 2) return 0; if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_idct_red_sse2)) return 1; if (simd_support & JSIMD_MMX) return 1; return 0; } GLOBAL(int) jsimd_can_idct_4x4 (void) { init_simd(); /* The code is optimised for these values only */ if (DCTSIZE != 8) return 0; if (sizeof(JCOEF) != 2) return 0; if (BITS_IN_JSAMPLE != 8) return 0; if (sizeof(JDIMENSION) != 4) return 0; if (sizeof(ISLOW_MULT_TYPE) != 2) return 0; if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_idct_red_sse2)) return 1; if (simd_support & JSIMD_MMX) return 1; return 0; } GLOBAL(void) jsimd_idct_2x2 (j_decompress_ptr cinfo, jpeg_component_info * compptr, JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col) { if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_idct_red_sse2)) jsimd_idct_2x2_sse2(compptr->dct_table, coef_block, output_buf, output_col); else if (simd_support & JSIMD_MMX) jsimd_idct_2x2_mmx(compptr->dct_table, coef_block, output_buf, output_col); } GLOBAL(void) jsimd_idct_4x4 (j_decompress_ptr cinfo, jpeg_component_info * compptr, JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col) { if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_idct_red_sse2)) jsimd_idct_4x4_sse2(compptr->dct_table, coef_block, output_buf, output_col); else if (simd_support & JSIMD_MMX)
jsimd_idct_4x4_mmx(compptr->dct_table, coef_block, output_buf, output_col); } GLOBAL(int) jsimd_can_idct_islow (void) { init_simd(); /* The code is optimised for these values only */ if (DCTSIZE != 8) return 0; if (sizeof(JCOEF) != 2) return 0; if (BITS_IN_JSAMPLE != 8) return 0; if (sizeof(JDIMENSION) != 4) return 0; if (sizeof(ISLOW_MULT_TYPE) != 2) return 0; if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_idct_islow_sse2)) return 1; if (simd_support & JSIMD_MMX) return 1; return 0; } GLOBAL(int) jsimd_can_idct_ifast (void) { init_simd(); /* The code is optimised for these values only */ if (DCTSIZE != 8) return 0; if (sizeof(JCOEF) != 2) return 0; if (BITS_IN_JSAMPLE != 8) return 0; if (sizeof(JDIMENSION) != 4) return 0; if (sizeof(IFAST_MULT_TYPE) != 2) return 0; if (IFAST_SCALE_BITS != 2) return 0; if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_idct_ifast_sse2)) return 1; if (simd_support & JSIMD_MMX) return 1; return 0; } GLOBAL(int) jsimd_can_idct_float (void) { init_simd(); if (DCTSIZE != 8) return 0; if (sizeof(JCOEF) != 2) return 0; if (BITS_IN_JSAMPLE != 8) return 0; if (sizeof(JDIMENSION) != 4) return 0; if (sizeof(FAST_FLOAT) != 4) return 0; if (sizeof(FLOAT_MULT_TYPE) != 4) return 0; if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_idct_float_sse2)) return 1; if ((simd_support & JSIMD_SSE) && IS_ALIGNED_SSE(jconst_idct_float_sse)) return 1; if (simd_support & JSIMD_3DNOW) return 1; return 0; } GLOBAL(void) jsimd_idct_islow (j_decompress_ptr cinfo, jpeg_component_info * compptr, JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col) { if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_idct_islow_sse2)) jsimd_idct_islow_sse2(compptr->dct_table, coef_block, output_buf, output_col); else if (simd_support & JSIMD_MMX) jsimd_idct_islow_mmx(compptr->dct_table, coef_block, output_buf, output_col); } GLOBAL(void) jsimd_idct_ifast (j_decompress_ptr cinfo, jpeg_component_info * compptr, JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col) { if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_idct_ifast_sse2)) jsimd_idct_ifast_sse2(compptr->dct_table, coef_block, output_buf, output_col); else if (simd_support & JSIMD_MMX) jsimd_idct_ifast_mmx(compptr->dct_table, coef_block, output_buf, output_col); } GLOBAL(void) jsimd_idct_float (j_decompress_ptr cinfo, jpeg_component_info * compptr, JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col) { if ((simd_support & JSIMD_SSE2) && IS_ALIGNED_SSE(jconst_idct_float_sse2)) jsimd_idct_float_sse2(compptr->dct_table, coef_block, output_buf, output_col); else if ((simd_support & JSIMD_SSE) && IS_ALIGNED_SSE(jconst_idct_float_sse)) jsimd_idct_float_sse(compptr->dct_table, coef_block, output_buf, output_col); else if (simd_support & JSIMD_3DNOW) jsimd_idct_float_3dnow(compptr->dct_table, coef_block, output_buf, output_col); }
cedewey/sol
wp-content/themes/gulp-dev/node_modules/mozjpeg/f47f5773-89ce-4b83-810e-48145ecc3389/simd/jsimd_i386.c
C
gpl-3.0
28,380
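One detail worth calling out in init_simd() above: the JSIMD_FORCE* environment variables intersect with the detected CPU mask (simd_support &= ...), so they can only narrow the set of usable code paths, never enable an instruction set the CPU lacks. A tiny standalone sketch of that masking logic (the SIM_* flag values here are made up for illustration; the real JSIMD_* constants live in the library's headers):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SIM_MMX  0x01u	/* hypothetical flag values */
#define SIM_SSE2 0x08u

int main(void)
{
	/* Pretend the CPU probe reported both MMX and SSE2. */
	unsigned support = SIM_MMX | SIM_SSE2;

	/* Mirrors init_simd(): forcing MMX masks away everything else. */
	const char *env = getenv("JSIMD_FORCEMMX");
	if (env != NULL && strcmp(env, "1") == 0)
		support &= SIM_MMX;

	printf("MMX:  %s\n", (support & SIM_MMX) ? "usable" : "masked");
	printf("SSE2: %s\n", (support & SIM_SSE2) ? "usable" : "masked");
	return 0;
}

Run it normally and both paths report usable; run it with JSIMD_FORCEMMX=1 and the SSE2 path is masked, which is how the dispatchers above end up taking their jsimd_*_mmx branches.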
/* linpack/csvdc.f -- translated by f2c (version 20050501). You must link the resulting object file with libf2c: on Microsoft Windows system, link with libf2c.lib; on Linux or Unix systems, link with .../path/to/libf2c.a -lm or, if you install libf2c.a in a standard place, with -lf2c -lm -- in that order, at the end of the command line, as in cc *.o -lf2c -lm Source for libf2c is in /netlib/f2c/libf2c.zip, e.g., http://www.netlib.org/f2c/libf2c.zip */ #ifdef __cplusplus extern "C" { #endif #include "v3p_netlib.h" /* Table of constant values */ static integer c__1 = 1; static complex c_b8 = {(float)1.,(float)0.}; static complex c_b53 = {(float)-1.,(float)0.}; /*< subroutine csvdc(x,ldx,n,p,s,e,u,ldu,v,ldv,work,job,info) >*/ /* Subroutine */ int csvdc_(complex *x, integer *ldx, integer *n, integer *p, complex *s, complex *e, complex *u, integer *ldu, complex *v, integer *ldv, complex *work, integer *job, integer *info) { /* System generated locals */ integer x_dim1, x_offset, u_dim1, u_offset, v_dim1, v_offset, i__1, i__2, i__3, i__4; real r__1, r__2, r__3, r__4; complex q__1, q__2, q__3; /* Builtin functions */ double r_imag(complex *), c_abs(complex *); void c_div(complex *, complex *, complex *), r_cnjg(complex *, complex *); double sqrt(doublereal); /* Local variables */ real b, c__, f, g; integer i__, j, k, l=0, m; complex r__, t; real t1, el; integer kk; real cs; integer ll, mm, ls=0; real sl; integer lu; real sm, sn; integer lm1, mm1, lp1, mp1, nct, ncu, lls, nrt; real emm1, smm1; integer kase, jobu, iter; real test; integer nctp1, nrtp1; extern /* Subroutine */ int cscal_(integer *, complex *, complex *, integer *); real scale; extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer *, complex *, integer *); real shift; extern /* Subroutine */ int cswap_(integer *, complex *, integer *, complex *, integer *); integer maxit; extern /* Subroutine */ int caxpy_(integer *, complex *, complex *, integer *, complex *, integer *), csrot_(integer *, complex *, integer *, complex *, integer *, real *, real *); logical wantu, wantv; extern /* Subroutine */ int srotg_(real *, real *, real *, real *); real ztest; extern doublereal scnrm2_(integer *, complex *, integer *); /*< integer ldx,n,p,ldu,ldv,job,info >*/ /*< complex x(ldx,1),s(1),e(1),u(ldu,1),v(ldv,1),work(1) >*/ /* csvdc is a subroutine to reduce a complex nxp matrix x by */ /* unitary transformations u and v to diagonal form. the */ /* diagonal elements s(i) are the singular values of x. the */ /* columns of u are the corresponding left singular vectors, */ /* and the columns of v the right singular vectors. */ /* on entry */ /* x complex(ldx,p), where ldx.ge.n. */ /* x contains the matrix whose singular value */ /* decomposition is to be computed. x is */ /* destroyed by csvdc. */ /* ldx integer. */ /* ldx is the leading dimension of the array x. */ /* n integer. */ /* n is the number of rows of the matrix x. */ /* p integer. */ /* p is the number of columns of the matrix x. */ /* ldu integer. */ /* ldu is the leading dimension of the array u */ /* (see below). */ /* ldv integer. */ /* ldv is the leading dimension of the array v */ /* (see below). */ /* work complex(n). */ /* work is a scratch array. */ /* job integer. */ /* job controls the computation of the singular */ /* vectors. it has the decimal expansion ab */ /* with the following meaning */ /* a.eq.0 do not compute the left singular */ /* vectors. */ /* a.eq.1 return the n left singular vectors */ /* in u. 
*/ /* a.ge.2 returns the first min(n,p) */ /* left singular vectors in u. */ /* b.eq.0 do not compute the right singular */ /* vectors. */ /* b.eq.1 return the right singular vectors */ /* in v. */ /* on return */ /* s complex(mm), where mm=min(n+1,p). */ /* the first min(n,p) entries of s contain the */ /* singular values of x arranged in descending */ /* order of magnitude. */ /* e complex(p). */ /* e ordinarily contains zeros. however see the */ /* discussion of info for exceptions. */ /* u complex(ldu,k), where ldu.ge.n. if joba.eq.1 then */ /* k.eq.n, if joba.ge.2 then */ /* k.eq.min(n,p). */ /* u contains the matrix of left singular vectors. */ /* u is not referenced if joba.eq.0. if n.le.p */ /* or if joba.gt.2, then u may be identified with x */ /* in the subroutine call. */ /* v complex(ldv,p), where ldv.ge.p. */ /* v contains the matrix of right singular vectors. */ /* v is not referenced if jobb.eq.0. if p.le.n, */ /* then v may be identified with x in the */ /* subroutine call. */ /* info integer. */ /* the singular values (and their corresponding */ /* singular vectors) s(info+1),s(info+2),...,s(m) */ /* are correct (here m=min(n,p)). thus if */ /* info.eq.0, all the singular values and their */ /* vectors are correct. in any event, the matrix */ /* b = ctrans(u)*x*v is the bidiagonal matrix */ /* with the elements of s on its diagonal and the */ /* elements of e on its super-diagonal (ctrans(u) */ /* is the conjugate-transpose of u). thus the */ /* singular values of x and b are the same. */ /* linpack. this version dated 03/19/79 . */ /* correction to shift calculation made 2/85. */ /* g.w. stewart, university of maryland, argonne national lab. */ /* csvdc uses the following functions and subprograms. */ /* external csrot */ /* blas caxpy,cdotc,cscal,cswap,scnrm2,srotg */ /* fortran abs,aimag,amax1,cabs,cmplx */ /* fortran conjg,max0,min0,mod,real,sqrt */ /* internal variables */ /*< >*/ /*< complex cdotc,t,r >*/ /*< >*/ /*< logical wantu,wantv >*/ /*< complex csign,zdum,zdum1,zdum2 >*/ /*< real cabs1 >*/ /*< cabs1(zdum) = abs(real(zdum)) + abs(aimag(zdum)) >*/ /*< csign(zdum1,zdum2) = cabs(zdum1)*(zdum2/cabs(zdum2)) >*/ /* set the maximum number of iterations. */ /*< maxit = 1000 >*/ /* Parameter adjustments */ x_dim1 = *ldx; x_offset = 1 + x_dim1; x -= x_offset; --s; --e; u_dim1 = *ldu; u_offset = 1 + u_dim1; u -= u_offset; v_dim1 = *ldv; v_offset = 1 + v_dim1; v -= v_offset; --work; /* Function Body */ maxit = 1000; /* determine what is to be computed. */ /*< wantu = .false. >*/ wantu = FALSE_; /*< wantv = .false. >*/ wantv = FALSE_; /*< jobu = mod(job,100)/10 >*/ jobu = *job % 100 / 10; /*< ncu = n >*/ ncu = *n; /*< if (jobu .gt. 1) ncu = min0(n,p) >*/ if (jobu > 1) { ncu = min(*n,*p); } /*< if (jobu .ne. 0) wantu = .true. >*/ if (jobu != 0) { wantu = TRUE_; } /*< if (mod(job,10) .ne. 0) wantv = .true. >*/ if (*job % 10 != 0) { wantv = TRUE_; } /* reduce x to bidiagonal form, storing the diagonal elements */ /* in s and the super-diagonal elements in e. */ /*< info = 0 >*/ *info = 0; /*< nct = min0(n-1,p) >*/ /* Computing MIN */ i__1 = *n - 1; nct = min(i__1,*p); /*< nrt = max0(0,min0(p-2,n)) >*/ /* Computing MAX */ /* Computing MIN */ i__3 = *p - 2; i__1 = 0, i__2 = min(i__3,*n); nrt = max(i__1,i__2); /*< lu = max0(nct,nrt) >*/ lu = max(nct,nrt); /*< if (lu .lt. 1) go to 170 >*/ if (lu < 1) { goto L170; } /*< do 160 l = 1, lu >*/ i__1 = lu; for (l = 1; l <= i__1; ++l) { /*< lp1 = l + 1 >*/ lp1 = l + 1; /*< if (l .gt.
nct) go to 20 >*/ if (l > nct) { goto L20; } /* compute the transformation for the l-th column and */ /* place the l-th diagonal in s(l). */ /*< s(l) = cmplx(scnrm2(n-l+1,x(l,l),1),0.0e0) >*/ i__2 = l; i__3 = *n - l + 1; r__1 = scnrm2_(&i__3, &x[l + l * x_dim1], &c__1); q__1.r = r__1, q__1.i = (float)0.; s[i__2].r = q__1.r, s[i__2].i = q__1.i; /*< if (cabs1(s(l)) .eq. 0.0e0) go to 10 >*/ i__2 = l; if ((r__1 = s[i__2].r, dabs(r__1)) + (r__2 = r_imag(&s[l]), dabs(r__2) ) == (float)0.) { goto L10; } /*< if (cabs1(x(l,l)) .ne. 0.0e0) s(l) = csign(s(l),x(l,l)) >*/ i__2 = l + l * x_dim1; if ((r__1 = x[i__2].r, dabs(r__1)) + (r__2 = r_imag(&x[l + l * x_dim1] ), dabs(r__2)) != (float)0.) { i__3 = l; r__3 = c_abs(&s[l]); i__4 = l + l * x_dim1; r__4 = c_abs(&x[l + l * x_dim1]); q__2.r = x[i__4].r / r__4, q__2.i = x[i__4].i / r__4; q__1.r = r__3 * q__2.r, q__1.i = r__3 * q__2.i; s[i__3].r = q__1.r, s[i__3].i = q__1.i; } /*< call cscal(n-l+1,1.0e0/s(l),x(l,l),1) >*/ i__2 = *n - l + 1; c_div(&q__1, &c_b8, &s[l]); cscal_(&i__2, &q__1, &x[l + l * x_dim1], &c__1); /*< x(l,l) = (1.0e0,0.0e0) + x(l,l) >*/ i__2 = l + l * x_dim1; i__3 = l + l * x_dim1; q__1.r = x[i__3].r + (float)1., q__1.i = x[i__3].i + (float)0.; x[i__2].r = q__1.r, x[i__2].i = q__1.i; /*< 10 continue >*/ L10: /*< s(l) = -s(l) >*/ i__2 = l; i__3 = l; q__1.r = -s[i__3].r, q__1.i = -s[i__3].i; s[i__2].r = q__1.r, s[i__2].i = q__1.i; /*< 20 continue >*/ L20: /*< if (p .lt. lp1) go to 50 >*/ if (*p < lp1) { goto L50; } /*< do 40 j = lp1, p >*/ i__2 = *p; for (j = lp1; j <= i__2; ++j) { /*< if (l .gt. nct) go to 30 >*/ if (l > nct) { goto L30; } /*< if (cabs1(s(l)) .eq. 0.0e0) go to 30 >*/ i__3 = l; if ((r__1 = s[i__3].r, dabs(r__1)) + (r__2 = r_imag(&s[l]), dabs( r__2)) == (float)0.) { goto L30; } /* apply the transformation. */ /*< t = -cdotc(n-l+1,x(l,l),1,x(l,j),1)/x(l,l) >*/ i__3 = *n - l + 1; cdotc_(&q__3, &i__3, &x[l + l * x_dim1], &c__1, &x[l + j * x_dim1] , &c__1); q__2.r = -q__3.r, q__2.i = -q__3.i; c_div(&q__1, &q__2, &x[l + l * x_dim1]); t.r = q__1.r, t.i = q__1.i; /*< call caxpy(n-l+1,t,x(l,l),1,x(l,j),1) >*/ i__3 = *n - l + 1; caxpy_(&i__3, &t, &x[l + l * x_dim1], &c__1, &x[l + j * x_dim1], & c__1); /*< 30 continue >*/ L30: /* place the l-th row of x into e for the */ /* subsequent calculation of the row transformation. */ /*< e(j) = conjg(x(l,j)) >*/ i__3 = j; r_cnjg(&q__1, &x[l + j * x_dim1]); e[i__3].r = q__1.r, e[i__3].i = q__1.i; /*< 40 continue >*/ /* L40: */ } /*< 50 continue >*/ L50: /*< if (.not.wantu .or. l .gt. nct) go to 70 >*/ if (! wantu || l > nct) { goto L70; } /* place the transformation in u for subsequent back */ /* multiplication. */ /*< do 60 i = l, n >*/ i__2 = *n; for (i__ = l; i__ <= i__2; ++i__) { /*< u(i,l) = x(i,l) >*/ i__3 = i__ + l * u_dim1; i__4 = i__ + l * x_dim1; u[i__3].r = x[i__4].r, u[i__3].i = x[i__4].i; /*< 60 continue >*/ /* L60: */ } /*< 70 continue >*/ L70: /*< if (l .gt. nrt) go to 150 >*/ if (l > nrt) { goto L150; } /* compute the l-th row transformation and place the */ /* l-th super-diagonal in e(l). */ /*< e(l) = cmplx(scnrm2(p-l,e(lp1),1),0.0e0) >*/ i__2 = l; i__3 = *p - l; r__1 = scnrm2_(&i__3, &e[lp1], &c__1); q__1.r = r__1, q__1.i = (float)0.; e[i__2].r = q__1.r, e[i__2].i = q__1.i; /*< if (cabs1(e(l)) .eq. 0.0e0) go to 80 >*/ i__2 = l; if ((r__1 = e[i__2].r, dabs(r__1)) + (r__2 = r_imag(&e[l]), dabs(r__2) ) == (float)0.) { goto L80; } /*< if (cabs1(e(lp1)) .ne. 
0.0e0) e(l) = csign(e(l),e(lp1)) >*/ i__2 = lp1; if ((r__1 = e[i__2].r, dabs(r__1)) + (r__2 = r_imag(&e[lp1]), dabs( r__2)) != (float)0.) { i__3 = l; r__3 = c_abs(&e[l]); i__4 = lp1; r__4 = c_abs(&e[lp1]); q__2.r = e[i__4].r / r__4, q__2.i = e[i__4].i / r__4; q__1.r = r__3 * q__2.r, q__1.i = r__3 * q__2.i; e[i__3].r = q__1.r, e[i__3].i = q__1.i; } /*< call cscal(p-l,1.0e0/e(l),e(lp1),1) >*/ i__2 = *p - l; c_div(&q__1, &c_b8, &e[l]); cscal_(&i__2, &q__1, &e[lp1], &c__1); /*< e(lp1) = (1.0e0,0.0e0) + e(lp1) >*/ i__2 = lp1; i__3 = lp1; q__1.r = e[i__3].r + (float)1., q__1.i = e[i__3].i + (float)0.; e[i__2].r = q__1.r, e[i__2].i = q__1.i; /*< 80 continue >*/ L80: /*< e(l) = -conjg(e(l)) >*/ i__2 = l; r_cnjg(&q__2, &e[l]); q__1.r = -q__2.r, q__1.i = -q__2.i; e[i__2].r = q__1.r, e[i__2].i = q__1.i; /*< if (lp1 .gt. n .or. cabs1(e(l)) .eq. 0.0e0) go to 120 >*/ i__2 = l; if (lp1 > *n || (r__1 = e[i__2].r, dabs(r__1)) + (r__2 = r_imag(&e[l]) , dabs(r__2)) == (float)0.) { goto L120; } /* apply the transformation. */ /*< do 90 i = lp1, n >*/ i__2 = *n; for (i__ = lp1; i__ <= i__2; ++i__) { /*< work(i) = (0.0e0,0.0e0) >*/ i__3 = i__; work[i__3].r = (float)0., work[i__3].i = (float)0.; /*< 90 continue >*/ /* L90: */ } /*< do 100 j = lp1, p >*/ i__2 = *p; for (j = lp1; j <= i__2; ++j) { /*< call caxpy(n-l,e(j),x(lp1,j),1,work(lp1),1) >*/ i__3 = *n - l; caxpy_(&i__3, &e[j], &x[lp1 + j * x_dim1], &c__1, &work[lp1], & c__1); /*< 100 continue >*/ /* L100: */ } /*< do 110 j = lp1, p >*/ i__2 = *p; for (j = lp1; j <= i__2; ++j) { /*< >*/ i__3 = *n - l; i__4 = j; q__3.r = -e[i__4].r, q__3.i = -e[i__4].i; c_div(&q__2, &q__3, &e[lp1]); r_cnjg(&q__1, &q__2); caxpy_(&i__3, &q__1, &work[lp1], &c__1, &x[lp1 + j * x_dim1], & c__1); /*< 110 continue >*/ /* L110: */ } /*< 120 continue >*/ L120: /*< if (.not.wantv) go to 140 >*/ if (! wantv) { goto L140; } /* place the transformation in v for subsequent */ /* back multiplication. */ /*< do 130 i = lp1, p >*/ i__2 = *p; for (i__ = lp1; i__ <= i__2; ++i__) { /*< v(i,l) = e(i) >*/ i__3 = i__ + l * v_dim1; i__4 = i__; v[i__3].r = e[i__4].r, v[i__3].i = e[i__4].i; /*< 130 continue >*/ /* L130: */ } /*< 140 continue >*/ L140: /*< 150 continue >*/ L150: /*< 160 continue >*/ /* L160: */ ; } /*< 170 continue >*/ L170: /* set up the final bidiagonal matrix of order m. */ /*< m = min0(p,n+1) >*/ /* Computing MIN */ i__1 = *p, i__2 = *n + 1; m = min(i__1,i__2); /*< nctp1 = nct + 1 >*/ nctp1 = nct + 1; /*< nrtp1 = nrt + 1 >*/ nrtp1 = nrt + 1; /*< if (nct .lt. p) s(nctp1) = x(nctp1,nctp1) >*/ if (nct < *p) { i__1 = nctp1; i__2 = nctp1 + nctp1 * x_dim1; s[i__1].r = x[i__2].r, s[i__1].i = x[i__2].i; } /*< if (n .lt. m) s(m) = (0.0e0,0.0e0) >*/ if (*n < m) { i__1 = m; s[i__1].r = (float)0., s[i__1].i = (float)0.; } /*< if (nrtp1 .lt. m) e(nrtp1) = x(nrtp1,m) >*/ if (nrtp1 < m) { i__1 = nrtp1; i__2 = nrtp1 + m * x_dim1; e[i__1].r = x[i__2].r, e[i__1].i = x[i__2].i; } /*< e(m) = (0.0e0,0.0e0) >*/ i__1 = m; e[i__1].r = (float)0., e[i__1].i = (float)0.; /* if required, generate u. */ /*< if (.not.wantu) go to 300 >*/ if (! wantu) { goto L300; } /*< if (ncu .lt.
nctp1) go to 200 >*/ if (ncu < nctp1) { goto L200; } /*< do 190 j = nctp1, ncu >*/ i__1 = ncu; for (j = nctp1; j <= i__1; ++j) { /*< do 180 i = 1, n >*/ i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { /*< u(i,j) = (0.0e0,0.0e0) >*/ i__3 = i__ + j * u_dim1; u[i__3].r = (float)0., u[i__3].i = (float)0.; /*< 180 continue >*/ /* L180: */ } /*< u(j,j) = (1.0e0,0.0e0) >*/ i__2 = j + j * u_dim1; u[i__2].r = (float)1., u[i__2].i = (float)0.; /*< 190 continue >*/ /* L190: */ } /*< 200 continue >*/ L200: /*< if (nct .lt. 1) go to 290 >*/ if (nct < 1) { goto L290; } /*< do 280 ll = 1, nct >*/ i__1 = nct; for (ll = 1; ll <= i__1; ++ll) { /*< l = nct - ll + 1 >*/ l = nct - ll + 1; /*< if (cabs1(s(l)) .eq. 0.0e0) go to 250 >*/ i__2 = l; if ((r__1 = s[i__2].r, dabs(r__1)) + (r__2 = r_imag(&s[l]), dabs(r__2) ) == (float)0.) { goto L250; } /*< lp1 = l + 1 >*/ lp1 = l + 1; /*< if (ncu .lt. lp1) go to 220 >*/ if (ncu < lp1) { goto L220; } /*< do 210 j = lp1, ncu >*/ i__2 = ncu; for (j = lp1; j <= i__2; ++j) { /*< t = -cdotc(n-l+1,u(l,l),1,u(l,j),1)/u(l,l) >*/ i__3 = *n - l + 1; cdotc_(&q__3, &i__3, &u[l + l * u_dim1], &c__1, &u[l + j * u_dim1] , &c__1); q__2.r = -q__3.r, q__2.i = -q__3.i; c_div(&q__1, &q__2, &u[l + l * u_dim1]); t.r = q__1.r, t.i = q__1.i; /*< call caxpy(n-l+1,t,u(l,l),1,u(l,j),1) >*/ i__3 = *n - l + 1; caxpy_(&i__3, &t, &u[l + l * u_dim1], &c__1, &u[l + j * u_dim1], & c__1); /*< 210 continue >*/ /* L210: */ } /*< 220 continue >*/ L220: /*< call cscal(n-l+1,(-1.0e0,0.0e0),u(l,l),1) >*/ i__2 = *n - l + 1; cscal_(&i__2, &c_b53, &u[l + l * u_dim1], &c__1); /*< u(l,l) = (1.0e0,0.0e0) + u(l,l) >*/ i__2 = l + l * u_dim1; i__3 = l + l * u_dim1; q__1.r = u[i__3].r + (float)1., q__1.i = u[i__3].i + (float)0.; u[i__2].r = q__1.r, u[i__2].i = q__1.i; /*< lm1 = l - 1 >*/ lm1 = l - 1; /*< if (lm1 .lt. 1) go to 240 >*/ if (lm1 < 1) { goto L240; } /*< do 230 i = 1, lm1 >*/ i__2 = lm1; for (i__ = 1; i__ <= i__2; ++i__) { /*< u(i,l) = (0.0e0,0.0e0) >*/ i__3 = i__ + l * u_dim1; u[i__3].r = (float)0., u[i__3].i = (float)0.; /*< 230 continue >*/ /* L230: */ } /*< 240 continue >*/ L240: /*< go to 270 >*/ goto L270; /*< 250 continue >*/ L250: /*< do 260 i = 1, n >*/ i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { /*< u(i,l) = (0.0e0,0.0e0) >*/ i__3 = i__ + l * u_dim1; u[i__3].r = (float)0., u[i__3].i = (float)0.; /*< 260 continue >*/ /* L260: */ } /*< u(l,l) = (1.0e0,0.0e0) >*/ i__2 = l + l * u_dim1; u[i__2].r = (float)1., u[i__2].i = (float)0.; /*< 270 continue >*/ L270: /*< 280 continue >*/ /* L280: */ ; } /*< 290 continue >*/ L290: /*< 300 continue >*/ L300: /* if it is required, generate v. */ /*< if (.not.wantv) go to 350 >*/ if (! wantv) { goto L350; } /*< do 340 ll = 1, p >*/ i__1 = *p; for (ll = 1; ll <= i__1; ++ll) { /*< l = p - ll + 1 >*/ l = *p - ll + 1; /*< lp1 = l + 1 >*/ lp1 = l + 1; /*< if (l .gt. nrt) go to 320 >*/ if (l > nrt) { goto L320; } /*< if (cabs1(e(l)) .eq. 0.0e0) go to 320 >*/ i__2 = l; if ((r__1 = e[i__2].r, dabs(r__1)) + (r__2 = r_imag(&e[l]), dabs(r__2) ) == (float)0.) 
{ goto L320; } /*< do 310 j = lp1, p >*/ i__2 = *p; for (j = lp1; j <= i__2; ++j) { /*< t = -cdotc(p-l,v(lp1,l),1,v(lp1,j),1)/v(lp1,l) >*/ i__3 = *p - l; cdotc_(&q__3, &i__3, &v[lp1 + l * v_dim1], &c__1, &v[lp1 + j * v_dim1], &c__1); q__2.r = -q__3.r, q__2.i = -q__3.i; c_div(&q__1, &q__2, &v[lp1 + l * v_dim1]); t.r = q__1.r, t.i = q__1.i; /*< call caxpy(p-l,t,v(lp1,l),1,v(lp1,j),1) >*/ i__3 = *p - l; caxpy_(&i__3, &t, &v[lp1 + l * v_dim1], &c__1, &v[lp1 + j * v_dim1], &c__1); /*< 310 continue >*/ /* L310: */ } /*< 320 continue >*/ L320: /*< do 330 i = 1, p >*/ i__2 = *p; for (i__ = 1; i__ <= i__2; ++i__) { /*< v(i,l) = (0.0e0,0.0e0) >*/ i__3 = i__ + l * v_dim1; v[i__3].r = (float)0., v[i__3].i = (float)0.; /*< 330 continue >*/ /* L330: */ } /*< v(l,l) = (1.0e0,0.0e0) >*/ i__2 = l + l * v_dim1; v[i__2].r = (float)1., v[i__2].i = (float)0.; /*< 340 continue >*/ /* L340: */ } /*< 350 continue >*/ L350: /* transform s and e so that they are real. */ /*< do 380 i = 1, m >*/ i__1 = m; for (i__ = 1; i__ <= i__1; ++i__) { /*< if (cabs1(s(i)) .eq. 0.0e0) go to 360 >*/ i__2 = i__; if ((r__1 = s[i__2].r, dabs(r__1)) + (r__2 = r_imag(&s[i__]), dabs( r__2)) == (float)0.) { goto L360; } /*< t = cmplx(cabs(s(i)),0.0e0) >*/ r__1 = c_abs(&s[i__]); q__1.r = r__1, q__1.i = (float)0.; t.r = q__1.r, t.i = q__1.i; /*< r = s(i)/t >*/ c_div(&q__1, &s[i__], &t); r__.r = q__1.r, r__.i = q__1.i; /*< s(i) = t >*/ i__2 = i__; s[i__2].r = t.r, s[i__2].i = t.i; /*< if (i .lt. m) e(i) = e(i)/r >*/ if (i__ < m) { i__2 = i__; c_div(&q__1, &e[i__], &r__); e[i__2].r = q__1.r, e[i__2].i = q__1.i; } /*< if (wantu) call cscal(n,r,u(1,i),1) >*/ if (wantu) { cscal_(n, &r__, &u[i__ * u_dim1 + 1], &c__1); } /*< 360 continue >*/ L360: /* ...exit */ /*< if (i .eq. m) go to 390 >*/ if (i__ == m) { goto L390; } /*< if (cabs1(e(i)) .eq. 0.0e0) go to 370 >*/ i__2 = i__; if ((r__1 = e[i__2].r, dabs(r__1)) + (r__2 = r_imag(&e[i__]), dabs( r__2)) == (float)0.) { goto L370; } /*< t = cmplx(cabs(e(i)),0.0e0) >*/ r__1 = c_abs(&e[i__]); q__1.r = r__1, q__1.i = (float)0.; t.r = q__1.r, t.i = q__1.i; /*< r = t/e(i) >*/ c_div(&q__1, &t, &e[i__]); r__.r = q__1.r, r__.i = q__1.i; /*< e(i) = t >*/ i__2 = i__; e[i__2].r = t.r, e[i__2].i = t.i; /*< s(i+1) = s(i+1)*r >*/ i__2 = i__ + 1; i__3 = i__ + 1; q__1.r = s[i__3].r * r__.r - s[i__3].i * r__.i, q__1.i = s[i__3].r * r__.i + s[i__3].i * r__.r; s[i__2].r = q__1.r, s[i__2].i = q__1.i; /*< if (wantv) call cscal(p,r,v(1,i+1),1) >*/ if (wantv) { cscal_(p, &r__, &v[(i__ + 1) * v_dim1 + 1], &c__1); } /*< 370 continue >*/ L370: /*< 380 continue >*/ /* L380: */ ; } /*< 390 continue >*/ L390: /* main iteration loop for the singular values. */ /*< mm = m >*/ mm = m; /*< iter = 0 >*/ iter = 0; /*< 400 continue >*/ L400: /* quit if all the singular values have been found. */ /* ...exit */ /*< if (m .eq. 0) go to 660 >*/ if (m == 0) { goto L660; } /* if too many iterations have been performed, set */ /* flag and return. */ /*< if (iter .lt. maxit) go to 410 >*/ if (iter < maxit) { goto L410; } /*< info = m >*/ *info = m; /* ......exit */ /*< go to 660 >*/ goto L660; /*< 410 continue >*/ L410: /* this section of the program inspects for */ /* negligible elements in the s and e arrays. on */ /* completion the variables kase and l are set as follows. */ /* kase = 1 if s(m) and e(l-1) are negligible and l.lt.m */ /* kase = 2 if s(l) is negligible and l.lt.m */ /* kase = 3 if e(l-1) is negligible, l.lt.m, and */ /* s(l), ..., s(m) are not negligible (qr step). */ /* kase = 4 if e(m-1) is negligible (convergence). 
*/ /*< do 430 ll = 1, m >*/ i__1 = m; for (ll = 1; ll <= i__1; ++ll) { /*< l = m - ll >*/ l = m - ll; /* ...exit */ /*< if (l .eq. 0) go to 440 >*/ if (l == 0) { goto L440; } /*< test = cabs(s(l)) + cabs(s(l+1)) >*/ test = c_abs(&s[l]) + c_abs(&s[l + 1]); /*< ztest = test + cabs(e(l)) >*/ ztest = test + c_abs(&e[l]); /*< if (ztest .ne. test) go to 420 >*/ if (ztest != test) { goto L420; } /*< e(l) = (0.0e0,0.0e0) >*/ i__2 = l; e[i__2].r = (float)0., e[i__2].i = (float)0.; /* ......exit */ /*< go to 440 >*/ goto L440; /*< 420 continue >*/ L420: /*< 430 continue >*/ /* L430: */ ; } /*< 440 continue >*/ L440: /*< if (l .ne. m - 1) go to 450 >*/ if (l != m - 1) { goto L450; } /*< kase = 4 >*/ kase = 4; /*< go to 520 >*/ goto L520; /*< 450 continue >*/ L450: /*< lp1 = l + 1 >*/ lp1 = l + 1; /*< mp1 = m + 1 >*/ mp1 = m + 1; /*< do 470 lls = lp1, mp1 >*/ i__1 = mp1; for (lls = lp1; lls <= i__1; ++lls) { /*< ls = m - lls + lp1 >*/ ls = m - lls + lp1; /* ...exit */ /*< if (ls .eq. l) go to 480 >*/ if (ls == l) { goto L480; } /*< test = 0.0e0 >*/ test = (float)0.; /*< if (ls .ne. m) test = test + cabs(e(ls)) >*/ if (ls != m) { test += c_abs(&e[ls]); } /*< if (ls .ne. l + 1) test = test + cabs(e(ls-1)) >*/ if (ls != l + 1) { test += c_abs(&e[ls - 1]); } /*< ztest = test + cabs(s(ls)) >*/ ztest = test + c_abs(&s[ls]); /*< if (ztest .ne. test) go to 460 >*/ if (ztest != test) { goto L460; } /*< s(ls) = (0.0e0,0.0e0) >*/ i__2 = ls; s[i__2].r = (float)0., s[i__2].i = (float)0.; /* ......exit */ /*< go to 480 >*/ goto L480; /*< 460 continue >*/ L460: /*< 470 continue >*/ /* L470: */ ; } /*< 480 continue >*/ L480: /*< if (ls .ne. l) go to 490 >*/ if (ls != l) { goto L490; } /*< kase = 3 >*/ kase = 3; /*< go to 510 >*/ goto L510; /*< 490 continue >*/ L490: /*< if (ls .ne. m) go to 500 >*/ if (ls != m) { goto L500; } /*< kase = 1 >*/ kase = 1; /*< go to 510 >*/ goto L510; /*< 500 continue >*/ L500: /*< kase = 2 >*/ kase = 2; /*< l = ls >*/ l = ls; /*< 510 continue >*/ L510: /*< 520 continue >*/ L520: /*< l = l + 1 >*/ ++l; /* perform the task indicated by kase. */ /*< go to (530, 560, 580, 610), kase >*/ switch (kase) { case 1: goto L530; case 2: goto L560; case 3: goto L580; case 4: goto L610; } /* deflate negligible s(m). */ /*< 530 continue >*/ L530: /*< mm1 = m - 1 >*/ mm1 = m - 1; /*< f = real(e(m-1)) >*/ i__1 = m - 1; f = e[i__1].r; /*< e(m-1) = (0.0e0,0.0e0) >*/ i__1 = m - 1; e[i__1].r = (float)0., e[i__1].i = (float)0.; /*< do 550 kk = l, mm1 >*/ i__1 = mm1; for (kk = l; kk <= i__1; ++kk) { /*< k = mm1 - kk + l >*/ k = mm1 - kk + l; /*< t1 = real(s(k)) >*/ i__2 = k; t1 = s[i__2].r; /*< call srotg(t1,f,cs,sn) >*/ srotg_(&t1, &f, &cs, &sn); /*< s(k) = cmplx(t1,0.0e0) >*/ i__2 = k; q__1.r = t1, q__1.i = (float)0.; s[i__2].r = q__1.r, s[i__2].i = q__1.i; /*< if (k .eq. l) go to 540 >*/ if (k == l) { goto L540; } /*< f = -sn*real(e(k-1)) >*/ i__2 = k - 1; f = -sn * e[i__2].r; /*< e(k-1) = cs*e(k-1) >*/ i__2 = k - 1; i__3 = k - 1; q__1.r = cs * e[i__3].r, q__1.i = cs * e[i__3].i; e[i__2].r = q__1.r, e[i__2].i = q__1.i; /*< 540 continue >*/ L540: /*< if (wantv) call csrot(p,v(1,k),1,v(1,m),1,cs,sn) >*/ if (wantv) { csrot_(p, &v[k * v_dim1 + 1], &c__1, &v[m * v_dim1 + 1], &c__1, & cs, &sn); } /*< 550 continue >*/ /* L550: */ } /*< go to 650 >*/ goto L650; /* split at negligible s(l). 
*/ /*< 560 continue >*/ L560: /*< f = real(e(l-1)) >*/ i__1 = l - 1; f = e[i__1].r; /*< e(l-1) = (0.0e0,0.0e0) >*/ i__1 = l - 1; e[i__1].r = (float)0., e[i__1].i = (float)0.; /*< do 570 k = l, m >*/ i__1 = m; for (k = l; k <= i__1; ++k) { /*< t1 = real(s(k)) >*/ i__2 = k; t1 = s[i__2].r; /*< call srotg(t1,f,cs,sn) >*/ srotg_(&t1, &f, &cs, &sn); /*< s(k) = cmplx(t1,0.0e0) >*/ i__2 = k; q__1.r = t1, q__1.i = (float)0.; s[i__2].r = q__1.r, s[i__2].i = q__1.i; /*< f = -sn*real(e(k)) >*/ i__2 = k; f = -sn * e[i__2].r; /*< e(k) = cs*e(k) >*/ i__2 = k; i__3 = k; q__1.r = cs * e[i__3].r, q__1.i = cs * e[i__3].i; e[i__2].r = q__1.r, e[i__2].i = q__1.i; /*< if (wantu) call csrot(n,u(1,k),1,u(1,l-1),1,cs,sn) >*/ if (wantu) { csrot_(n, &u[k * u_dim1 + 1], &c__1, &u[(l - 1) * u_dim1 + 1], & c__1, &cs, &sn); } /*< 570 continue >*/ /* L570: */ } /*< go to 650 >*/ goto L650; /* perform one qr step. */ /*< 580 continue >*/ L580: /* calculate the shift. */ /*< >*/ /* Computing MAX */ r__1 = c_abs(&s[m]), r__2 = c_abs(&s[m - 1]), r__1 = max(r__1,r__2), r__2 = c_abs(&e[m - 1]), r__1 = max(r__1,r__2), r__2 = c_abs(&s[l]), r__1 = max(r__1,r__2), r__2 = c_abs(&e[l]); scale = dmax(r__1,r__2); /*< sm = real(s(m))/scale >*/ i__1 = m; sm = s[i__1].r / scale; /*< smm1 = real(s(m-1))/scale >*/ i__1 = m - 1; smm1 = s[i__1].r / scale; /*< emm1 = real(e(m-1))/scale >*/ i__1 = m - 1; emm1 = e[i__1].r / scale; /*< sl = real(s(l))/scale >*/ i__1 = l; sl = s[i__1].r / scale; /*< el = real(e(l))/scale >*/ i__1 = l; el = e[i__1].r / scale; /*< b = ((smm1 + sm)*(smm1 - sm) + emm1**2)/2.0e0 >*/ /* Computing 2nd power */ r__1 = emm1; b = ((smm1 + sm) * (smm1 - sm) + r__1 * r__1) / (float)2.; /*< c = (sm*emm1)**2 >*/ /* Computing 2nd power */ r__1 = sm * emm1; c__ = r__1 * r__1; /*< shift = 0.0e0 >*/ shift = (float)0.; /*< if (b .eq. 0.0e0 .and. c .eq. 0.0e0) go to 590 >*/ if (b == (float)0. && c__ == (float)0.) { goto L590; } /*< shift = sqrt(b**2+c) >*/ /* Computing 2nd power */ r__1 = b; shift = sqrt(r__1 * r__1 + c__); /*< if (b .lt. 0.0e0) shift = -shift >*/ if (b < (float)0.) { shift = -shift; } /*< shift = c/(b + shift) >*/ shift = c__ / (b + shift); /*< 590 continue >*/ L590: /*< f = (sl + sm)*(sl - sm) + shift >*/ f = (sl + sm) * (sl - sm) + shift; /*< g = sl*el >*/ g = sl * el; /* chase zeros. */ /*< mm1 = m - 1 >*/ mm1 = m - 1; /*< do 600 k = l, mm1 >*/ i__1 = mm1; for (k = l; k <= i__1; ++k) { /*< call srotg(f,g,cs,sn) >*/ srotg_(&f, &g, &cs, &sn); /*< if (k .ne. 
l) e(k-1) = cmplx(f,0.0e0) >*/ if (k != l) { i__2 = k - 1; q__1.r = f, q__1.i = (float)0.; e[i__2].r = q__1.r, e[i__2].i = q__1.i; } /*< f = cs*real(s(k)) + sn*real(e(k)) >*/ i__2 = k; i__3 = k; f = cs * s[i__2].r + sn * e[i__3].r; /*< e(k) = cs*e(k) - sn*s(k) >*/ i__2 = k; i__3 = k; q__2.r = cs * e[i__3].r, q__2.i = cs * e[i__3].i; i__4 = k; q__3.r = sn * s[i__4].r, q__3.i = sn * s[i__4].i; q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i; e[i__2].r = q__1.r, e[i__2].i = q__1.i; /*< g = sn*real(s(k+1)) >*/ i__2 = k + 1; g = sn * s[i__2].r; /*< s(k+1) = cs*s(k+1) >*/ i__2 = k + 1; i__3 = k + 1; q__1.r = cs * s[i__3].r, q__1.i = cs * s[i__3].i; s[i__2].r = q__1.r, s[i__2].i = q__1.i; /*< if (wantv) call csrot(p,v(1,k),1,v(1,k+1),1,cs,sn) >*/ if (wantv) { csrot_(p, &v[k * v_dim1 + 1], &c__1, &v[(k + 1) * v_dim1 + 1], & c__1, &cs, &sn); } /*< call srotg(f,g,cs,sn) >*/ srotg_(&f, &g, &cs, &sn); /*< s(k) = cmplx(f,0.0e0) >*/ i__2 = k; q__1.r = f, q__1.i = (float)0.; s[i__2].r = q__1.r, s[i__2].i = q__1.i; /*< f = cs*real(e(k)) + sn*real(s(k+1)) >*/ i__2 = k; i__3 = k + 1; f = cs * e[i__2].r + sn * s[i__3].r; /*< s(k+1) = -sn*e(k) + cs*s(k+1) >*/ i__2 = k + 1; r__1 = -sn; i__3 = k; q__2.r = r__1 * e[i__3].r, q__2.i = r__1 * e[i__3].i; i__4 = k + 1; q__3.r = cs * s[i__4].r, q__3.i = cs * s[i__4].i; q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i; s[i__2].r = q__1.r, s[i__2].i = q__1.i; /*< g = sn*real(e(k+1)) >*/ i__2 = k + 1; g = sn * e[i__2].r; /*< e(k+1) = cs*e(k+1) >*/ i__2 = k + 1; i__3 = k + 1; q__1.r = cs * e[i__3].r, q__1.i = cs * e[i__3].i; e[i__2].r = q__1.r, e[i__2].i = q__1.i; /*< >*/ if (wantu && k < *n) { csrot_(n, &u[k * u_dim1 + 1], &c__1, &u[(k + 1) * u_dim1 + 1], & c__1, &cs, &sn); } /*< 600 continue >*/ /* L600: */ } /*< e(m-1) = cmplx(f,0.0e0) >*/ i__1 = m - 1; q__1.r = f, q__1.i = (float)0.; e[i__1].r = q__1.r, e[i__1].i = q__1.i; /*< iter = iter + 1 >*/ ++iter; /*< go to 650 >*/ goto L650; /* convergence. */ /*< 610 continue >*/ L610: /* make the singular value positive */ /*< if (real(s(l)) .ge. 0.0e0) go to 620 >*/ i__1 = l; if (s[i__1].r >= (float)0.) { goto L620; } /*< s(l) = -s(l) >*/ i__1 = l; i__2 = l; q__1.r = -s[i__2].r, q__1.i = -s[i__2].i; s[i__1].r = q__1.r, s[i__1].i = q__1.i; /*< if (wantv) call cscal(p,(-1.0e0,0.0e0),v(1,l),1) >*/ if (wantv) { cscal_(p, &c_b53, &v[l * v_dim1 + 1], &c__1); } /*< 620 continue >*/ L620: /* order the singular value. */ /*< 630 if (l .eq. mm) go to 640 >*/ L630: if (l == mm) { goto L640; } /* ...exit */ /*< if (real(s(l)) .ge. real(s(l+1))) go to 640 >*/ i__1 = l; i__2 = l + 1; if (s[i__1].r >= s[i__2].r) { goto L640; } /*< t = s(l) >*/ i__1 = l; t.r = s[i__1].r, t.i = s[i__1].i; /*< s(l) = s(l+1) >*/ i__1 = l; i__2 = l + 1; s[i__1].r = s[i__2].r, s[i__1].i = s[i__2].i; /*< s(l+1) = t >*/ i__1 = l + 1; s[i__1].r = t.r, s[i__1].i = t.i; /*< >*/ if (wantv && l < *p) { cswap_(p, &v[l * v_dim1 + 1], &c__1, &v[(l + 1) * v_dim1 + 1], &c__1); } /*< >*/ if (wantu && l < *n) { cswap_(n, &u[l * u_dim1 + 1], &c__1, &u[(l + 1) * u_dim1 + 1], &c__1); } /*< l = l + 1 >*/ ++l; /*< go to 630 >*/ goto L630; /*< 640 continue >*/ L640: /*< iter = 0 >*/ iter = 0; /*< m = m - 1 >*/ --m; /*< 650 continue >*/ L650: /*< go to 400 >*/ goto L400; /*< 660 continue >*/ L660: /*< return >*/ return 0; /*< end >*/ } /* csvdc_ */ #ifdef __cplusplus } #endif
eile/ITK
Modules/ThirdParty/VNL/src/vxl/v3p/netlib/linpack/csvdc.c
C
apache-2.0
39,375
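The long comment block in csvdc_ explains that job packs two decimal digits "ab": the tens digit selects the left singular vectors written to u (0 = none, 1 = all n columns, >= 2 = only the first min(n,p)), and the ones digit selects whether v is computed. The translated code decodes this with *job % 100 / 10 and *job % 10. A quick standalone check of that decoding:

#include <stdio.h>

/* Mirrors the job decoding at the top of csvdc_. */
static void decode_job(int job)
{
	int jobu = job % 100 / 10;	/* digit a: left singular vectors */
	int wantv = job % 10 != 0;	/* digit b: right singular vectors */
	printf("job=%2d -> jobu=%d wantv=%d\n", job, jobu, wantv);
}

int main(void)
{
	decode_job(0);	/* singular values only */
	decode_job(11);	/* all n left vectors plus v */
	decode_job(21);	/* economy-size u (first min(n,p) columns) plus v */
	return 0;
}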
/* * amrnb audio input device * * Copyright (C) 2008 Google, Inc. * Copyright (C) 2008 HTC Corporation * Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/uaccess.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/dma-mapping.h> #include <linux/msm_audio_7X30.h> #include <linux/msm_audio_amrnb.h> #include <asm/atomic.h> #include <asm/ioctls.h> #include <mach/msm_adsp.h> #include <mach/qdsp5v2/qdsp5audreccmdi.h> #include <mach/qdsp5v2/qdsp5audrecmsg.h> #include <mach/qdsp5v2/audpreproc.h> #include <mach/qdsp5v2/audio_dev_ctl.h> #include <mach/debug_mm.h> /* FRAME_NUM must be a power of two */ #define FRAME_NUM (8) #define FRAME_SIZE (22 * 2) /* 36 bytes data + 8 bytes header */ #define DMASZ (FRAME_SIZE * FRAME_NUM) struct buffer { void *data; uint32_t size; uint32_t read; uint32_t addr; }; struct audio_in { struct buffer in[FRAME_NUM]; spinlock_t dsp_lock; atomic_t in_bytes; atomic_t in_samples; struct mutex lock; struct mutex read_lock; wait_queue_head_t wait; wait_queue_head_t wait_enable; struct msm_adsp_module *audrec; /* configuration to use on next enable */ uint32_t buffer_size; /* Frame size (36 bytes) */ uint32_t enc_type; int dtx_mode; uint32_t frame_format; uint32_t used_mode; uint32_t rec_mode; uint32_t dsp_cnt; uint32_t in_head; /* next buffer dsp will write */ uint32_t in_tail; /* next buffer read() will read */ uint32_t in_count; /* number of buffers available to read() */ uint32_t mode; const char *module_name; unsigned queue_ids; uint16_t enc_id; uint16_t source; /* Encoding source bit mask */ uint32_t device_events; uint32_t in_call; uint32_t dev_cnt; int voice_state; spinlock_t dev_lock; /* data allocated for various buffers */ char *data; dma_addr_t phys; int opened; int enabled; int running; int stopped; /* set when stopped, cleared on flush */ }; struct audio_frame { uint16_t frame_count_lsw; uint16_t frame_count_msw; uint16_t frame_length; uint16_t erased_pcm; unsigned char raw_bitstream[]; /* samples */ } __attribute__((packed)); /* Audrec queue command send macros */ #define audrec_send_bitstreamqueue(audio, cmd, len) \ msm_adsp_write(audio->audrec, ((audio->queue_ids & 0xFFFF0000) >> 16),\ cmd, len) #define audrec_send_audrecqueue(audio, cmd, len) \ msm_adsp_write(audio->audrec, (audio->queue_ids & 0x0000FFFF),\ cmd, len) struct audio_in the_audio_amrnb_in; /* DSP command send functions */ static int audamrnb_in_enc_config(struct audio_in *audio, int enable); static int audamrnb_in_param_config(struct audio_in *audio); static int audamrnb_in_mem_config(struct audio_in *audio); static int audamrnb_in_record_config(struct audio_in *audio, int enable); static int audamrnb_dsp_read_buffer(struct audio_in *audio, uint32_t read_cnt); static void audamrnb_in_get_dsp_frames(struct audio_in *audio); static void audamrnb_in_flush(struct audio_in *audio); static void amrnb_in_listener(u32 evt_id, union auddev_evt_data *evt_payload, void *private_data) { struct audio_in *audio = (struct audio_in *) private_data; unsigned
long flags; MM_DBG("evt_id = 0x%8x\n", evt_id); switch (evt_id) { case AUDDEV_EVT_DEV_RDY: { MM_DBG("AUDDEV_EVT_DEV_RDY\n"); spin_lock_irqsave(&audio->dev_lock, flags); audio->dev_cnt++; if (!audio->in_call) audio->source |= (0x1 << evt_payload->routing_id); spin_unlock_irqrestore(&audio->dev_lock, flags); if ((audio->running == 1) && (audio->enabled == 1)) audamrnb_in_record_config(audio, 1); break; } case AUDDEV_EVT_DEV_RLS: { MM_DBG("AUDDEV_EVT_DEV_RLS\n"); spin_lock_irqsave(&audio->dev_lock, flags); audio->dev_cnt--; if (!audio->in_call) audio->source &= ~(0x1 << evt_payload->routing_id); spin_unlock_irqrestore(&audio->dev_lock, flags); if ((!audio->running) || (!audio->enabled)) break; /* Turn of as per source */ if (audio->source) audamrnb_in_record_config(audio, 1); else /* Turn off all */ audamrnb_in_record_config(audio, 0); break; } case AUDDEV_EVT_VOICE_STATE_CHG: { MM_DBG("AUDDEV_EVT_VOICE_STATE_CHG, state = %d\n", evt_payload->voice_state); audio->voice_state = evt_payload->voice_state; if (audio->in_call && audio->running) { if (audio->voice_state == VOICE_STATE_INCALL) audamrnb_in_record_config(audio, 1); else if (audio->voice_state == VOICE_STATE_OFFCALL) { audamrnb_in_record_config(audio, 0); wake_up(&audio->wait); } } break; } default: MM_AUD_ERR("wrong event %d\n", evt_id); break; } } /* ------------------- dsp preproc event handler--------------------- */ static void audpreproc_dsp_event(void *data, unsigned id, void *msg) { struct audio_in *audio = data; switch (id) { case AUDPREPROC_ERROR_MSG: { struct audpreproc_err_msg *err_msg = msg; MM_AUD_ERR("ERROR_MSG: stream id %d err idx %d\n", err_msg->stream_id, err_msg->aud_preproc_err_idx); /* Error case */ wake_up(&audio->wait_enable); break; } case AUDPREPROC_CMD_CFG_DONE_MSG: { MM_DBG("CMD_CFG_DONE_MSG \n"); break; } case AUDPREPROC_CMD_ENC_CFG_DONE_MSG: { struct audpreproc_cmd_enc_cfg_done_msg *enc_cfg_msg = msg; MM_DBG("CMD_ENC_CFG_DONE_MSG: stream id %d enc type \ 0x%8x\n", enc_cfg_msg->stream_id, enc_cfg_msg->rec_enc_type); /* Encoder enable success */ if (enc_cfg_msg->rec_enc_type & ENCODE_ENABLE) audamrnb_in_param_config(audio); else { /* Encoder disable success */ audio->running = 0; audamrnb_in_record_config(audio, 0); } break; } case AUDPREPROC_CMD_ENC_PARAM_CFG_DONE_MSG: { MM_DBG("CMD_ENC_PARAM_CFG_DONE_MSG \n"); audamrnb_in_mem_config(audio); break; } case AUDPREPROC_AFE_CMD_AUDIO_RECORD_CFG_DONE_MSG: { MM_DBG("AFE_CMD_AUDIO_RECORD_CFG_DONE_MSG \n"); wake_up(&audio->wait_enable); break; } default: MM_AUD_ERR("Unknown Event id %d\n", id); } } /* ------------------- dsp audrec event handler--------------------- */ static void audrec_dsp_event(void *data, unsigned id, size_t len, void (*getevent)(void *ptr, size_t len)) { struct audio_in *audio = data; switch (id) { case AUDREC_CMD_MEM_CFG_DONE_MSG: { MM_DBG("CMD_MEM_CFG_DONE MSG DONE\n"); audio->running = 1; if ((!audio->in_call && (audio->dev_cnt > 0)) || (audio->in_call && (audio->voice_state == VOICE_STATE_INCALL))) audamrnb_in_record_config(audio, 1); break; } case AUDREC_FATAL_ERR_MSG: { struct audrec_fatal_err_msg fatal_err_msg; getevent(&fatal_err_msg, AUDREC_FATAL_ERR_MSG_LEN); MM_AUD_ERR("FATAL_ERR_MSG: err id %d\n", fatal_err_msg.audrec_err_id); /* Error stop the encoder */ audio->stopped = 1; wake_up(&audio->wait); break; } case AUDREC_UP_PACKET_READY_MSG: { struct audrec_up_pkt_ready_msg pkt_ready_msg; getevent(&pkt_ready_msg, AUDREC_UP_PACKET_READY_MSG_LEN); MM_DBG("UP_PACKET_READY_MSG: write cnt lsw %d \ write cnt msw %d read cnt lsw %d read cnt 
msw %d \n",\ pkt_ready_msg.audrec_packet_write_cnt_lsw, \ pkt_ready_msg.audrec_packet_write_cnt_msw, \ pkt_ready_msg.audrec_up_prev_read_cnt_lsw, \ pkt_ready_msg.audrec_up_prev_read_cnt_msw); audamrnb_in_get_dsp_frames(audio); break; } default: MM_AUD_ERR("Unknown Event id %d\n", id); } } static void audamrnb_in_get_dsp_frames(struct audio_in *audio) { struct audio_frame *frame; uint32_t index; unsigned long flags; index = audio->in_head; frame = (void *) (((char *)audio->in[index].data) - \ sizeof(*frame)); spin_lock_irqsave(&audio->dsp_lock, flags); audio->in[index].size = frame->frame_length; /* statistics of read */ atomic_add(audio->in[index].size, &audio->in_bytes); atomic_add(1, &audio->in_samples); audio->in_head = (audio->in_head + 1) & (FRAME_NUM - 1); /* If overflow, move the tail index foward. */ if (audio->in_head == audio->in_tail) audio->in_tail = (audio->in_tail + 1) & (FRAME_NUM - 1); else audio->in_count++; audamrnb_dsp_read_buffer(audio, audio->dsp_cnt++); spin_unlock_irqrestore(&audio->dsp_lock, flags); wake_up(&audio->wait); } struct msm_adsp_ops audrec_amrnb_adsp_ops = { .event = audrec_dsp_event, }; static int audamrnb_in_enc_config(struct audio_in *audio, int enable) { struct audpreproc_audrec_cmd_enc_cfg cmd; memset(&cmd, 0, sizeof(cmd)); cmd.cmd_id = AUDPREPROC_AUDREC_CMD_ENC_CFG; cmd.stream_id = audio->enc_id; if (enable) cmd.audrec_enc_type = audio->enc_type | ENCODE_ENABLE; else cmd.audrec_enc_type &= ~(ENCODE_ENABLE); return audpreproc_send_audreccmdqueue(&cmd, sizeof(cmd)); } static int audamrnb_in_param_config(struct audio_in *audio) { struct audpreproc_audrec_cmd_parm_cfg_amrnb cmd; memset(&cmd, 0, sizeof(cmd)); cmd.common.cmd_id = AUDPREPROC_AUDREC_CMD_PARAM_CFG; cmd.common.stream_id = audio->enc_id; cmd.dtx_mode = audio->dtx_mode; cmd.test_mode = -1; /* Default set to -1 */ cmd.used_mode = audio->used_mode; return audpreproc_send_audreccmdqueue(&cmd, sizeof(cmd)); } /* To Do: msm_snddev_route_enc(audio->enc_id); */ static int audamrnb_in_record_config(struct audio_in *audio, int enable) { struct audpreproc_afe_cmd_audio_record_cfg cmd; memset(&cmd, 0, sizeof(cmd)); cmd.cmd_id = AUDPREPROC_AFE_CMD_AUDIO_RECORD_CFG; cmd.stream_id = audio->enc_id; if (enable) cmd.destination_activity = AUDIO_RECORDING_TURN_ON; else cmd.destination_activity = AUDIO_RECORDING_TURN_OFF; cmd.source_mix_mask = audio->source; if (audio->enc_id == 2) { if ((cmd.source_mix_mask & INTERNAL_CODEC_TX_SOURCE_MIX_MASK) || (cmd.source_mix_mask & AUX_CODEC_TX_SOURCE_MIX_MASK) || (cmd.source_mix_mask & VOICE_UL_SOURCE_MIX_MASK) || (cmd.source_mix_mask & VOICE_DL_SOURCE_MIX_MASK)) { cmd.pipe_id = SOURCE_PIPE_1; } if (cmd.source_mix_mask & AUDPP_A2DP_PIPE_SOURCE_MIX_MASK) cmd.pipe_id |= SOURCE_PIPE_0; } return audpreproc_send_audreccmdqueue(&cmd, sizeof(cmd)); } static int audamrnb_in_mem_config(struct audio_in *audio) { struct audrec_cmd_arecmem_cfg cmd; uint16_t *data = (void *) audio->data; int n; memset(&cmd, 0, sizeof(cmd)); cmd.cmd_id = AUDREC_CMD_MEM_CFG_CMD; cmd.audrec_up_pkt_intm_count = 1; cmd.audrec_ext_pkt_start_addr_msw = audio->phys >> 16; cmd.audrec_ext_pkt_start_addr_lsw = audio->phys; cmd.audrec_ext_pkt_buf_number = FRAME_NUM; /* prepare buffer pointers: * 36 bytes amrnb packet + 4 halfword header */ for (n = 0; n < FRAME_NUM; n++) { audio->in[n].data = data + 4; data += (FRAME_SIZE/2); /* word increment */ MM_DBG("0x%8x\n", (int)(audio->in[n].data - 8)); } return audrec_send_audrecqueue(audio, &cmd, sizeof(cmd)); } static int audamrnb_dsp_read_buffer(struct audio_in *audio, 
uint32_t read_cnt) { struct up_audrec_packet_ext_ptr cmd; memset(&cmd, 0, sizeof(cmd)); cmd.cmd_id = UP_AUDREC_PACKET_EXT_PTR; cmd.audrec_up_curr_read_count_msw = read_cnt >> 16; cmd.audrec_up_curr_read_count_lsw = read_cnt; return audrec_send_bitstreamqueue(audio, &cmd, sizeof(cmd)); } /* must be called with audio->lock held */ static int audamrnb_in_enable(struct audio_in *audio) { if (audio->enabled) return 0; if (audpreproc_enable(audio->enc_id, &audpreproc_dsp_event, audio)) { MM_AUD_ERR("msm_adsp_enable(audpreproc) failed\n"); return -ENODEV; } if (msm_adsp_enable(audio->audrec)) { MM_AUD_ERR("msm_adsp_enable(audrec) failed\n"); audpreproc_disable(audio->enc_id, audio); return -ENODEV; } audio->enabled = 1; audamrnb_in_enc_config(audio, 1); return 0; } /* must be called with audio->lock held */ static int audamrnb_in_disable(struct audio_in *audio) { if (audio->enabled) { audio->enabled = 0; audamrnb_in_enc_config(audio, 0); wake_up(&audio->wait); wait_event_interruptible_timeout(audio->wait_enable, audio->running == 0, 1*HZ); msm_adsp_disable(audio->audrec); audpreproc_disable(audio->enc_id, audio); } return 0; } static void audamrnb_in_flush(struct audio_in *audio) { int i; audio->dsp_cnt = 0; audio->in_head = 0; audio->in_tail = 0; audio->in_count = 0; for (i = 0; i < FRAME_NUM; i++) { audio->in[i].size = 0; audio->in[i].read = 0; } MM_DBG("in_bytes %d\n", atomic_read(&audio->in_bytes)); MM_DBG("in_samples %d\n", atomic_read(&audio->in_samples)); atomic_set(&audio->in_bytes, 0); atomic_set(&audio->in_samples, 0); } /* ------------------- device --------------------- */ static long audamrnb_in_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct audio_in *audio = file->private_data; int rc = 0; if (cmd == AUDIO_GET_STATS) { struct msm_audio_stats stats; stats.byte_count = atomic_read(&audio->in_bytes); stats.sample_count = atomic_read(&audio->in_samples); if (copy_to_user((void *) arg, &stats, sizeof(stats))) return -EFAULT; return rc; } mutex_lock(&audio->lock); switch (cmd) { case AUDIO_START: { uint32_t freq; freq = 48000; MM_DBG("AUDIO_START\n"); if (audio->in_call && (audio->voice_state != VOICE_STATE_INCALL)) { rc = -EPERM; break; } rc = msm_snddev_request_freq(&freq, audio->enc_id, SNDDEV_CAP_TX, AUDDEV_CLNT_ENC); MM_DBG("sample rate configured %d\n", freq); if (rc < 0) { MM_DBG(" Sample rate can not be set, return code %d\n", rc); msm_snddev_withdraw_freq(audio->enc_id, SNDDEV_CAP_TX, AUDDEV_CLNT_ENC); MM_DBG("msm_snddev_withdraw_freq\n"); break; } rc = audamrnb_in_enable(audio); if (!rc) { rc = wait_event_interruptible_timeout(audio->wait_enable, audio->running != 0, 1*HZ); MM_DBG("state %d rc = %d\n", audio->running, rc); if (audio->running == 0) rc = -ENODEV; else rc = 0; } audio->stopped = 0; break; } case AUDIO_STOP: { rc = audamrnb_in_disable(audio); rc = msm_snddev_withdraw_freq(audio->enc_id, SNDDEV_CAP_TX, AUDDEV_CLNT_ENC); MM_DBG("msm_snddev_withdraw_freq\n"); audio->stopped = 1; break; } case AUDIO_FLUSH: { if (audio->stopped) { /* Make sure we're stopped and we wake any threads * that might be blocked holding the read_lock. * While audio->stopped read threads will always * exit immediately. 
*/ wake_up(&audio->wait); mutex_lock(&audio->read_lock); audamrnb_in_flush(audio); mutex_unlock(&audio->read_lock); } break; } case AUDIO_SET_STREAM_CONFIG: { struct msm_audio_stream_config cfg; if (copy_from_user(&cfg, (void *) arg, sizeof(cfg))) { rc = -EFAULT; break; } /* Allow only single frame */ if (cfg.buffer_size != (FRAME_SIZE - 8)) rc = -EINVAL; else audio->buffer_size = cfg.buffer_size; break; } case AUDIO_GET_STREAM_CONFIG: { struct msm_audio_stream_config cfg; memset(&cfg, 0, sizeof(cfg)); cfg.buffer_size = audio->buffer_size; cfg.buffer_count = FRAME_NUM; if (copy_to_user((void *) arg, &cfg, sizeof(cfg))) rc = -EFAULT; break; } case AUDIO_GET_AMRNB_ENC_CONFIG_V2: { struct msm_audio_amrnb_enc_config_v2 cfg; memset(&cfg, 0, sizeof(cfg)); cfg.dtx_enable = ((audio->dtx_mode == -1) ? 1 : 0); cfg.band_mode = audio->used_mode; cfg.frame_format = audio->frame_format; if (copy_to_user((void *) arg, &cfg, sizeof(cfg))) rc = -EFAULT; break; } case AUDIO_SET_AMRNB_ENC_CONFIG_V2: { struct msm_audio_amrnb_enc_config_v2 cfg; if (copy_from_user(&cfg, (void *) arg, sizeof(cfg))) { rc = -EFAULT; break; } /* DSP does not support any other than default format */ if (audio->frame_format != cfg.frame_format) { rc = -EINVAL; break; } if (cfg.dtx_enable == 0) audio->dtx_mode = 0; else if (cfg.dtx_enable == 1) audio->dtx_mode = -1; else { rc = -EINVAL; break; } audio->used_mode = cfg.band_mode; break; } case AUDIO_SET_INCALL: { struct msm_voicerec_mode cfg; unsigned long flags; if (copy_from_user(&cfg, (void *) arg, sizeof(cfg))) { rc = -EFAULT; break; } if (cfg.rec_mode != VOC_REC_BOTH && cfg.rec_mode != VOC_REC_UPLINK && cfg.rec_mode != VOC_REC_DOWNLINK) { MM_AUD_ERR("invalid rec_mode\n"); rc = -EINVAL; break; } else { spin_lock_irqsave(&audio->dev_lock, flags); if (cfg.rec_mode == VOC_REC_UPLINK) audio->source = VOICE_UL_SOURCE_MIX_MASK; else if (cfg.rec_mode == VOC_REC_DOWNLINK) audio->source = VOICE_DL_SOURCE_MIX_MASK; else audio->source = VOICE_DL_SOURCE_MIX_MASK | VOICE_UL_SOURCE_MIX_MASK ; audio->in_call = 1; spin_unlock_irqrestore(&audio->dev_lock, flags); } break; } case AUDIO_GET_SESSION_ID: { if (copy_to_user((void *) arg, &audio->enc_id, sizeof(unsigned short))) { rc = -EFAULT; } break; } default: rc = -EINVAL; } mutex_unlock(&audio->lock); return rc; } static ssize_t audamrnb_in_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { struct audio_in *audio = file->private_data; unsigned long flags; const char __user *start = buf; void *data; uint32_t index; uint32_t size; int rc = 0; mutex_lock(&audio->read_lock); while (count > 0) { rc = wait_event_interruptible( audio->wait, (audio->in_count > 0) || audio->stopped || (audio->in_call && audio->running && (audio->voice_state == VOICE_STATE_OFFCALL))); if (rc < 0) break; if (!audio->in_count) { if (audio->stopped) { rc = 0;/* End of File */ break; } else if (audio->in_call && audio->running && (audio->voice_state == VOICE_STATE_OFFCALL)) { MM_DBG("Not Permitted Voice Terminated\n"); rc = -EPERM; /* Voice Call stopped */ break; } } index = audio->in_tail; data = (uint8_t *) audio->in[index].data; size = audio->in[index].size; if (count >= size) { if (copy_to_user(buf, data, size)) { rc = -EFAULT; break; } spin_lock_irqsave(&audio->dsp_lock, flags); if (index != audio->in_tail) { /* overrun -- data is * invalid and we need to retry */ spin_unlock_irqrestore(&audio->dsp_lock, flags); continue; } audio->in[index].size = 0; audio->in_tail = (audio->in_tail + 1) & (FRAME_NUM - 1); audio->in_count--; 
spin_unlock_irqrestore(&audio->dsp_lock, flags); count -= size; buf += size; } else { MM_AUD_ERR("short read\n"); break; } } mutex_unlock(&audio->read_lock); if (buf > start) return buf - start; return rc; } static ssize_t audamrnb_in_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { return -EINVAL; } static int audamrnb_in_release(struct inode *inode, struct file *file) { struct audio_in *audio = file->private_data; MM_DBG("\n"); mutex_lock(&audio->lock); audio->in_call = 0; /* withdraw the session's frequency in case the driver was not stopped */ msm_snddev_withdraw_freq(audio->enc_id, SNDDEV_CAP_TX, AUDDEV_CLNT_ENC); auddev_unregister_evt_listner(AUDDEV_CLNT_ENC, audio->enc_id); audamrnb_in_disable(audio); audamrnb_in_flush(audio); msm_adsp_put(audio->audrec); audpreproc_aenc_free(audio->enc_id); audio->audrec = NULL; audio->opened = 0; mutex_unlock(&audio->lock); return 0; } static int audamrnb_in_open(struct inode *inode, struct file *file) { struct audio_in *audio = &the_audio_amrnb_in; int rc; int encid; mutex_lock(&audio->lock); if (audio->opened) { rc = -EBUSY; goto done; } if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { rc = -EACCES; MM_AUD_ERR("Non tunnel encoding is not supported\n"); goto done; } else if (!(file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { audio->mode = MSM_AUD_ENC_MODE_TUNNEL; MM_DBG("Opened for tunnel mode encoding\n"); } else { rc = -EACCES; goto done; } /* Settings will be re-configured at AUDIO_SET_CONFIG, * but at least we need to have an initial config */ audio->buffer_size = (FRAME_SIZE - 8); audio->enc_type = ENC_TYPE_AMRNB | audio->mode; audio->dtx_mode = -1; audio->frame_format = 0; audio->used_mode = 7; /* Bit Rate 12.2 kbps MR122 */ encid = audpreproc_aenc_alloc(audio->enc_type, &audio->module_name, &audio->queue_ids); if (encid < 0) { MM_AUD_ERR("No free encoder available\n"); rc = -ENODEV; goto done; } audio->enc_id = encid; rc = msm_adsp_get(audio->module_name, &audio->audrec, &audrec_amrnb_adsp_ops, audio); if (rc) { audpreproc_aenc_free(audio->enc_id); goto done; } audio->stopped = 0; audio->source = 0; audamrnb_in_flush(audio); audio->device_events = AUDDEV_EVT_DEV_RDY | AUDDEV_EVT_DEV_RLS | AUDDEV_EVT_VOICE_STATE_CHG; audio->voice_state = VOICE_STATE_INCALL; rc = auddev_register_evt_listner(audio->device_events, AUDDEV_CLNT_ENC, audio->enc_id, amrnb_in_listener, (void *) audio); if (rc) { MM_AUD_ERR("failed to register device event listener\n"); goto evt_error; } file->private_data = audio; audio->opened = 1; done: mutex_unlock(&audio->lock); return rc; evt_error: msm_adsp_put(audio->audrec); audpreproc_aenc_free(audio->enc_id); mutex_unlock(&audio->lock); return rc; } static const struct file_operations audio_in_fops = { .owner = THIS_MODULE, .open = audamrnb_in_open, .release = audamrnb_in_release, .read = audamrnb_in_read, .write = audamrnb_in_write, .unlocked_ioctl = audamrnb_in_ioctl, }; struct miscdevice audio_amrnb_in_misc = { .minor = MISC_DYNAMIC_MINOR, .name = "msm_amrnb_in", .fops = &audio_in_fops, }; static int __init audamrnb_in_init(void) { the_audio_amrnb_in.data = dma_alloc_coherent(NULL, DMASZ, &the_audio_amrnb_in.phys, GFP_KERNEL); MM_DBG("Memory addr = 0x%8x Phy addr= 0x%8x ---- \n", \ (int) the_audio_amrnb_in.data, (int) the_audio_amrnb_in.phys); if (!the_audio_amrnb_in.data) { MM_AUD_ERR("Unable to allocate DMA buffer\n"); return -ENOMEM; } mutex_init(&the_audio_amrnb_in.lock); mutex_init(&the_audio_amrnb_in.read_lock); spin_lock_init(&the_audio_amrnb_in.dsp_lock);
spin_lock_init(&the_audio_amrnb_in.dev_lock); init_waitqueue_head(&the_audio_amrnb_in.wait); init_waitqueue_head(&the_audio_amrnb_in.wait_enable); return misc_register(&audio_amrnb_in_misc); } device_initcall(audamrnb_in_init);
chrisch1974/htc7x30-2.6-flyer
arch/arm/mach-msm/qdsp5v2/audio_amrnb_in.c
C
gpl-2.0
22,485
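/*
 * Editor's note (a sketch, not part of the driver above): audio_amrnb_in.c
 * keeps FRAME_NUM a power of two so the in_head/in_tail frame indices wrap
 * with a bitwise AND instead of a modulo, and an overrun simply advances
 * the tail past the oldest frame. Below is a minimal, self-contained model
 * of that index scheme; the names ring_push/ring_pop and RING_NUM are
 * hypothetical stand-ins, not part of the driver.
 */
#include <stdio.h>

#define RING_NUM 8			/* must be a power of two, like FRAME_NUM */

struct ring {
	unsigned head;			/* next slot the producer writes */
	unsigned tail;			/* next slot the consumer reads */
	unsigned count;			/* frames available to the consumer */
	int slot[RING_NUM];
};

/* Producer side: mirrors the index handling in audamrnb_in_get_dsp_frames() */
static void ring_push(struct ring *r, int v)
{
	r->slot[r->head] = v;
	r->head = (r->head + 1) & (RING_NUM - 1);
	if (r->head == r->tail)		/* overrun: drop the oldest frame */
		r->tail = (r->tail + 1) & (RING_NUM - 1);
	else
		r->count++;
}

/* Consumer side: mirrors the read() path advancing in_tail */
static int ring_pop(struct ring *r, int *v)
{
	if (!r->count)
		return 0;
	*v = r->slot[r->tail];
	r->tail = (r->tail + 1) & (RING_NUM - 1);
	r->count--;
	return 1;
}

int main(void)
{
	struct ring r = { 0 };
	int v, i;

	for (i = 0; i < 10; i++)	/* push more entries than RING_NUM */
		ring_push(&r, i);
	while (ring_pop(&r, &v))	/* the oldest entries were dropped */
		printf("%d\n", v);
	return 0;
}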
/****************************************************************************** * * Module Name: nsrepair2 - Repair for objects returned by specific * predefined methods * *****************************************************************************/ /* * Copyright (C) 2000 - 2015, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #define _COMPONENT ACPI_NAMESPACE ACPI_MODULE_NAME("nsrepair2") /* * Information structure and handler for ACPI predefined names that can * be repaired on a per-name basis. 
*/ typedef acpi_status(*acpi_repair_function) (struct acpi_evaluate_info * info, union acpi_operand_object **return_object_ptr); typedef struct acpi_repair_info { char name[ACPI_NAME_SIZE]; acpi_repair_function repair_function; } acpi_repair_info; /* Local prototypes */ static const struct acpi_repair_info *acpi_ns_match_complex_repair(struct acpi_namespace_node *node); static acpi_status acpi_ns_repair_ALR(struct acpi_evaluate_info *info, union acpi_operand_object **return_object_ptr); static acpi_status acpi_ns_repair_CID(struct acpi_evaluate_info *info, union acpi_operand_object **return_object_ptr); static acpi_status acpi_ns_repair_CST(struct acpi_evaluate_info *info, union acpi_operand_object **return_object_ptr); static acpi_status acpi_ns_repair_FDE(struct acpi_evaluate_info *info, union acpi_operand_object **return_object_ptr); static acpi_status acpi_ns_repair_HID(struct acpi_evaluate_info *info, union acpi_operand_object **return_object_ptr); static acpi_status acpi_ns_repair_PRT(struct acpi_evaluate_info *info, union acpi_operand_object **return_object_ptr); static acpi_status acpi_ns_repair_PSS(struct acpi_evaluate_info *info, union acpi_operand_object **return_object_ptr); static acpi_status acpi_ns_repair_TSS(struct acpi_evaluate_info *info, union acpi_operand_object **return_object_ptr); static acpi_status acpi_ns_check_sorted_list(struct acpi_evaluate_info *info, union acpi_operand_object *return_object, u32 start_index, u32 expected_count, u32 sort_index, u8 sort_direction, char *sort_key_name); /* Values for sort_direction above */ #define ACPI_SORT_ASCENDING 0 #define ACPI_SORT_DESCENDING 1 static void acpi_ns_remove_element(union acpi_operand_object *obj_desc, u32 index); static void acpi_ns_sort_list(union acpi_operand_object **elements, u32 count, u32 index, u8 sort_direction); /* * This table contains the names of the predefined methods for which we can * perform more complex repairs. * * As necessary: * * _ALR: Sort the list ascending by ambient_illuminance * _CID: Strings: uppercase all, remove any leading asterisk * _CST: Sort the list ascending by C state type * _FDE: Convert Buffer of BYTEs to a Buffer of DWORDs * _GTM: Convert Buffer of BYTEs to a Buffer of DWORDs * _HID: Strings: uppercase all, remove any leading asterisk * _PRT: Fix reversed source_name and source_index * _PSS: Sort the list descending by Power * _TSS: Sort the list descending by Power * * Names that must be packages, but cannot be sorted: * * _BCL: Values are tied to the Package index where they appear, and cannot * be moved or sorted. These index values are used for _BQC and _BCM. * However, we can fix the case where a buffer is returned, by converting * it to a Package of integers. 
*/ static const struct acpi_repair_info acpi_ns_repairable_names[] = { {"_ALR", acpi_ns_repair_ALR}, {"_CID", acpi_ns_repair_CID}, {"_CST", acpi_ns_repair_CST}, {"_FDE", acpi_ns_repair_FDE}, {"_GTM", acpi_ns_repair_FDE}, /* _GTM has same repair as _FDE */ {"_HID", acpi_ns_repair_HID}, {"_PRT", acpi_ns_repair_PRT}, {"_PSS", acpi_ns_repair_PSS}, {"_TSS", acpi_ns_repair_TSS}, {{0, 0, 0, 0}, NULL} /* Table terminator */ }; #define ACPI_FDE_FIELD_COUNT 5 #define ACPI_FDE_BYTE_BUFFER_SIZE 5 #define ACPI_FDE_DWORD_BUFFER_SIZE (ACPI_FDE_FIELD_COUNT * sizeof (u32)) /****************************************************************************** * * FUNCTION: acpi_ns_complex_repairs * * PARAMETERS: info - Method execution information block * node - Namespace node for the method/object * validate_status - Original status of earlier validation * return_object_ptr - Pointer to the object returned from the * evaluation of a method or object * * RETURN: Status. AE_OK if repair was successful. If name is not * matched, validate_status is returned. * * DESCRIPTION: Attempt to repair/convert a return object of a type that was * not expected. * *****************************************************************************/ acpi_status acpi_ns_complex_repairs(struct acpi_evaluate_info *info, struct acpi_namespace_node *node, acpi_status validate_status, union acpi_operand_object **return_object_ptr) { const struct acpi_repair_info *predefined; acpi_status status; /* Check if this name is in the list of repairable names */ predefined = acpi_ns_match_complex_repair(node); if (!predefined) { return (validate_status); } status = predefined->repair_function(info, return_object_ptr); return (status); } /****************************************************************************** * * FUNCTION: acpi_ns_match_complex_repair * * PARAMETERS: node - Namespace node for the method/object * * RETURN: Pointer to entry in repair table. NULL indicates not found. * * DESCRIPTION: Check an object name against the repairable object list. * *****************************************************************************/ static const struct acpi_repair_info *acpi_ns_match_complex_repair(struct acpi_namespace_node *node) { const struct acpi_repair_info *this_name; /* Search info table for a repairable predefined method/object name */ this_name = acpi_ns_repairable_names; while (this_name->repair_function) { if (ACPI_COMPARE_NAME(node->name.ascii, this_name->name)) { return (this_name); } this_name++; } return (NULL); /* Not found */ } /****************************************************************************** * * FUNCTION: acpi_ns_repair_ALR * * PARAMETERS: info - Method execution information block * return_object_ptr - Pointer to the object returned from the * evaluation of a method or object * * RETURN: Status. AE_OK if object is OK or was repaired successfully * * DESCRIPTION: Repair for the _ALR object. If necessary, sort the object list * ascending by the ambient illuminance values. 
* *****************************************************************************/ static acpi_status acpi_ns_repair_ALR(struct acpi_evaluate_info *info, union acpi_operand_object **return_object_ptr) { union acpi_operand_object *return_object = *return_object_ptr; acpi_status status; status = acpi_ns_check_sorted_list(info, return_object, 0, 2, 1, ACPI_SORT_ASCENDING, "AmbientIlluminance"); return (status); } /****************************************************************************** * * FUNCTION: acpi_ns_repair_FDE * * PARAMETERS: info - Method execution information block * return_object_ptr - Pointer to the object returned from the * evaluation of a method or object * * RETURN: Status. AE_OK if object is OK or was repaired successfully * * DESCRIPTION: Repair for the _FDE and _GTM objects. The expected return * value is a Buffer of 5 DWORDs. This function repairs a common * problem where the return value is a Buffer of BYTEs, not * DWORDs. * *****************************************************************************/ static acpi_status acpi_ns_repair_FDE(struct acpi_evaluate_info *info, union acpi_operand_object **return_object_ptr) { union acpi_operand_object *return_object = *return_object_ptr; union acpi_operand_object *buffer_object; u8 *byte_buffer; u32 *dword_buffer; u32 i; ACPI_FUNCTION_NAME(ns_repair_FDE); switch (return_object->common.type) { case ACPI_TYPE_BUFFER: /* This is the expected type. Length should be (at least) 5 DWORDs */ if (return_object->buffer.length >= ACPI_FDE_DWORD_BUFFER_SIZE) { return (AE_OK); } /* We can only repair if we have exactly 5 BYTEs */ if (return_object->buffer.length != ACPI_FDE_BYTE_BUFFER_SIZE) { ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, info->node_flags, "Incorrect return buffer length %u, expected %u", return_object->buffer.length, ACPI_FDE_DWORD_BUFFER_SIZE)); return (AE_AML_OPERAND_TYPE); } /* Create the new (larger) buffer object */ buffer_object = acpi_ut_create_buffer_object(ACPI_FDE_DWORD_BUFFER_SIZE); if (!buffer_object) { return (AE_NO_MEMORY); } /* Expand each byte to a DWORD */ byte_buffer = return_object->buffer.pointer; dword_buffer = ACPI_CAST_PTR(u32, buffer_object->buffer.pointer); for (i = 0; i < ACPI_FDE_FIELD_COUNT; i++) { *dword_buffer = (u32) *byte_buffer; dword_buffer++; byte_buffer++; } ACPI_DEBUG_PRINT((ACPI_DB_REPAIR, "%s Expanded Byte Buffer to expected DWord Buffer\n", info->full_pathname)); break; default: return (AE_AML_OPERAND_TYPE); } /* Delete the original return object, return the new buffer object */ acpi_ut_remove_reference(return_object); *return_object_ptr = buffer_object; info->return_flags |= ACPI_OBJECT_REPAIRED; return (AE_OK); } /****************************************************************************** * * FUNCTION: acpi_ns_repair_CID * * PARAMETERS: info - Method execution information block * return_object_ptr - Pointer to the object returned from the * evaluation of a method or object * * RETURN: Status. AE_OK if object is OK or was repaired successfully * * DESCRIPTION: Repair for the _CID object. If a string, ensure that all * letters are uppercase and that there is no leading asterisk. * If a Package, ensure same for all string elements. 
* *****************************************************************************/ static acpi_status acpi_ns_repair_CID(struct acpi_evaluate_info *info, union acpi_operand_object **return_object_ptr) { acpi_status status; union acpi_operand_object *return_object = *return_object_ptr; union acpi_operand_object **element_ptr; union acpi_operand_object *original_element; u16 original_ref_count; u32 i; /* Check for _CID as a simple string */ if (return_object->common.type == ACPI_TYPE_STRING) { status = acpi_ns_repair_HID(info, return_object_ptr); return (status); } /* Exit if not a Package */ if (return_object->common.type != ACPI_TYPE_PACKAGE) { return (AE_OK); } /* Examine each element of the _CID package */ element_ptr = return_object->package.elements; for (i = 0; i < return_object->package.count; i++) { original_element = *element_ptr; original_ref_count = original_element->common.reference_count; status = acpi_ns_repair_HID(info, element_ptr); if (ACPI_FAILURE(status)) { return (status); } /* Take care with reference counts */ if (original_element != *element_ptr) { /* Element was replaced */ (*element_ptr)->common.reference_count = original_ref_count; acpi_ut_remove_reference(original_element); } element_ptr++; } return (AE_OK); } /****************************************************************************** * * FUNCTION: acpi_ns_repair_CST * * PARAMETERS: info - Method execution information block * return_object_ptr - Pointer to the object returned from the * evaluation of a method or object * * RETURN: Status. AE_OK if object is OK or was repaired successfully * * DESCRIPTION: Repair for the _CST object: * 1. Sort the list ascending by C state type * 2. Ensure type cannot be zero * 3. A subpackage count of zero means _CST is meaningless * 4. Count must match the number of C state subpackages * *****************************************************************************/ static acpi_status acpi_ns_repair_CST(struct acpi_evaluate_info *info, union acpi_operand_object **return_object_ptr) { union acpi_operand_object *return_object = *return_object_ptr; union acpi_operand_object **outer_elements; u32 outer_element_count; union acpi_operand_object *obj_desc; acpi_status status; u8 removing; u32 i; ACPI_FUNCTION_NAME(ns_repair_CST); /* * Check if the C-state type values are proportional. */ outer_element_count = return_object->package.count - 1; i = 0; while (i < outer_element_count) { outer_elements = &return_object->package.elements[i + 1]; removing = FALSE; if ((*outer_elements)->package.count == 0) { ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, info->node_flags, "SubPackage[%u] - removing entry due to zero count", i)); removing = TRUE; goto remove_element; } obj_desc = (*outer_elements)->package.elements[1]; /* Index1 = Type */ if ((u32)obj_desc->integer.value == 0) { ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, info->node_flags, "SubPackage[%u] - removing entry due to invalid Type(0)", i)); removing = TRUE; } remove_element: if (removing) { acpi_ns_remove_element(return_object, i + 1); outer_element_count--; } else { i++; } } /* Update top-level package count, Type "Integer" checked elsewhere */ obj_desc = return_object->package.elements[0]; obj_desc->integer.value = outer_element_count; /* * Entries (subpackages) in the _CST Package must be sorted by the * C-state type, in ascending order. 
*/ status = acpi_ns_check_sorted_list(info, return_object, 1, 4, 1, ACPI_SORT_ASCENDING, "C-State Type"); if (ACPI_FAILURE(status)) { return (status); } return (AE_OK); } /****************************************************************************** * * FUNCTION: acpi_ns_repair_HID * * PARAMETERS: info - Method execution information block * return_object_ptr - Pointer to the object returned from the * evaluation of a method or object * * RETURN: Status. AE_OK if object is OK or was repaired successfully * * DESCRIPTION: Repair for the _HID object. If a string, ensure that all * letters are uppercase and that there is no leading asterisk. * *****************************************************************************/ static acpi_status acpi_ns_repair_HID(struct acpi_evaluate_info *info, union acpi_operand_object **return_object_ptr) { union acpi_operand_object *return_object = *return_object_ptr; union acpi_operand_object *new_string; char *source; char *dest; ACPI_FUNCTION_NAME(ns_repair_HID); /* We only care about string _HID objects (not integers) */ if (return_object->common.type != ACPI_TYPE_STRING) { return (AE_OK); } if (return_object->string.length == 0) { ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, info->node_flags, "Invalid zero-length _HID or _CID string")); /* Return AE_OK anyway, let driver handle it */ info->return_flags |= ACPI_OBJECT_REPAIRED; return (AE_OK); } /* It is simplest to always create a new string object */ new_string = acpi_ut_create_string_object(return_object->string.length); if (!new_string) { return (AE_NO_MEMORY); } /* * Remove a leading asterisk if present. For some unknown reason, there * are many machines in the field that contains IDs like this. * * Examples: "*PNP0C03", "*ACPI0003" */ source = return_object->string.pointer; if (*source == '*') { source++; new_string->string.length--; ACPI_DEBUG_PRINT((ACPI_DB_REPAIR, "%s: Removed invalid leading asterisk\n", info->full_pathname)); } /* * Copy and uppercase the string. From the ACPI 5.0 specification: * * A valid PNP ID must be of the form "AAA####" where A is an uppercase * letter and # is a hex digit. A valid ACPI ID must be of the form * "NNNN####" where N is an uppercase letter or decimal digit, and * # is a hex digit. */ for (dest = new_string->string.pointer; *source; dest++, source++) { *dest = (char)toupper((int)*source); } acpi_ut_remove_reference(return_object); *return_object_ptr = new_string; return (AE_OK); } /****************************************************************************** * * FUNCTION: acpi_ns_repair_PRT * * PARAMETERS: info - Method execution information block * return_object_ptr - Pointer to the object returned from the * evaluation of a method or object * * RETURN: Status. AE_OK if object is OK or was repaired successfully * * DESCRIPTION: Repair for the _PRT object. If necessary, fix reversed * source_name and source_index field, a common BIOS bug. 
* *****************************************************************************/ static acpi_status acpi_ns_repair_PRT(struct acpi_evaluate_info *info, union acpi_operand_object **return_object_ptr) { union acpi_operand_object *package_object = *return_object_ptr; union acpi_operand_object **top_object_list; union acpi_operand_object **sub_object_list; union acpi_operand_object *obj_desc; union acpi_operand_object *sub_package; u32 element_count; u32 index; /* Each element in the _PRT package is a subpackage */ top_object_list = package_object->package.elements; element_count = package_object->package.count; /* Examine each subpackage */ for (index = 0; index < element_count; index++, top_object_list++) { sub_package = *top_object_list; sub_object_list = sub_package->package.elements; /* Check for minimum required element count */ if (sub_package->package.count < 4) { continue; } /* * If the BIOS has erroneously reversed the _PRT source_name (index 2) * and the source_index (index 3), fix it. _PRT is important enough to * workaround this BIOS error. This also provides compatibility with * other ACPI implementations. */ obj_desc = sub_object_list[3]; if (!obj_desc || (obj_desc->common.type != ACPI_TYPE_INTEGER)) { sub_object_list[3] = sub_object_list[2]; sub_object_list[2] = obj_desc; info->return_flags |= ACPI_OBJECT_REPAIRED; ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, info->node_flags, "PRT[%X]: Fixed reversed SourceName and SourceIndex", index)); } } return (AE_OK); } /****************************************************************************** * * FUNCTION: acpi_ns_repair_PSS * * PARAMETERS: info - Method execution information block * return_object_ptr - Pointer to the object returned from the * evaluation of a method or object * * RETURN: Status. AE_OK if object is OK or was repaired successfully * * DESCRIPTION: Repair for the _PSS object. If necessary, sort the object list * by the CPU frequencies. Check that the power dissipation values * are all proportional to CPU frequency (i.e., sorting by * frequency should be the same as sorting by power.) * *****************************************************************************/ static acpi_status acpi_ns_repair_PSS(struct acpi_evaluate_info *info, union acpi_operand_object **return_object_ptr) { union acpi_operand_object *return_object = *return_object_ptr; union acpi_operand_object **outer_elements; u32 outer_element_count; union acpi_operand_object **elements; union acpi_operand_object *obj_desc; u32 previous_value; acpi_status status; u32 i; /* * Entries (subpackages) in the _PSS Package must be sorted by power * dissipation, in descending order. If it appears that the list is * incorrectly sorted, sort it. We sort by cpu_frequency, since this * should be proportional to the power. */ status = acpi_ns_check_sorted_list(info, return_object, 0, 6, 0, ACPI_SORT_DESCENDING, "CpuFrequency"); if (ACPI_FAILURE(status)) { return (status); } /* * We now know the list is correctly sorted by CPU frequency. Check if * the power dissipation values are proportional. 
*/ previous_value = ACPI_UINT32_MAX; outer_elements = return_object->package.elements; outer_element_count = return_object->package.count; for (i = 0; i < outer_element_count; i++) { elements = (*outer_elements)->package.elements; obj_desc = elements[1]; /* Index1 = power_dissipation */ if ((u32) obj_desc->integer.value > previous_value) { ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, info->node_flags, "SubPackage[%u,%u] - suspicious power dissipation values", i - 1, i)); } previous_value = (u32) obj_desc->integer.value; outer_elements++; } return (AE_OK); } /****************************************************************************** * * FUNCTION: acpi_ns_repair_TSS * * PARAMETERS: info - Method execution information block * return_object_ptr - Pointer to the object returned from the * evaluation of a method or object * * RETURN: Status. AE_OK if object is OK or was repaired successfully * * DESCRIPTION: Repair for the _TSS object. If necessary, sort the object list * descending by the power dissipation values. * *****************************************************************************/ static acpi_status acpi_ns_repair_TSS(struct acpi_evaluate_info *info, union acpi_operand_object **return_object_ptr) { union acpi_operand_object *return_object = *return_object_ptr; acpi_status status; struct acpi_namespace_node *node; /* * We can only sort the _TSS return package if there is no _PSS in the * same scope. This is because if _PSS is present, the ACPI specification * dictates that the _TSS Power Dissipation field is to be ignored, and * therefore some BIOSs leave garbage values in the _TSS Power field(s). * In this case, it is best to just return the _TSS package as-is. * (May, 2011) */ status = acpi_ns_get_node(info->node, "^_PSS", ACPI_NS_NO_UPSEARCH, &node); if (ACPI_SUCCESS(status)) { return (AE_OK); } status = acpi_ns_check_sorted_list(info, return_object, 0, 5, 1, ACPI_SORT_DESCENDING, "PowerDissipation"); return (status); } /****************************************************************************** * * FUNCTION: acpi_ns_check_sorted_list * * PARAMETERS: info - Method execution information block * return_object - Pointer to the top-level returned object * start_index - Index of the first subpackage * expected_count - Minimum length of each subpackage * sort_index - Subpackage entry to sort on * sort_direction - Ascending or descending * sort_key_name - Name of the sort_index field * * RETURN: Status. AE_OK if the list is valid and is sorted correctly or * has been repaired by sorting the list. * * DESCRIPTION: Check if the package list is valid and sorted correctly by the * sort_index. If not, then sort the list. * *****************************************************************************/ static acpi_status acpi_ns_check_sorted_list(struct acpi_evaluate_info *info, union acpi_operand_object *return_object, u32 start_index, u32 expected_count, u32 sort_index, u8 sort_direction, char *sort_key_name) { u32 outer_element_count; union acpi_operand_object **outer_elements; union acpi_operand_object **elements; union acpi_operand_object *obj_desc; u32 i; u32 previous_value; ACPI_FUNCTION_NAME(ns_check_sorted_list); /* The top-level object must be a package */ if (return_object->common.type != ACPI_TYPE_PACKAGE) { return (AE_AML_OPERAND_TYPE); } /* * NOTE: assumes list of subpackages contains no NULL elements. * Any NULL elements should have been removed by earlier call * to acpi_ns_remove_null_elements. 
*/ outer_element_count = return_object->package.count; if (!outer_element_count || start_index >= outer_element_count) { return (AE_AML_PACKAGE_LIMIT); } outer_elements = &return_object->package.elements[start_index]; outer_element_count -= start_index; previous_value = 0; if (sort_direction == ACPI_SORT_DESCENDING) { previous_value = ACPI_UINT32_MAX; } /* Examine each subpackage */ for (i = 0; i < outer_element_count; i++) { /* Each element of the top-level package must also be a package */ if ((*outer_elements)->common.type != ACPI_TYPE_PACKAGE) { return (AE_AML_OPERAND_TYPE); } /* Each subpackage must have the minimum length */ if ((*outer_elements)->package.count < expected_count) { return (AE_AML_PACKAGE_LIMIT); } elements = (*outer_elements)->package.elements; obj_desc = elements[sort_index]; if (obj_desc->common.type != ACPI_TYPE_INTEGER) { return (AE_AML_OPERAND_TYPE); } /* * The list must be sorted in the specified order. If we detect a * discrepancy, sort the entire list. */ if (((sort_direction == ACPI_SORT_ASCENDING) && (obj_desc->integer.value < previous_value)) || ((sort_direction == ACPI_SORT_DESCENDING) && (obj_desc->integer.value > previous_value))) { acpi_ns_sort_list(&return_object->package. elements[start_index], outer_element_count, sort_index, sort_direction); info->return_flags |= ACPI_OBJECT_REPAIRED; ACPI_DEBUG_PRINT((ACPI_DB_REPAIR, "%s: Repaired unsorted list - now sorted by %s\n", info->full_pathname, sort_key_name)); return (AE_OK); } previous_value = (u32) obj_desc->integer.value; outer_elements++; } return (AE_OK); } /****************************************************************************** * * FUNCTION: acpi_ns_sort_list * * PARAMETERS: elements - Package object element list * count - Element count for above * index - Sort by which package element * sort_direction - Ascending or Descending sort * * RETURN: None * * DESCRIPTION: Sort the objects that are in a package element list. * * NOTE: Assumes that all NULL elements have been removed from the package, * and that all elements have been verified to be of type Integer. * *****************************************************************************/ static void acpi_ns_sort_list(union acpi_operand_object **elements, u32 count, u32 index, u8 sort_direction) { union acpi_operand_object *obj_desc1; union acpi_operand_object *obj_desc2; union acpi_operand_object *temp_obj; u32 i; u32 j; /* Simple bubble sort */ for (i = 1; i < count; i++) { for (j = (count - 1); j >= i; j--) { obj_desc1 = elements[j - 1]->package.elements[index]; obj_desc2 = elements[j]->package.elements[index]; if (((sort_direction == ACPI_SORT_ASCENDING) && (obj_desc1->integer.value > obj_desc2->integer.value)) || ((sort_direction == ACPI_SORT_DESCENDING) && (obj_desc1->integer.value < obj_desc2->integer.value))) { temp_obj = elements[j - 1]; elements[j - 1] = elements[j]; elements[j] = temp_obj; } } } } /****************************************************************************** * * FUNCTION: acpi_ns_remove_element * * PARAMETERS: obj_desc - Package object element list * index - Index of element to remove * * RETURN: None * * DESCRIPTION: Remove the requested element of a package and delete it. 
* *****************************************************************************/ static void acpi_ns_remove_element(union acpi_operand_object *obj_desc, u32 index) { union acpi_operand_object **source; union acpi_operand_object **dest; u32 count; u32 new_count; u32 i; ACPI_FUNCTION_NAME(ns_remove_element); count = obj_desc->package.count; new_count = count - 1; source = obj_desc->package.elements; dest = source; /* Examine all elements of the package object, remove matched index */ for (i = 0; i < count; i++) { if (i == index) { acpi_ut_remove_reference(*source); /* Remove one ref for being in pkg */ acpi_ut_remove_reference(*source); } else { *dest = *source; dest++; } source++; } /* NULL terminate list and update the package count */ *dest = NULL; obj_desc->package.count = new_count; }
AiJiaZone/linux-4.0
virt/drivers/acpi/acpica/nsrepair2.c
C
gpl-2.0
31,111
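/*
 * Editor's note (a sketch, not part of ACPICA): acpi_ns_sort_list() above
 * is a plain bubble sort that reorders whole subpackage pointers by one
 * integer element, chosen by 'index', in either direction. The same idea
 * is shown here on an array of row pointers; struct row and sort_rows are
 * hypothetical names introduced only for this illustration.
 */
#include <stdio.h>

#define SORT_ASCENDING 0
#define SORT_DESCENDING 1

struct row {
	unsigned int key[2];	/* stand-in for a subpackage's integer elements */
};

static void sort_rows(struct row **rows, unsigned int count,
		      unsigned int index, unsigned char direction)
{
	struct row *tmp;
	unsigned int i, j;

	/* Simple bubble sort; swap pointers only, as ACPICA does */
	for (i = 1; i < count; i++) {
		for (j = count - 1; j >= i; j--) {
			unsigned int a = rows[j - 1]->key[index];
			unsigned int b = rows[j]->key[index];

			if (((direction == SORT_ASCENDING) && (a > b)) ||
			    ((direction == SORT_DESCENDING) && (a < b))) {
				tmp = rows[j - 1];
				rows[j - 1] = rows[j];
				rows[j] = tmp;
			}
		}
	}
}

int main(void)
{
	struct row r0 = { {3, 30} }, r1 = { {1, 10} }, r2 = { {2, 20} };
	struct row *rows[] = { &r0, &r1, &r2 };
	unsigned int i;

	sort_rows(rows, 3, 0, SORT_ASCENDING);	/* sort by key[0] */
	for (i = 0; i < 3; i++)
		printf("%u\n", rows[i]->key[0]);
	return 0;
}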
/* * Copyright (C) ST-Ericsson SA 2010 * * License terms: GNU General Public License (GPL) version 2 * Author: Virupax Sadashivpetimath <virupax.sadashivpetimath@stericsson.com> * * RTC clock driver for the RTC part of the AB8500 Power management chip. * Based on RTC clock driver for the AB3100 Analog Baseband Chip by * Linus Walleij <linus.walleij@stericsson.com> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/rtc.h> #include <linux/mfd/abx500.h> #include <linux/mfd/abx500/ab8500.h> #include <linux/delay.h> #define AB8500_RTC_SOFF_STAT_REG 0x00 #define AB8500_RTC_CC_CONF_REG 0x01 #define AB8500_RTC_READ_REQ_REG 0x02 #define AB8500_RTC_WATCH_TSECMID_REG 0x03 #define AB8500_RTC_WATCH_TSECHI_REG 0x04 #define AB8500_RTC_WATCH_TMIN_LOW_REG 0x05 #define AB8500_RTC_WATCH_TMIN_MID_REG 0x06 #define AB8500_RTC_WATCH_TMIN_HI_REG 0x07 #define AB8500_RTC_ALRM_MIN_LOW_REG 0x08 #define AB8500_RTC_ALRM_MIN_MID_REG 0x09 #define AB8500_RTC_ALRM_MIN_HI_REG 0x0A #define AB8500_RTC_STAT_REG 0x0B #define AB8500_RTC_BKUP_CHG_REG 0x0C #define AB8500_RTC_FORCE_BKUP_REG 0x0D #define AB8500_RTC_CALIB_REG 0x0E #define AB8500_RTC_SWITCH_STAT_REG 0x0F /* RtcReadRequest bits */ #define RTC_READ_REQUEST 0x01 #define RTC_WRITE_REQUEST 0x02 /* RtcCtrl bits */ #define RTC_ALARM_ENA 0x04 #define RTC_STATUS_DATA 0x01 #define COUNTS_PER_SEC (0xF000 / 60) #define AB8500_RTC_EPOCH 2000 static const u8 ab8500_rtc_time_regs[] = { AB8500_RTC_WATCH_TMIN_HI_REG, AB8500_RTC_WATCH_TMIN_MID_REG, AB8500_RTC_WATCH_TMIN_LOW_REG, AB8500_RTC_WATCH_TSECHI_REG, AB8500_RTC_WATCH_TSECMID_REG }; static const u8 ab8500_rtc_alarm_regs[] = { AB8500_RTC_ALRM_MIN_HI_REG, AB8500_RTC_ALRM_MIN_MID_REG, AB8500_RTC_ALRM_MIN_LOW_REG }; /* Calculate the seconds from 1970 to 01-01-2000 00:00:00 */ static unsigned long get_elapsed_seconds(int year) { unsigned long secs; struct rtc_time tm = { .tm_year = year - 1900, .tm_mday = 1, }; /* * This function calculates secs from 1970 and not from * 1900, even if we supply the offset from year 1900. 
*/ rtc_tm_to_time(&tm, &secs); return secs; } static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm) { unsigned long timeout = jiffies + HZ; int retval, i; unsigned long mins, secs; unsigned char buf[ARRAY_SIZE(ab8500_rtc_time_regs)]; u8 value; /* Request a data read */ retval = abx500_set_register_interruptible(dev, AB8500_RTC, AB8500_RTC_READ_REQ_REG, RTC_READ_REQUEST); if (retval < 0) return retval; /* Early AB8500 chips will not clear the rtc read request bit */ if (abx500_get_chip_id(dev) == 0) { usleep_range(1000, 1000); } else { /* Wait for some cycles after enabling the rtc read in ab8500 */ while (time_before(jiffies, timeout)) { retval = abx500_get_register_interruptible(dev, AB8500_RTC, AB8500_RTC_READ_REQ_REG, &value); if (retval < 0) return retval; if (!(value & RTC_READ_REQUEST)) break; usleep_range(1000, 5000); } } /* Read the Watchtime registers */ for (i = 0; i < ARRAY_SIZE(ab8500_rtc_time_regs); i++) { retval = abx500_get_register_interruptible(dev, AB8500_RTC, ab8500_rtc_time_regs[i], &value); if (retval < 0) return retval; buf[i] = value; } mins = (buf[0] << 16) | (buf[1] << 8) | buf[2]; secs = (buf[3] << 8) | buf[4]; secs = secs / COUNTS_PER_SEC; secs = secs + (mins * 60); /* Add back the initially subtracted number of seconds */ secs += get_elapsed_seconds(AB8500_RTC_EPOCH); rtc_time_to_tm(secs, tm); return rtc_valid_tm(tm); } static int ab8500_rtc_set_time(struct device *dev, struct rtc_time *tm) { int retval, i; unsigned char buf[ARRAY_SIZE(ab8500_rtc_time_regs)]; unsigned long no_secs, no_mins, secs = 0; if (tm->tm_year < (AB8500_RTC_EPOCH - 1900)) { dev_dbg(dev, "year should be equal to or greater than %d\n", AB8500_RTC_EPOCH); return -EINVAL; } /* Get the number of seconds since 1970 */ rtc_tm_to_time(tm, &secs); /* * Convert it to the number of seconds since 01-01-2000 00:00:00, since * we only have a small counter in the RTC. 
secs -= get_elapsed_seconds(AB8500_RTC_EPOCH); no_mins = secs / 60; no_secs = secs % 60; /* Make the seconds count as per the RTC resolution */ no_secs = no_secs * COUNTS_PER_SEC; buf[4] = no_secs & 0xFF; buf[3] = (no_secs >> 8) & 0xFF; buf[2] = no_mins & 0xFF; buf[1] = (no_mins >> 8) & 0xFF; buf[0] = (no_mins >> 16) & 0xFF; for (i = 0; i < ARRAY_SIZE(ab8500_rtc_time_regs); i++) { retval = abx500_set_register_interruptible(dev, AB8500_RTC, ab8500_rtc_time_regs[i], buf[i]); if (retval < 0) return retval; } /* Request a data write */ return abx500_set_register_interruptible(dev, AB8500_RTC, AB8500_RTC_READ_REQ_REG, RTC_WRITE_REQUEST); } static int ab8500_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) { int retval, i; u8 rtc_ctrl, value; unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)]; unsigned long secs, mins; /* Check if the alarm is enabled or not */ retval = abx500_get_register_interruptible(dev, AB8500_RTC, AB8500_RTC_STAT_REG, &rtc_ctrl); if (retval < 0) return retval; if (rtc_ctrl & RTC_ALARM_ENA) alarm->enabled = 1; else alarm->enabled = 0; alarm->pending = 0; for (i = 0; i < ARRAY_SIZE(ab8500_rtc_alarm_regs); i++) { retval = abx500_get_register_interruptible(dev, AB8500_RTC, ab8500_rtc_alarm_regs[i], &value); if (retval < 0) return retval; buf[i] = value; } mins = (buf[0] << 16) | (buf[1] << 8) | (buf[2]); secs = mins * 60; /* Add back the initially subtracted number of seconds */ secs += get_elapsed_seconds(AB8500_RTC_EPOCH); rtc_time_to_tm(secs, &alarm->time); return rtc_valid_tm(&alarm->time); } static int ab8500_rtc_irq_enable(struct device *dev, unsigned int enabled) { return abx500_mask_and_set_register_interruptible(dev, AB8500_RTC, AB8500_RTC_STAT_REG, RTC_ALARM_ENA, enabled ? RTC_ALARM_ENA : 0); } static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) { int retval, i; unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)]; unsigned long mins, secs = 0; if (alarm->time.tm_year < (AB8500_RTC_EPOCH - 1900)) { dev_dbg(dev, "year should be equal to or greater than %d\n", AB8500_RTC_EPOCH); return -EINVAL; } /* Get the number of seconds since 1970 */ rtc_tm_to_time(&alarm->time, &secs); /* * Convert it to the number of seconds since 01-01-2000 00:00:00, since * we only have a small counter in the RTC. */ secs -= get_elapsed_seconds(AB8500_RTC_EPOCH); mins = secs / 60; buf[2] = mins & 0xFF; buf[1] = (mins >> 8) & 0xFF; buf[0] = (mins >> 16) & 0xFF; /* Set the alarm time */ for (i = 0; i < ARRAY_SIZE(ab8500_rtc_alarm_regs); i++) { retval = abx500_set_register_interruptible(dev, AB8500_RTC, ab8500_rtc_alarm_regs[i], buf[i]); if (retval < 0) return retval; } return ab8500_rtc_irq_enable(dev, alarm->enabled); } static int ab8500_rtc_set_calibration(struct device *dev, int calibration) { int retval; u8 rtccal = 0; /* * Check that the calibration value (which is in units of 0.5 * parts-per-million) is in the AB8500's range for RtcCalibration * register. -128 (0x80) is not permitted because the AB8500 uses * a sign-bit rather than two's complement, so 0x80 is just another * representation of zero. */ if ((calibration < -127) || (calibration > 127)) { dev_err(dev, "RtcCalibration value outside permitted range\n"); return -EINVAL; } /* * The AB8500 uses sign (in bit7) and magnitude (in bits0-6) * so need to convert to this sort of representation before writing * into RtcCalibration register...
*/ if (calibration >= 0) rtccal = 0x7F & calibration; else rtccal = ~(calibration - 1) | 0x80; retval = abx500_set_register_interruptible(dev, AB8500_RTC, AB8500_RTC_CALIB_REG, rtccal); return retval; } static int ab8500_rtc_get_calibration(struct device *dev, int *calibration) { int retval; u8 rtccal = 0; retval = abx500_get_register_interruptible(dev, AB8500_RTC, AB8500_RTC_CALIB_REG, &rtccal); if (retval >= 0) { /* * The AB8500 uses sign (in bit7) and magnitude (in bits0-6) * so need to convert value from RtcCalibration register into * a two's complement signed value... */ if (rtccal & 0x80) *calibration = 0 - (rtccal & 0x7F); else *calibration = 0x7F & rtccal; } return retval; } static ssize_t ab8500_sysfs_store_rtc_calibration(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int retval; int calibration = 0; if (sscanf(buf, " %i ", &calibration) != 1) { dev_err(dev, "Failed to store RTC calibration attribute\n"); return -EINVAL; } retval = ab8500_rtc_set_calibration(dev, calibration); return retval ? retval : count; } static ssize_t ab8500_sysfs_show_rtc_calibration(struct device *dev, struct device_attribute *attr, char *buf) { int retval = 0; int calibration = 0; retval = ab8500_rtc_get_calibration(dev, &calibration); if (retval < 0) { dev_err(dev, "Failed to read RTC calibration attribute\n"); sprintf(buf, "0\n"); return retval; } return sprintf(buf, "%d\n", calibration); } static DEVICE_ATTR(rtc_calibration, S_IRUGO | S_IWUSR, ab8500_sysfs_show_rtc_calibration, ab8500_sysfs_store_rtc_calibration); static int ab8500_sysfs_rtc_register(struct device *dev) { return device_create_file(dev, &dev_attr_rtc_calibration); } static void ab8500_sysfs_rtc_unregister(struct device *dev) { device_remove_file(dev, &dev_attr_rtc_calibration); } static irqreturn_t rtc_alarm_handler(int irq, void *data) { struct rtc_device *rtc = data; unsigned long events = RTC_IRQF | RTC_AF; dev_dbg(&rtc->dev, "%s\n", __func__); rtc_update_irq(rtc, 1, events); return IRQ_HANDLED; } static const struct rtc_class_ops ab8500_rtc_ops = { .read_time = ab8500_rtc_read_time, .set_time = ab8500_rtc_set_time, .read_alarm = ab8500_rtc_read_alarm, .set_alarm = ab8500_rtc_set_alarm, .alarm_irq_enable = ab8500_rtc_irq_enable, }; static int __devinit ab8500_rtc_probe(struct platform_device *pdev) { int err; struct rtc_device *rtc; u8 rtc_ctrl; int irq; irq = platform_get_irq_byname(pdev, "ALARM"); if (irq < 0) return irq; /* For RTC supply test */ err = abx500_mask_and_set_register_interruptible(&pdev->dev, AB8500_RTC, AB8500_RTC_STAT_REG, RTC_STATUS_DATA, RTC_STATUS_DATA); if (err < 0) return err; /* Wait for reset by the PorRtc */ usleep_range(1000, 5000); err = abx500_get_register_interruptible(&pdev->dev, AB8500_RTC, AB8500_RTC_STAT_REG, &rtc_ctrl); if (err < 0) return err; /* Check if the RTC Supply fails */ if (!(rtc_ctrl & RTC_STATUS_DATA)) { dev_err(&pdev->dev, "RTC supply failure\n"); return -ENODEV; } device_init_wakeup(&pdev->dev, true); rtc = rtc_device_register("ab8500-rtc", &pdev->dev, &ab8500_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) { dev_err(&pdev->dev, "Registration failed\n"); err = PTR_ERR(rtc); return err; } err = request_threaded_irq(irq, NULL, rtc_alarm_handler, IRQF_NO_SUSPEND, "ab8500-rtc", rtc); if (err < 0) { rtc_device_unregister(rtc); return err; } platform_set_drvdata(pdev, rtc); err = ab8500_sysfs_rtc_register(&pdev->dev); if (err) { dev_err(&pdev->dev, "sysfs RTC failed to register\n"); return err; } return 0; } static int __devexit ab8500_rtc_remove(struct
platform_device *pdev) { struct rtc_device *rtc = platform_get_drvdata(pdev); int irq = platform_get_irq_byname(pdev, "ALARM"); ab8500_sysfs_rtc_unregister(&pdev->dev); free_irq(irq, rtc); rtc_device_unregister(rtc); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver ab8500_rtc_driver = { .driver = { .name = "ab8500-rtc", .owner = THIS_MODULE, }, .probe = ab8500_rtc_probe, .remove = __devexit_p(ab8500_rtc_remove), }; module_platform_driver(ab8500_rtc_driver); MODULE_AUTHOR("Virupax Sadashivpetimath <virupax.sadashivpetimath@stericsson.com>"); MODULE_DESCRIPTION("AB8500 RTC Driver"); MODULE_LICENSE("GPL v2");
itgb/opCloudRouter
qca/src/linux/drivers/rtc/rtc-ab8500.c
C
gpl-2.0
12,132
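The calibration logic in rtc-ab8500.c above converts between a plain signed integer and the register's sign-and-magnitude encoding (bit 7 = sign, bits 0-6 = magnitude). A minimal, self-contained sketch of that round trip, using hypothetical helper names that are not part of the driver:

#include <assert.h>
#include <stdint.h>

/* Encode a signed calibration value (restricted to -127..127 here) into
 * the AB8500 sign-and-magnitude register format, mirroring the driver. */
static uint8_t calib_encode(int calibration)
{
    if (calibration >= 0)
        return 0x7F & calibration;
    return (~(calibration - 1)) | 0x80;   /* ~(x - 1) == -x */
}

/* Decode the register format back into a signed value. */
static int calib_decode(uint8_t rtccal)
{
    if (rtccal & 0x80)
        return 0 - (rtccal & 0x7F);
    return 0x7F & rtccal;
}

int main(void)
{
    /* The encoding is lossless over the register's full signed range. */
    for (int c = -127; c <= 127; c++)
        assert(calib_decode(calib_encode(c)) == c);
    return 0;
}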
#include QMK_KEYBOARD_H
#include "debug.h"
#include "action_layer.h"

#define BASE 0 // default layer
#define FN1  1 // media layer

#define CAPS_CTL CTL_T(KC_CAPS) // Caps on tap, Ctrl on hold.
#define COPY     LCTL(KC_C)     // C-c Copy
#define PASTE    LCTL(KC_V)     // C-v Paste
#define ZM_NRM   LCTL(KC_0)     // C-0 Zoom Normal
#define ZM_OUT   LCTL(KC_MINS)  // C-- Zoom Out
#define ZM_IN    LCTL(KC_PLUS)  // C-+ Zoom In
#define EM_UNDO  LCTL(KC_UNDS)  // C-_ Emacs Undo

#define _MOB  1 // Mobile#
#define _CUS1 2 // Custom macro 1
#define _CUS2 3 // Custom macro 2

const uint16_t PROGMEM keymaps[][MATRIX_ROWS][MATRIX_COLS] = {
/* Keymap 0: Basic layer
 *
 * ,--------------------------------------------------.           ,--------------------------------------------------.
 * |   `    |   1  |   2  |   3  |   4  |   5  |  6   |           |  7   |   8  |   9  |   0  |   -  |   =  | BSpace |
 * |--------+------+------+------+------+------+------|           |------+------+------+------+------+------+--------|
 * |  Tab   |   Q  |   W  |   E  |   R  |   T  | Fwd  |           | Back |   Y  |   U  |   I  |   O  |   P  |   \    |
 * |--------+------+------+------+------+------|      |           |      |------+------+------+------+------+--------|
 * |Caps/Ctl|   A  |   S  |   D  |   F  |   G  |------|           |------|   H  |   J  |   K  |   L  |   ;  | Enter  |
 * |--------+------+------+------+------+------| PgDn |           | PgUp |------+------+------+------+------+--------|
 * | LShift |   Z  |   X  |   C  |   V  |   B  |      |           |      |   N  |   M  |   ,  |   .  |   /  |   '    |
 * `--------+------+------+------+------+-------------'           `-------------+------+------+------+------+--------'
 *   | Ctrl | Esc  | LGui | Alt  | Alt  |                                       | Left |  Dn  |  Up  | Right|  Fn  |
 *   `----------------------------------'                                       `----------------------------------'
 *                                        ,-------------.       ,-------------.
 *                                        | Copy |  (   |       |  )   | Paste|
 *                                 ,------|------+------|       |------+------+------.
 *                                 |      |      |  [   |       |  ]   |      |      |
 *                                 |Space | Del  |------|       |------| Enter|BSpace|
 *                                 |      |      |  {   |       |  }   |      |      |
 *                                 `--------------------'       `--------------------'
 */
// If it accepts an argument (i.e, is a function), it doesn't need KC_.
// Otherwise, it needs KC_*
[BASE] = LAYOUT_ergodox(  // layer 0 : default
        // Left hand
        KC_GRV,   KC_1,   KC_2,    KC_3,    KC_4,    KC_5,   KC_6,
        KC_TAB,   KC_Q,   KC_W,    KC_E,    KC_R,    KC_T,   KC_WBAK,
        CAPS_CTL, KC_A,   KC_S,    KC_D,    KC_F,    KC_G,
        KC_LSFT,  KC_Z,   KC_X,    KC_C,    KC_V,    KC_B,   KC_PGDN,
        KC_LCTL,  KC_ESC, KC_LGUI, KC_LALT, KC_LALT,
                                                     COPY,   KC_LCBR,
                                                             KC_LPRN,
                                            KC_SPC,  KC_DEL, KC_LBRC,
        // Right hand
        KC_7,    KC_8,    KC_9,    KC_0,     KC_MINS,  KC_EQL,  KC_BSPC,
        KC_WFWD, KC_Y,    KC_U,    KC_I,     KC_O,     KC_P,    KC_BSLS,
                 KC_H,    KC_J,    KC_K,     KC_L,     KC_SCLN, KC_ENT,
        KC_PGUP, KC_N,    KC_M,    KC_COMM,  KC_DOT,   KC_SLSH, KC_QUOT,
                 KC_LEFT, KC_DOWN, KC_UP,    KC_RIGHT, TG(FN1),
        KC_RCBR, PASTE,
        KC_RPRN,
        KC_RBRC, KC_ENT, KC_BSPC),

/* Keymap 1: Fn Keys, media and mouse keys
 *
 * ,--------------------------------------------------.           ,--------------------------------------------------.
 * |  Esc   |  F1  |  F2  |  F3  |  F4  |  F5  |  F6  |           |  F7  |  F8  |  F9  | F10  | F11  | F12  | BSpace |
 * |--------+------+------+------+------+------+------|           |------+------+------+------+------+------+--------|
 * |        |      |      | MsUp |      |      |      |           |      |      |      |      |      |      |        |
 * |--------+------+------+------+------+------|      |           |      |------+------+------+------+------+--------|
 * |        |      |MsLeft|MsDown|MsRght|      |------|           |------|      |      |      |      |      |        |
 * |--------+------+------+------+------+------|      |           |      |------+------+------+------+------+--------|
 * |        |      | LClk | MClk | RClk |      |      |           |      |      |      |      |      |      |        |
 * `--------+------+------+------+------+-------------'           `-------------+------+------+------+------+--------'
 *   |Teensy|      | ZmNrm| ZmOut| ZmIn |                                       | Undo |VolDn |VolUp | Mute |      |
 *   `----------------------------------'                                       `----------------------------------'
 *                                        ,-------------.       ,-------------.
 *                                        |      |      |       |      |      |
 *                                 ,------|------+------|       |------+------+------.
 *                                 |      |      |      |       |      |      |      |
 *                                 |      |      |------|       |------|      |      |
 *                                 |      |      |      |       |      |      |      |
 *                                 `--------------------'       `--------------------'
 */
// FN1 Layer
[FN1] = LAYOUT_ergodox(
        // Left hand
        KC_ESC,  KC_F1,   KC_F2,   KC_F3,   KC_F4,   KC_F5,   KC_F6,
        KC_TRNS, KC_TRNS, KC_TRNS, KC_MS_U, KC_TRNS, KC_TRNS, KC_TRNS,
        KC_TRNS, KC_TRNS, KC_MS_L, KC_MS_D, KC_MS_R, KC_TRNS,
        KC_TRNS, KC_TRNS, KC_BTN1, KC_BTN3, KC_BTN2, KC_TRNS, KC_TRNS,
        KC_TRNS, KC_TRNS, ZM_NRM,  ZM_OUT,  ZM_IN,
                                                     KC_TRNS, KC_TRNS,
                                                              KC_TRNS,
                                            RESET,   KC_TRNS, KC_TRNS,
        // Right hand
        KC_F7,   KC_F8,   KC_F9,   KC_F10,   KC_F11,   KC_F12,  KC_BSPC,
        KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS,  KC_TRNS,  KC_TRNS, KC_TRNS,
                 KC_TRNS, KC_TRNS, KC_TRNS,  KC_TRNS,  KC_TRNS, KC_MPLY,
        KC_TRNS, M(_MOB), KC_TRNS, M(_CUS1), M(_CUS2), KC_TRNS, KC_TRNS,
                 EM_UNDO, KC_VOLD, KC_VOLU,  KC_MUTE,  KC_TRNS,
        KC_TRNS, KC_TRNS,
        KC_TRNS,
        KC_TRNS, KC_TRNS, KC_TRNS
),
};

const uint16_t PROGMEM fn_actions[] = {
};

const macro_t *action_get_macro(keyrecord_t *record, uint8_t id, uint8_t opt)
{
    // MACRODOWN only works in this function
    switch (id) {
        case _MOB: // Your mobile# here.
            return MACRODOWN(T(1), T(2), T(3), T(MINS),
                             T(1), T(2), T(3), T(MINS),
                             T(1), T(2), T(3), T(4), END);
        case _CUS1: // Your custom macro 1
            return MACRODOWN(T(E), T(M), T(A), T(C), T(S), T(SPC), END);
        case _CUS2: // Your custom macro 2
            return MACRODOWN(T(L), T(S), T(SPC), T(MINS), T(L), T(ENT), END);
    };
    return MACRO_NONE;
};

// Runs just one time when the keyboard initializes.
void matrix_init_user(void) {
};

// Runs constantly in the background, in a loop.
void matrix_scan_user(void) {
    uint8_t layer = biton32(layer_state);

    ergodox_board_led_off();
    ergodox_right_led_1_off();
    ergodox_right_led_2_off();
    ergodox_right_led_3_off();
    switch (layer) {
        // TODO: Make this relevant to the ErgoDox EZ.
        case 1:
            ergodox_right_led_1_on();
            break;
        case 2:
            ergodox_right_led_2_on();
            break;
        default:
            // none
            break;
    }
};
kll/qmk_firmware
layouts/community/ergodox/ab/keymap.c
C
gpl-2.0
7,321
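The keymap above wires custom output through the (since-deprecated) M(id)/action_get_macro path: a macro keycode in the layout selects a case that returns a MACRODOWN() sequence. A hedged sketch of adding one more macro under the same API; _SIG is a hypothetical id, and the function is shown standalone for clarity, while in the real file its case would be merged into the existing switch:

#define _SIG 4  /* hypothetical: next unused macro id after _CUS2 */

/* Bind the macro by placing M(_SIG) on any free key in a layout. */
const macro_t *action_get_macro(keyrecord_t *record, uint8_t id, uint8_t opt)
{
    switch (id) {
        case _SIG:  /* types "ab" when the key is pressed */
            return MACRODOWN(T(A), T(B), END);
    }
    return MACRO_NONE;
}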
/*
 * Copyright (C) 2014 Freie Universität Berlin
 *
 * This file is subject to the terms and conditions of the GNU Lesser General
 * Public License v2.1. See the file LICENSE in the top level directory for more
 * details.
 */

/**
 * @ingroup     tests
 * @{
 *
 * @file
 * @brief       Application for testing low-level SPI driver implementations
 *
 * This implementation covers both, master and slave configurations.
 *
 * @author      Hauke Petersen <hauke.petersen@fu-berlin.de>
 *
 * @}
 */

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

#include "xtimer.h"
#include "shell.h"
#include "periph/spi.h"

/**
 * @brief   Some parameters used for benchmarking
 */
#define BENCH_REDOS             (1000)
#define BENCH_SMALL             (2)
#define BENCH_LARGE             (100)
#define BENCH_PAYLOAD           ('b')
#define BENCH_REGADDR           (0x23)

#define BUF_SIZE                (512U)

/**
 * @brief   Benchmark buffers
 */
static uint8_t bench_wbuf[BENCH_LARGE];
static uint8_t bench_rbuf[BENCH_LARGE];

/**
 * @brief   Generic buffer used for receiving
 */
static uint8_t buf[BUF_SIZE];

static struct {
    spi_t dev;
    spi_mode_t mode;
    spi_clk_t clk;
    spi_cs_t cs;
} spiconf;

void print_bytes(char* title, uint8_t* data, size_t len)
{
    printf("%4s\n", title);
    for (size_t i = 0; i < len; i++) {
        printf("  %2i ", (int)i);
    }
    printf("\n ");
    for (size_t i = 0; i < len; i++) {
        printf(" 0x%02x", (int)data[i]);
    }
    printf("\n ");
    for (size_t i = 0; i < len; i++) {
        if (data[i] < ' ' || data[i] > '~') {
            printf("  ?? ");
        }
        else {
            printf("   %c ", (char)data[i]);
        }
    }
    printf("\n\n");
}

int cmd_init(int argc, char **argv)
{
    int dev, mode, clk, port, pin, tmp;

    if (argc < 6) {
        printf("usage: %s <dev> <mode> <clk> <cs port> <cs pin>\n", argv[0]);
        puts("\tdev:");
        for (int i = 0; i < (int)SPI_NUMOF; i++) {
            printf("\t\t%i: SPI_DEV(%i)\n", i, i);
        }
        puts("\tmode:");
        puts("\t\t0: POL:0, PHASE:0 - on first rising edge");
        puts("\t\t1: POL:0, PHASE:1 - on second rising edge");
        puts("\t\t2: POL:1, PHASE:0 - on first falling edge");
        puts("\t\t3: POL:1, PHASE:1 - on second falling edge");
        puts("\tclk:");
        puts("\t\t0: 100 KHz");
        puts("\t\t1: 400 KHz");
        puts("\t\t2: 1 MHz");
        puts("\t\t3: 5 MHz");
        puts("\t\t4: 10 MHz");
        puts("\tcs port:");
        puts("\t\tPort of the CS pin, set to -1 for hardware chip select");
        puts("\tcs pin:");
        puts("\t\tPin used for chip select. If hardware chip select is enabled,\n"
             "\t\tthis value specifies the internal HWCS line");
        return 1;
    }

    /* parse the given SPI device */
    dev = atoi(argv[1]);
    if (dev < 0 || dev >= (int)SPI_NUMOF) {
        puts("error: invalid SPI device specified");
        return 1;
    }
    spiconf.dev = SPI_DEV(dev);

    /* parse the SPI mode */
    mode = atoi(argv[2]);
    switch (mode) {
        case 0: spiconf.mode = SPI_MODE_0; break;
        case 1: spiconf.mode = SPI_MODE_1; break;
        case 2: spiconf.mode = SPI_MODE_2; break;
        case 3: spiconf.mode = SPI_MODE_3; break;
        default:
            puts("error: invalid SPI mode specified");
            return 1;
    }

    /* parse the targeted clock speed */
    clk = atoi(argv[3]);
    switch (clk) {
        case 0: spiconf.clk = SPI_CLK_100KHZ; break;
        case 1: spiconf.clk = SPI_CLK_400KHZ; break;
        case 2: spiconf.clk = SPI_CLK_1MHZ;   break;
        case 3: spiconf.clk = SPI_CLK_5MHZ;   break;
        case 4: spiconf.clk = SPI_CLK_10MHZ;  break;
        default:
            puts("error: invalid bus speed specified");
            return 1;
    }

    /* parse chip select port and pin */
    port = atoi(argv[4]);
    pin = atoi(argv[5]);
    if (pin < 0 || port < -1) {
        puts("error: invalid CS port/pin combination specified");
        return 1;
    }
    if (port == -1) {
        /* hardware chip select line */
        spiconf.cs = SPI_HWCS(pin);
    }
    else {
        spiconf.cs = (spi_cs_t)GPIO_PIN(port, pin);
    }

    /* test setup */
    tmp = spi_init_cs(spiconf.dev, spiconf.cs);
    if (tmp != SPI_OK) {
        puts("error: unable to initialize the given chip select line");
        return 1;
    }
    tmp = spi_acquire(spiconf.dev, spiconf.cs, spiconf.mode, spiconf.clk);
    if (tmp == SPI_NOMODE) {
        puts("error: given SPI mode is not supported");
        return 1;
    }
    else if (tmp == SPI_NOCLK) {
        puts("error: targeted clock speed is not supported");
        return 1;
    }
    else if (tmp != SPI_OK) {
        puts("error: unable to acquire bus with given parameters");
        return 1;
    }
    spi_release(spiconf.dev);

    printf("SPI_DEV(%i) initialized: mode: %i, clk: %i, cs_port: %i, cs_pin: %i\n",
           dev, mode, clk, port, pin);

    return 0;
}

int cmd_transfer(int argc, char **argv)
{
    size_t len;

    if (argc < 2) {
        printf("usage: %s <data>\n", argv[0]);
        return 1;
    }

    if (spiconf.dev == SPI_UNDEF) {
        puts("error: SPI is not initialized, please initialize bus first");
        return 1;
    }

    /* get bus access */
    if (spi_acquire(spiconf.dev, spiconf.cs,
                    spiconf.mode, spiconf.clk) != SPI_OK) {
        puts("error: unable to acquire the SPI bus");
        return 1;
    }

    /* transfer data */
    len = strlen(argv[1]);
    memset(buf, 0, sizeof(buf));
    spi_transfer_bytes(spiconf.dev, spiconf.cs, false, argv[1], buf, len);

    /* release the bus */
    spi_release(spiconf.dev);

    /* print results */
    print_bytes("Sent bytes", (uint8_t *)argv[1], len);
    print_bytes("Received bytes", buf, len);

    return 0;
}

int cmd_bench(int argc, char **argv)
{
    (void)argc;
    (void)argv;

    uint32_t start, stop;
    uint32_t sum = 0;
    uint8_t in;
    uint8_t out = (uint8_t)BENCH_PAYLOAD;

    if (spiconf.dev == SPI_UNDEF) {
        puts("error: SPI is not initialized, please initialize bus first");
        return 1;
    }

    /* prepare buffer */
    memset(bench_wbuf, BENCH_PAYLOAD, BENCH_LARGE);

    /* get access to the bus */
    if (spi_acquire(spiconf.dev, spiconf.cs,
                    spiconf.mode, spiconf.clk) != SPI_OK) {
        puts("error: unable to acquire the SPI bus");
        return 1;
    }

    puts("### Running some benchmarks, all values in [us] ###\n");

    /* 1 - write 1000 times 1 byte */
    start = xtimer_now_usec();
    for (int i = 0; i < BENCH_REDOS; i++) {
        in = spi_transfer_byte(spiconf.dev, spiconf.cs, false, out);
        (void)in;
    }
    stop = xtimer_now_usec();
    printf(" 1 - write %i times %i byte:", BENCH_REDOS, 1);
    printf("\t\t\t%i\n", (int)(stop - start));
    sum += (stop - start);

    /* 2 - write 1000 times 2 byte */
    start = xtimer_now_usec();
    for (int
i = 0; i < BENCH_REDOS; i++) { spi_transfer_bytes(spiconf.dev, spiconf.cs, false, bench_wbuf, NULL, BENCH_SMALL); } stop = xtimer_now_usec(); printf(" 2 - write %i times %i byte:", BENCH_REDOS, BENCH_SMALL); printf("\t\t\t%i\n", (int)(stop - start)); sum += (stop - start); /* 3 - write 1000 times 100 byte */ start = xtimer_now_usec(); for (int i = 0; i < BENCH_REDOS; i++) { spi_transfer_bytes(spiconf.dev, spiconf.cs, false, bench_wbuf, NULL, BENCH_LARGE); } stop = xtimer_now_usec(); printf(" 3 - write %i times %i byte:", BENCH_REDOS, BENCH_LARGE); printf("\t\t%i\n", (int)(stop - start)); sum += (stop - start); /* 4 - write 1000 times 1 byte to register */ start = xtimer_now_usec(); for (int i = 0; i < BENCH_REDOS; i++) { in = spi_transfer_reg(spiconf.dev, spiconf.cs, BENCH_REGADDR, out); (void)in; } stop = xtimer_now_usec(); printf(" 4 - write %i times %i byte to register:", BENCH_REDOS, 1); printf("\t%i\n", (int)(stop - start)); sum += (stop - start); /* 5 - write 1000 times 2 byte to register */ start = xtimer_now_usec(); for (int i = 0; i < BENCH_REDOS; i++) { spi_transfer_regs(spiconf.dev, spiconf.cs, BENCH_REGADDR, bench_wbuf, NULL, BENCH_SMALL); } stop = xtimer_now_usec(); printf(" 5 - write %i times %i byte to register:", BENCH_REDOS, BENCH_SMALL); printf("\t%i\n", (int)(stop - start)); sum += (stop - start); /* 6 - write 1000 times 100 byte to register */ start = xtimer_now_usec(); for (int i = 0; i < BENCH_REDOS; i++) { spi_transfer_regs(spiconf.dev, spiconf.cs, BENCH_REGADDR, bench_wbuf, NULL, BENCH_LARGE); } stop = xtimer_now_usec(); printf(" 6 - write %i times %i byte to register:", BENCH_REDOS, BENCH_LARGE); printf("\t%i\n", (int)(stop - start)); sum += (stop - start); /* 7 - read 1000 times 2 byte */ start = xtimer_now_usec(); for (int i = 0; i < BENCH_REDOS; i++) { spi_transfer_bytes(spiconf.dev, spiconf.cs, false, NULL, bench_rbuf, BENCH_SMALL); } stop = xtimer_now_usec(); printf(" 7 - read %i times %i byte:", BENCH_REDOS, BENCH_SMALL); printf("\t\t\t%i\n", (int)(stop - start)); sum += (stop - start); /* 8 - read 1000 times 100 byte */ start = xtimer_now_usec(); for (int i = 0; i < BENCH_REDOS; i++) { spi_transfer_bytes(spiconf.dev, spiconf.cs, false, NULL, bench_rbuf, BENCH_LARGE); } stop = xtimer_now_usec(); printf(" 8 - read %i times %i byte:", BENCH_REDOS, BENCH_LARGE); printf("\t\t\t%i\n", (int)(stop - start)); sum += (stop - start); /* 9 - read 1000 times 2 byte from register */ start = xtimer_now_usec(); for (int i = 0; i < BENCH_REDOS; i++) { spi_transfer_regs(spiconf.dev, spiconf.cs, BENCH_REGADDR, NULL, bench_rbuf, BENCH_SMALL); } stop = xtimer_now_usec(); printf(" 9 - read %i times %i byte from register:", BENCH_REDOS, BENCH_SMALL); printf("\t%i\n", (int)(stop - start)); sum += (stop - start); /* 10 - read 1000 times 100 byte from register */ start = xtimer_now_usec(); for (int i = 0; i < BENCH_REDOS; i++) { spi_transfer_regs(spiconf.dev, spiconf.cs, BENCH_REGADDR, NULL, bench_rbuf, BENCH_LARGE); } stop = xtimer_now_usec(); printf("10 - read %i times %i byte from register:", BENCH_REDOS, BENCH_LARGE); printf("\t%i\n", (int)(stop - start)); sum += (stop - start); /* 11 - transfer 1000 times 2 byte */ start = xtimer_now_usec(); for (int i = 0; i < BENCH_REDOS; i++) { spi_transfer_bytes(spiconf.dev, spiconf.cs, false, bench_wbuf, bench_rbuf, BENCH_SMALL); } stop = xtimer_now_usec(); printf("11 - transfer %i times %i byte:", BENCH_REDOS, BENCH_SMALL); printf("\t\t%i\n", (int)(stop - start)); sum += (stop - start); /* 12 - transfer 1000 times 100 byte */ start = 
xtimer_now_usec(); for (int i = 0; i < BENCH_REDOS; i++) { spi_transfer_bytes(spiconf.dev, spiconf.cs, false, bench_wbuf, bench_rbuf, BENCH_LARGE); } stop = xtimer_now_usec(); printf("12 - transfer %i times %i byte:", BENCH_REDOS, BENCH_LARGE); printf("\t\t%i\n", (int)(stop - start)); sum += (stop - start); /* 13 - transfer 1000 times 2 byte from/to register */ start = xtimer_now_usec(); for (int i = 0; i < BENCH_REDOS; i++) { spi_transfer_regs(spiconf.dev, spiconf.cs, BENCH_REGADDR, bench_wbuf, bench_rbuf, BENCH_SMALL); } stop = xtimer_now_usec(); printf("13 - transfer %i times %i byte to register:", BENCH_REDOS, BENCH_SMALL); printf("\t%i\n", (int)(stop - start)); sum += (stop - start); /* 14 - transfer 1000 times 100 byte from/to register */ start = xtimer_now_usec(); for (int i = 0; i < BENCH_REDOS; i++) { spi_transfer_regs(spiconf.dev, spiconf.cs, BENCH_REGADDR, bench_wbuf, bench_rbuf, BENCH_LARGE); } stop = xtimer_now_usec(); printf("14 - transfer %i times %i byte to register:", BENCH_REDOS, BENCH_LARGE); printf("\t%i\n", (int)(stop - start)); sum += (stop - start); printf("-- - SUM:\t\t\t\t\t%i\n", (int)sum); spi_release(spiconf.dev); puts("\n### All runs complete ###"); return 0; } static const shell_command_t shell_commands[] = { { "init", "Setup a particular SPI configuration", cmd_init }, { "send", "Transfer string to slave", cmd_transfer }, { "bench", "Runs some benchmarks", cmd_bench }, { NULL, NULL, NULL } }; int main(void) { puts("Manual SPI peripheral driver test"); puts("Refer to the README.md file for more information.\n"); printf("There are %i SPI devices configured for your platform.\n", (int)SPI_NUMOF); /* reset local SPI configuration */ spiconf.dev = SPI_UNDEF; /* run the shell */ char line_buf[SHELL_DEFAULT_BUFSIZE]; shell_run(shell_commands, line_buf, SHELL_DEFAULT_BUFSIZE); return 0; }
lazytech-org/RIOT
tests/periph_spi/main.c
C
lgpl-2.1
13,326
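Outside the shell test above, the same periph/spi calls compose into a typical driver-style register access. A minimal sketch, assuming a hypothetical sensor whose WHO_AM_I register sits at 0x0F and which flags reads via the MSB of the address byte (check the real device's datasheet); the function and register names here are illustrative only:

#include <stdint.h>
#include "periph/spi.h"

#define SENSOR_WHOAMI   (0x0F)  /* hypothetical register address */
#define SENSOR_READ_BIT (0x80)  /* hypothetical read flag in the address byte */

int sensor_read_id(spi_t dev, spi_cs_t cs, uint8_t *id)
{
    if (spi_init_cs(dev, cs) != SPI_OK) {
        return -1;              /* chip select setup failed */
    }
    if (spi_acquire(dev, cs, SPI_MODE_0, SPI_CLK_1MHZ) != SPI_OK) {
        return -1;              /* mode/clock unsupported or bus unavailable */
    }
    /* one address byte out, one data byte back */
    *id = spi_transfer_reg(dev, cs, SENSOR_WHOAMI | SENSOR_READ_BIT, 0);
    spi_release(dev);
    return 0;
}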
/* * Simplified MAC Kernel (smack) security module * * This file contains the smack hook function implementations. * * Author: * Casey Schaufler <casey@schaufler-ca.com> * * Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com> * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. * Paul Moore <paul.moore@hp.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, * as published by the Free Software Foundation. */ #include <linux/xattr.h> #include <linux/pagemap.h> #include <linux/mount.h> #include <linux/stat.h> #include <linux/ext2_fs.h> #include <linux/kd.h> #include <asm/ioctls.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/mutex.h> #include <linux/pipe_fs_i.h> #include <net/netlabel.h> #include <net/cipso_ipv4.h> #include <linux/audit.h> #include "smack.h" #define task_security(task) (task_cred_xxx((task), security)) /* * I hope these are the hokeyist lines of code in the module. Casey. */ #define DEVPTS_SUPER_MAGIC 0x1cd1 #define SOCKFS_MAGIC 0x534F434B #define TMPFS_MAGIC 0x01021994 /** * smk_fetch - Fetch the smack label from a file. * @ip: a pointer to the inode * @dp: a pointer to the dentry * * Returns a pointer to the master list entry for the Smack label * or NULL if there was no label to fetch. */ static char *smk_fetch(struct inode *ip, struct dentry *dp) { int rc; char in[SMK_LABELLEN]; if (ip->i_op->getxattr == NULL) return NULL; rc = ip->i_op->getxattr(dp, XATTR_NAME_SMACK, in, SMK_LABELLEN); if (rc < 0) return NULL; return smk_import(in, rc); } /** * new_inode_smack - allocate an inode security blob * @smack: a pointer to the Smack label to use in the blob * * Returns the new blob or NULL if there's no memory available */ struct inode_smack *new_inode_smack(char *smack) { struct inode_smack *isp; isp = kzalloc(sizeof(struct inode_smack), GFP_KERNEL); if (isp == NULL) return NULL; isp->smk_inode = smack; isp->smk_flags = 0; mutex_init(&isp->smk_lock); return isp; } /* * LSM hooks. * We he, that is fun! */ /** * smack_ptrace_may_access - Smack approval on PTRACE_ATTACH * @ctp: child task pointer * @mode: ptrace attachment mode * * Returns 0 if access is OK, an error code otherwise * * Do the capability checks, and require read and write. */ static int smack_ptrace_may_access(struct task_struct *ctp, unsigned int mode) { int rc; rc = cap_ptrace_may_access(ctp, mode); if (rc != 0) return rc; rc = smk_access(current_security(), task_security(ctp), MAY_READWRITE); if (rc != 0 && capable(CAP_MAC_OVERRIDE)) return 0; return rc; } /** * smack_ptrace_traceme - Smack approval on PTRACE_TRACEME * @ptp: parent task pointer * * Returns 0 if access is OK, an error code otherwise * * Do the capability checks, and require read and write. */ static int smack_ptrace_traceme(struct task_struct *ptp) { int rc; rc = cap_ptrace_traceme(ptp); if (rc != 0) return rc; rc = smk_access(task_security(ptp), current_security(), MAY_READWRITE); if (rc != 0 && has_capability(ptp, CAP_MAC_OVERRIDE)) return 0; return rc; } /** * smack_syslog - Smack approval on syslog * @type: message type * * Require that the task has the floor label * * Returns 0 on success, error code otherwise. */ static int smack_syslog(int type) { int rc; char *sp = current_security(); rc = cap_syslog(type); if (rc != 0) return rc; if (capable(CAP_MAC_OVERRIDE)) return 0; if (sp != smack_known_floor.smk_known) rc = -EACCES; return rc; } /* * Superblock Hooks. 
*/ /** * smack_sb_alloc_security - allocate a superblock blob * @sb: the superblock getting the blob * * Returns 0 on success or -ENOMEM on error. */ static int smack_sb_alloc_security(struct super_block *sb) { struct superblock_smack *sbsp; sbsp = kzalloc(sizeof(struct superblock_smack), GFP_KERNEL); if (sbsp == NULL) return -ENOMEM; sbsp->smk_root = smack_known_floor.smk_known; sbsp->smk_default = smack_known_floor.smk_known; sbsp->smk_floor = smack_known_floor.smk_known; sbsp->smk_hat = smack_known_hat.smk_known; sbsp->smk_initialized = 0; spin_lock_init(&sbsp->smk_sblock); sb->s_security = sbsp; return 0; } /** * smack_sb_free_security - free a superblock blob * @sb: the superblock getting the blob * */ static void smack_sb_free_security(struct super_block *sb) { kfree(sb->s_security); sb->s_security = NULL; } /** * smack_sb_copy_data - copy mount options data for processing * @orig: where to start * @smackopts: mount options string * * Returns 0 on success or -ENOMEM on error. * * Copy the Smack specific mount options out of the mount * options list. */ static int smack_sb_copy_data(char *orig, char *smackopts) { char *cp, *commap, *otheropts, *dp; otheropts = (char *)get_zeroed_page(GFP_KERNEL); if (otheropts == NULL) return -ENOMEM; for (cp = orig, commap = orig; commap != NULL; cp = commap + 1) { if (strstr(cp, SMK_FSDEFAULT) == cp) dp = smackopts; else if (strstr(cp, SMK_FSFLOOR) == cp) dp = smackopts; else if (strstr(cp, SMK_FSHAT) == cp) dp = smackopts; else if (strstr(cp, SMK_FSROOT) == cp) dp = smackopts; else dp = otheropts; commap = strchr(cp, ','); if (commap != NULL) *commap = '\0'; if (*dp != '\0') strcat(dp, ","); strcat(dp, cp); } strcpy(orig, otheropts); free_page((unsigned long)otheropts); return 0; } /** * smack_sb_kern_mount - Smack specific mount processing * @sb: the file system superblock * @flags: the mount flags * @data: the smack mount options * * Returns 0 on success, an error code on failure */ static int smack_sb_kern_mount(struct super_block *sb, int flags, void *data) { struct dentry *root = sb->s_root; struct inode *inode = root->d_inode; struct superblock_smack *sp = sb->s_security; struct inode_smack *isp; char *op; char *commap; char *nsp; spin_lock(&sp->smk_sblock); if (sp->smk_initialized != 0) { spin_unlock(&sp->smk_sblock); return 0; } sp->smk_initialized = 1; spin_unlock(&sp->smk_sblock); for (op = data; op != NULL; op = commap) { commap = strchr(op, ','); if (commap != NULL) *commap++ = '\0'; if (strncmp(op, SMK_FSHAT, strlen(SMK_FSHAT)) == 0) { op += strlen(SMK_FSHAT); nsp = smk_import(op, 0); if (nsp != NULL) sp->smk_hat = nsp; } else if (strncmp(op, SMK_FSFLOOR, strlen(SMK_FSFLOOR)) == 0) { op += strlen(SMK_FSFLOOR); nsp = smk_import(op, 0); if (nsp != NULL) sp->smk_floor = nsp; } else if (strncmp(op, SMK_FSDEFAULT, strlen(SMK_FSDEFAULT)) == 0) { op += strlen(SMK_FSDEFAULT); nsp = smk_import(op, 0); if (nsp != NULL) sp->smk_default = nsp; } else if (strncmp(op, SMK_FSROOT, strlen(SMK_FSROOT)) == 0) { op += strlen(SMK_FSROOT); nsp = smk_import(op, 0); if (nsp != NULL) sp->smk_root = nsp; } } /* * Initialize the root inode. 
*/ isp = inode->i_security; if (isp == NULL) inode->i_security = new_inode_smack(sp->smk_root); else isp->smk_inode = sp->smk_root; return 0; } /** * smack_sb_statfs - Smack check on statfs * @dentry: identifies the file system in question * * Returns 0 if current can read the floor of the filesystem, * and error code otherwise */ static int smack_sb_statfs(struct dentry *dentry) { struct superblock_smack *sbp = dentry->d_sb->s_security; return smk_curacc(sbp->smk_floor, MAY_READ); } /** * smack_sb_mount - Smack check for mounting * @dev_name: unused * @path: mount point * @type: unused * @flags: unused * @data: unused * * Returns 0 if current can write the floor of the filesystem * being mounted on, an error code otherwise. */ static int smack_sb_mount(char *dev_name, struct path *path, char *type, unsigned long flags, void *data) { struct superblock_smack *sbp = path->mnt->mnt_sb->s_security; return smk_curacc(sbp->smk_floor, MAY_WRITE); } /** * smack_sb_umount - Smack check for unmounting * @mnt: file system to unmount * @flags: unused * * Returns 0 if current can write the floor of the filesystem * being unmounted, an error code otherwise. */ static int smack_sb_umount(struct vfsmount *mnt, int flags) { struct superblock_smack *sbp; sbp = mnt->mnt_sb->s_security; return smk_curacc(sbp->smk_floor, MAY_WRITE); } /* * Inode hooks */ /** * smack_inode_alloc_security - allocate an inode blob * @inode: the inode in need of a blob * * Returns 0 if it gets a blob, -ENOMEM otherwise */ static int smack_inode_alloc_security(struct inode *inode) { inode->i_security = new_inode_smack(current_security()); if (inode->i_security == NULL) return -ENOMEM; return 0; } /** * smack_inode_free_security - free an inode blob * @inode: the inode with a blob * * Clears the blob pointer in inode */ static void smack_inode_free_security(struct inode *inode) { kfree(inode->i_security); inode->i_security = NULL; } /** * smack_inode_init_security - copy out the smack from an inode * @inode: the inode * @dir: unused * @name: where to put the attribute name * @value: where to put the attribute value * @len: where to put the length of the attribute * * Returns 0 if it all works out, -ENOMEM if there's no memory */ static int smack_inode_init_security(struct inode *inode, struct inode *dir, char **name, void **value, size_t *len) { char *isp = smk_of_inode(inode); if (name) { *name = kstrdup(XATTR_SMACK_SUFFIX, GFP_KERNEL); if (*name == NULL) return -ENOMEM; } if (value) { *value = kstrdup(isp, GFP_KERNEL); if (*value == NULL) return -ENOMEM; } if (len) *len = strlen(isp) + 1; return 0; } /** * smack_inode_link - Smack check on link * @old_dentry: the existing object * @dir: unused * @new_dentry: the new object * * Returns 0 if access is permitted, an error code otherwise */ static int smack_inode_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry) { int rc; char *isp; isp = smk_of_inode(old_dentry->d_inode); rc = smk_curacc(isp, MAY_WRITE); if (rc == 0 && new_dentry->d_inode != NULL) { isp = smk_of_inode(new_dentry->d_inode); rc = smk_curacc(isp, MAY_WRITE); } return rc; } /** * smack_inode_unlink - Smack check on inode deletion * @dir: containing directory object * @dentry: file to unlink * * Returns 0 if current can write the containing directory * and the object, error code otherwise */ static int smack_inode_unlink(struct inode *dir, struct dentry *dentry) { struct inode *ip = dentry->d_inode; int rc; /* * You need write access to the thing you're unlinking */ rc = 
smk_curacc(smk_of_inode(ip), MAY_WRITE); if (rc == 0) /* * You also need write access to the containing directory */ rc = smk_curacc(smk_of_inode(dir), MAY_WRITE); return rc; } /** * smack_inode_rmdir - Smack check on directory deletion * @dir: containing directory object * @dentry: directory to unlink * * Returns 0 if current can write the containing directory * and the directory, error code otherwise */ static int smack_inode_rmdir(struct inode *dir, struct dentry *dentry) { int rc; /* * You need write access to the thing you're removing */ rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE); if (rc == 0) /* * You also need write access to the containing directory */ rc = smk_curacc(smk_of_inode(dir), MAY_WRITE); return rc; } /** * smack_inode_rename - Smack check on rename * @old_inode: the old directory * @old_dentry: unused * @new_inode: the new directory * @new_dentry: unused * * Read and write access is required on both the old and * new directories. * * Returns 0 if access is permitted, an error code otherwise */ static int smack_inode_rename(struct inode *old_inode, struct dentry *old_dentry, struct inode *new_inode, struct dentry *new_dentry) { int rc; char *isp; isp = smk_of_inode(old_dentry->d_inode); rc = smk_curacc(isp, MAY_READWRITE); if (rc == 0 && new_dentry->d_inode != NULL) { isp = smk_of_inode(new_dentry->d_inode); rc = smk_curacc(isp, MAY_READWRITE); } return rc; } /** * smack_inode_permission - Smack version of permission() * @inode: the inode in question * @mask: the access requested * * This is the important Smack hook. * * Returns 0 if access is permitted, -EACCES otherwise */ static int smack_inode_permission(struct inode *inode, int mask) { /* * No permission to check. Existence test. Yup, it's there. */ if (mask == 0) return 0; return smk_curacc(smk_of_inode(inode), mask); } /** * smack_inode_setattr - Smack check for setting attributes * @dentry: the object * @iattr: for the force flag * * Returns 0 if access is permitted, an error code otherwise */ static int smack_inode_setattr(struct dentry *dentry, struct iattr *iattr) { /* * Need to allow for clearing the setuid bit. */ if (iattr->ia_valid & ATTR_FORCE) return 0; return smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE); } /** * smack_inode_getattr - Smack check for getting attributes * @mnt: unused * @dentry: the object * * Returns 0 if access is permitted, an error code otherwise */ static int smack_inode_getattr(struct vfsmount *mnt, struct dentry *dentry) { return smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ); } /** * smack_inode_setxattr - Smack check for setting xattrs * @dentry: the object * @name: name of the attribute * @value: unused * @size: unused * @flags: unused * * This protects the Smack attribute explicitly. 
* * Returns 0 if access is permitted, an error code otherwise */ static int smack_inode_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { int rc = 0; if (strcmp(name, XATTR_NAME_SMACK) == 0 || strcmp(name, XATTR_NAME_SMACKIPIN) == 0 || strcmp(name, XATTR_NAME_SMACKIPOUT) == 0) { if (!capable(CAP_MAC_ADMIN)) rc = -EPERM; /* * check label validity here so import wont fail on * post_setxattr */ if (size == 0 || size >= SMK_LABELLEN || smk_import(value, size) == NULL) rc = -EINVAL; } else rc = cap_inode_setxattr(dentry, name, value, size, flags); if (rc == 0) rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE); return rc; } /** * smack_inode_post_setxattr - Apply the Smack update approved above * @dentry: object * @name: attribute name * @value: attribute value * @size: attribute size * @flags: unused * * Set the pointer in the inode blob to the entry found * in the master label list. */ static void smack_inode_post_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { struct inode_smack *isp; char *nsp; /* * Not SMACK */ if (strcmp(name, XATTR_NAME_SMACK)) return; isp = dentry->d_inode->i_security; /* * No locking is done here. This is a pointer * assignment. */ nsp = smk_import(value, size); if (nsp != NULL) isp->smk_inode = nsp; else isp->smk_inode = smack_known_invalid.smk_known; return; } /* * smack_inode_getxattr - Smack check on getxattr * @dentry: the object * @name: unused * * Returns 0 if access is permitted, an error code otherwise */ static int smack_inode_getxattr(struct dentry *dentry, const char *name) { return smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ); } /* * smack_inode_removexattr - Smack check on removexattr * @dentry: the object * @name: name of the attribute * * Removing the Smack attribute requires CAP_MAC_ADMIN * * Returns 0 if access is permitted, an error code otherwise */ static int smack_inode_removexattr(struct dentry *dentry, const char *name) { int rc = 0; if (strcmp(name, XATTR_NAME_SMACK) == 0 || strcmp(name, XATTR_NAME_SMACKIPIN) == 0 || strcmp(name, XATTR_NAME_SMACKIPOUT) == 0) { if (!capable(CAP_MAC_ADMIN)) rc = -EPERM; } else rc = cap_inode_removexattr(dentry, name); if (rc == 0) rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE); return rc; } /** * smack_inode_getsecurity - get smack xattrs * @inode: the object * @name: attribute name * @buffer: where to put the result * @alloc: unused * * Returns the size of the attribute or an error code */ static int smack_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc) { struct socket_smack *ssp; struct socket *sock; struct super_block *sbp; struct inode *ip = (struct inode *)inode; char *isp; int ilen; int rc = 0; if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) { isp = smk_of_inode(inode); ilen = strlen(isp) + 1; *buffer = isp; return ilen; } /* * The rest of the Smack xattrs are only on sockets. 
*/ sbp = ip->i_sb; if (sbp->s_magic != SOCKFS_MAGIC) return -EOPNOTSUPP; sock = SOCKET_I(ip); if (sock == NULL || sock->sk == NULL) return -EOPNOTSUPP; ssp = sock->sk->sk_security; if (strcmp(name, XATTR_SMACK_IPIN) == 0) isp = ssp->smk_in; else if (strcmp(name, XATTR_SMACK_IPOUT) == 0) isp = ssp->smk_out; else return -EOPNOTSUPP; ilen = strlen(isp) + 1; if (rc == 0) { *buffer = isp; rc = ilen; } return rc; } /** * smack_inode_listsecurity - list the Smack attributes * @inode: the object * @buffer: where they go * @buffer_size: size of buffer * * Returns 0 on success, -EINVAL otherwise */ static int smack_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size) { int len = strlen(XATTR_NAME_SMACK); if (buffer != NULL && len <= buffer_size) { memcpy(buffer, XATTR_NAME_SMACK, len); return len; } return -EINVAL; } /** * smack_inode_getsecid - Extract inode's security id * @inode: inode to extract the info from * @secid: where result will be saved */ static void smack_inode_getsecid(const struct inode *inode, u32 *secid) { struct inode_smack *isp = inode->i_security; *secid = smack_to_secid(isp->smk_inode); } /* * File Hooks */ /** * smack_file_permission - Smack check on file operations * @file: unused * @mask: unused * * Returns 0 * * Should access checks be done on each read or write? * UNICOS and SELinux say yes. * Trusted Solaris, Trusted Irix, and just about everyone else says no. * * I'll say no for now. Smack does not do the frequent * label changing that SELinux does. */ static int smack_file_permission(struct file *file, int mask) { return 0; } /** * smack_file_alloc_security - assign a file security blob * @file: the object * * The security blob for a file is a pointer to the master * label list, so no allocation is done. * * Returns 0 */ static int smack_file_alloc_security(struct file *file) { file->f_security = current_security(); return 0; } /** * smack_file_free_security - clear a file security blob * @file: the object * * The security blob for a file is a pointer to the master * label list, so no memory is freed. */ static void smack_file_free_security(struct file *file) { file->f_security = NULL; } /** * smack_file_ioctl - Smack check on ioctls * @file: the object * @cmd: what to do * @arg: unused * * Relies heavily on the correct use of the ioctl command conventions. 
* * Returns 0 if allowed, error code otherwise */ static int smack_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int rc = 0; if (_IOC_DIR(cmd) & _IOC_WRITE) rc = smk_curacc(file->f_security, MAY_WRITE); if (rc == 0 && (_IOC_DIR(cmd) & _IOC_READ)) rc = smk_curacc(file->f_security, MAY_READ); return rc; } /** * smack_file_lock - Smack check on file locking * @file: the object * @cmd: unused * * Returns 0 if current has write access, error code otherwise */ static int smack_file_lock(struct file *file, unsigned int cmd) { return smk_curacc(file->f_security, MAY_WRITE); } /** * smack_file_fcntl - Smack check on fcntl * @file: the object * @cmd: what action to check * @arg: unused * * Returns 0 if current has access, error code otherwise */ static int smack_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg) { int rc; switch (cmd) { case F_DUPFD: case F_GETFD: case F_GETFL: case F_GETLK: case F_GETOWN: case F_GETSIG: rc = smk_curacc(file->f_security, MAY_READ); break; case F_SETFD: case F_SETFL: case F_SETLK: case F_SETLKW: case F_SETOWN: case F_SETSIG: rc = smk_curacc(file->f_security, MAY_WRITE); break; default: rc = smk_curacc(file->f_security, MAY_READWRITE); } return rc; } /** * smack_file_set_fowner - set the file security blob value * @file: object in question * * Returns 0 * Further research may be required on this one. */ static int smack_file_set_fowner(struct file *file) { file->f_security = current_security(); return 0; } /** * smack_file_send_sigiotask - Smack on sigio * @tsk: The target task * @fown: the object the signal come from * @signum: unused * * Allow a privileged task to get signals even if it shouldn't * * Returns 0 if a subject with the object's smack could * write to the task, an error code otherwise. */ static int smack_file_send_sigiotask(struct task_struct *tsk, struct fown_struct *fown, int signum) { struct file *file; int rc; /* * struct fown_struct is never outside the context of a struct file */ file = container_of(fown, struct file, f_owner); rc = smk_access(file->f_security, tsk->cred->security, MAY_WRITE); if (rc != 0 && has_capability(tsk, CAP_MAC_OVERRIDE)) return 0; return rc; } /** * smack_file_receive - Smack file receive check * @file: the object * * Returns 0 if current has access, error code otherwise */ static int smack_file_receive(struct file *file) { int may = 0; /* * This code relies on bitmasks. */ if (file->f_mode & FMODE_READ) may = MAY_READ; if (file->f_mode & FMODE_WRITE) may |= MAY_WRITE; return smk_curacc(file->f_security, may); } /* * Task hooks */ /** * smack_cred_free - "free" task-level security credentials * @cred: the credentials in question * * Smack isn't using copies of blobs. Everyone * points to an immutable list. The blobs never go away. * There is no leak here. */ static void smack_cred_free(struct cred *cred) { cred->security = NULL; } /** * smack_cred_prepare - prepare new set of credentials for modification * @new: the new credentials * @old: the original credentials * @gfp: the atomicity of any memory allocations * * Prepare a new set of credentials for modification. 
*/ static int smack_cred_prepare(struct cred *new, const struct cred *old, gfp_t gfp) { new->security = old->security; return 0; } /** * smack_cred_commit - commit new credentials * @new: the new credentials * @old: the original credentials */ static void smack_cred_commit(struct cred *new, const struct cred *old) { } /** * smack_kernel_act_as - Set the subjective context in a set of credentials * @new: points to the set of credentials to be modified. * @secid: specifies the security ID to be set * * Set the security data for a kernel service. */ static int smack_kernel_act_as(struct cred *new, u32 secid) { char *smack = smack_from_secid(secid); if (smack == NULL) return -EINVAL; new->security = smack; return 0; } /** * smack_kernel_create_files_as - Set the file creation label in a set of creds * @new: points to the set of credentials to be modified * @inode: points to the inode to use as a reference * * Set the file creation context in a set of credentials to the same * as the objective context of the specified inode */ static int smack_kernel_create_files_as(struct cred *new, struct inode *inode) { struct inode_smack *isp = inode->i_security; new->security = isp->smk_inode; return 0; } /** * smack_task_setpgid - Smack check on setting pgid * @p: the task object * @pgid: unused * * Return 0 if write access is permitted */ static int smack_task_setpgid(struct task_struct *p, pid_t pgid) { return smk_curacc(task_security(p), MAY_WRITE); } /** * smack_task_getpgid - Smack access check for getpgid * @p: the object task * * Returns 0 if current can read the object task, error code otherwise */ static int smack_task_getpgid(struct task_struct *p) { return smk_curacc(task_security(p), MAY_READ); } /** * smack_task_getsid - Smack access check for getsid * @p: the object task * * Returns 0 if current can read the object task, error code otherwise */ static int smack_task_getsid(struct task_struct *p) { return smk_curacc(task_security(p), MAY_READ); } /** * smack_task_getsecid - get the secid of the task * @p: the object task * @secid: where to put the result * * Sets the secid to contain a u32 version of the smack label. 
*/ static void smack_task_getsecid(struct task_struct *p, u32 *secid) { *secid = smack_to_secid(task_security(p)); } /** * smack_task_setnice - Smack check on setting nice * @p: the task object * @nice: unused * * Return 0 if write access is permitted */ static int smack_task_setnice(struct task_struct *p, int nice) { int rc; rc = cap_task_setnice(p, nice); if (rc == 0) rc = smk_curacc(task_security(p), MAY_WRITE); return rc; } /** * smack_task_setioprio - Smack check on setting ioprio * @p: the task object * @ioprio: unused * * Return 0 if write access is permitted */ static int smack_task_setioprio(struct task_struct *p, int ioprio) { int rc; rc = cap_task_setioprio(p, ioprio); if (rc == 0) rc = smk_curacc(task_security(p), MAY_WRITE); return rc; } /** * smack_task_getioprio - Smack check on reading ioprio * @p: the task object * * Return 0 if read access is permitted */ static int smack_task_getioprio(struct task_struct *p) { return smk_curacc(task_security(p), MAY_READ); } /** * smack_task_setscheduler - Smack check on setting scheduler * @p: the task object * @policy: unused * @lp: unused * * Return 0 if read access is permitted */ static int smack_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp) { int rc; rc = cap_task_setscheduler(p, policy, lp); if (rc == 0) rc = smk_curacc(task_security(p), MAY_WRITE); return rc; } /** * smack_task_getscheduler - Smack check on reading scheduler * @p: the task object * * Return 0 if read access is permitted */ static int smack_task_getscheduler(struct task_struct *p) { return smk_curacc(task_security(p), MAY_READ); } /** * smack_task_movememory - Smack check on moving memory * @p: the task object * * Return 0 if write access is permitted */ static int smack_task_movememory(struct task_struct *p) { return smk_curacc(task_security(p), MAY_WRITE); } /** * smack_task_kill - Smack check on signal delivery * @p: the task object * @info: unused * @sig: unused * @secid: identifies the smack to use in lieu of current's * * Return 0 if write access is permitted * * The secid behavior is an artifact of an SELinux hack * in the USB code. Someday it may go away. */ static int smack_task_kill(struct task_struct *p, struct siginfo *info, int sig, u32 secid) { /* * Sending a signal requires that the sender * can write the receiver. */ if (secid == 0) return smk_curacc(task_security(p), MAY_WRITE); /* * If the secid isn't 0 we're dealing with some USB IO * specific behavior. This is not clean. For one thing * we can't take privilege into account. */ return smk_access(smack_from_secid(secid), task_security(p), MAY_WRITE); } /** * smack_task_wait - Smack access check for waiting * @p: task to wait for * * Returns 0 if current can wait for p, error code otherwise */ static int smack_task_wait(struct task_struct *p) { int rc; rc = smk_access(current_security(), task_security(p), MAY_WRITE); if (rc == 0) return 0; /* * Allow the operation to succeed if either task * has privilege to perform operations that might * account for the smack labels having gotten to * be different in the first place. * * This breaks the strict subject/object access * control ideal, taking the object's privilege * state into account in the decision as well as * the smack value. 
*/ if (capable(CAP_MAC_OVERRIDE) || has_capability(p, CAP_MAC_OVERRIDE)) return 0; return rc; } /** * smack_task_to_inode - copy task smack into the inode blob * @p: task to copy from * @inode: inode to copy to * * Sets the smack pointer in the inode security blob */ static void smack_task_to_inode(struct task_struct *p, struct inode *inode) { struct inode_smack *isp = inode->i_security; isp->smk_inode = task_security(p); } /* * Socket hooks. */ /** * smack_sk_alloc_security - Allocate a socket blob * @sk: the socket * @family: unused * @gfp_flags: memory allocation flags * * Assign Smack pointers to current * * Returns 0 on success, -ENOMEM is there's no memory */ static int smack_sk_alloc_security(struct sock *sk, int family, gfp_t gfp_flags) { char *csp = current_security(); struct socket_smack *ssp; ssp = kzalloc(sizeof(struct socket_smack), gfp_flags); if (ssp == NULL) return -ENOMEM; ssp->smk_in = csp; ssp->smk_out = csp; ssp->smk_packet[0] = '\0'; sk->sk_security = ssp; return 0; } /** * smack_sk_free_security - Free a socket blob * @sk: the socket * * Clears the blob pointer */ static void smack_sk_free_security(struct sock *sk) { kfree(sk->sk_security); } /** * smack_host_label - check host based restrictions * @sip: the object end * * looks for host based access restrictions * * This version will only be appropriate for really small sets of single label * hosts. The caller is responsible for ensuring that the RCU read lock is * taken before calling this function. * * Returns the label of the far end or NULL if it's not special. */ static char *smack_host_label(struct sockaddr_in *sip) { struct smk_netlbladdr *snp; struct in_addr *siap = &sip->sin_addr; if (siap->s_addr == 0) return NULL; list_for_each_entry_rcu(snp, &smk_netlbladdr_list, list) /* * we break after finding the first match because * the list is sorted from longest to shortest mask * so we have found the most specific match */ if ((&snp->smk_host.sin_addr)->s_addr == (siap->s_addr & (&snp->smk_mask)->s_addr)) { /* we have found the special CIPSO option */ if (snp->smk_label == smack_cipso_option) return NULL; return snp->smk_label; } return NULL; } /** * smack_set_catset - convert a capset to netlabel mls categories * @catset: the Smack categories * @sap: where to put the netlabel categories * * Allocates and fills attr.mls.cat */ static void smack_set_catset(char *catset, struct netlbl_lsm_secattr *sap) { unsigned char *cp; unsigned char m; int cat; int rc; int byte; if (!catset) return; sap->flags |= NETLBL_SECATTR_MLS_CAT; sap->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); sap->attr.mls.cat->startbit = 0; for (cat = 1, cp = catset, byte = 0; byte < SMK_LABELLEN; cp++, byte++) for (m = 0x80; m != 0; m >>= 1, cat++) { if ((m & *cp) == 0) continue; rc = netlbl_secattr_catmap_setbit(sap->attr.mls.cat, cat, GFP_ATOMIC); } } /** * smack_to_secattr - fill a secattr from a smack value * @smack: the smack value * @nlsp: where the result goes * * Casey says that CIPSO is good enough for now. * It can be used to effect. * It can also be abused to effect when necessary. * Appologies to the TSIG group in general and GW in particular. 
*/ static void smack_to_secattr(char *smack, struct netlbl_lsm_secattr *nlsp) { struct smack_cipso cipso; int rc; nlsp->domain = smack; nlsp->flags = NETLBL_SECATTR_DOMAIN | NETLBL_SECATTR_MLS_LVL; rc = smack_to_cipso(smack, &cipso); if (rc == 0) { nlsp->attr.mls.lvl = cipso.smk_level; smack_set_catset(cipso.smk_catset, nlsp); } else { nlsp->attr.mls.lvl = smack_cipso_direct; smack_set_catset(smack, nlsp); } } /** * smack_netlabel - Set the secattr on a socket * @sk: the socket * @labeled: socket label scheme * * Convert the outbound smack value (smk_out) to a * secattr and attach it to the socket. * * Returns 0 on success or an error code */ static int smack_netlabel(struct sock *sk, int labeled) { struct socket_smack *ssp = sk->sk_security; struct netlbl_lsm_secattr secattr; int rc = 0; /* * Usually the netlabel code will handle changing the * packet labeling based on the label. * The case of a single label host is different, because * a single label host should never get a labeled packet * even though the label is usually associated with a packet * label. */ local_bh_disable(); bh_lock_sock_nested(sk); if (ssp->smk_out == smack_net_ambient || labeled == SMACK_UNLABELED_SOCKET) netlbl_sock_delattr(sk); else { netlbl_secattr_init(&secattr); smack_to_secattr(ssp->smk_out, &secattr); rc = netlbl_sock_setattr(sk, sk->sk_family, &secattr); netlbl_secattr_destroy(&secattr); } bh_unlock_sock(sk); local_bh_enable(); return rc; } /** * smack_netlbel_send - Set the secattr on a socket and perform access checks * @sk: the socket * @sap: the destination address * * Set the correct secattr for the given socket based on the destination * address and perform any outbound access checks needed. * * Returns 0 on success or an error code. * */ static int smack_netlabel_send(struct sock *sk, struct sockaddr_in *sap) { int rc; int sk_lbl; char *hostsp; struct socket_smack *ssp = sk->sk_security; rcu_read_lock(); hostsp = smack_host_label(sap); if (hostsp != NULL) { sk_lbl = SMACK_UNLABELED_SOCKET; rc = smk_access(ssp->smk_out, hostsp, MAY_WRITE); } else { sk_lbl = SMACK_CIPSO_SOCKET; rc = 0; } rcu_read_unlock(); if (rc != 0) return rc; return smack_netlabel(sk, sk_lbl); } /** * smack_inode_setsecurity - set smack xattrs * @inode: the object * @name: attribute name * @value: attribute value * @size: size of the attribute * @flags: unused * * Sets the named attribute in the appropriate blob * * Returns 0 on success, or an error code */ static int smack_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags) { char *sp; struct inode_smack *nsp = inode->i_security; struct socket_smack *ssp; struct socket *sock; int rc = 0; if (value == NULL || size > SMK_LABELLEN || size == 0) return -EACCES; sp = smk_import(value, size); if (sp == NULL) return -EINVAL; if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) { nsp->smk_inode = sp; return 0; } /* * The rest of the Smack xattrs are only on sockets. 
*/ if (inode->i_sb->s_magic != SOCKFS_MAGIC) return -EOPNOTSUPP; sock = SOCKET_I(inode); if (sock == NULL || sock->sk == NULL) return -EOPNOTSUPP; ssp = sock->sk->sk_security; if (strcmp(name, XATTR_SMACK_IPIN) == 0) ssp->smk_in = sp; else if (strcmp(name, XATTR_SMACK_IPOUT) == 0) { ssp->smk_out = sp; rc = smack_netlabel(sock->sk, SMACK_CIPSO_SOCKET); if (rc != 0) printk(KERN_WARNING "Smack: \"%s\" netlbl error %d.\n", __func__, -rc); } else return -EOPNOTSUPP; return 0; } /** * smack_socket_post_create - finish socket setup * @sock: the socket * @family: protocol family * @type: unused * @protocol: unused * @kern: unused * * Sets the netlabel information on the socket * * Returns 0 on success, and error code otherwise */ static int smack_socket_post_create(struct socket *sock, int family, int type, int protocol, int kern) { if (family != PF_INET || sock->sk == NULL) return 0; /* * Set the outbound netlbl. */ return smack_netlabel(sock->sk, SMACK_CIPSO_SOCKET); } /** * smack_socket_connect - connect access check * @sock: the socket * @sap: the other end * @addrlen: size of sap * * Verifies that a connection may be possible * * Returns 0 on success, and error code otherwise */ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap, int addrlen) { if (sock->sk == NULL || sock->sk->sk_family != PF_INET) return 0; if (addrlen < sizeof(struct sockaddr_in)) return -EINVAL; return smack_netlabel_send(sock->sk, (struct sockaddr_in *)sap); } /** * smack_flags_to_may - convert S_ to MAY_ values * @flags: the S_ value * * Returns the equivalent MAY_ value */ static int smack_flags_to_may(int flags) { int may = 0; if (flags & S_IRUGO) may |= MAY_READ; if (flags & S_IWUGO) may |= MAY_WRITE; if (flags & S_IXUGO) may |= MAY_EXEC; return may; } /** * smack_msg_msg_alloc_security - Set the security blob for msg_msg * @msg: the object * * Returns 0 */ static int smack_msg_msg_alloc_security(struct msg_msg *msg) { msg->security = current_security(); return 0; } /** * smack_msg_msg_free_security - Clear the security blob for msg_msg * @msg: the object * * Clears the blob pointer */ static void smack_msg_msg_free_security(struct msg_msg *msg) { msg->security = NULL; } /** * smack_of_shm - the smack pointer for the shm * @shp: the object * * Returns a pointer to the smack value */ static char *smack_of_shm(struct shmid_kernel *shp) { return (char *)shp->shm_perm.security; } /** * smack_shm_alloc_security - Set the security blob for shm * @shp: the object * * Returns 0 */ static int smack_shm_alloc_security(struct shmid_kernel *shp) { struct kern_ipc_perm *isp = &shp->shm_perm; isp->security = current_security(); return 0; } /** * smack_shm_free_security - Clear the security blob for shm * @shp: the object * * Clears the blob pointer */ static void smack_shm_free_security(struct shmid_kernel *shp) { struct kern_ipc_perm *isp = &shp->shm_perm; isp->security = NULL; } /** * smack_shm_associate - Smack access check for shm * @shp: the object * @shmflg: access requested * * Returns 0 if current has the requested access, error code otherwise */ static int smack_shm_associate(struct shmid_kernel *shp, int shmflg) { char *ssp = smack_of_shm(shp); int may; may = smack_flags_to_may(shmflg); return smk_curacc(ssp, may); } /** * smack_shm_shmctl - Smack access check for shm * @shp: the object * @cmd: what it wants to do * * Returns 0 if current has the requested access, error code otherwise */ static int smack_shm_shmctl(struct shmid_kernel *shp, int cmd) { char *ssp; int may; switch (cmd) { case 
IPC_STAT: case SHM_STAT: may = MAY_READ; break; case IPC_SET: case SHM_LOCK: case SHM_UNLOCK: case IPC_RMID: may = MAY_READWRITE; break; case IPC_INFO: case SHM_INFO: /* * System level information. */ return 0; default: return -EINVAL; } ssp = smack_of_shm(shp); return smk_curacc(ssp, may); } /** * smack_shm_shmat - Smack access for shmat * @shp: the object * @shmaddr: unused * @shmflg: access requested * * Returns 0 if current has the requested access, error code otherwise */ static int smack_shm_shmat(struct shmid_kernel *shp, char __user *shmaddr, int shmflg) { char *ssp = smack_of_shm(shp); int may; may = smack_flags_to_may(shmflg); return smk_curacc(ssp, may); } /** * smack_of_sem - the smack pointer for the sem * @sma: the object * * Returns a pointer to the smack value */ static char *smack_of_sem(struct sem_array *sma) { return (char *)sma->sem_perm.security; } /** * smack_sem_alloc_security - Set the security blob for sem * @sma: the object * * Returns 0 */ static int smack_sem_alloc_security(struct sem_array *sma) { struct kern_ipc_perm *isp = &sma->sem_perm; isp->security = current_security(); return 0; } /** * smack_sem_free_security - Clear the security blob for sem * @sma: the object * * Clears the blob pointer */ static void smack_sem_free_security(struct sem_array *sma) { struct kern_ipc_perm *isp = &sma->sem_perm; isp->security = NULL; } /** * smack_sem_associate - Smack access check for sem * @sma: the object * @semflg: access requested * * Returns 0 if current has the requested access, error code otherwise */ static int smack_sem_associate(struct sem_array *sma, int semflg) { char *ssp = smack_of_sem(sma); int may; may = smack_flags_to_may(semflg); return smk_curacc(ssp, may); } /** * smack_sem_shmctl - Smack access check for sem * @sma: the object * @cmd: what it wants to do * * Returns 0 if current has the requested access, error code otherwise */ static int smack_sem_semctl(struct sem_array *sma, int cmd) { char *ssp; int may; switch (cmd) { case GETPID: case GETNCNT: case GETZCNT: case GETVAL: case GETALL: case IPC_STAT: case SEM_STAT: may = MAY_READ; break; case SETVAL: case SETALL: case IPC_RMID: case IPC_SET: may = MAY_READWRITE; break; case IPC_INFO: case SEM_INFO: /* * System level information */ return 0; default: return -EINVAL; } ssp = smack_of_sem(sma); return smk_curacc(ssp, may); } /** * smack_sem_semop - Smack checks of semaphore operations * @sma: the object * @sops: unused * @nsops: unused * @alter: unused * * Treated as read and write in all cases. 
* * Returns 0 if access is allowed, error code otherwise */ static int smack_sem_semop(struct sem_array *sma, struct sembuf *sops, unsigned nsops, int alter) { char *ssp = smack_of_sem(sma); return smk_curacc(ssp, MAY_READWRITE); } /** * smack_msg_alloc_security - Set the security blob for msg * @msq: the object * * Returns 0 */ static int smack_msg_queue_alloc_security(struct msg_queue *msq) { struct kern_ipc_perm *kisp = &msq->q_perm; kisp->security = current_security(); return 0; } /** * smack_msg_free_security - Clear the security blob for msg * @msq: the object * * Clears the blob pointer */ static void smack_msg_queue_free_security(struct msg_queue *msq) { struct kern_ipc_perm *kisp = &msq->q_perm; kisp->security = NULL; } /** * smack_of_msq - the smack pointer for the msq * @msq: the object * * Returns a pointer to the smack value */ static char *smack_of_msq(struct msg_queue *msq) { return (char *)msq->q_perm.security; } /** * smack_msg_queue_associate - Smack access check for msg_queue * @msq: the object * @msqflg: access requested * * Returns 0 if current has the requested access, error code otherwise */ static int smack_msg_queue_associate(struct msg_queue *msq, int msqflg) { char *msp = smack_of_msq(msq); int may; may = smack_flags_to_may(msqflg); return smk_curacc(msp, may); } /** * smack_msg_queue_msgctl - Smack access check for msg_queue * @msq: the object * @cmd: what it wants to do * * Returns 0 if current has the requested access, error code otherwise */ static int smack_msg_queue_msgctl(struct msg_queue *msq, int cmd) { char *msp; int may; switch (cmd) { case IPC_STAT: case MSG_STAT: may = MAY_READ; break; case IPC_SET: case IPC_RMID: may = MAY_READWRITE; break; case IPC_INFO: case MSG_INFO: /* * System level information */ return 0; default: return -EINVAL; } msp = smack_of_msq(msq); return smk_curacc(msp, may); } /** * smack_msg_queue_msgsnd - Smack access check for msg_queue * @msq: the object * @msg: unused * @msqflg: access requested * * Returns 0 if current has the requested access, error code otherwise */ static int smack_msg_queue_msgsnd(struct msg_queue *msq, struct msg_msg *msg, int msqflg) { char *msp = smack_of_msq(msq); int rc; rc = smack_flags_to_may(msqflg); return smk_curacc(msp, rc); } /** * smack_msg_queue_msgsnd - Smack access check for msg_queue * @msq: the object * @msg: unused * @target: unused * @type: unused * @mode: unused * * Returns 0 if current has read and write access, error code otherwise */ static int smack_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg, struct task_struct *target, long type, int mode) { char *msp = smack_of_msq(msq); return smk_curacc(msp, MAY_READWRITE); } /** * smack_ipc_permission - Smack access for ipc_permission() * @ipp: the object permissions * @flag: access requested * * Returns 0 if current has read and write access, error code otherwise */ static int smack_ipc_permission(struct kern_ipc_perm *ipp, short flag) { char *isp = ipp->security; int may; may = smack_flags_to_may(flag); return smk_curacc(isp, may); } /** * smack_ipc_getsecid - Extract smack security id * @ipp: the object permissions * @secid: where result will be saved */ static void smack_ipc_getsecid(struct kern_ipc_perm *ipp, u32 *secid) { char *smack = ipp->security; *secid = smack_to_secid(smack); } /** * smack_d_instantiate - Make sure the blob is correct on an inode * @opt_dentry: unused * @inode: the object * * Set the inode's security blob if it hasn't been done already. 

/**
 * smack_d_instantiate - Make sure the blob is correct on an inode
 * @opt_dentry: dentry where inode will be attached
 * @inode: the object
 *
 * Set the inode's security blob if it hasn't been done already.
 */
static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
{
	struct super_block *sbp;
	struct superblock_smack *sbsp;
	struct inode_smack *isp;
	char *csp = current_security();
	char *fetched;
	char *final;
	struct dentry *dp;

	if (inode == NULL)
		return;

	isp = inode->i_security;

	mutex_lock(&isp->smk_lock);
	/*
	 * If the inode is already instantiated
	 * take the quick way out
	 */
	if (isp->smk_flags & SMK_INODE_INSTANT)
		goto unlockandout;

	sbp = inode->i_sb;
	sbsp = sbp->s_security;
	/*
	 * We're going to use the superblock default label
	 * if there's no label on the file.
	 */
	final = sbsp->smk_default;

	/*
	 * If this is the root inode the superblock
	 * may be in the process of initialization.
	 * If that is the case use the root value out
	 * of the superblock.
	 */
	if (opt_dentry->d_parent == opt_dentry) {
		isp->smk_inode = sbsp->smk_root;
		isp->smk_flags |= SMK_INODE_INSTANT;
		goto unlockandout;
	}

	/*
	 * This is pretty hackish.
	 * Casey says that we shouldn't have to do
	 * file system specific code, but it does help
	 * with keeping it simple.
	 */
	switch (sbp->s_magic) {
	case SMACK_MAGIC:
		/*
		 * Casey says that it's a little embarrassing
		 * that the smack file system doesn't do
		 * extended attributes.
		 */
		final = smack_known_star.smk_known;
		break;
	case PIPEFS_MAGIC:
		/*
		 * Casey says pipes are easy (?)
		 */
		final = smack_known_star.smk_known;
		break;
	case DEVPTS_SUPER_MAGIC:
		/*
		 * devpts seems content with the label of the task.
		 * Programs that change smack have to treat the
		 * pty with respect.
		 */
		final = csp;
		break;
	case SOCKFS_MAGIC:
		/*
		 * Casey says sockets get the smack of the task.
		 */
		final = csp;
		break;
	case PROC_SUPER_MAGIC:
		/*
		 * Casey says procfs appears not to care.
		 * The superblock default suffices.
		 */
		break;
	case TMPFS_MAGIC:
		/*
		 * Device labels should come from the filesystem,
		 * but watch out, because they're volatile,
		 * getting recreated on every reboot.
		 */
		final = smack_known_star.smk_known;
		/*
		 * No break.
		 *
		 * If a smack value has been set we want to use it,
		 * but since tmpfs isn't giving us the opportunity
		 * to set mount options simulate setting the
		 * superblock default.
		 */
	default:
		/*
		 * This isn't an understood special case.
		 * Get the value from the xattr.
		 *
		 * No xattr support means, alas, no SMACK label.
		 * Use the default applied above.
		 * It would be curious if the label of the task
		 * does not match that assigned.
		 */
		if (inode->i_op->getxattr == NULL)
			break;
		/*
		 * Get the dentry for xattr.
		 */
		if (opt_dentry == NULL) {
			dp = d_find_alias(inode);
			if (dp == NULL)
				break;
		} else {
			dp = dget(opt_dentry);
			if (dp == NULL)
				break;
		}

		fetched = smk_fetch(inode, dp);
		if (fetched != NULL)
			final = fetched;

		dput(dp);
		break;
	}

	if (final == NULL)
		isp->smk_inode = csp;
	else
		isp->smk_inode = final;

	isp->smk_flags |= SMK_INODE_INSTANT;

unlockandout:
	mutex_unlock(&isp->smk_lock);
	return;
}

/**
 * smack_getprocattr - Smack process attribute access
 * @p: the object task
 * @name: the name of the attribute in /proc/.../attr
 * @value: where to put the result
 *
 * Places a copy of the task Smack into value
 *
 * Returns the length of the smack label or an error code
 */
static int smack_getprocattr(struct task_struct *p, char *name, char **value)
{
	char *cp;
	int slen;

	if (strcmp(name, "current") != 0)
		return -EINVAL;

	cp = kstrdup(task_security(p), GFP_KERNEL);
	if (cp == NULL)
		return -ENOMEM;

	slen = strlen(cp);
	*value = cp;
	return slen;
}

/**
 * smack_setprocattr - Smack process attribute setting
 * @p: the object task
 * @name: the name of the attribute in /proc/.../attr
 * @value: the value to set
 * @size: the size of the value
 *
 * Sets the Smack value of the task. Only setting self
 * is permitted and only with privilege
 *
 * Returns the length of the smack label or an error code
 */
static int smack_setprocattr(struct task_struct *p, char *name,
			     void *value, size_t size)
{
	struct cred *new;
	char *newsmack;

	/*
	 * Changing another process' Smack value is too dangerous
	 * and supports no sane use case.
	 */
	if (p != current)
		return -EPERM;

	if (!capable(CAP_MAC_ADMIN))
		return -EPERM;

	if (value == NULL || size == 0 || size >= SMK_LABELLEN)
		return -EINVAL;

	if (strcmp(name, "current") != 0)
		return -EINVAL;

	newsmack = smk_import(value, size);
	if (newsmack == NULL)
		return -EINVAL;

	/*
	 * No process is ever allowed the web ("@") label.
	 */
	if (newsmack == smack_known_web.smk_known)
		return -EPERM;

	new = prepare_creds();
	if (new == NULL)
		return -ENOMEM;
	new->security = newsmack;
	commit_creds(new);
	return size;
}

/**
 * smack_unix_stream_connect - Smack access on UDS
 * @sock: one socket
 * @other: the other socket
 * @newsk: unused
 *
 * Return 0 if a subject with the smack of sock could access
 * an object with the smack of other, otherwise an error code
 */
static int smack_unix_stream_connect(struct socket *sock,
				     struct socket *other, struct sock *newsk)
{
	struct inode *sp = SOCK_INODE(sock);
	struct inode *op = SOCK_INODE(other);

	return smk_access(smk_of_inode(sp), smk_of_inode(op), MAY_READWRITE);
}

/**
 * smack_unix_may_send - Smack access on UDS
 * @sock: one socket
 * @other: the other socket
 *
 * Return 0 if a subject with the smack of sock could access
 * an object with the smack of other, otherwise an error code
 */
static int smack_unix_may_send(struct socket *sock, struct socket *other)
{
	struct inode *sp = SOCK_INODE(sock);
	struct inode *op = SOCK_INODE(other);

	return smk_access(smk_of_inode(sp), smk_of_inode(op), MAY_WRITE);
}
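
/*
 * Illustrative sketch (not part of the original file): the two
 * procattr hooks above back /proc/<pid>/attr/current.  Reading returns
 * the task label (smack_getprocattr); writing relabels the current
 * task and requires CAP_MAC_ADMIN (smack_setprocattr).  The label
 * "Snap" is hypothetical.  Kept under #if 0, never compiled.
 */
#if 0
#include <stdio.h>
#include <string.h>

static void demo_proc_attr(void)
{
	char label[64];
	FILE *fp = fopen("/proc/self/attr/current", "r");

	if (fp != NULL) {
		if (fgets(label, sizeof(label), fp) != NULL)
			printf("current label: %s\n", label);
		fclose(fp);
	}

	/* relabeling: only self, only with CAP_MAC_ADMIN, never "@" */
	fp = fopen("/proc/self/attr/current", "w");
	if (fp != NULL) {
		if (fwrite("Snap", 1, strlen("Snap"), fp) != strlen("Snap"))
			perror("relabel");
		fclose(fp);
	}
}
#endif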

/**
 * smack_socket_sendmsg - Smack check based on destination host
 * @sock: the socket
 * @msg: the message
 * @size: the size of the message
 *
 * Return 0 if the current subject can write to the destination
 * host. This is only a question if the destination is a single
 * label host.
 */
static int smack_socket_sendmsg(struct socket *sock, struct msghdr *msg,
				int size)
{
	struct sockaddr_in *sip = (struct sockaddr_in *) msg->msg_name;

	/*
	 * Perfectly reasonable for this to be NULL
	 */
	if (sip == NULL || sip->sin_family != PF_INET)
		return 0;

	return smack_netlabel_send(sock->sk, sip);
}

/**
 * smack_from_secattr - Convert a netlabel attr.mls.lvl/attr.mls.cat
 *	pair to smack
 * @sap: netlabel secattr
 * @sip: where to put the result
 *
 * Copies a smack label into sip
 */
static void smack_from_secattr(struct netlbl_lsm_secattr *sap, char *sip)
{
	char smack[SMK_LABELLEN];
	char *sp;
	int pcat;

	if ((sap->flags & NETLBL_SECATTR_MLS_LVL) != 0) {
		/*
		 * Looks like a CIPSO packet.
		 * If there are flags but no level netlabel isn't
		 * behaving the way we expect it to.
		 *
		 * Get the categories, if any
		 * Without guidance regarding the smack value
		 * for the packet fall back on the network
		 * ambient value.
		 */
		memset(smack, '\0', SMK_LABELLEN);
		if ((sap->flags & NETLBL_SECATTR_MLS_CAT) != 0)
			for (pcat = -1;;) {
				pcat = netlbl_secattr_catmap_walk(
					sap->attr.mls.cat, pcat + 1);
				if (pcat < 0)
					break;
				smack_catset_bit(pcat, smack);
			}
		/*
		 * If it is CIPSO using smack direct mapping
		 * we are already done. WeeHee.
		 */
		if (sap->attr.mls.lvl == smack_cipso_direct) {
			memcpy(sip, smack, SMK_MAXLEN);
			return;
		}
		/*
		 * Look it up in the supplied table if it is not
		 * a direct mapping.
		 */
		smack_from_cipso(sap->attr.mls.lvl, smack, sip);
		return;
	}
	if ((sap->flags & NETLBL_SECATTR_SECID) != 0) {
		/*
		 * Looks like a fallback, which gives us a secid.
		 */
		sp = smack_from_secid(sap->attr.secid);
		/*
		 * This has got to be a bug because it is
		 * impossible to specify a fallback without
		 * specifying the label, which will ensure
		 * it has a secid, and the only way to get a
		 * secid is from a fallback.
		 */
		BUG_ON(sp == NULL);
		strncpy(sip, sp, SMK_MAXLEN);
		return;
	}
	/*
	 * Without guidance regarding the smack value
	 * for the packet fall back on the network
	 * ambient value.
	 */
	strncpy(sip, smack_net_ambient, SMK_MAXLEN);
	return;
}
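
/*
 * Illustrative sketch (not part of the original file): the category
 * walk above turns each CIPSO category number into one bit of the
 * label buffer via smack_catset_bit(), defined earlier in this file.
 * A minimal standalone model of that bitmap, assuming the MSB-first
 * convention Smack uses (category 1 is the top bit of byte 0).
 * Kept under #if 0, never compiled.
 */
#if 0
#include <stdio.h>

#define DEMO_LABELLEN 24

static void demo_catset_bit(int cat, char *catsetp)
{
	if (cat > DEMO_LABELLEN * 8)
		return;
	catsetp[(cat - 1) / 8] |= 0x80 >> ((cat - 1) % 8);
}

int main(void)
{
	char set[DEMO_LABELLEN] = { 0 };

	demo_catset_bit(1, set);	/* sets 0x80 in byte 0 */
	demo_catset_bit(9, set);	/* sets 0x80 in byte 1 */
	printf("%02x %02x\n", set[0] & 0xff, set[1] & 0xff);
	return 0;
}
#endif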

/**
 * smack_socket_sock_rcv_skb - Smack packet delivery access check
 * @sk: socket
 * @skb: packet
 *
 * Returns 0 if the packet should be delivered, an error code otherwise
 */
static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct netlbl_lsm_secattr secattr;
	struct socket_smack *ssp = sk->sk_security;
	char smack[SMK_LABELLEN];
	char *csp;
	int rc;

	if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
		return 0;

	/*
	 * Translate what netlabel gave us.
	 */
	netlbl_secattr_init(&secattr);

	rc = netlbl_skbuff_getattr(skb, sk->sk_family, &secattr);
	if (rc == 0) {
		smack_from_secattr(&secattr, smack);
		csp = smack;
	} else
		csp = smack_net_ambient;

	netlbl_secattr_destroy(&secattr);

	/*
	 * Receiving a packet requires that the other end
	 * be able to write here. Read access is not required.
	 * This is the simplest possible security model
	 * for networking.
	 */
	rc = smk_access(csp, ssp->smk_in, MAY_WRITE);
	if (rc != 0)
		netlbl_skbuff_err(skb, rc, 0);
	return rc;
}

/**
 * smack_socket_getpeersec_stream - pull in packet label
 * @sock: the socket
 * @optval: user's destination
 * @optlen: size thereof
 * @len: max thereof
 *
 * returns zero on success, an error code otherwise
 */
static int smack_socket_getpeersec_stream(struct socket *sock,
					  char __user *optval,
					  int __user *optlen, unsigned len)
{
	struct socket_smack *ssp;
	int slen;
	int rc = 0;

	ssp = sock->sk->sk_security;
	slen = strlen(ssp->smk_packet) + 1;

	if (slen > len)
		rc = -ERANGE;
	else if (copy_to_user(optval, ssp->smk_packet, slen) != 0)
		rc = -EFAULT;

	if (put_user(slen, optlen) != 0)
		rc = -EFAULT;

	return rc;
}

/**
 * smack_socket_getpeersec_dgram - pull in packet label
 * @sock: the socket
 * @skb: packet data
 * @secid: pointer to where to put the secid of the packet
 *
 * Sets the netlabel socket state on sk from parent
 */
static int smack_socket_getpeersec_dgram(struct socket *sock,
					 struct sk_buff *skb, u32 *secid)
{
	struct netlbl_lsm_secattr secattr;
	struct sock *sk;
	char smack[SMK_LABELLEN];
	int family = PF_INET;
	u32 s;
	int rc;

	/*
	 * Only works for families with packets.
	 */
	if (sock != NULL) {
		sk = sock->sk;
		if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
			return 0;
		family = sk->sk_family;
	}
	/*
	 * Translate what netlabel gave us.
	 */
	netlbl_secattr_init(&secattr);
	rc = netlbl_skbuff_getattr(skb, family, &secattr);
	if (rc == 0)
		smack_from_secattr(&secattr, smack);
	netlbl_secattr_destroy(&secattr);

	/*
	 * Give up if we couldn't get anything
	 */
	if (rc != 0)
		return rc;

	s = smack_to_secid(smack);
	if (s == 0)
		return -EINVAL;

	*secid = s;
	return 0;
}

/**
 * smack_sock_graft - Initialize a newly created socket with an existing sock
 * @sk: child sock
 * @parent: parent socket
 *
 * Set the smk_{in,out} state of an existing sock based on the process that
 * is creating the new socket.
 */
static void smack_sock_graft(struct sock *sk, struct socket *parent)
{
	struct socket_smack *ssp;

	if (sk == NULL ||
	    (sk->sk_family != PF_INET && sk->sk_family != PF_INET6))
		return;

	ssp = sk->sk_security;
	ssp->smk_in = ssp->smk_out = current_security();
	/* ssp->smk_packet is already set in smack_inet_csk_clone() */
}
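
/*
 * Illustrative sketch (not part of the original file): userspace pulls
 * the peer label stored in smk_packet through the SO_PEERSEC socket
 * option, which lands in smack_socket_getpeersec_stream() above.
 * Kept under #if 0, never compiled.
 */
#if 0
#include <stdio.h>
#include <sys/socket.h>

static void demo_peersec(int connected_fd)
{
	char peer[64];
	socklen_t len = sizeof(peer);

	if (getsockopt(connected_fd, SOL_SOCKET, SO_PEERSEC,
		       peer, &len) == 0)
		printf("peer label: %.*s\n", (int)len, peer);
	else
		perror("SO_PEERSEC");
}
#endif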

/**
 * smack_inet_conn_request - Smack access check on connect
 * @sk: socket involved
 * @skb: packet
 * @req: unused
 *
 * Returns 0 if a task with the packet label could write to
 * the socket, otherwise an error code
 */
static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
				   struct request_sock *req)
{
	u16 family = sk->sk_family;
	struct socket_smack *ssp = sk->sk_security;
	struct netlbl_lsm_secattr secattr;
	struct sockaddr_in addr;
	struct iphdr *hdr;
	char smack[SMK_LABELLEN];
	int rc;

	/* handle mapped IPv4 packets arriving via IPv6 sockets */
	if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
		family = PF_INET;

	netlbl_secattr_init(&secattr);
	rc = netlbl_skbuff_getattr(skb, family, &secattr);
	if (rc == 0)
		smack_from_secattr(&secattr, smack);
	else
		strncpy(smack, smack_known_huh.smk_known, SMK_MAXLEN);
	netlbl_secattr_destroy(&secattr);

	/*
	 * Receiving a packet requires that the other end be able to write
	 * here. Read access is not required.
	 */
	rc = smk_access(smack, ssp->smk_in, MAY_WRITE);
	if (rc != 0)
		return rc;

	/*
	 * Save the peer's label in the request_sock so we can later setup
	 * smk_packet in the child socket so that SO_PEERSEC can report it.
	 */
	req->peer_secid = smack_to_secid(smack);

	/*
	 * We need to decide if we want to label the incoming connection
	 * here; if we do, we only need to label the request_sock and the
	 * stack will propagate the wire-label to the sock when it is
	 * created.
	 */
	hdr = ip_hdr(skb);
	addr.sin_addr.s_addr = hdr->saddr;
	rcu_read_lock();
	if (smack_host_label(&addr) == NULL) {
		rcu_read_unlock();
		netlbl_secattr_init(&secattr);
		smack_to_secattr(smack, &secattr);
		rc = netlbl_req_setattr(req, &secattr);
		netlbl_secattr_destroy(&secattr);
	} else {
		rcu_read_unlock();
		netlbl_req_delattr(req);
	}

	return rc;
}

/**
 * smack_inet_csk_clone - Copy the connection information to the new socket
 * @sk: the new socket
 * @req: the connection's request_sock
 *
 * Transfer the connection's peer label to the newly created socket.
 */
static void smack_inet_csk_clone(struct sock *sk,
				 const struct request_sock *req)
{
	struct socket_smack *ssp = sk->sk_security;
	char *smack;

	if (req->peer_secid != 0) {
		smack = smack_from_secid(req->peer_secid);
		strncpy(ssp->smk_packet, smack, SMK_MAXLEN);
	} else
		ssp->smk_packet[0] = '\0';
}

/*
 * Key management security hooks
 *
 * Casey has not tested key support very heavily.
 * The permission check is most likely too restrictive.
 * If you care about keys please have a look.
 */
#ifdef CONFIG_KEYS

/**
 * smack_key_alloc - Set the key security blob
 * @key: object
 * @cred: the credentials to use
 * @flags: unused
 *
 * No allocation required
 *
 * Returns 0
 */
static int smack_key_alloc(struct key *key, const struct cred *cred,
			   unsigned long flags)
{
	key->security = cred->security;
	return 0;
}

/**
 * smack_key_free - Clear the key security blob
 * @key: the object
 *
 * Clear the blob pointer
 */
static void smack_key_free(struct key *key)
{
	key->security = NULL;
}

/**
 * smack_key_permission - Smack access on a key
 * @key_ref: gets to the object
 * @cred: the credentials to use
 * @perm: unused
 *
 * Return 0 if the task has read and write to the object,
 * an error code otherwise
 */
static int smack_key_permission(key_ref_t key_ref,
				const struct cred *cred, key_perm_t perm)
{
	struct key *keyp;

	keyp = key_ref_to_ptr(key_ref);
	if (keyp == NULL)
		return -EINVAL;
	/*
	 * If the key hasn't been initialized give it access so that
	 * it may do so.
	 */
	if (keyp->security == NULL)
		return 0;
	/*
	 * This should not occur
	 */
	if (cred->security == NULL)
		return -EACCES;

	return smk_access(cred->security, keyp->security, MAY_READWRITE);
}
#endif /* CONFIG_KEYS */
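
/*
 * Illustrative sketch (not part of the original file): with the key
 * hooks above, a key created by a task carries that task's label, and
 * any later use must pass a read/write check against it.  Userspace
 * view using the keyutils library; the description "demo" is
 * hypothetical.  Kept under #if 0, never compiled.
 */
#if 0
#include <stdio.h>
#include <keyutils.h>

static void demo_key(void)
{
	key_serial_t k;
	char *buf;

	/* smack_key_alloc() labels the key with the caller's label */
	k = add_key("user", "demo", "payload", 7, KEY_SPEC_SESSION_KEYRING);
	if (k < 0) {
		perror("add_key");
		return;
	}

	/* every access runs smack_key_permission(): MAY_READWRITE */
	if (keyctl_read_alloc(k, (void **)&buf) < 0)
		perror("keyctl_read_alloc");
}
#endif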

/*
 * Smack Audit hooks
 *
 * Audit requires a unique representation of each Smack specific
 * rule. This unique representation is used to distinguish the
 * object to be audited from remaining kernel objects and also
 * works as a glue between the audit hooks.
 *
 * Since repository entries are added but never deleted, we'll use
 * the smack_known label address related to the given audit rule as
 * the needed unique representation. This also better fits the smack
 * model where nearly everything is a label.
 */
#ifdef CONFIG_AUDIT

/**
 * smack_audit_rule_init - Initialize a smack audit rule
 * @field: audit rule fields given from user-space (audit.h)
 * @op: required testing operator (=, !=, >, <, ...)
 * @rulestr: smack label to be audited
 * @vrule: pointer to save our own audit rule representation
 *
 * Prepare to audit cases where (@field @op @rulestr) is true.
 * The label to be audited is created if necessary.
 */
static int smack_audit_rule_init(u32 field, u32 op, char *rulestr,
				 void **vrule)
{
	char **rule = (char **)vrule;
	*rule = NULL;

	if (field != AUDIT_SUBJ_USER && field != AUDIT_OBJ_USER)
		return -EINVAL;

	if (op != Audit_equal && op != Audit_not_equal)
		return -EINVAL;

	*rule = smk_import(rulestr, 0);

	return 0;
}

/**
 * smack_audit_rule_known - Distinguish Smack audit rules
 * @krule: rule of interest, in Audit kernel representation format
 *
 * This is used to filter Smack rules from remaining Audit ones.
 * If it's proved that this rule belongs to us, the
 * audit_rule_match hook will be called to do the final judgement.
 */
static int smack_audit_rule_known(struct audit_krule *krule)
{
	struct audit_field *f;
	int i;

	for (i = 0; i < krule->field_count; i++) {
		f = &krule->fields[i];

		if (f->type == AUDIT_SUBJ_USER || f->type == AUDIT_OBJ_USER)
			return 1;
	}

	return 0;
}

/**
 * smack_audit_rule_match - Audit given object?
 * @secid: security id for identifying the object to test
 * @field: audit rule flags given from user-space
 * @op: required testing operator
 * @vrule: smack internal rule presentation
 * @actx: audit context associated with the check
 *
 * The core Audit hook. It's used to take the decision of
 * whether to audit or not to audit a given object.
 */
static int smack_audit_rule_match(u32 secid, u32 field, u32 op, void *vrule,
				  struct audit_context *actx)
{
	char *smack;
	char *rule = vrule;

	if (!rule) {
		audit_log(actx, GFP_KERNEL, AUDIT_SELINUX_ERR,
			  "Smack: missing rule\n");
		return -ENOENT;
	}

	if (field != AUDIT_SUBJ_USER && field != AUDIT_OBJ_USER)
		return 0;

	smack = smack_from_secid(secid);

	/*
	 * No need to do string comparisons. If a match occurs,
	 * both pointers will point to the same smack_known
	 * label.
	 */
	if (op == Audit_equal)
		return (rule == smack);
	if (op == Audit_not_equal)
		return (rule != smack);

	return 0;
}

/**
 * smack_audit_rule_free - free smack rule representation
 * @vrule: rule to be freed.
 *
 * No memory was allocated.
 */
static void smack_audit_rule_free(void *vrule)
{
	/* No-op */
}

#endif /* CONFIG_AUDIT */

/**
 * smack_secid_to_secctx - return the smack label for a secid
 * @secid: incoming integer
 * @secdata: destination
 * @seclen: how long it is
 *
 * Exists for networking code.
 */
static int smack_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
	char *sp = smack_from_secid(secid);

	*secdata = sp;
	*seclen = strlen(sp);
	return 0;
}

/**
 * smack_secctx_to_secid - return the secid for a smack label
 * @secdata: smack label
 * @seclen: how long result is
 * @secid: outgoing integer
 *
 * Exists for audit and networking code.
 */
static int smack_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
{
	*secid = smack_to_secid(secdata);
	return 0;
}
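
/*
 * Illustrative sketch (not part of the original file): the pointer
 * comparison in smack_audit_rule_match() works because every label is
 * interned: smk_import() returns the same address for the same string
 * every time.  A standalone toy model of that property, with a
 * hypothetical intern table standing in for the smack_known list.
 * Kept under #if 0, never compiled.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

static char *interned[16];
static int ninterned;

static char *demo_import(const char *label)
{
	int i;

	for (i = 0; i < ninterned; i++)
		if (strcmp(interned[i], label) == 0)
			return interned[i];
	return interned[ninterned++] = strdup(label);
}

int main(void)
{
	char *rule = demo_import("Snap");
	char *subject = demo_import("Snap");

	/* same string, same pointer: no strcmp() needed at match time */
	printf("match: %d\n", rule == subject);
	return 0;
}
#endif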

/**
 * smack_release_secctx - don't do anything.
 * @secdata: unused
 * @seclen: unused
 *
 * Exists to make sure nothing gets done, and properly
 */
static void smack_release_secctx(char *secdata, u32 seclen)
{
}

struct security_operations smack_ops = {
	.name =				"smack",

	.ptrace_may_access =		smack_ptrace_may_access,
	.ptrace_traceme =		smack_ptrace_traceme,
	.capget =			cap_capget,
	.capset =			cap_capset,
	.capable =			cap_capable,
	.syslog =			smack_syslog,
	.settime =			cap_settime,
	.vm_enough_memory =		cap_vm_enough_memory,

	.bprm_set_creds =		cap_bprm_set_creds,
	.bprm_secureexec =		cap_bprm_secureexec,

	.sb_alloc_security =		smack_sb_alloc_security,
	.sb_free_security =		smack_sb_free_security,
	.sb_copy_data =			smack_sb_copy_data,
	.sb_kern_mount =		smack_sb_kern_mount,
	.sb_statfs =			smack_sb_statfs,
	.sb_mount =			smack_sb_mount,
	.sb_umount =			smack_sb_umount,

	.inode_alloc_security =		smack_inode_alloc_security,
	.inode_free_security =		smack_inode_free_security,
	.inode_init_security =		smack_inode_init_security,
	.inode_link =			smack_inode_link,
	.inode_unlink =			smack_inode_unlink,
	.inode_rmdir =			smack_inode_rmdir,
	.inode_rename =			smack_inode_rename,
	.inode_permission =		smack_inode_permission,
	.inode_setattr =		smack_inode_setattr,
	.inode_getattr =		smack_inode_getattr,
	.inode_setxattr =		smack_inode_setxattr,
	.inode_post_setxattr =		smack_inode_post_setxattr,
	.inode_getxattr =		smack_inode_getxattr,
	.inode_removexattr =		smack_inode_removexattr,
	.inode_need_killpriv =		cap_inode_need_killpriv,
	.inode_killpriv =		cap_inode_killpriv,
	.inode_getsecurity =		smack_inode_getsecurity,
	.inode_setsecurity =		smack_inode_setsecurity,
	.inode_listsecurity =		smack_inode_listsecurity,
	.inode_getsecid =		smack_inode_getsecid,

	.file_permission =		smack_file_permission,
	.file_alloc_security =		smack_file_alloc_security,
	.file_free_security =		smack_file_free_security,
	.file_ioctl =			smack_file_ioctl,
	.file_lock =			smack_file_lock,
	.file_fcntl =			smack_file_fcntl,
	.file_set_fowner =		smack_file_set_fowner,
	.file_send_sigiotask =		smack_file_send_sigiotask,
	.file_receive =			smack_file_receive,

	.cred_free =			smack_cred_free,
	.cred_prepare =			smack_cred_prepare,
	.cred_commit =			smack_cred_commit,
	.kernel_act_as =		smack_kernel_act_as,
	.kernel_create_files_as =	smack_kernel_create_files_as,
	.task_fix_setuid =		cap_task_fix_setuid,
	.task_setpgid =			smack_task_setpgid,
	.task_getpgid =			smack_task_getpgid,
	.task_getsid =			smack_task_getsid,
	.task_getsecid =		smack_task_getsecid,
	.task_setnice =			smack_task_setnice,
	.task_setioprio =		smack_task_setioprio,
	.task_getioprio =		smack_task_getioprio,
	.task_setscheduler =		smack_task_setscheduler,
	.task_getscheduler =		smack_task_getscheduler,
	.task_movememory =		smack_task_movememory,
	.task_kill =			smack_task_kill,
	.task_wait =			smack_task_wait,
	.task_to_inode =		smack_task_to_inode,
	.task_prctl =			cap_task_prctl,

	.ipc_permission =		smack_ipc_permission,
	.ipc_getsecid =			smack_ipc_getsecid,

	.msg_msg_alloc_security =	smack_msg_msg_alloc_security,
	.msg_msg_free_security =	smack_msg_msg_free_security,

	.msg_queue_alloc_security =	smack_msg_queue_alloc_security,
	.msg_queue_free_security =	smack_msg_queue_free_security,
	.msg_queue_associate =		smack_msg_queue_associate,
	.msg_queue_msgctl =		smack_msg_queue_msgctl,
	.msg_queue_msgsnd =		smack_msg_queue_msgsnd,
	.msg_queue_msgrcv =		smack_msg_queue_msgrcv,

	.shm_alloc_security =		smack_shm_alloc_security,
	.shm_free_security =		smack_shm_free_security,
	.shm_associate =		smack_shm_associate,
	.shm_shmctl =			smack_shm_shmctl,
	.shm_shmat =			smack_shm_shmat,

	.sem_alloc_security =		smack_sem_alloc_security,
	.sem_free_security =		smack_sem_free_security,
	.sem_associate =		smack_sem_associate,
	.sem_semctl =			smack_sem_semctl,
	.sem_semop =			smack_sem_semop,

	.netlink_send =			cap_netlink_send,
	.netlink_recv =			cap_netlink_recv,

	.d_instantiate =		smack_d_instantiate,

	.getprocattr =			smack_getprocattr,
	.setprocattr =			smack_setprocattr,

	.unix_stream_connect =		smack_unix_stream_connect,
	.unix_may_send =		smack_unix_may_send,

	.socket_post_create =		smack_socket_post_create,
	.socket_connect =		smack_socket_connect,
	.socket_sendmsg =		smack_socket_sendmsg,
	.socket_sock_rcv_skb =		smack_socket_sock_rcv_skb,
	.socket_getpeersec_stream =	smack_socket_getpeersec_stream,
	.socket_getpeersec_dgram =	smack_socket_getpeersec_dgram,
	.sk_alloc_security =		smack_sk_alloc_security,
	.sk_free_security =		smack_sk_free_security,
	.sock_graft =			smack_sock_graft,
	.inet_conn_request =		smack_inet_conn_request,
	.inet_csk_clone =		smack_inet_csk_clone,

 /* key management security hooks */
#ifdef CONFIG_KEYS
	.key_alloc =			smack_key_alloc,
	.key_free =			smack_key_free,
	.key_permission =		smack_key_permission,
#endif /* CONFIG_KEYS */

 /* Audit hooks */
#ifdef CONFIG_AUDIT
	.audit_rule_init =		smack_audit_rule_init,
	.audit_rule_known =		smack_audit_rule_known,
	.audit_rule_match =		smack_audit_rule_match,
	.audit_rule_free =		smack_audit_rule_free,
#endif /* CONFIG_AUDIT */

	.secid_to_secctx =		smack_secid_to_secctx,
	.secctx_to_secid =		smack_secctx_to_secid,
	.release_secctx =		smack_release_secctx,
};

static __init void init_smack_known_list(void)
{
	list_add(&smack_known_huh.list, &smack_known_list);
	list_add(&smack_known_hat.list, &smack_known_list);
	list_add(&smack_known_star.list, &smack_known_list);
	list_add(&smack_known_floor.list, &smack_known_list);
	list_add(&smack_known_invalid.list, &smack_known_list);
	list_add(&smack_known_web.list, &smack_known_list);
}

/**
 * smack_init - initialize the smack system
 *
 * Returns 0
 */
static __init int smack_init(void)
{
	struct cred *cred;

	if (!security_module_enable(&smack_ops))
		return 0;

	printk(KERN_INFO "Smack: Initializing.\n");

	/*
	 * Set the security state for the initial task.
	 */
	cred = (struct cred *) current->cred;
	cred->security = &smack_known_floor.smk_known;

	/* initialize the smack_known_list */
	init_smack_known_list();

	/*
	 * Initialize locks
	 */
	spin_lock_init(&smack_known_huh.smk_cipsolock);
	spin_lock_init(&smack_known_hat.smk_cipsolock);
	spin_lock_init(&smack_known_star.smk_cipsolock);
	spin_lock_init(&smack_known_floor.smk_cipsolock);
	spin_lock_init(&smack_known_invalid.smk_cipsolock);

	/*
	 * Register with LSM
	 */
	if (register_security(&smack_ops))
		panic("smack: Unable to register with kernel.\n");

	return 0;
}

/*
 * Smack requires early initialization in order to label
 * all processes and objects when they are created.
 */
security_initcall(smack_init);
pichina/linux-bcache
security/smack/smack_lsm.c
C
gpl-2.0
70,465