repo_name
string
path
string
copies
string
size
string
content
string
license
string
alephzain/archos-gpl-gen8-kernel
sound/soc/pxa/pxa2xx-i2s.c
146
10164
/*
 * pxa2xx-i2s.c  --  ALSA Soc Audio Layer
 *
 * Copyright 2005 Wolfson Microelectronics PLC.
 * Author: Liam Girdwood
 *         lrg@slimlogic.co.uk
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/initval.h>
#include <sound/soc.h>
#include <sound/pxa2xx-lib.h>

#include <mach/hardware.h>
#include <mach/pxa-regs.h>
#include <mach/pxa2xx-gpio.h>
#include <mach/audio.h>

#include "pxa2xx-pcm.h"
#include "pxa2xx-i2s.h"

/* GPIO alternate-function selections for one I2S bus role (slave/master) */
struct pxa2xx_gpio {
	u32 sys;
	u32 rx;
	u32 tx;
	u32 clk;
	u32 frm;
};

/*
 * I2S Controller Register and Bit Definitions
 */
#define SACR0 __REG(0x40400000)	/* Global Control Register */
#define SACR1 __REG(0x40400004)	/* Serial Audio I2S/MSB-Justified Control Register */
#define SASR0 __REG(0x4040000C)	/* Serial Audio I2S/MSB-Justified Interface and FIFO Status Register */
#define SAIMR __REG(0x40400014)	/* Serial Audio Interrupt Mask Register */
#define SAICR __REG(0x40400018)	/* Serial Audio Interrupt Clear Register */
#define SADIV __REG(0x40400060)	/* Audio Clock Divider Register. */
#define SADR  __REG(0x40400080)	/* Serial Audio Data Register (TX and RX FIFO access Register). */

#define SACR0_RFTH(x)	((x) << 12)	/* Rx FIFO Interrupt or DMA Trigger Threshold */
#define SACR0_TFTH(x)	((x) << 8)	/* Tx FIFO Interrupt or DMA Trigger Threshold */
#define SACR0_STRF	(1 << 5)	/* FIFO Select for EFWR Special Function */
#define SACR0_EFWR	(1 << 4)	/* Enable EFWR Function  */
#define SACR0_RST	(1 << 3)	/* FIFO, i2s Register Reset */
#define SACR0_BCKD	(1 << 2)	/* Bit Clock Direction */
#define SACR0_ENB	(1 << 0)	/* Enable I2S Link */
#define SACR1_ENLBF	(1 << 5)	/* Enable Loopback */
#define SACR1_DRPL	(1 << 4)	/* Disable Replaying Function */
#define SACR1_DREC	(1 << 3)	/* Disable Recording Function */
#define SACR1_AMSL	(1 << 0)	/* Specify Alternate Mode */

#define SASR0_I2SOFF	(1 << 7)	/* Controller Status */
#define SASR0_ROR	(1 << 6)	/* Rx FIFO Overrun */
#define SASR0_TUR	(1 << 5)	/* Tx FIFO Underrun */
#define SASR0_RFS	(1 << 4)	/* Rx FIFO Service Request */
#define SASR0_TFS	(1 << 3)	/* Tx FIFO Service Request */
#define SASR0_BSY	(1 << 2)	/* I2S Busy */
#define SASR0_RNE	(1 << 1)	/* Rx FIFO Not Empty */
#define SASR0_TNF	(1 << 0)	/* Tx FIFO Not Empty */

#define SAICR_ROR	(1 << 6)	/* Clear Rx FIFO Overrun Interrupt */
#define SAICR_TUR	(1 << 5)	/* Clear Tx FIFO Underrun Interrupt */

#define SAIMR_ROR	(1 << 6)	/* Enable Rx FIFO Overrun Condition Interrupt */
#define SAIMR_TUR	(1 << 5)	/* Enable Tx FIFO Underrun Condition Interrupt */
#define SAIMR_RFS	(1 << 4)	/* Enable Rx FIFO Service Interrupt */
#define SAIMR_TFS	(1 << 3)	/* Enable Tx FIFO Service Interrupt */

/* Saved register state (for suspend/resume) plus negotiated bus settings */
struct pxa_i2s_port {
	u32 sadiv;
	u32 sacr0;
	u32 sacr1;
	u32 saimr;
	int master;	/* 1 = SoC drives BITCLK, 0 = codec drives it */
	u32 fmt;	/* SACR1 format bits (0 = I2S, SACR1_AMSL = left justified) */
};
static struct pxa_i2s_port pxa_i2s;
/* Obtained in probe(), released in remove(); ERR_PTR(-ENOENT) when absent */
static struct clk *clk_i2s;

static struct pxa2xx_pcm_dma_params pxa2xx_i2s_pcm_stereo_out = {
	.name			= "I2S PCM Stereo out",
	.dev_addr		= __PREG(SADR),
	.drcmr			= &DRCMR(3),
	.dcmd			= DCMD_INCSRCADDR | DCMD_FLOWTRG |
				  DCMD_BURST32 | DCMD_WIDTH4,
};

static struct pxa2xx_pcm_dma_params pxa2xx_i2s_pcm_stereo_in = {
	.name			= "I2S PCM Stereo in",
	.dev_addr		= __PREG(SADR),
	.drcmr			= &DRCMR(2),
	.dcmd			= DCMD_INCTRGADDR | DCMD_FLOWSRC |
				  DCMD_BURST32 | DCMD_WIDTH4,
};

/* Index 0 = SoC slave, index 1 = SoC master (selected by pxa_i2s.master) */
static struct pxa2xx_gpio gpio_bus[] = {
	{ /* I2S SoC Slave */
		.rx = GPIO29_SDATA_IN_I2S_MD,
		.tx = GPIO30_SDATA_OUT_I2S_MD,
		.clk = GPIO28_BITCLK_IN_I2S_MD,
		.frm = GPIO31_SYNC_I2S_MD,
	},
	{ /* I2S SoC Master */
		.rx = GPIO29_SDATA_IN_I2S_MD,
		.tx = GPIO30_SDATA_OUT_I2S_MD,
		.clk = GPIO28_BITCLK_OUT_I2S_MD,
		.frm = GPIO31_SYNC_I2S_MD,
	},
};

/*
 * Open a stream: fail early if the I2S clock was never found, and reset
 * the controller before the first stream brings the link up.
 */
static int pxa2xx_i2s_startup(struct snd_pcm_substream *substream,
			      struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;

	if (IS_ERR(clk_i2s))
		return PTR_ERR(clk_i2s);

	/* Only reset when no other stream currently owns the controller */
	if (!cpu_dai->active) {
		SACR0 |= SACR0_RST;
		SACR0 = 0;
	}

	return 0;
}

/* wait for I2S controller to be ready */
static int pxa_i2s_wait(void)
{
	int i;

	/* flush the Rx FIFO (16 dummy reads of the data register) */
	for (i = 0; i < 16; i++)
		SADR;

	return 0;
}

/*
 * Record the DAI format for hw_params(); only I2S and left-justified
 * data formats and CBS_CFS/CBM_CFS clocking are meaningful here.
 */
static int pxa2xx_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
		unsigned int fmt)
{
	/* interface format */
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		pxa_i2s.fmt = 0;
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		pxa_i2s.fmt = SACR1_AMSL;
		break;
	}

	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBS_CFS:
		pxa_i2s.master = 1;
		break;
	case SND_SOC_DAIFMT_CBM_CFS:
		pxa_i2s.master = 0;
		break;
	default:
		break;
	}

	return 0;
}

/*
 * Route SYSCLK out on its GPIO when we are bus master and the codec
 * expects the clock from us; only PXA2XX_I2S_SYSCLK is supported.
 */
static int pxa2xx_i2s_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
		int clk_id, unsigned int freq, int dir)
{
	if (clk_id != PXA2XX_I2S_SYSCLK)
		return -ENODEV;

	if (pxa_i2s.master && dir == SND_SOC_CLOCK_OUT)
		pxa_gpio_mode(gpio_bus[pxa_i2s.master].sys);

	return 0;
}

/*
 * Configure GPIOs, DMA channel, controller registers and the sample-rate
 * divider for this stream.  The controller registers are only programmed
 * when the link is not already enabled by the other direction's stream.
 */
static int pxa2xx_i2s_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *params,
				struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;

	pxa_gpio_mode(gpio_bus[pxa_i2s.master].rx);
	pxa_gpio_mode(gpio_bus[pxa_i2s.master].tx);
	pxa_gpio_mode(gpio_bus[pxa_i2s.master].frm);
	pxa_gpio_mode(gpio_bus[pxa_i2s.master].clk);
	BUG_ON(IS_ERR(clk_i2s));
	clk_enable(clk_i2s);
	pxa_i2s_wait();

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		cpu_dai->dma_data = &pxa2xx_i2s_pcm_stereo_out;
	else
		cpu_dai->dma_data = &pxa2xx_i2s_pcm_stereo_in;

	/* is port used by another stream */
	if (!(SACR0 & SACR0_ENB)) {
		SACR0 = 0;
		SACR1 = 0;
		if (pxa_i2s.master)
			SACR0 |= SACR0_BCKD;

		SACR0 |= SACR0_RFTH(14) | SACR0_TFTH(1);
		SACR1 |= pxa_i2s.fmt;
	}

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		SAIMR |= SAIMR_TFS;
	else
		SAIMR |= SAIMR_RFS;

	/*
	 * Rates outside this switch are already excluded by
	 * PXA2XX_I2S_RATES in the DAI descriptor below.
	 */
	switch (params_rate(params)) {
	case 8000:
		SADIV = 0x48;
		break;
	case 11025:
		SADIV = 0x34;
		break;
	case 16000:
		SADIV = 0x24;
		break;
	case 22050:
		SADIV = 0x1a;
		break;
	case 44100:
		SADIV = 0xd;
		break;
	case 48000:
		SADIV = 0xc;
		break;
	case 96000: /* not in manual and possibly slightly inaccurate */
		SADIV = 0x6;
		break;
	}

	return 0;
}

/* Start enables the link; stop/suspend/pause are handled in shutdown() */
static int pxa2xx_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
			      struct snd_soc_dai *dai)
{
	int ret = 0;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		SACR0 |= SACR0_ENB;
		break;
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * Disable this stream's direction; when both directions are off, shut the
 * link down and gate the clock.
 *
 * NOTE: the clk_i2s reference is taken once in probe() and released in
 * remove().  The original code also called clk_put() here, which dropped
 * the reference on every stream close and unbalanced the clk refcount
 * (use-after-put once a second stream opened).  That call is removed.
 */
static void pxa2xx_i2s_shutdown(struct snd_pcm_substream *substream,
				struct snd_soc_dai *dai)
{
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		SACR1 |= SACR1_DRPL;
		SAIMR &= ~SAIMR_TFS;
	} else {
		SACR1 |= SACR1_DREC;
		SAIMR &= ~SAIMR_RFS;
	}

	if (SACR1 & (SACR1_DREC | SACR1_DRPL)) {
		SACR0 &= ~SACR0_ENB;
		pxa_i2s_wait();
		clk_disable(clk_i2s);
	}
}

#ifdef CONFIG_PM
/* Save register state and quiesce the link before suspending */
static int pxa2xx_i2s_suspend(struct snd_soc_dai *dai)
{
	if (!dai->active)
		return 0;

	/* store registers */
	pxa_i2s.sacr0 = SACR0;
	pxa_i2s.sacr1 = SACR1;
	pxa_i2s.saimr = SAIMR;
	pxa_i2s.sadiv = SADIV;

	/* deactivate link */
	SACR0 &= ~SACR0_ENB;
	pxa_i2s_wait();

	return 0;
}

/* Restore the saved register state and re-enable the link */
static int pxa2xx_i2s_resume(struct snd_soc_dai *dai)
{
	if (!dai->active)
		return 0;

	pxa_i2s_wait();

	/* program everything with the link still disabled, then enable */
	pxa_i2s.sacr0 &= ~SACR0_ENB;
	SACR0 = pxa_i2s.sacr0;
	SACR1 = pxa_i2s.sacr1;
	SAIMR = pxa_i2s.saimr;
	SADIV = pxa_i2s.sadiv;
	SACR0 |= SACR0_ENB;

	return 0;
}

#else
#define pxa2xx_i2s_suspend	NULL
#define pxa2xx_i2s_resume	NULL
#endif

#define PXA2XX_I2S_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\
		SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_44100 | \
		SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000)

struct snd_soc_dai pxa_i2s_dai = {
	.name = "pxa2xx-i2s",
	.id = 0,
	.suspend = pxa2xx_i2s_suspend,
	.resume = pxa2xx_i2s_resume,
	.playback = {
		.channels_min = 2,
		.channels_max = 2,
		.rates = PXA2XX_I2S_RATES,
		.formats = SNDRV_PCM_FMTBIT_S16_LE,},
	.capture = {
		.channels_min = 2,
		.channels_max = 2,
		.rates = PXA2XX_I2S_RATES,
		.formats = SNDRV_PCM_FMTBIT_S16_LE,},
	.ops = {
		.startup = pxa2xx_i2s_startup,
		.shutdown = pxa2xx_i2s_shutdown,
		.trigger = pxa2xx_i2s_trigger,
		.hw_params = pxa2xx_i2s_hw_params,
		.set_fmt = pxa2xx_i2s_set_dai_fmt,
		.set_sysclk = pxa2xx_i2s_set_dai_sysclk,
	},
};
EXPORT_SYMBOL_GPL(pxa_i2s_dai);

/* Acquire the I2S clock and register the DAI with the ASoC core */
static int pxa2xx_i2s_probe(struct platform_device *dev)
{
	int ret;

	clk_i2s = clk_get(&dev->dev, "I2SCLK");
	if (IS_ERR(clk_i2s))
		return PTR_ERR(clk_i2s);

	pxa_i2s_dai.dev = &dev->dev;
	ret = snd_soc_register_dai(&pxa_i2s_dai);
	if (ret != 0)
		clk_put(clk_i2s);

	return ret;
}

static int __devexit pxa2xx_i2s_remove(struct platform_device *dev)
{
	snd_soc_unregister_dai(&pxa_i2s_dai);
	clk_put(clk_i2s);
	clk_i2s = ERR_PTR(-ENOENT);
	return 0;
}

static struct platform_driver pxa2xx_i2s_driver = {
	.probe = pxa2xx_i2s_probe,
	.remove = __devexit_p(pxa2xx_i2s_remove),

	.driver = {
		.name = "pxa2xx-i2s",
		.owner = THIS_MODULE,
	},
};

static int __init pxa2xx_i2s_init(void)
{
	/* The SYSCLK master pin differs between PXA27x and earlier parts */
	if (cpu_is_pxa27x())
		gpio_bus[1].sys = GPIO113_I2S_SYSCLK_MD;
	else
		gpio_bus[1].sys = GPIO32_SYSCLK_I2S_MD;

	clk_i2s = ERR_PTR(-ENOENT);
	return platform_driver_register(&pxa2xx_i2s_driver);
}

static void __exit pxa2xx_i2s_exit(void)
{
	platform_driver_unregister(&pxa2xx_i2s_driver);
}

module_init(pxa2xx_i2s_init);
module_exit(pxa2xx_i2s_exit);

/* Module information */
MODULE_AUTHOR("Liam Girdwood, lrg@slimlogic.co.uk");
MODULE_DESCRIPTION("pxa2xx I2S SoC Interface");
MODULE_LICENSE("GPL");
gpl-2.0
bitthunder-toolchain/newlib
newlib/libc/string/wmemmove.c
146
2957
/*
FUNCTION
	<<wmemmove>>---copy wide characters in memory with overlapping areas

ANSI_SYNOPSIS
	#include <wchar.h>
	wchar_t *wmemmove(wchar_t *<[d]>, const wchar_t *<[s]>, size_t <[n]>);

TRAD_SYNOPSIS
	wchar_t *wmemmove(<[d]>, <[s]>, <[n]>
	wchar_t *<[d]>;
	const wchar_t *<[s]>;
	size_t <[n]>;

DESCRIPTION
	The <<wmemmove>> function copies <[n]> wide characters from the object
	pointed to by <[s]> to the object pointed to by <[d]>. Copying takes
	place as if the <[n]> wide characters from the object pointed to by
	<[s]> are first copied into a temporary array of <[n]> wide characters
	that does not overlap the objects pointed to by <[d]> or <[s]>, and
	then the <[n]> wide characters from the temporary array are copied
	into the object pointed to by <[d]>.

	This function is not affected by locale and all wchar_t values are
	treated identically.  The null wide character and wchar_t values not
	corresponding to valid characters are not treated specially.

	If <[n]> is zero, <[d]> and <[s]> must be valid pointers, and the
	function copies zero wide characters.

RETURNS
	The <<wmemmove>> function returns the value of <[d]>.

PORTABILITY
	<<wmemmove>> is ISO/IEC 9899/AMD1:1995 (ISO C).

	No supporting OS subroutines are required.
*/

/*	$NetBSD: wmemmove.c,v 1.1 2000/12/23 23:14:37 itojun Exp $	*/

/*-
 * Copyright (c)1999 Citrus Project,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	citrus Id: wmemmove.c,v 1.2 2000/12/20 14:08:31 itojun Exp
 */

#include <_ansi.h>
#include <string.h>
#include <wchar.h>

/*
 * Delegate the copy to memmove(), which already provides the required
 * "as if through a temporary array" overlap semantics; all we do here
 * is scale the wide-character count up to a byte count.
 */
wchar_t *
_DEFUN (wmemmove, (d, s, n),
	wchar_t * d _AND
	_CONST wchar_t * s _AND
	size_t n)
{
  size_t nbytes = n * sizeof (wchar_t);

  memmove (d, s, nbytes);
  return d;
}
gpl-2.0
hendersa/bbbandroid-kernel
drivers/scsi/megaraid/megaraid_sas_fusion.c
146
66206
/* * Linux MegaRAID driver for SAS based RAID controllers * * Copyright (c) 2009-2012 LSI Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * FILE: megaraid_sas_fusion.c * * Authors: LSI Corporation * Sumant Patro * Adam Radford <linuxraid@lsi.com> * * Send feedback to: <megaraidlinux@lsi.com> * * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 * ATTN: Linuxraid */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/list.h> #include <linux/moduleparam.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/uio.h> #include <linux/uaccess.h> #include <linux/fs.h> #include <linux/compat.h> #include <linux/blkdev.h> #include <linux/mutex.h> #include <linux/poll.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include "megaraid_sas_fusion.h" #include "megaraid_sas.h" extern void megasas_free_cmds(struct megasas_instance *instance); extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance *instance); extern void megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, u8 alt_status); int megasas_is_ldio(struct scsi_cmnd *cmd); int wait_and_poll(struct megasas_instance *instance, struct megasas_cmd 
*cmd); void megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd); int megasas_alloc_cmds(struct megasas_instance *instance); int megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs); int megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd); u8 MR_BuildRaidContext(struct megasas_instance *instance, struct IO_REQUEST_INFO *io_info, struct RAID_CONTEXT *pRAID_Context, struct MR_FW_RAID_MAP_ALL *map); u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map); struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map); u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map); void megasas_check_and_restore_queue_depth(struct megasas_instance *instance); u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map, struct LD_LOAD_BALANCE_INFO *lbInfo); u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *in_info); int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); void megaraid_sas_kill_hba(struct megasas_instance *instance); extern u32 megasas_dbg_lvl; extern int resetwaittime; /** * megasas_enable_intr_fusion - Enables interrupts * @regs: MFI register set */ void megasas_enable_intr_fusion(struct megasas_register_set __iomem *regs) { /* For Thunderbolt/Invader also clear intr on enable */ writel(~0, &regs->outbound_intr_status); readl(&regs->outbound_intr_status); writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_disable_intr_fusion - Disables interrupt * @regs: MFI register set */ void megasas_disable_intr_fusion(struct megasas_register_set __iomem *regs) { u32 mask = 0xFFFFFFFF; u32 status; writel(mask, &regs->outbound_intr_mask); /* Dummy readl to force pci flush */ status = readl(&regs->outbound_intr_mask); } int megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs) { u32 status; /* * Check if it is 
our interrupt */ status = readl(&regs->outbound_intr_status); if (status & 1) { writel(status, &regs->outbound_intr_status); readl(&regs->outbound_intr_status); return 1; } if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) return 0; return 1; } /** * megasas_get_cmd_fusion - Get a command from the free pool * @instance: Adapter soft state * * Returns a free command from the pool */ struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance *instance) { unsigned long flags; struct fusion_context *fusion = (struct fusion_context *)instance->ctrl_context; struct megasas_cmd_fusion *cmd = NULL; spin_lock_irqsave(&fusion->cmd_pool_lock, flags); if (!list_empty(&fusion->cmd_pool)) { cmd = list_entry((&fusion->cmd_pool)->next, struct megasas_cmd_fusion, list); list_del_init(&cmd->list); } else { printk(KERN_ERR "megasas: Command pool (fusion) empty!\n"); } spin_unlock_irqrestore(&fusion->cmd_pool_lock, flags); return cmd; } /** * megasas_return_cmd_fusion - Return a cmd to free command pool * @instance: Adapter soft state * @cmd: Command packet to be returned to free command pool */ static inline void megasas_return_cmd_fusion(struct megasas_instance *instance, struct megasas_cmd_fusion *cmd) { unsigned long flags; struct fusion_context *fusion = (struct fusion_context *)instance->ctrl_context; spin_lock_irqsave(&fusion->cmd_pool_lock, flags); cmd->scmd = NULL; cmd->sync_cmd_idx = (u32)ULONG_MAX; list_add_tail(&cmd->list, &fusion->cmd_pool); spin_unlock_irqrestore(&fusion->cmd_pool_lock, flags); } /** * megasas_teardown_frame_pool_fusion - Destroy the cmd frame DMA pool * @instance: Adapter soft state */ static void megasas_teardown_frame_pool_fusion( struct megasas_instance *instance) { int i; struct fusion_context *fusion = instance->ctrl_context; u16 max_cmd = instance->max_fw_cmds; struct megasas_cmd_fusion *cmd; if (!fusion->sg_dma_pool || !fusion->sense_dma_pool) { printk(KERN_ERR "megasas: dma pool is null. 
SG Pool %p, " "sense pool : %p\n", fusion->sg_dma_pool, fusion->sense_dma_pool); return; } /* * Return all frames to pool */ for (i = 0; i < max_cmd; i++) { cmd = fusion->cmd_list[i]; if (cmd->sg_frame) pci_pool_free(fusion->sg_dma_pool, cmd->sg_frame, cmd->sg_frame_phys_addr); if (cmd->sense) pci_pool_free(fusion->sense_dma_pool, cmd->sense, cmd->sense_phys_addr); } /* * Now destroy the pool itself */ pci_pool_destroy(fusion->sg_dma_pool); pci_pool_destroy(fusion->sense_dma_pool); fusion->sg_dma_pool = NULL; fusion->sense_dma_pool = NULL; } /** * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool * @instance: Adapter soft state */ void megasas_free_cmds_fusion(struct megasas_instance *instance) { int i; struct fusion_context *fusion = instance->ctrl_context; u32 max_cmds, req_sz, reply_sz, io_frames_sz; req_sz = fusion->request_alloc_sz; reply_sz = fusion->reply_alloc_sz; io_frames_sz = fusion->io_frames_alloc_sz; max_cmds = instance->max_fw_cmds; /* Free descriptors and request Frames memory */ if (fusion->req_frames_desc) dma_free_coherent(&instance->pdev->dev, req_sz, fusion->req_frames_desc, fusion->req_frames_desc_phys); if (fusion->reply_frames_desc) { pci_pool_free(fusion->reply_frames_desc_pool, fusion->reply_frames_desc, fusion->reply_frames_desc_phys); pci_pool_destroy(fusion->reply_frames_desc_pool); } if (fusion->io_request_frames) { pci_pool_free(fusion->io_request_frames_pool, fusion->io_request_frames, fusion->io_request_frames_phys); pci_pool_destroy(fusion->io_request_frames_pool); } /* Free the Fusion frame pool */ megasas_teardown_frame_pool_fusion(instance); /* Free all the commands in the cmd_list */ for (i = 0; i < max_cmds; i++) kfree(fusion->cmd_list[i]); /* Free the cmd_list buffer itself */ kfree(fusion->cmd_list); fusion->cmd_list = NULL; INIT_LIST_HEAD(&fusion->cmd_pool); } /** * megasas_create_frame_pool_fusion - Creates DMA pool for cmd frames * @instance: Adapter soft state * */ static int 
megasas_create_frame_pool_fusion(struct megasas_instance *instance) { int i; u32 max_cmd; struct fusion_context *fusion; struct megasas_cmd_fusion *cmd; u32 total_sz_chain_frame; fusion = instance->ctrl_context; max_cmd = instance->max_fw_cmds; total_sz_chain_frame = MEGASAS_MAX_SZ_CHAIN_FRAME; /* * Use DMA pool facility provided by PCI layer */ fusion->sg_dma_pool = pci_pool_create("megasas sg pool fusion", instance->pdev, total_sz_chain_frame, 4, 0); if (!fusion->sg_dma_pool) { printk(KERN_DEBUG "megasas: failed to setup request pool " "fusion\n"); return -ENOMEM; } fusion->sense_dma_pool = pci_pool_create("megasas sense pool fusion", instance->pdev, SCSI_SENSE_BUFFERSIZE, 64, 0); if (!fusion->sense_dma_pool) { printk(KERN_DEBUG "megasas: failed to setup sense pool " "fusion\n"); pci_pool_destroy(fusion->sg_dma_pool); fusion->sg_dma_pool = NULL; return -ENOMEM; } /* * Allocate and attach a frame to each of the commands in cmd_list */ for (i = 0; i < max_cmd; i++) { cmd = fusion->cmd_list[i]; cmd->sg_frame = pci_pool_alloc(fusion->sg_dma_pool, GFP_KERNEL, &cmd->sg_frame_phys_addr); cmd->sense = pci_pool_alloc(fusion->sense_dma_pool, GFP_KERNEL, &cmd->sense_phys_addr); /* * megasas_teardown_frame_pool_fusion() takes care of freeing * whatever has been allocated */ if (!cmd->sg_frame || !cmd->sense) { printk(KERN_DEBUG "megasas: pci_pool_alloc failed\n"); megasas_teardown_frame_pool_fusion(instance); return -ENOMEM; } } return 0; } /** * megasas_alloc_cmds_fusion - Allocates the command packets * @instance: Adapter soft state * * * Each frame has a 32-bit field called context. This context is used to get * back the megasas_cmd_fusion from the frame when a frame gets completed * In this driver, the 32 bit values are the indices into an array cmd_list. * This array is used only to look up the megasas_cmd_fusion given the context. * The free commands themselves are maintained in a linked list called cmd_pool. 
* * cmds are formed in the io_request and sg_frame members of the * megasas_cmd_fusion. The context field is used to get a request descriptor * and is used as SMID of the cmd. * SMID value range is from 1 to max_fw_cmds. */ int megasas_alloc_cmds_fusion(struct megasas_instance *instance) { int i, j, count; u32 max_cmd, io_frames_sz; struct fusion_context *fusion; struct megasas_cmd_fusion *cmd; union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc; u32 offset; dma_addr_t io_req_base_phys; u8 *io_req_base; fusion = instance->ctrl_context; max_cmd = instance->max_fw_cmds; fusion->req_frames_desc = dma_alloc_coherent(&instance->pdev->dev, fusion->request_alloc_sz, &fusion->req_frames_desc_phys, GFP_KERNEL); if (!fusion->req_frames_desc) { printk(KERN_ERR "megasas; Could not allocate memory for " "request_frames\n"); goto fail_req_desc; } count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; fusion->reply_frames_desc_pool = pci_pool_create("reply_frames pool", instance->pdev, fusion->reply_alloc_sz * count, 16, 0); if (!fusion->reply_frames_desc_pool) { printk(KERN_ERR "megasas; Could not allocate memory for " "reply_frame pool\n"); goto fail_reply_desc; } fusion->reply_frames_desc = pci_pool_alloc(fusion->reply_frames_desc_pool, GFP_KERNEL, &fusion->reply_frames_desc_phys); if (!fusion->reply_frames_desc) { printk(KERN_ERR "megasas; Could not allocate memory for " "reply_frame pool\n"); pci_pool_destroy(fusion->reply_frames_desc_pool); goto fail_reply_desc; } reply_desc = fusion->reply_frames_desc; for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++) reply_desc->Words = ULLONG_MAX; io_frames_sz = fusion->io_frames_alloc_sz; fusion->io_request_frames_pool = pci_pool_create("io_request_frames pool", instance->pdev, fusion->io_frames_alloc_sz, 16, 0); if (!fusion->io_request_frames_pool) { printk(KERN_ERR "megasas: Could not allocate memory for " "io_request_frame pool\n"); goto fail_io_frames; } fusion->io_request_frames = 
pci_pool_alloc(fusion->io_request_frames_pool, GFP_KERNEL, &fusion->io_request_frames_phys); if (!fusion->io_request_frames) { printk(KERN_ERR "megasas: Could not allocate memory for " "io_request_frames frames\n"); pci_pool_destroy(fusion->io_request_frames_pool); goto fail_io_frames; } /* * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers. * Allocate the dynamic array first and then allocate individual * commands. */ fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *) * max_cmd, GFP_KERNEL); if (!fusion->cmd_list) { printk(KERN_DEBUG "megasas: out of memory. Could not alloc " "memory for cmd_list_fusion\n"); goto fail_cmd_list; } max_cmd = instance->max_fw_cmds; for (i = 0; i < max_cmd; i++) { fusion->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd_fusion), GFP_KERNEL); if (!fusion->cmd_list[i]) { printk(KERN_ERR "Could not alloc cmd list fusion\n"); for (j = 0; j < i; j++) kfree(fusion->cmd_list[j]); kfree(fusion->cmd_list); fusion->cmd_list = NULL; goto fail_cmd_list; } } /* The first 256 bytes (SMID 0) is not used. Don't add to cmd list */ io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; /* * Add all the commands to command pool (fusion->cmd_pool) */ /* SMID 0 is reserved. 
Set SMID/index from 1 */ for (i = 0; i < max_cmd; i++) { cmd = fusion->cmd_list[i]; offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i; memset(cmd, 0, sizeof(struct megasas_cmd_fusion)); cmd->index = i + 1; cmd->scmd = NULL; cmd->sync_cmd_idx = (u32)ULONG_MAX; /* Set to Invalid */ cmd->instance = instance; cmd->io_request = (struct MPI2_RAID_SCSI_IO_REQUEST *) (io_req_base + offset); memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)); cmd->io_request_phys_addr = io_req_base_phys + offset; list_add_tail(&cmd->list, &fusion->cmd_pool); } /* * Create a frame pool and assign one frame to each cmd */ if (megasas_create_frame_pool_fusion(instance)) { printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n"); megasas_free_cmds_fusion(instance); goto fail_req_desc; } return 0; fail_cmd_list: pci_pool_free(fusion->io_request_frames_pool, fusion->io_request_frames, fusion->io_request_frames_phys); pci_pool_destroy(fusion->io_request_frames_pool); fail_io_frames: dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz, fusion->reply_frames_desc, fusion->reply_frames_desc_phys); pci_pool_free(fusion->reply_frames_desc_pool, fusion->reply_frames_desc, fusion->reply_frames_desc_phys); pci_pool_destroy(fusion->reply_frames_desc_pool); fail_reply_desc: dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz, fusion->req_frames_desc, fusion->req_frames_desc_phys); fail_req_desc: return -ENOMEM; } /** * wait_and_poll - Issues a polling command * @instance: Adapter soft state * @cmd: Command packet to be issued * * For polling, MFI requires the cmd_status to be set to 0xFF before posting. 
*/ int wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd) { int i; struct megasas_header *frame_hdr = &cmd->frame->hdr; u32 msecs = MFI_POLL_TIMEOUT_SECS * 1000; /* * Wait for cmd_status to change */ for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) { rmb(); msleep(20); } if (frame_hdr->cmd_status == 0xff) return -ETIME; return 0; } /** * megasas_ioc_init_fusion - Initializes the FW * @instance: Adapter soft state * * Issues the IOC Init cmd */ int megasas_ioc_init_fusion(struct megasas_instance *instance) { struct megasas_init_frame *init_frame; struct MPI2_IOC_INIT_REQUEST *IOCInitMessage; dma_addr_t ioc_init_handle; struct megasas_cmd *cmd; u8 ret; struct fusion_context *fusion; union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; int i; struct megasas_header *frame_hdr; fusion = instance->ctrl_context; cmd = megasas_get_cmd(instance); if (!cmd) { printk(KERN_ERR "Could not allocate cmd for INIT Frame\n"); ret = 1; goto fail_get_cmd; } IOCInitMessage = dma_alloc_coherent(&instance->pdev->dev, sizeof(struct MPI2_IOC_INIT_REQUEST), &ioc_init_handle, GFP_KERNEL); if (!IOCInitMessage) { printk(KERN_ERR "Could not allocate memory for " "IOCInitMessage\n"); ret = 1; goto fail_fw_init; } memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST)); IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT; IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER; IOCInitMessage->MsgVersion = MPI2_VERSION; IOCInitMessage->HeaderVersion = MPI2_HEADER_VERSION; IOCInitMessage->SystemRequestFrameSize = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4; IOCInitMessage->ReplyDescriptorPostQueueDepth = fusion->reply_q_depth; IOCInitMessage->ReplyDescriptorPostQueueAddress = fusion->reply_frames_desc_phys; IOCInitMessage->SystemRequestFrameBaseAddress = fusion->io_request_frames_phys; IOCInitMessage->HostMSIxVectors = instance->msix_vectors; init_frame = (struct megasas_init_frame *)cmd->frame; memset(init_frame, 0, MEGAMFI_FRAME_SIZE); frame_hdr = 
&cmd->frame->hdr; frame_hdr->cmd_status = 0xFF; frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; init_frame->cmd = MFI_CMD_INIT; init_frame->cmd_status = 0xFF; init_frame->queue_info_new_phys_addr_lo = ioc_init_handle; init_frame->data_xfer_len = sizeof(struct MPI2_IOC_INIT_REQUEST); req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)fusion->req_frames_desc; req_desc->Words = cmd->frame_phys_addr; req_desc->MFAIo.RequestFlags = (MEGASAS_REQ_DESCRIPT_FLAGS_MFA << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); /* * disable the intr before firing the init frame */ instance->instancet->disable_intr(instance->reg_set); for (i = 0; i < (10 * 1000); i += 20) { if (readl(&instance->reg_set->doorbell) & 1) msleep(20); else break; } instance->instancet->fire_cmd(instance, req_desc->u.low, req_desc->u.high, instance->reg_set); wait_and_poll(instance, cmd); frame_hdr = &cmd->frame->hdr; if (frame_hdr->cmd_status != 0) { ret = 1; goto fail_fw_init; } printk(KERN_ERR "megasas:IOC Init cmd success\n"); ret = 0; fail_fw_init: megasas_return_cmd(instance, cmd); if (IOCInitMessage) dma_free_coherent(&instance->pdev->dev, sizeof(struct MPI2_IOC_INIT_REQUEST), IOCInitMessage, ioc_init_handle); fail_get_cmd: return ret; } /* * megasas_get_ld_map_info - Returns FW's ld_map structure * @instance: Adapter soft state * @pend: Pend the command or not * Issues an internal command (DCMD) to get the FW's controller PD * list structure. This information is mainly used to find out SYSTEM * supported by the FW. 
*/ static int megasas_get_ld_map_info(struct megasas_instance *instance) { int ret = 0; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; struct MR_FW_RAID_MAP_ALL *ci; dma_addr_t ci_h = 0; u32 size_map_info; struct fusion_context *fusion; cmd = megasas_get_cmd(instance); if (!cmd) { printk(KERN_DEBUG "megasas: Failed to get cmd for map info.\n"); return -ENOMEM; } fusion = instance->ctrl_context; if (!fusion) { megasas_return_cmd(instance, cmd); return 1; } dcmd = &cmd->frame->dcmd; size_map_info = sizeof(struct MR_FW_RAID_MAP) + (sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1)); ci = fusion->ld_map[(instance->map_id & 1)]; ci_h = fusion->ld_map_phys[(instance->map_id & 1)]; if (!ci) { printk(KERN_DEBUG "Failed to alloc mem for ld_map_info\n"); megasas_return_cmd(instance, cmd); return -ENOMEM; } memset(ci, 0, sizeof(*ci)); memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = size_map_info; dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; dcmd->sgl.sge32[0].phys_addr = ci_h; dcmd->sgl.sge32[0].length = size_map_info; if (!megasas_issue_polled(instance, cmd)) ret = 0; else { printk(KERN_ERR "megasas: Get LD Map Info Failed\n"); ret = -1; } megasas_return_cmd(instance, cmd); return ret; } u8 megasas_get_map_info(struct megasas_instance *instance) { struct fusion_context *fusion = instance->ctrl_context; fusion->fast_path_io = 0; if (!megasas_get_ld_map_info(instance)) { if (MR_ValidateMapInfo(fusion->ld_map[(instance->map_id & 1)], fusion->load_balance_info)) { fusion->fast_path_io = 1; return 0; } } return 1; } /* * megasas_sync_map_info - Returns FW's ld_map structure * @instance: Adapter soft state * * Issues an internal command (DCMD) to get the FW's controller PD * list structure. This information is mainly used to find out SYSTEM * supported by the FW. 
 */
int
megasas_sync_map_info(struct megasas_instance *instance)
{
	int ret = 0, i;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	u32 size_sync_info, num_lds;
	struct fusion_context *fusion;
	struct MR_LD_TARGET_SYNC *ci = NULL;
	struct MR_FW_RAID_MAP_ALL *map;
	struct MR_LD_RAID *raid;
	struct MR_LD_TARGET_SYNC *ld_sync;
	dma_addr_t ci_h = 0;
	u32 size_map_info;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		printk(KERN_DEBUG "megasas: Failed to get cmd for sync"
		       "info.\n");
		return -ENOMEM;
	}

	fusion = instance->ctrl_context;
	if (!fusion) {
		megasas_return_cmd(instance, cmd);
		return 1;
	}

	/* Current (active) map buffer: map_id's low bit selects it */
	map = fusion->ld_map[instance->map_id & 1];

	num_lds = map->raidMap.ldCount;

	dcmd = &cmd->frame->dcmd;

	/* NOTE(review): computed but not used below; the DCMD transfers
	 * size_map_info bytes, not size_sync_info. */
	size_sync_info = sizeof(struct MR_LD_TARGET_SYNC) * num_lds;

	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/*
	 * Reuse the *inactive* map buffer ((map_id - 1) & 1) as scratch for
	 * the sync payload; it will be overwritten by the next map fetch.
	 */
	ci = (struct MR_LD_TARGET_SYNC *)
		fusion->ld_map[(instance->map_id - 1) & 1];
	memset(ci, 0, sizeof(struct MR_FW_RAID_MAP_ALL));

	ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1];

	/* One target-id/sequence-number record per logical drive */
	ld_sync = (struct MR_LD_TARGET_SYNC *)ci;

	for (i = 0; i < num_lds; i++, ld_sync++) {
		raid = MR_LdRaidGet(i, map);
		ld_sync->targetId = MR_GetLDTgtId(i, map);
		ld_sync->seqNum = raid->seqNum;
	}

	size_map_info = sizeof(struct MR_FW_RAID_MAP) +
		(sizeof(struct MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));

	/*
	 * Write-direction DCMD; mbox carries the LD count and the "pend"
	 * flag so the FW holds the command until the map changes.
	 */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_WRITE;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = size_map_info;
	dcmd->mbox.b[0] = num_lds;
	dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = ci_h;
	dcmd->sgl.sge32[0].length = size_map_info;

	/*
	 * Fire-and-forget: the cmd is parked in map_update_cmd and completed
	 * asynchronously when the FW signals a map change; it is NOT
	 * returned to the pool here.
	 */
	instance->map_update_cmd = cmd;

	instance->instancet->issue_dcmd(instance, cmd);

	return ret;
}

/**
 * megasas_init_adapter_fusion -	Initializes the FW
 * @instance:		Adapter soft state
 *
 * This is the main function for initializing firmware.
 */
u32
megasas_init_adapter_fusion(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *reg_set;
	struct fusion_context *fusion;
	u32 max_cmd;
	int i = 0, count;

	fusion = instance->ctrl_context;

	reg_set = instance->reg_set;

	/*
	 * Get various operational parameters from status register
	 * (low 16 bits of the scratch-pad report the FW command depth,
	 * capped at 1008).
	 */
	instance->max_fw_cmds =
		instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
	instance->max_fw_cmds = min(instance->max_fw_cmds, (u16)1008);
	/*
	 * Reduce the max supported cmds by 1. This is to ensure that the
	 * reply_q_sz (1 more than the max cmd that driver may send)
	 * does not exceed max cmds that the FW can support
	 */
	instance->max_fw_cmds = instance->max_fw_cmds - 1;
	/* Only internal cmds (DCMD) need to have MFI frames */
	instance->max_mfi_cmds = MEGASAS_INT_CMDS;

	max_cmd = instance->max_fw_cmds;

	/* Reply queue depth: max_cmd + 1, rounded up to a multiple of 16 */
	fusion->reply_q_depth = ((max_cmd + 1 + 15) / 16) * 16;

	fusion->request_alloc_sz =
		sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
	fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)
		* (fusion->reply_q_depth);
	fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE *
		 (max_cmd + 1)); /* Extra 1 for SMID 0 */

	/* SGEs that fit in the main frame after the fixed request header */
	fusion->max_sge_in_main_msg =
		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
		 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL)) / 16;

	fusion->max_sge_in_chain =
		MEGASAS_MAX_SZ_CHAIN_FRAME / sizeof(union MPI2_SGE_IO_UNION);

	/* -2: one main-frame slot is consumed by the chain element, and
	 * presumably one chain slot is reserved — TODO confirm */
	instance->max_num_sge = fusion->max_sge_in_main_msg +
		fusion->max_sge_in_chain - 2;

	/* Used for pass thru MFI frame (DCMD) */
	fusion->chain_offset_mfi_pthru =
		offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16;

	fusion->chain_offset_io_request =
		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
		 sizeof(union MPI2_SGE_IO_UNION)) / 16;

	/* One reply-queue cursor per MSI-X vector (at least one) */
	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
	for (i = 0; i < count; i++)
		fusion->last_reply_idx[i] = 0;

	/*
	 * Allocate memory for descriptors
	 * Create a pool of commands
	 */
	if (megasas_alloc_cmds(instance))
		goto fail_alloc_mfi_cmds;
	if (megasas_alloc_cmds_fusion(instance))
		goto fail_alloc_cmds;

	if (megasas_ioc_init_fusion(instance))
		goto fail_ioc_init;

	instance->flag_ieee = 1;

	fusion->map_sz = sizeof(struct MR_FW_RAID_MAP) +
		(sizeof(struct MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));

	fusion->fast_path_io = 0;

	/* Two DMA-coherent map buffers: active + scratch (double buffer) */
	for (i = 0; i < 2; i++) {
		fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
						       fusion->map_sz,
						       &fusion->ld_map_phys[i],
						       GFP_KERNEL);
		if (!fusion->ld_map[i]) {
			printk(KERN_ERR "megasas: Could not allocate memory "
			       "for map info\n");
			goto fail_map_info;
		}
	}

	/* Fetch the initial map; on success arm the pended sync DCMD */
	if (!megasas_get_map_info(instance))
		megasas_sync_map_info(instance);

	return 0;

	/* goto-based unwind: free in reverse order of acquisition */
fail_map_info:
	if (i == 1)
		dma_free_coherent(&instance->pdev->dev, fusion->map_sz,
				  fusion->ld_map[0], fusion->ld_map_phys[0]);
fail_ioc_init:
	megasas_free_cmds_fusion(instance);
fail_alloc_cmds:
	megasas_free_cmds(instance);
fail_alloc_mfi_cmds:
	return 1;
}

/**
 * megasas_fire_cmd_fusion -	Sends command to the FW
 * @frame_phys_addr :		Physical address of cmd
 * @frame_count :		Number of frames for the command
 * @regs :			MFI register set
 *
 * Posts the 64-bit request descriptor to the inbound queue ports.
 * The hba_lock serializes the low/high writes so the two halves of a
 * descriptor are never interleaved with another CPU's.
 */
void
megasas_fire_cmd_fusion(struct megasas_instance *instance,
			dma_addr_t req_desc_lo,
			u32 req_desc_hi,
			struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);

	writel(req_desc_lo, &(regs)->inbound_low_queue_port);
	writel(req_desc_hi, &(regs)->inbound_high_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * map_cmd_status -	Maps FW cmd status to OS cmd status
 * @cmd :		Pointer to cmd
 * @status :		status of cmd returned by FW
 * @ext_status :	ext status of cmd returned by FW
 *
 * Translates the firmware's RAID-context status into a SCSI midlayer
 * result code stored in cmd->scmd->result.
 */
void
map_cmd_status(struct megasas_cmd_fusion *cmd, u8 status, u8 ext_status)
{

	switch (status) {

	case MFI_STAT_OK:
		cmd->scmd->result = DID_OK << 16;
		break;

	case MFI_STAT_SCSI_IO_FAILED:
	case MFI_STAT_LD_INIT_IN_PROGRESS:
		cmd->scmd->result = (DID_ERROR << 16) | ext_status;
		break;

	case MFI_STAT_SCSI_DONE_WITH_ERROR:

		/* Command reached the device; ext_status is the SCSI status */
		cmd->scmd->result = (DID_OK << 16) | ext_status;
		if (ext_status == SAM_STAT_CHECK_CONDITION) {
			/* Copy FW-provided sense data up to the midlayer */
			memset(cmd->scmd->sense_buffer, 0,
			       SCSI_SENSE_BUFFERSIZE);
			memcpy(cmd->scmd->sense_buffer, cmd->sense,
			       SCSI_SENSE_BUFFERSIZE);
			cmd->scmd->result |= DRIVER_SENSE << 24;
		}
		break;

	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		cmd->scmd->result = DID_BAD_TARGET << 16;
		break;

	case MFI_STAT_CONFIG_SEQ_MISMATCH:
		cmd->scmd->result = DID_IMM_RETRY << 16;
		break;

	default:
		printk(KERN_DEBUG "megasas: FW status %#x\n", status);
		cmd->scmd->result = DID_ERROR << 16;
		break;
	}
}

/**
 * megasas_make_sgl_fusion -	Prepares 32-bit SGL
 * @instance:		Adapter soft state
 * @scp:		SCSI command from the mid-layer
 * @sgl_ptr:		SGL to be filled in
 * @cmd:		cmd we are working on
 *
 * If successful, this function returns the number of SG elements.
 * When the SG list overflows the main message frame, the last main-frame
 * slot becomes an IEEE chain element pointing at cmd->sg_frame.
 */
static int
megasas_make_sgl_fusion(struct megasas_instance *instance,
			struct scsi_cmnd *scp,
			struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
			struct megasas_cmd_fusion *cmd)
{
	int i, sg_processed, sge_count;
	struct scatterlist *os_sgl;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;

	/* Invader: clear the Flags of the last main-frame SGE up front */
	if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) {
		struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
		sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}

	sge_count = scsi_dma_map(scp);

	BUG_ON(sge_count < 0);

	/* Over-long or empty lists are handed back for the caller to judge */
	if (sge_count > instance->max_num_sge || !sge_count)
		return sge_count;

	scsi_for_each_sg(scp, os_sgl, sge_count, i) {
		sgl_ptr->Length = sg_dma_len(os_sgl);
		sgl_ptr->Address = sg_dma_address(os_sgl);
		sgl_ptr->Flags = 0;
		if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) {
			if (i == sge_count - 1)
				sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
		}
		sgl_ptr++;

		sg_processed = i + 1;

		/*
		 * Main frame nearly full and more SGEs remain: turn the
		 * next slot into a chain element and continue in sg_frame.
		 */
		if ((sg_processed == (fusion->max_sge_in_main_msg - 1)) &&
		    (sge_count > fusion->max_sge_in_main_msg)) {

			struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
			if (instance->pdev->device ==
			    PCI_DEVICE_ID_LSI_INVADER) {
				/* Fast-path requests carry no chain offset */
				if ((cmd->io_request->IoFlags &
				     MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
				    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
					cmd->io_request->ChainOffset =
						fusion->
						chain_offset_io_request;
				else
					cmd->io_request->ChainOffset = 0;
			} else
				cmd->io_request->ChainOffset =
					fusion->chain_offset_io_request;

			sg_chain = sgl_ptr;
			/* Prepare chain element */
			sg_chain->NextChainOffset = 0;
			if (instance->pdev->device ==
			    PCI_DEVICE_ID_LSI_INVADER)
				sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
			else
				sg_chain->Flags =
					(IEEE_SGE_FLAGS_CHAIN_ELEMENT |
					 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
			sg_chain->Length = (sizeof(union MPI2_SGE_IO_UNION) *
					    (sge_count - sg_processed));
			sg_chain->Address = cmd->sg_frame_phys_addr;

			/* Remaining SGEs are written into the chain frame */
			sgl_ptr =
			  (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame;
		}
	}

	return sge_count;
}

/**
 * megasas_set_pd_lba -	Sets PD LBA
 * @cdb:		CDB
 * @cdb_len:		cdb length
 * @start_blk:		Start block of IO
 *
 * Used to set the PD LBA in CDB for FP IOs.
 * For T10-PI (DIF) protected LDs, rewrites the CDB as a 32-byte
 * variable-length READ32/WRITE32 with EEDP reference tags; otherwise
 * normalizes the CDB size (10 vs 16 byte) to fit the LBA and patches
 * the LBA bytes in place.
 */
void
megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
		   struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp,
		   struct MR_FW_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
{
	struct MR_LD_RAID *raid;
	u32 ld;
	u64 start_blk = io_info->pdBlock;
	u8 *cdb = io_request->CDB.CDB32;
	u32 num_blocks = io_info->numBlocks;
	u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0;

	/* Check if T10 PI (DIF) is enabled for this LD */
	ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
	raid = MR_LdRaidGet(ld, local_map_ptr);
	if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
		/* Build a 32-byte variable-length CDB with protection info */
		memset(cdb, 0, sizeof(io_request->CDB.CDB32));
		cdb[0] = MEGASAS_SCSI_VARIABLE_LENGTH_CMD;
		cdb[7] = MEGASAS_SCSI_ADDL_CDB_LEN;

		if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
			cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32;
		else
			cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32;
		cdb[10] = MEGASAS_RD_WR_PROTECT_CHECK_ALL;

		/* LBA */
		cdb[12] = (u8)((start_blk >> 56) & 0xff);
		cdb[13] = (u8)((start_blk >> 48) & 0xff);
		cdb[14] = (u8)((start_blk >> 40) & 0xff);
		cdb[15] = (u8)((start_blk >> 32) & 0xff);
		cdb[16] = (u8)((start_blk >> 24) & 0xff);
		cdb[17] = (u8)((start_blk >> 16) & 0xff);
		cdb[18] = (u8)((start_blk >> 8) & 0xff);
		cdb[19] = (u8)(start_blk & 0xff);

		/* Logical block reference tag */
		io_request->CDB.EEDP32.PrimaryReferenceTag =
			cpu_to_be32(ref_tag);
		io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff;

		io_request->IoFlags = 32; /* Specify 32-byte cdb */

		/* Transfer length */
		cdb[28] = (u8)((num_blocks >> 24) & 0xff);
		cdb[29] = (u8)((num_blocks >> 16) & 0xff);
		cdb[30] = (u8)((num_blocks >> 8) & 0xff);
		cdb[31] = (u8)(num_blocks & 0xff);

		/* set SCSI IO EEDPFlags */
		if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) {
			io_request->EEDPFlags =
				MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG  |
				MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
				MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
				MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
				MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
		} else {
			io_request->EEDPFlags =
				MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
				MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
		}
		/* NOTE(review): magic 0x4 << 26 in Control — meaning not
		 * derivable from this file; confirm against MPI2 spec */
		io_request->Control |= (0x4 << 26);
		io_request->EEDPBlockSize = MEGASAS_EEDPBLOCKSIZE;
	} else {
		/* Some drives don't support 16/12 byte CDB's, convert to 10 */
		if (((cdb_len == 12) || (cdb_len == 16)) &&
		    (start_blk <= 0xffffffff)) {
			if (cdb_len == 16) {
				opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
				flagvals = cdb[1];
				groupnum = cdb[14];
				control = cdb[15];
			} else {
				opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
				flagvals = cdb[1];
				groupnum = cdb[10];
				control = cdb[11];
			}

			memset(cdb, 0, sizeof(io_request->CDB.CDB32));

			cdb[0] = opcode;
			cdb[1] = flagvals;
			cdb[6] = groupnum;
			cdb[9] = control;

			/* Transfer length */
			cdb[8] = (u8)(num_blocks & 0xff);
			cdb[7] = (u8)((num_blocks >> 8) & 0xff);

			io_request->IoFlags = 10; /* Specify 10-byte cdb */
			cdb_len = 10;
		} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
			/* Convert to 16 byte CDB for large LBA's */
			switch (cdb_len) {
			case 6:
				opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
				control = cdb[5];
				break;
			case 10:
				opcode =
					cdb[0] == READ_10 ? READ_16 : WRITE_16;
				flagvals = cdb[1];
				groupnum = cdb[6];
				control = cdb[9];
				break;
			case 12:
				opcode =
					cdb[0] == READ_12 ? READ_16 : WRITE_16;
				flagvals = cdb[1];
				groupnum = cdb[10];
				control = cdb[11];
				break;
			}

			memset(cdb, 0, sizeof(io_request->CDB.CDB32));

			cdb[0] = opcode;
			cdb[1] = flagvals;
			cdb[14] = groupnum;
			cdb[15] = control;

			/* Transfer length */
			cdb[13] = (u8)(num_blocks & 0xff);
			cdb[12] = (u8)((num_blocks >> 8) & 0xff);
			cdb[11] = (u8)((num_blocks >> 16) & 0xff);
			cdb[10] = (u8)((num_blocks >> 24) & 0xff);

			io_request->IoFlags = 16; /* Specify 16-byte cdb */
			cdb_len = 16;
		}

		/* Normal case, just load LBA here */
		switch (cdb_len) {
		case 6:
		{
			u8 val = cdb[1] & 0xE0;
			cdb[3] = (u8)(start_blk & 0xff);
			cdb[2] = (u8)((start_blk >> 8) & 0xff);
			cdb[1] = val | ((u8)(start_blk >> 16) & 0x1f);
			break;
		}
		case 10:
			cdb[5] = (u8)(start_blk & 0xff);
			cdb[4] = (u8)((start_blk >> 8) & 0xff);
			cdb[3] = (u8)((start_blk >> 16) & 0xff);
			cdb[2] = (u8)((start_blk >> 24) & 0xff);
			break;
		case 12:
			cdb[5] = (u8)(start_blk & 0xff);
			cdb[4] = (u8)((start_blk >> 8) & 0xff);
			cdb[3] = (u8)((start_blk >> 16) & 0xff);
			cdb[2] = (u8)((start_blk >> 24) & 0xff);
			break;
		case 16:
			cdb[9] = (u8)(start_blk & 0xff);
			cdb[8] = (u8)((start_blk >> 8) & 0xff);
			cdb[7] = (u8)((start_blk >> 16) & 0xff);
			cdb[6] = (u8)((start_blk >> 24) & 0xff);
			cdb[5] = (u8)((start_blk >> 32) & 0xff);
			cdb[4] = (u8)((start_blk >> 40) & 0xff);
			cdb[3] = (u8)((start_blk >> 48) & 0xff);
			cdb[2] = (u8)((start_blk >> 56) & 0xff);
			break;
		}
	}
}

/**
 * megasas_build_ldio_fusion -	Prepares IOs to devices
 * @instance:		Adapter soft state
 * @scp:		SCSI command
 * @cmd:		Command to be prepared
 *
 * Prepares the io_request and chain elements (sg_frame) for IO
 * The IO can be for PD (Fast Path) or LD
 */
void
megasas_build_ldio_fusion(struct megasas_instance *instance,
			  struct scsi_cmnd *scp,
			  struct megasas_cmd_fusion *cmd)
{
	u8 fp_possible;
	u32 start_lba_lo, start_lba_hi, device_id, datalength = 0;
	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
	struct IO_REQUEST_INFO io_info;
	struct fusion_context *fusion;
	struct MR_FW_RAID_MAP_ALL *local_map_ptr;

	device_id = MEGASAS_DEV_INDEX(instance, scp);

	fusion = instance->ctrl_context;

	io_request = cmd->io_request;
	io_request->RaidContext.VirtualDiskTgtId = device_id;
	io_request->RaidContext.status = 0;
	io_request->RaidContext.exStatus = 0;

	req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;

	start_lba_lo = 0;
	start_lba_hi = 0;
	fp_possible = 0;

	/*
	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
	 */
	if (scp->cmd_len == 6) {
		datalength = (u32) scp->cmnd[4];
		start_lba_lo = ((u32) scp->cmnd[1] << 16) |
			((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];

		/* 6-byte CDBs carry only a 21-bit LBA */
		start_lba_lo &= 0x1FFFFF;
	}

	/*
	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
	 */
	else if (scp->cmd_len == 10) {
		datalength = (u32) scp->cmnd[8] |
			((u32) scp->cmnd[7] << 8);
		start_lba_lo = ((u32) scp->cmnd[2] << 24) |
			((u32) scp->cmnd[3] << 16) |
			((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
	}

	/*
	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
	 */
	else if (scp->cmd_len == 12) {
		datalength = ((u32) scp->cmnd[6] << 24) |
			((u32) scp->cmnd[7] << 16) |
			((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
		start_lba_lo = ((u32) scp->cmnd[2] << 24) |
			((u32) scp->cmnd[3] << 16) |
			((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
	}

	/*
	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
	 */
	else if (scp->cmd_len == 16) {
		datalength = ((u32) scp->cmnd[10] << 24) |
			((u32) scp->cmnd[11] << 16) |
			((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
		start_lba_lo = ((u32) scp->cmnd[6] << 24) |
			((u32) scp->cmnd[7] << 16) |
			((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];

		start_lba_hi = ((u32) scp->cmnd[2] << 24) |
			((u32) scp->cmnd[3] << 16) |
			((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
	}

	memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
	io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
	io_info.numBlocks = datalength;
	io_info.ldTgtId = device_id;
	io_request->DataLength = scsi_bufflen(scp);

	if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
		io_info.isRead = 1;

	local_map_ptr = fusion->ld_map[(instance->map_id & 1)];

	/* Fast path only possible with a valid map and known target */
	if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >=
	     MAX_LOGICAL_DRIVES) || (!fusion->fast_path_io)) {
		io_request->RaidContext.regLockFlags = 0;
		fp_possible = 0;
	} else {
		if (MR_BuildRaidContext(instance, &io_info,
					&io_request->RaidContext,
					local_map_ptr))
			fp_possible = io_info.fpOkForIo;
	}

	/* Use smp_processor_id() for now until cmd->request->cpu is CPU
	   id by default, not CPU group id, otherwise all MSI-X queues won't
	   be utilized */
	cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ?
		smp_processor_id() % instance->msix_vectors : 0;

	if (fp_possible) {
		/* Fast path: address the PD directly, bypassing FW RAID */
		megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
				   local_map_ptr, start_lba_lo);
		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
		cmd->request_desc->SCSIIO.RequestFlags =
			(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY
			 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
		if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) {
			if (io_request->RaidContext.regLockFlags ==
			    REGION_TYPE_UNUSED)
				cmd->request_desc->SCSIIO.RequestFlags =
					(MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
					 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
			io_request->RaidContext.Type = MPI2_TYPE_CUDA;
			io_request->RaidContext.nseg = 0x1;
			io_request->IoFlags |=
				MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
			io_request->RaidContext.regLockFlags |=
				(MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
				 MR_RL_FLAGS_SEQ_NUM_ENABLE);
		}
		/* RAID1 read load balancing: pick the less-busy arm */
		if ((fusion->load_balance_info[device_id].loadBalanceFlag) &&
		    (io_info.isRead)) {
			io_info.devHandle =
				get_updated_dev_handle(
					&fusion->load_balance_info[device_id],
					&io_info);
			scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
		} else
			scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
		cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
		io_request->DevHandle = io_info.devHandle;
	} else {
		/* LD path: let the FW's RAID stack handle the I/O */
		io_request->RaidContext.timeoutValue =
			local_map_ptr->raidMap.fpPdIoTimeoutSec;
		cmd->request_desc->SCSIIO.RequestFlags =
			(MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
			 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
		if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) {
			if (io_request->RaidContext.regLockFlags ==
			    REGION_TYPE_UNUSED)
				cmd->request_desc->SCSIIO.RequestFlags =
					(MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
					 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
			io_request->RaidContext.Type = MPI2_TYPE_CUDA;
			io_request->RaidContext.regLockFlags |=
				(MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
				 MR_RL_FLAGS_SEQ_NUM_ENABLE);
			io_request->RaidContext.nseg = 0x1;
		}
		io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
		io_request->DevHandle = device_id;
	} /* Not FP */
}

/**
 * megasas_build_dcdb_fusion -
 Prepares IOs to devices
 * @instance:		Adapter soft state
 * @scp:		SCSI command
 * @cmd:		Command to be prepared
 *
 * Prepares the io_request frame for non-io cmds
 */
static void
megasas_build_dcdb_fusion(struct megasas_instance *instance,
			  struct scsi_cmnd *scmd,
			  struct megasas_cmd_fusion *cmd)
{
	u32 device_id;
	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
	u16 pd_index = 0;
	struct MR_FW_RAID_MAP_ALL *local_map_ptr;
	struct fusion_context *fusion = instance->ctrl_context;

	io_request = cmd->io_request;
	device_id = MEGASAS_DEV_INDEX(instance, scmd);
	/* Flat index into pd_list: channel-major, then target id */
	pd_index = (scmd->device->channel * MEGASAS_MAX_DEV_PER_CHANNEL)
		+ scmd->device->id;
	local_map_ptr = fusion->ld_map[(instance->map_id & 1)];

	/* Check if this is a system PD I/O */
	if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS &&
	    instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
		/* System PD: route directly to the device handle from
		 * the RAID map (high-priority, no region lock). */
		io_request->Function = 0;
		io_request->DevHandle =
			local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
		io_request->RaidContext.timeoutValue =
			local_map_ptr->raidMap.fpPdIoTimeoutSec;
		io_request->RaidContext.regLockFlags = 0;
		io_request->RaidContext.regLockRowLBA = 0;
		io_request->RaidContext.regLockLength = 0;
		io_request->RaidContext.RAIDFlags =
			MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
			MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
		cmd->request_desc->SCSIIO.RequestFlags =
			(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
			 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
		cmd->request_desc->SCSIIO.DevHandle =
			local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
	} else {
		/* Logical drive: hand the command to the FW RAID stack */
		io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
		io_request->DevHandle = device_id;
		cmd->request_desc->SCSIIO.RequestFlags =
			(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
			 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	}
	io_request->RaidContext.VirtualDiskTgtId = device_id;
	io_request->LUN[1] = scmd->device->lun;
	io_request->DataLength = scsi_bufflen(scmd);
}

/**
 * megasas_build_io_fusion -	Prepares IOs to devices
 * @instance:		Adapter soft state
 * @scp:		SCSI command
 * @cmd:		Command to be prepared
 *
 * Invokes helper functions to prepare request frames
 * and sets flags appropriate for IO/Non-IO cmd
 */
int
megasas_build_io_fusion(struct megasas_instance *instance,
			struct scsi_cmnd *scp,
			struct megasas_cmd_fusion *cmd)
{
	u32 device_id, sge_count;
	struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;

	device_id = MEGASAS_DEV_INDEX(instance, scp);

	/* Zero out some fields so they don't get reused */
	io_request->LUN[1] = 0;
	io_request->CDB.EEDP32.PrimaryReferenceTag = 0;
	io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0;
	io_request->EEDPFlags = 0;
	io_request->Control = 0;
	io_request->EEDPBlockSize = 0;
	io_request->ChainOffset = 0;
	io_request->RaidContext.RAIDFlags = 0;
	io_request->RaidContext.Type = 0;
	io_request->RaidContext.nseg = 0;

	memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len);
	/*
	 * Just the CDB length,rest of the Flags are zero
	 * This will be modified for FP in build_ldio_fusion
	 */
	io_request->IoFlags = scp->cmd_len;

	/* Read/write commands go down the LD I/O builder; everything
	 * else (inquiry, mode sense, ...) goes down the DCDB builder */
	if (megasas_is_ldio(scp))
		megasas_build_ldio_fusion(instance, scp, cmd);
	else
		megasas_build_dcdb_fusion(instance, scp, cmd);

	/*
	 * Construct SGL
	 */
	sge_count =
		megasas_make_sgl_fusion(instance, scp,
					(struct MPI25_IEEE_SGE_CHAIN64 *)
					&io_request->SGL,
					cmd);

	if (sge_count > instance->max_num_sge) {
		printk(KERN_ERR "megasas: Error. sge_count (0x%x) exceeds "
		       "max (0x%x) allowed\n", sge_count,
		       instance->max_num_sge);
		return 1;
	}

	io_request->RaidContext.numSGE = sge_count;

	io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;

	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
		io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE;
	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
		io_request->Control |= MPI2_SCSIIO_CONTROL_READ;

	io_request->SGLOffset0 =
		offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;

	io_request->SenseBufferLowAddress = cmd->sense_phys_addr;
	io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;

	/* Cross-link scmd and fusion cmd for completion/abort handling */
	cmd->scmd = scp;
	scp->SCp.ptr = (char *)cmd;

	return 0;
}

/**
 * megasas_get_request_descriptor -	Fetch the request descriptor for a SMID
 * @instance:		Adapter soft state
 * @index:		Zero-based SMID (SMID - 1)
 *
 * Returns a pointer into the request-descriptor array, or NULL for an
 * out-of-range index.
 */
union MEGASAS_REQUEST_DESCRIPTOR_UNION *
megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
{
	u8 *p;
	struct fusion_context *fusion;

	if (index >= instance->max_fw_cmds) {
		printk(KERN_ERR "megasas: Invalid SMID (0x%x)request for "
		       "descriptor\n", index);
		return NULL;
	}
	fusion = instance->ctrl_context;
	p = fusion->req_frames_desc
		+ sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * index;

	return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p;
}

/**
 * megasas_build_and_issue_cmd_fusion -Main routine for building and
 *					issuing non IOCTL cmd
 * @instance:			Adapter soft state
 * @scmd:			pointer to scsi cmd from OS
 */
static u32
megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
				   struct scsi_cmnd *scmd)
{
	struct megasas_cmd_fusion *cmd;
	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
	u32 index;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;

	cmd = megasas_get_cmd_fusion(instance);
	if (!cmd)
		return SCSI_MLQUEUE_HOST_BUSY;

	index = cmd->index;

	/* SMIDs are 1-based; descriptor array is 0-based */
	req_desc = megasas_get_request_descriptor(instance, index - 1);
	if (!req_desc)
		return 1;

	req_desc->Words = 0;
	cmd->request_desc = req_desc;

	if (megasas_build_io_fusion(instance, scmd, cmd)) {
		megasas_return_cmd_fusion(instance, cmd);
		printk(KERN_ERR "megasas: Error building command.\n");
		cmd->request_desc = NULL;
		return 1;
	}

	req_desc = cmd->request_desc;
	req_desc->SCSIIO.SMID = index;

	/* Sanity check only — 0 (no chain) and 0xF are the expected values */
	if (cmd->io_request->ChainOffset != 0 &&
	    cmd->io_request->ChainOffset != 0xF)
		printk(KERN_ERR "megasas: The chain offset value is not "
		       "correct : %x\n", cmd->io_request->ChainOffset);

	/*
	 * Issue the command to the FW
	 */
	atomic_inc(&instance->fw_outstanding);

	instance->instancet->fire_cmd(instance,
				      req_desc->u.low, req_desc->u.high,
				      instance->reg_set);

	return 0;
}

/**
 * complete_cmd_fusion -	Completes command
 * @instance:			Adapter soft state
 *
 * Completes all commands that is in reply descriptor queue.
 * Walks the per-MSI-X reply ring from last_reply_idx until an UNUSED
 * descriptor is found, then updates the reply-post host index register.
 */
int
complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
{
	union MPI2_REPLY_DESCRIPTORS_UNION *desc;
	struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
	struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
	struct fusion_context *fusion;
	struct megasas_cmd *cmd_mfi;
	struct megasas_cmd_fusion *cmd_fusion;
	u16 smid, num_completed;
	u8 reply_descript_type, arm;
	u32 status, extStatus, device_id;
	union desc_value d_val;
	struct LD_LOAD_BALANCE_INFO *lbinfo;

	fusion = instance->ctrl_context;

	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
		return IRQ_HANDLED;

	/* Position at this vector's ring segment plus the saved cursor */
	desc = fusion->reply_frames_desc;
	desc += ((MSIxIndex * fusion->reply_alloc_sz) /
		 sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)) +
		fusion->last_reply_idx[MSIxIndex];

	reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;

	d_val.word = desc->Words;

	reply_descript_type = reply_desc->ReplyFlags &
		MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
		return IRQ_NONE;

	num_completed = 0;

	/* A descriptor of all-ones (both halves UINT_MAX) is empty */
	while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) {
		smid = reply_desc->SMID;

		cmd_fusion = fusion->cmd_list[smid - 1];

		scsi_io_req =
			(struct MPI2_RAID_SCSI_IO_REQUEST *)
			cmd_fusion->io_request;

		if (cmd_fusion->scmd)
			cmd_fusion->scmd->SCp.ptr = NULL;

		status = scsi_io_req->RaidContext.status;
		extStatus = scsi_io_req->RaidContext.exStatus;

		switch (scsi_io_req->Function) {
		case MPI2_FUNCTION_SCSI_IO_REQUEST:  /*Fast Path IO.*/
			/* Update load balancing info */
			device_id = MEGASAS_DEV_INDEX(instance,
						      cmd_fusion->scmd);
			lbinfo = &fusion->load_balance_info[device_id];
			if (cmd_fusion->scmd->SCp.Status &
			    MEGASAS_LOAD_BALANCE_FLAG) {
				/* Release the pending-count on the arm this
				 * RAID1 read was balanced onto */
				arm = lbinfo->raid1DevHandle[0] ==
					cmd_fusion->io_request->DevHandle ? 0 :
					1;
				atomic_dec(&lbinfo->scsi_pending_cmds[arm]);
				cmd_fusion->scmd->SCp.Status &=
					~MEGASAS_LOAD_BALANCE_FLAG;
			}
			if (reply_descript_type ==
			    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
				if (megasas_dbg_lvl == 5)
					printk(KERN_ERR "\nmegasas: FAST Path "
					       "IO Success\n");
			}
			/* Fall thru and complete IO */
		case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
			/* Map the FW Cmd Status */
			map_cmd_status(cmd_fusion, status, extStatus);
			scsi_dma_unmap(cmd_fusion->scmd);
			cmd_fusion->scmd->scsi_done(cmd_fusion->scmd);
			scsi_io_req->RaidContext.status = 0;
			scsi_io_req->RaidContext.exStatus = 0;
			megasas_return_cmd_fusion(instance, cmd_fusion);
			atomic_dec(&instance->fw_outstanding);

			break;
		case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
			/* Complete the wrapped MFI (internal) command */
			cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
			megasas_complete_cmd(instance, cmd_mfi, DID_OK);
			cmd_fusion->flags = 0;
			megasas_return_cmd_fusion(instance, cmd_fusion);

			break;
		}

		fusion->last_reply_idx[MSIxIndex]++;
		if (fusion->last_reply_idx[MSIxIndex] >=
		    fusion->reply_q_depth)
			fusion->last_reply_idx[MSIxIndex] = 0;

		/* Mark the slot empty for the next pass */
		desc->Words = ULLONG_MAX;
		num_completed++;

		/* Get the next reply descriptor */
		if (!fusion->last_reply_idx[MSIxIndex])
			desc = fusion->reply_frames_desc +
				((MSIxIndex * fusion->reply_alloc_sz) /
				 sizeof(union MPI2_REPLY_DESCRIPTORS_UNION));
		else
			desc++;

		reply_desc =
			(struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;

		d_val.word = desc->Words;

		reply_descript_type = reply_desc->ReplyFlags &
			MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;
	}

	if (!num_completed)
		return IRQ_NONE;

	/* Order the descriptor clears before telling HW we consumed them */
	wmb();
	writel((MSIxIndex << 24) | fusion->last_reply_idx[MSIxIndex],
	       &instance->reg_set->reply_post_host_index);
	megasas_check_and_restore_queue_depth(instance);
	return IRQ_HANDLED;
}

/**
 * megasas_complete_cmd_dpc_fusion -	Completes command
 * @instance:			Adapter soft state
 *
 * Tasklet to complete cmds
 */
void
megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
{
	struct megasas_instance *instance =
		(struct megasas_instance *)instance_addr;
	unsigned long flags;
	u32 count, MSIxIndex;

	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;

	/* If we have already declared adapter dead, donot complete cmds */
	spin_lock_irqsave(&instance->hba_lock, flags);
	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
		spin_unlock_irqrestore(&instance->hba_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&instance->hba_lock, flags);

	/* Drain every per-vector reply queue */
	for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
		complete_cmd_fusion(instance, MSIxIndex);
}

/**
 * megasas_isr_fusion - isr entry point
 */
irqreturn_t megasas_isr_fusion(int irq, void *devp)
{
	struct megasas_irq_context *irq_context = devp;
	struct megasas_instance *instance = irq_context->instance;
	u32 mfiStatus, fw_state;

	/* Legacy (non-MSI-X) interrupts must be acked and validated first */
	if (!instance->msix_vectors) {
		mfiStatus = instance->instancet->clear_intr(instance->reg_set);
		if (!mfiStatus)
			return IRQ_NONE;
	}

	/* If we are resetting, bail */
	if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) {
		instance->instancet->clear_intr(instance->reg_set);
		return IRQ_HANDLED;
	}

	if (!complete_cmd_fusion(instance, irq_context->MSIxIndex)) {
		instance->instancet->clear_intr(instance->reg_set);
		/* If we didn't complete any commands, check for FW fault */
		fw_state = instance->instancet->read_fw_status_reg(
			instance->reg_set) & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT)
			schedule_work(&instance->work_init);
	}

	return IRQ_HANDLED;
}

/**
 * build_mpt_mfi_pass_thru - builds a cmd fo MFI Pass thru
 * @instance:			Adapter soft state
 * mfi_cmd:			megasas_cmd pointer
 *
 * Wraps a legacy MFI frame in an MPT pass-through request: a fusion cmd
 * is taken from the pool and its SGL chain element is pointed at the MFI
 * frame's physical address.
 */
u8
build_mpt_mfi_pass_thru(struct megasas_instance *instance,
			struct megasas_cmd *mfi_cmd)
{
	struct MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	struct MPI2_RAID_SCSI_IO_REQUEST *io_req;
	struct megasas_cmd_fusion *cmd;
	struct fusion_context *fusion;
	struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr;

	cmd = megasas_get_cmd_fusion(instance);
	if (!cmd)
		return 1;

	/*  Save the smid. To be used for returning the cmd */
	mfi_cmd->context.smid = cmd->index;

	cmd->sync_cmd_idx = mfi_cmd->index;

	/*
	 * For cmds where the flag is set, store the flag and check
	 * on completion. For cmds with this flag, don't call
	 * megasas_complete_cmd
	 */

	if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
		cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	fusion = instance->ctrl_context;
	io_req = cmd->io_request;

	if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) {
		struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
			(struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
		sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}

	mpi25_ieee_chain =
		(struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;

	io_req->Function = MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	io_req->SGLOffset0 = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST,
				      SGL) / 4;
	io_req->ChainOffset = fusion->chain_offset_mfi_pthru;

	/* Chain element points at the legacy MFI frame in DMA memory */
	mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;

	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
		MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	mpi25_ieee_chain->Length = MEGASAS_MAX_SZ_CHAIN_FRAME;

	return 0;
}

/**
 * build_mpt_cmd - Calls helper function to build a cmd MFI Pass thru cmd
 * @instance:			Adapter soft state
 * @cmd:			mfi cmd to build
 *
 * Returns the populated request descriptor, or NULL on failure.
 */
union MEGASAS_REQUEST_DESCRIPTOR_UNION *
build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
	u16 index;

	if (build_mpt_mfi_pass_thru(instance, cmd)) {
		printk(KERN_ERR "Couldn't build MFI pass thru cmd\n");
		return NULL;
	}

	/* SMID saved by build_mpt_mfi_pass_thru; descriptors are 0-based */
	index = cmd->context.smid;

	req_desc = megasas_get_request_descriptor(instance, index - 1);

	if (!req_desc)
		return NULL;

	req_desc->Words = 0;
	req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
					 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	req_desc->SCSIIO.SMID = index;

	return req_desc;
}

/**
 * megasas_issue_dcmd_fusion - Issues a MFI Pass thru cmd
 * @instance:			Adapter soft state
 * @cmd:			mfi cmd pointer
 */
void
megasas_issue_dcmd_fusion(struct megasas_instance *instance,
			  struct megasas_cmd *cmd)
{
	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;

	req_desc = build_mpt_cmd(instance, cmd);
	if (!req_desc) {
		printk(KERN_ERR "Couldn't issue MFI pass thru cmd\n");
		return;
	}
	instance->instancet->fire_cmd(instance, req_desc->u.low,
				      req_desc->u.high, instance->reg_set);
}

/**
 * megasas_release_fusion -	Reverses the FW initialization
 * @intance:			Adapter soft state
 */
void
megasas_release_fusion(struct megasas_instance *instance)
{
	megasas_free_cmds(instance);
	megasas_free_cmds_fusion(instance);

	iounmap(instance->reg_set);

	pci_release_selected_regions(instance->pdev, instance->bar);
}

/**
 * megasas_read_fw_status_reg_fusion - returns the current FW status value
 * @regs:			MFI register set
 */
static u32
megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem *regs)
{
	return readl(&(regs)->outbound_scratch_pad);
}

/**
 * megasas_adp_reset_fusion -	For controller reset
 * @regs:				MFI register set
 *
 * No-op: fusion adapters are reset via megasas_reset_fusion instead.
 */
static int
megasas_adp_reset_fusion(struct megasas_instance *instance,
			 struct megasas_register_set __iomem *regs)
{
	return 0;
}

/**
 * megasas_check_reset_fusion -	For controller reset check
 * @regs:				MFI register set
 *
 * No-op placeholder; always reports "no reset in progress".
 */
static int
megasas_check_reset_fusion(struct megasas_instance *instance,
			   struct megasas_register_set __iomem *regs)
{
	return 0;
}

/* This function waits for outstanding commands on fusion to complete.
 * Returns 1 (reset required) on FW fault or timeout, 0 once the
 * outstanding count drains to zero. */
int
megasas_wait_for_outstanding_fusion(struct megasas_instance *instance)
{
	int i, outstanding, retval = 0;
	u32 fw_state;

	/* resetwaittime is in seconds (one msleep(1000) per iteration) */
	for (i = 0; i < resetwaittime; i++) {
		/* Check if firmware is in fault state */
		fw_state = instance->instancet->read_fw_status_reg(
			instance->reg_set) & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT) {
			printk(KERN_WARNING "megasas: Found FW in FAULT state,"
			       " will reset adapter.\n");
			retval = 1;
			goto out;
		}

		outstanding = atomic_read(&instance->fw_outstanding);
		if (!outstanding)
			goto out;

		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
			printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
			       "commands to complete\n", i, outstanding);
			/* Reap any replies already posted by the FW */
			megasas_complete_cmd_dpc_fusion(
				(unsigned long)instance);
		}
		msleep(1000);
	}

	if (atomic_read(&instance->fw_outstanding)) {
		printk("megaraid_sas: pending commands remain after waiting, "
		       "will reset adapter.\n");
		retval = 1;
	}
out:
	return retval;
}

/* Clear all reply descriptors and per-vector cursors before a reset */
void megasas_reset_reply_desc(struct megasas_instance *instance)
{
	int i, count;
	struct fusion_context *fusion;
	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;

	fusion = instance->ctrl_context;
	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
	for (i = 0; i < count; i++)
		fusion->last_reply_idx[i] = 0;
	reply_desc = fusion->reply_frames_desc;
	/* All-ones marks a descriptor slot as unused */
	for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
		reply_desc->Words = ULLONG_MAX;
}

/* Core fusion reset function
 * (continues beyond this excerpt) */
int megasas_reset_fusion(struct Scsi_Host *shost)
{
	int retval = SUCCESS, i, j, retry = 0;
	struct megasas_instance *instance;
	struct megasas_cmd_fusion *cmd_fusion;
	struct fusion_context *fusion;
	struct megasas_cmd *cmd_mfi;
	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
	u32 host_diag, abs_state, status_reg, reset_adapter;

	instance = (struct megasas_instance *)shost->hostdata;
	fusion = instance->ctrl_context;

	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
		printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
		       "returning FAILED.\n");
		return FAILED;
	}

	mutex_lock(&instance->reset_mutex);
	set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
	instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
	instance->instancet->disable_intr(instance->reg_set);
	msleep(1000);

	/* First try waiting for commands to complete */
	if
(megasas_wait_for_outstanding_fusion(instance)) { printk(KERN_WARNING "megaraid_sas: resetting fusion " "adapter.\n"); /* Now return commands back to the OS */ for (i = 0 ; i < instance->max_fw_cmds; i++) { cmd_fusion = fusion->cmd_list[i]; if (cmd_fusion->scmd) { scsi_dma_unmap(cmd_fusion->scmd); cmd_fusion->scmd->result = (DID_RESET << 16); cmd_fusion->scmd->scsi_done(cmd_fusion->scmd); megasas_return_cmd_fusion(instance, cmd_fusion); atomic_dec(&instance->fw_outstanding); } } status_reg = instance->instancet->read_fw_status_reg( instance->reg_set); abs_state = status_reg & MFI_STATE_MASK; reset_adapter = status_reg & MFI_RESET_ADAPTER; if (instance->disableOnlineCtrlReset || (abs_state == MFI_STATE_FAULT && !reset_adapter)) { /* Reset not supported, kill adapter */ printk(KERN_WARNING "megaraid_sas: Reset not supported" ", killing adapter.\n"); megaraid_sas_kill_hba(instance); instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR; retval = FAILED; goto out; } /* Now try to reset the chip */ for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) { writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &instance->reg_set->fusion_seq_offset); writel(MPI2_WRSEQ_1ST_KEY_VALUE, &instance->reg_set->fusion_seq_offset); writel(MPI2_WRSEQ_2ND_KEY_VALUE, &instance->reg_set->fusion_seq_offset); writel(MPI2_WRSEQ_3RD_KEY_VALUE, &instance->reg_set->fusion_seq_offset); writel(MPI2_WRSEQ_4TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset); writel(MPI2_WRSEQ_5TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset); writel(MPI2_WRSEQ_6TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset); /* Check that the diag write enable (DRWE) bit is on */ host_diag = readl(&instance->reg_set->fusion_host_diag); retry = 0; while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) { msleep(100); host_diag = readl(&instance->reg_set->fusion_host_diag); if (retry++ == 100) { printk(KERN_WARNING "megaraid_sas: " "Host diag unlock failed!\n"); break; } } if (!(host_diag & HOST_DIAG_WRITE_ENABLE)) continue; /* Send chip reset command */ 
writel(host_diag | HOST_DIAG_RESET_ADAPTER, &instance->reg_set->fusion_host_diag); msleep(3000); /* Make sure reset adapter bit is cleared */ host_diag = readl(&instance->reg_set->fusion_host_diag); retry = 0; while (host_diag & HOST_DIAG_RESET_ADAPTER) { msleep(100); host_diag = readl(&instance->reg_set->fusion_host_diag); if (retry++ == 1000) { printk(KERN_WARNING "megaraid_sas: " "Diag reset adapter never " "cleared!\n"); break; } } if (host_diag & HOST_DIAG_RESET_ADAPTER) continue; abs_state = instance->instancet->read_fw_status_reg( instance->reg_set) & MFI_STATE_MASK; retry = 0; while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) { msleep(100); abs_state = instance->instancet->read_fw_status_reg( instance->reg_set) & MFI_STATE_MASK; } if (abs_state <= MFI_STATE_FW_INIT) { printk(KERN_WARNING "megaraid_sas: firmware " "state < MFI_STATE_FW_INIT, state = " "0x%x\n", abs_state); continue; } /* Wait for FW to become ready */ if (megasas_transition_to_ready(instance, 1)) { printk(KERN_WARNING "megaraid_sas: Failed to " "transition controller to ready.\n"); continue; } megasas_reset_reply_desc(instance); if (megasas_ioc_init_fusion(instance)) { printk(KERN_WARNING "megaraid_sas: " "megasas_ioc_init_fusion() failed!\n"); continue; } clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); instance->instancet->enable_intr(instance->reg_set); instance->adprecovery = MEGASAS_HBA_OPERATIONAL; /* Re-fire management commands */ for (j = 0 ; j < instance->max_fw_cmds; j++) { cmd_fusion = fusion->cmd_list[j]; if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) { cmd_mfi = instance-> cmd_list[cmd_fusion->sync_cmd_idx]; if (cmd_mfi->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) { megasas_return_cmd(instance, cmd_mfi); megasas_return_cmd_fusion( instance, cmd_fusion); } else { req_desc = megasas_get_request_descriptor( instance, cmd_mfi->context.smid -1); if (!req_desc) printk(KERN_WARNING "req_desc NULL" "\n"); else { instance->instancet-> fire_cmd(instance, 
req_desc-> u.low, req_desc-> u.high, instance-> reg_set); } } } } /* Reset load balance info */ memset(fusion->load_balance_info, 0, sizeof(struct LD_LOAD_BALANCE_INFO) *MAX_LOGICAL_DRIVES); if (!megasas_get_map_info(instance)) megasas_sync_map_info(instance); /* Adapter reset completed successfully */ printk(KERN_WARNING "megaraid_sas: Reset " "successful.\n"); retval = SUCCESS; goto out; } /* Reset failed, kill the adapter */ printk(KERN_WARNING "megaraid_sas: Reset failed, killing " "adapter.\n"); megaraid_sas_kill_hba(instance); retval = FAILED; } else { clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); instance->instancet->enable_intr(instance->reg_set); instance->adprecovery = MEGASAS_HBA_OPERATIONAL; } out: clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); mutex_unlock(&instance->reset_mutex); return retval; } /* Fusion OCR work queue */ void megasas_fusion_ocr_wq(struct work_struct *work) { struct megasas_instance *instance = container_of(work, struct megasas_instance, work_init); megasas_reset_fusion(instance->host); } struct megasas_instance_template megasas_instance_template_fusion = { .fire_cmd = megasas_fire_cmd_fusion, .enable_intr = megasas_enable_intr_fusion, .disable_intr = megasas_disable_intr_fusion, .clear_intr = megasas_clear_intr_fusion, .read_fw_status_reg = megasas_read_fw_status_reg_fusion, .adp_reset = megasas_adp_reset_fusion, .check_reset = megasas_check_reset_fusion, .service_isr = megasas_isr_fusion, .tasklet = megasas_complete_cmd_dpc_fusion, .init_adapter = megasas_init_adapter_fusion, .build_and_issue_cmd = megasas_build_and_issue_cmd_fusion, .issue_dcmd = megasas_issue_dcmd_fusion, };
gpl-2.0
adknight87/android_kernel_samsung_afyonltetmo
arch/arm/kernel/ptrace.c
402
22909
/* * linux/arch/arm/kernel/ptrace.c * * By Ross Biro 1/23/92 * edited by Linus Torvalds * ARM modifications Copyright (C) 2000 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/elf.h> #include <linux/smp.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/security.h> #include <linux/init.h> #include <linux/signal.h> #include <linux/uaccess.h> #include <linux/perf_event.h> #include <linux/hw_breakpoint.h> #include <linux/regset.h> #include <linux/audit.h> #include <asm/pgtable.h> #include <asm/traps.h> #define REG_PC 15 #define REG_PSR 16 /* * does not yet catch signals sent when the child dies. * in exit.c or in signal.c. */ #if 0 /* * Breakpoint SWI instruction: SWI &9F0001 */ #define BREAKINST_ARM 0xef9f0001 #define BREAKINST_THUMB 0xdf00 /* fill this in later */ #else /* * New breakpoints - use an undefined instruction. 
The ARM architecture * reference manual guarantees that the following instruction space * will produce an undefined instruction exception on all CPUs: * * ARM: xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx * Thumb: 1101 1110 xxxx xxxx */ #define BREAKINST_ARM 0xe7f001f0 #define BREAKINST_THUMB 0xde01 #endif struct pt_regs_offset { const char *name; int offset; }; #define REG_OFFSET_NAME(r) \ {.name = #r, .offset = offsetof(struct pt_regs, ARM_##r)} #define REG_OFFSET_END {.name = NULL, .offset = 0} static const struct pt_regs_offset regoffset_table[] = { REG_OFFSET_NAME(r0), REG_OFFSET_NAME(r1), REG_OFFSET_NAME(r2), REG_OFFSET_NAME(r3), REG_OFFSET_NAME(r4), REG_OFFSET_NAME(r5), REG_OFFSET_NAME(r6), REG_OFFSET_NAME(r7), REG_OFFSET_NAME(r8), REG_OFFSET_NAME(r9), REG_OFFSET_NAME(r10), REG_OFFSET_NAME(fp), REG_OFFSET_NAME(ip), REG_OFFSET_NAME(sp), REG_OFFSET_NAME(lr), REG_OFFSET_NAME(pc), REG_OFFSET_NAME(cpsr), REG_OFFSET_NAME(ORIG_r0), REG_OFFSET_END, }; /** * regs_query_register_offset() - query register offset from its name * @name: the name of a register * * regs_query_register_offset() returns the offset of a register in struct * pt_regs from its name. If the name is invalid, this returns -EINVAL; */ int regs_query_register_offset(const char *name) { const struct pt_regs_offset *roff; for (roff = regoffset_table; roff->name != NULL; roff++) if (!strcmp(roff->name, name)) return roff->offset; return -EINVAL; } /** * regs_query_register_name() - query register name from its offset * @offset: the offset of a register in struct pt_regs. * * regs_query_register_name() returns the name of a register from its * offset in struct pt_regs. 
If the @offset is invalid, this returns NULL; */ const char *regs_query_register_name(unsigned int offset) { const struct pt_regs_offset *roff; for (roff = regoffset_table; roff->name != NULL; roff++) if (roff->offset == offset) return roff->name; return NULL; } /** * regs_within_kernel_stack() - check the address in the stack * @regs: pt_regs which contains kernel stack pointer. * @addr: address which is checked. * * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). * If @addr is within the kernel stack, it returns true. If not, returns false. */ bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) { return ((addr & ~(THREAD_SIZE - 1)) == (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))); } /** * regs_get_kernel_stack_nth() - get Nth entry of the stack * @regs: pt_regs which contains kernel stack pointer. * @n: stack entry number. * * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which * is specified by @regs. If the @n th entry is NOT in the kernel stack, * this returns 0. */ unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) { unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); addr += n; if (regs_within_kernel_stack(regs, (unsigned long)addr)) return *addr; else return 0; } /* * this routine will get a word off of the processes privileged stack. * the offset is how far from the base addr as stored in the THREAD. * this routine assumes that all the privileged stacks are in our * data space. */ static inline long get_user_reg(struct task_struct *task, int offset) { return task_pt_regs(task)->uregs[offset]; } /* * this routine will put a word on the processes privileged stack. * the offset is how far from the base addr as stored in the THREAD. * this routine assumes that all the privileged stacks are in our * data space. 
 */
static inline int
put_user_reg(struct task_struct *task, int offset, long data)
{
	struct pt_regs newregs, *regs = task_pt_regs(task);
	int ret = -EINVAL;

	/* Validate the whole register set on a copy first; only commit the
	 * single word if the result is still a legal user-mode state. */
	newregs = *regs;
	newregs.uregs[offset] = data;

	if (valid_user_regs(&newregs)) {
		regs->uregs[offset] = data;
		ret = 0;
	}

	return ret;
}

/*
 * Called by kernel/ptrace.c when detaching..
 */
void ptrace_disable(struct task_struct *child)
{
	/* Nothing to do. */
}

/*
 * Handle hitting a breakpoint.  Deliver SIGTRAP/TRAP_BRKPT to the task
 * with si_addr set to the faulting instruction.
 */
void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
{
	siginfo_t info;

	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code  = TRAP_BRKPT;
	info.si_addr  = (void __user *)instruction_pointer(regs);

	force_sig_info(SIGTRAP, &info, tsk);
}

/* Undef-instruction hook callback: treat the trap as a ptrace breakpoint. */
static int break_trap(struct pt_regs *regs, unsigned int instr)
{
	ptrace_break(current, regs);
	return 0;
}

/* ARM-state breakpoint encoding (condition bits masked off). */
static struct undef_hook arm_break_hook = {
	.instr_mask	= 0x0fffffff,
	.instr_val	= 0x07f001f0,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= 0,
	.fn		= break_trap,
};

/* 16-bit Thumb breakpoint encoding. */
static struct undef_hook thumb_break_hook = {
	.instr_mask	= 0xffff,
	.instr_val	= 0xde01,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= PSR_T_BIT,
	.fn		= break_trap,
};

/* 32-bit Thumb-2 breakpoint encoding. */
static struct undef_hook thumb2_break_hook = {
	.instr_mask	= 0xffffffff,
	.instr_val	= 0xf7f0a000,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= PSR_T_BIT,
	.fn		= break_trap,
};

/* Register all three breakpoint encodings with the undef-instruction
 * dispatcher at boot. */
static int __init ptrace_break_init(void)
{
	register_undef_hook(&arm_break_hook);
	register_undef_hook(&thumb_break_hook);
	register_undef_hook(&thumb2_break_hook);
	return 0;
}

core_initcall(ptrace_break_init);

/*
 * Read the word at offset "off" into the "struct user".  We
 * actually access the pt_regs stored on the kernel stack.
 */
static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
			    unsigned long __user *ret)
{
	unsigned long tmp;

	/* Offsets must be word-aligned. */
	if (off & 3)
		return -EIO;

	tmp = 0;
	if (off == PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(struct pt_regs))
		tmp = get_user_reg(tsk, off >> 2);
	else if (off >= sizeof(struct user))
		return -EIO;
	/* NOTE(review): offsets between pt_regs and the magic PT_* values
	 * fall through and read back as 0 — presumably intentional padding
	 * behaviour; confirm before changing. */

	return put_user(tmp, ret);
}

/*
 * Write the word at offset "off" into "struct user".  We
 * actually access the pt_regs stored on the kernel stack.
 */
static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
			     unsigned long val)
{
	if (off & 3 || off >= sizeof(struct user))
		return -EIO;

	/* Writes past the register block are silently accepted. */
	if (off >= sizeof(struct pt_regs))
		return 0;

	return put_user_reg(tsk, off >> 2, val);
}

#ifdef CONFIG_IWMMXT

/*
 * Get the child iWMMXt state.  Returns -ENODATA if the task never used
 * iWMMXt; otherwise flushes the coprocessor state to memory and copies it.
 */
static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
		return -ENODATA;
	iwmmxt_task_disable(thread);  /* force it to ram */
	return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE)
		? -EFAULT : 0;
}

/*
 * Set the child iWMMXt state.  Invalidates any live coprocessor copy so
 * the new memory image is reloaded on next use.
 */
static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
		return -EACCES;
	iwmmxt_task_release(thread);  /* force a reload */
	return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE)
		? -EFAULT : 0;
}

#endif

#ifdef CONFIG_CRUNCH
/*
 * Get the child Crunch state.
 */
static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	crunch_task_disable(thread);  /* force it to ram */
	return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE)
		? -EFAULT : 0;
}

/*
 * Set the child Crunch state.
*/ static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp) { struct thread_info *thread = task_thread_info(tsk); crunch_task_release(thread); /* force a reload */ return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE) ? -EFAULT : 0; } #endif #ifdef CONFIG_HAVE_HW_BREAKPOINT /* * Convert a virtual register number into an index for a thread_info * breakpoint array. Breakpoints are identified using positive numbers * whilst watchpoints are negative. The registers are laid out as pairs * of (address, control), each pair mapping to a unique hw_breakpoint struct. * Register 0 is reserved for describing resource information. */ static int ptrace_hbp_num_to_idx(long num) { if (num < 0) num = (ARM_MAX_BRP << 1) - num; return (num - 1) >> 1; } /* * Returns the virtual register number for the address of the * breakpoint at index idx. */ static long ptrace_hbp_idx_to_num(int idx) { long mid = ARM_MAX_BRP << 1; long num = (idx << 1) + 1; return num > mid ? mid - num : num; } /* * Handle hitting a HW-breakpoint. */ static void ptrace_hbptriggered(struct perf_event *bp, struct perf_sample_data *data, struct pt_regs *regs) { struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp); long num; int i; siginfo_t info; for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i) if (current->thread.debug.hbp[i] == bp) break; num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i); info.si_signo = SIGTRAP; info.si_errno = (int)num; info.si_code = TRAP_HWBKPT; info.si_addr = (void __user *)(bkpt->trigger); force_sig_info(SIGTRAP, &info, current); } /* * Set ptrace breakpoint pointers to zero for this task. * This is required in order to prevent child processes from unregistering * breakpoints held by their parent. */ void clear_ptrace_hw_breakpoint(struct task_struct *tsk) { memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp)); } /* * Unregister breakpoints from this task and reset the pointers in * the thread_struct. 
*/ void flush_ptrace_hw_breakpoint(struct task_struct *tsk) { int i; struct thread_struct *t = &tsk->thread; for (i = 0; i < ARM_MAX_HBP_SLOTS; i++) { if (t->debug.hbp[i]) { unregister_hw_breakpoint(t->debug.hbp[i]); t->debug.hbp[i] = NULL; } } } static u32 ptrace_get_hbp_resource_info(void) { u8 num_brps, num_wrps, debug_arch, wp_len; u32 reg = 0; num_brps = hw_breakpoint_slots(TYPE_INST); num_wrps = hw_breakpoint_slots(TYPE_DATA); debug_arch = arch_get_debug_arch(); wp_len = arch_get_max_wp_len(); reg |= debug_arch; reg <<= 8; reg |= wp_len; reg <<= 8; reg |= num_wrps; reg <<= 8; reg |= num_brps; return reg; } static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type) { struct perf_event_attr attr; ptrace_breakpoint_init(&attr); /* Initialise fields to sane defaults. */ attr.bp_addr = 0; attr.bp_len = HW_BREAKPOINT_LEN_4; attr.bp_type = type; attr.disabled = 1; return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk); } static int ptrace_gethbpregs(struct task_struct *tsk, long num, unsigned long __user *data) { u32 reg; int idx, ret = 0; struct perf_event *bp; struct arch_hw_breakpoint_ctrl arch_ctrl; if (num == 0) { reg = ptrace_get_hbp_resource_info(); } else { idx = ptrace_hbp_num_to_idx(num); if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) { ret = -EINVAL; goto out; } bp = tsk->thread.debug.hbp[idx]; if (!bp) { reg = 0; goto put; } arch_ctrl = counter_arch_bp(bp)->ctrl; /* * Fix up the len because we may have adjusted it * to compensate for an unaligned address. 
*/ while (!(arch_ctrl.len & 0x1)) arch_ctrl.len >>= 1; if (num & 0x1) reg = bp->attr.bp_addr; else reg = encode_ctrl_reg(arch_ctrl); } put: if (put_user(reg, data)) ret = -EFAULT; out: return ret; } static int ptrace_sethbpregs(struct task_struct *tsk, long num, unsigned long __user *data) { int idx, gen_len, gen_type, implied_type, ret = 0; u32 user_val; struct perf_event *bp; struct arch_hw_breakpoint_ctrl ctrl; struct perf_event_attr attr; if (num == 0) goto out; else if (num < 0) implied_type = HW_BREAKPOINT_RW; else implied_type = HW_BREAKPOINT_X; idx = ptrace_hbp_num_to_idx(num); if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) { ret = -EINVAL; goto out; } if (get_user(user_val, data)) { ret = -EFAULT; goto out; } bp = tsk->thread.debug.hbp[idx]; if (!bp) { bp = ptrace_hbp_create(tsk, implied_type); if (IS_ERR(bp)) { ret = PTR_ERR(bp); goto out; } tsk->thread.debug.hbp[idx] = bp; } attr = bp->attr; if (num & 0x1) { /* Address */ attr.bp_addr = user_val; } else { /* Control */ decode_ctrl_reg(user_val, &ctrl); ret = arch_bp_generic_fields(ctrl, &gen_len, &gen_type); if (ret) goto out; if ((gen_type & implied_type) != gen_type) { ret = -EINVAL; goto out; } attr.bp_len = gen_len; attr.bp_type = gen_type; attr.disabled = !ctrl.enabled; } ret = modify_user_hw_breakpoint(bp, &attr); out: return ret; } #endif /* regset get/set implementations */ static int gpr_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { struct pt_regs *regs = task_pt_regs(target); return user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs, 0, sizeof(*regs)); } static int gpr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { int ret; struct pt_regs newregs; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, sizeof(newregs)); if (ret) return ret; if (!valid_user_regs(&newregs)) return -EINVAL; 
*task_pt_regs(target) = newregs; return 0; } static int fpa_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &task_thread_info(target)->fpstate, 0, sizeof(struct user_fp)); } static int fpa_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { struct thread_info *thread = task_thread_info(target); thread->used_cp[1] = thread->used_cp[2] = 1; return user_regset_copyin(&pos, &count, &kbuf, &ubuf, &thread->fpstate, 0, sizeof(struct user_fp)); } #ifdef CONFIG_VFP /* * VFP register get/set implementations. * * With respect to the kernel, struct user_fp is divided into three chunks: * 16 or 32 real VFP registers (d0-d15 or d0-31) * These are transferred to/from the real registers in the task's * vfp_hard_struct. The number of registers depends on the kernel * configuration. * * 16 or 0 fake VFP registers (d16-d31 or empty) * i.e., the user_vfp structure has space for 32 registers even if * the kernel doesn't have them all. * * vfp_get() reads this chunk as zero where applicable * vfp_set() ignores this chunk * * 1 word for the FPSCR * * The bounds-checking logic built into user_regset_copyout and friends * means that we can make a simple sequence of calls to map the relevant data * to/from the specified slice of the user regset structure. 
*/ static int vfp_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { int ret; struct thread_info *thread = task_thread_info(target); struct vfp_hard_struct const *vfp = &thread->vfpstate.hard; const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs); const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr); vfp_sync_hwstate(thread); ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vfp->fpregs, user_fpregs_offset, user_fpregs_offset + sizeof(vfp->fpregs)); if (ret) return ret; ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, user_fpregs_offset + sizeof(vfp->fpregs), user_fpscr_offset); if (ret) return ret; return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vfp->fpscr, user_fpscr_offset, user_fpscr_offset + sizeof(vfp->fpscr)); } /* * For vfp_set() a read-modify-write is done on the VFP registers, * in order to avoid writing back a half-modified set of registers on * failure. 
*/ static int vfp_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { int ret; struct thread_info *thread = task_thread_info(target); struct vfp_hard_struct new_vfp; const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs); const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr); vfp_sync_hwstate(thread); new_vfp = thread->vfpstate.hard; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &new_vfp.fpregs, user_fpregs_offset, user_fpregs_offset + sizeof(new_vfp.fpregs)); if (ret) return ret; ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, user_fpregs_offset + sizeof(new_vfp.fpregs), user_fpscr_offset); if (ret) return ret; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &new_vfp.fpscr, user_fpscr_offset, user_fpscr_offset + sizeof(new_vfp.fpscr)); if (ret) return ret; vfp_flush_hwstate(thread); thread->vfpstate.hard = new_vfp; return 0; } #endif /* CONFIG_VFP */ enum arm_regset { REGSET_GPR, REGSET_FPR, #ifdef CONFIG_VFP REGSET_VFP, #endif }; static const struct user_regset arm_regsets[] = { [REGSET_GPR] = { .core_note_type = NT_PRSTATUS, .n = ELF_NGREG, .size = sizeof(u32), .align = sizeof(u32), .get = gpr_get, .set = gpr_set }, [REGSET_FPR] = { /* * For the FPA regs in fpstate, the real fields are a mixture * of sizes, so pretend that the registers are word-sized: */ .core_note_type = NT_PRFPREG, .n = sizeof(struct user_fp) / sizeof(u32), .size = sizeof(u32), .align = sizeof(u32), .get = fpa_get, .set = fpa_set }, #ifdef CONFIG_VFP [REGSET_VFP] = { /* * Pretend that the VFP regs are word-sized, since the FPSCR is * a single word dangling at the end of struct user_vfp: */ .core_note_type = NT_ARM_VFP, .n = ARM_VFPREGS_SIZE / sizeof(u32), .size = sizeof(u32), .align = sizeof(u32), .get = vfp_get, .set = vfp_set }, #endif /* CONFIG_VFP */ }; static const struct user_regset_view user_arm_view = { .name = "arm", .e_machine = ELF_ARCH, 
.ei_osabi = ELF_OSABI, .regsets = arm_regsets, .n = ARRAY_SIZE(arm_regsets) }; const struct user_regset_view *task_user_regset_view(struct task_struct *task) { return &user_arm_view; } long arch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data) { int ret; unsigned long __user *datap = (unsigned long __user *) data; switch (request) { case PTRACE_PEEKUSR: ret = ptrace_read_user(child, addr, datap); break; case PTRACE_POKEUSR: ret = ptrace_write_user(child, addr, data); break; case PTRACE_GETREGS: ret = copy_regset_to_user(child, &user_arm_view, REGSET_GPR, 0, sizeof(struct pt_regs), datap); break; case PTRACE_SETREGS: ret = copy_regset_from_user(child, &user_arm_view, REGSET_GPR, 0, sizeof(struct pt_regs), datap); break; case PTRACE_GETFPREGS: ret = copy_regset_to_user(child, &user_arm_view, REGSET_FPR, 0, sizeof(union fp_state), datap); break; case PTRACE_SETFPREGS: ret = copy_regset_from_user(child, &user_arm_view, REGSET_FPR, 0, sizeof(union fp_state), datap); break; #ifdef CONFIG_IWMMXT case PTRACE_GETWMMXREGS: ret = ptrace_getwmmxregs(child, datap); break; case PTRACE_SETWMMXREGS: ret = ptrace_setwmmxregs(child, datap); break; #endif case PTRACE_GET_THREAD_AREA: ret = put_user(task_thread_info(child)->tp_value[0], datap); break; case PTRACE_SET_SYSCALL: task_thread_info(child)->syscall = data; ret = 0; break; #ifdef CONFIG_CRUNCH case PTRACE_GETCRUNCHREGS: ret = ptrace_getcrunchregs(child, datap); break; case PTRACE_SETCRUNCHREGS: ret = ptrace_setcrunchregs(child, datap); break; #endif #ifdef CONFIG_VFP case PTRACE_GETVFPREGS: ret = copy_regset_to_user(child, &user_arm_view, REGSET_VFP, 0, ARM_VFPREGS_SIZE, datap); break; case PTRACE_SETVFPREGS: ret = copy_regset_from_user(child, &user_arm_view, REGSET_VFP, 0, ARM_VFPREGS_SIZE, datap); break; #endif #ifdef CONFIG_HAVE_HW_BREAKPOINT case PTRACE_GETHBPREGS: if (ptrace_get_breakpoints(child) < 0) return -ESRCH; ret = ptrace_gethbpregs(child, addr, (unsigned long __user 
*)data); ptrace_put_breakpoints(child); break; case PTRACE_SETHBPREGS: if (ptrace_get_breakpoints(child) < 0) return -ESRCH; ret = ptrace_sethbpregs(child, addr, (unsigned long __user *)data); ptrace_put_breakpoints(child); break; #endif default: ret = ptrace_request(child, request, addr, data); break; } return ret; } asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno) { unsigned long ip; if (why) audit_syscall_exit(regs); else audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0, regs->ARM_r1, regs->ARM_r2, regs->ARM_r3); if (!test_thread_flag(TIF_SYSCALL_TRACE)) return scno; if (!(current->ptrace & PT_PTRACED)) return scno; current_thread_info()->syscall = scno; /* * IP is used to denote syscall entry/exit: * IP = 0 -> entry, =1 -> exit */ ip = regs->ARM_ip; regs->ARM_ip = why; /* the 0x80 provides a way for the tracing parent to distinguish between a syscall stop and SIGTRAP delivery */ ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); /* * this isn't the same as continuing with a signal, but it will do * for normal use. strace only continues with a signal if the * stopping signal is not SIGTRAP. -brl */ if (current->exit_code) { send_sig(current->exit_code, current, 1); current->exit_code = 0; } regs->ARM_ip = ip; return current_thread_info()->syscall; }
gpl-2.0
ShinyROM/android_kernel_asus_grouper
drivers/tty/serial/zs.c
402
31205
/* * zs.c: Serial port driver for IOASIC DECstations. * * Derived from drivers/sbus/char/sunserial.c by Paul Mackerras. * Derived from drivers/macintosh/macserial.c by Harald Koerfgen. * * DECstation changes * Copyright (C) 1998-2000 Harald Koerfgen * Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki * * For the rest of the code the original Copyright applies: * Copyright (C) 1996 Paul Mackerras (Paul.Mackerras@cs.anu.edu.au) * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) * * * Note: for IOASIC systems the wiring is as follows: * * mouse/keyboard: * DIN-7 MJ-4 signal SCC * 2 1 TxD <- A.TxD * 3 4 RxD -> A.RxD * * EIA-232/EIA-423: * DB-25 MMJ-6 signal SCC * 2 2 TxD <- B.TxD * 3 5 RxD -> B.RxD * 4 RTS <- ~A.RTS * 5 CTS -> ~B.CTS * 6 6 DSR -> ~A.SYNC * 8 CD -> ~B.DCD * 12 DSRS(DCE) -> ~A.CTS (*) * 15 TxC -> B.TxC * 17 RxC -> B.RxC * 20 1 DTR <- ~A.DTR * 22 RI -> ~A.DCD * 23 DSRS(DTE) <- ~B.RTS * * (*) EIA-232 defines the signal at this pin to be SCD, while DSRS(DCE) * is shared with DSRS(DTE) at pin 23. * * As you can immediately notice the wiring of the RTS, DTR and DSR signals * is a bit odd. This makes the handling of port B unnecessarily * complicated and prevents the use of some automatic modes of operation. */ #if defined(CONFIG_SERIAL_ZS_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/bug.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/irqflags.h> #include <linux/kernel.h> #include <linux/major.h> #include <linux/serial.h> #include <linux/serial_core.h> #include <linux/spinlock.h> #include <linux/sysrq.h> #include <linux/tty.h> #include <linux/types.h> #include <linux/atomic.h> #include <asm/system.h> #include <asm/dec/interrupts.h> #include <asm/dec/ioasic_addrs.h> #include <asm/dec/system.h> #include "zs.h" MODULE_AUTHOR("Maciej W. 
Rozycki <macro@linux-mips.org>"); MODULE_DESCRIPTION("DECstation Z85C30 serial driver"); MODULE_LICENSE("GPL"); static char zs_name[] __initdata = "DECstation Z85C30 serial driver version "; static char zs_version[] __initdata = "0.10"; /* * It would be nice to dynamically allocate everything that * depends on ZS_NUM_SCCS, so we could support any number of * Z85C30s, but for now... */ #define ZS_NUM_SCCS 2 /* Max # of ZS chips supported. */ #define ZS_NUM_CHAN 2 /* 2 channels per chip. */ #define ZS_CHAN_A 0 /* Index of the channel A. */ #define ZS_CHAN_B 1 /* Index of the channel B. */ #define ZS_CHAN_IO_SIZE 8 /* IOMEM space size. */ #define ZS_CHAN_IO_STRIDE 4 /* Register alignment. */ #define ZS_CHAN_IO_OFFSET 1 /* The SCC resides on the high byte of the 16-bit IOBUS. */ #define ZS_CLOCK 7372800 /* Z85C30 PCLK input clock rate. */ #define to_zport(uport) container_of(uport, struct zs_port, port) struct zs_parms { resource_size_t scc[ZS_NUM_SCCS]; int irq[ZS_NUM_SCCS]; }; static struct zs_scc zs_sccs[ZS_NUM_SCCS]; static u8 zs_init_regs[ZS_NUM_REGS] __initdata = { 0, /* write 0 */ PAR_SPEC, /* write 1 */ 0, /* write 2 */ 0, /* write 3 */ X16CLK | SB1, /* write 4 */ 0, /* write 5 */ 0, 0, 0, /* write 6, 7, 8 */ MIE | DLC | NV, /* write 9 */ NRZ, /* write 10 */ TCBR | RCBR, /* write 11 */ 0, 0, /* BRG time constant, write 12 + 13 */ BRSRC | BRENABL, /* write 14 */ 0, /* write 15 */ }; /* * Debugging. */ #undef ZS_DEBUG_REGS /* * Reading and writing Z85C30 registers. 
*/ static void recovery_delay(void) { udelay(2); } static u8 read_zsreg(struct zs_port *zport, int reg) { void __iomem *control = zport->port.membase + ZS_CHAN_IO_OFFSET; u8 retval; if (reg != 0) { writeb(reg & 0xf, control); fast_iob(); recovery_delay(); } retval = readb(control); recovery_delay(); return retval; } static void write_zsreg(struct zs_port *zport, int reg, u8 value) { void __iomem *control = zport->port.membase + ZS_CHAN_IO_OFFSET; if (reg != 0) { writeb(reg & 0xf, control); fast_iob(); recovery_delay(); } writeb(value, control); fast_iob(); recovery_delay(); return; } static u8 read_zsdata(struct zs_port *zport) { void __iomem *data = zport->port.membase + ZS_CHAN_IO_STRIDE + ZS_CHAN_IO_OFFSET; u8 retval; retval = readb(data); recovery_delay(); return retval; } static void write_zsdata(struct zs_port *zport, u8 value) { void __iomem *data = zport->port.membase + ZS_CHAN_IO_STRIDE + ZS_CHAN_IO_OFFSET; writeb(value, data); fast_iob(); recovery_delay(); return; } #ifdef ZS_DEBUG_REGS void zs_dump(void) { struct zs_port *zport; int i, j; for (i = 0; i < ZS_NUM_SCCS * ZS_NUM_CHAN; i++) { zport = &zs_sccs[i / ZS_NUM_CHAN].zport[i % ZS_NUM_CHAN]; if (!zport->scc) continue; for (j = 0; j < 16; j++) printk("W%-2d = 0x%02x\t", j, zport->regs[j]); printk("\n"); for (j = 0; j < 16; j++) printk("R%-2d = 0x%02x\t", j, read_zsreg(zport, j)); printk("\n\n"); } } #endif static void zs_spin_lock_cond_irq(spinlock_t *lock, int irq) { if (irq) spin_lock_irq(lock); else spin_lock(lock); } static void zs_spin_unlock_cond_irq(spinlock_t *lock, int irq) { if (irq) spin_unlock_irq(lock); else spin_unlock(lock); } static int zs_receive_drain(struct zs_port *zport) { int loops = 10000; while ((read_zsreg(zport, R0) & Rx_CH_AV) && --loops) read_zsdata(zport); return loops; } static int zs_transmit_drain(struct zs_port *zport, int irq) { struct zs_scc *scc = zport->scc; int loops = 10000; while (!(read_zsreg(zport, R0) & Tx_BUF_EMP) && --loops) { 
zs_spin_unlock_cond_irq(&scc->zlock, irq); udelay(2); zs_spin_lock_cond_irq(&scc->zlock, irq); } return loops; } static int zs_line_drain(struct zs_port *zport, int irq) { struct zs_scc *scc = zport->scc; int loops = 10000; while (!(read_zsreg(zport, R1) & ALL_SNT) && --loops) { zs_spin_unlock_cond_irq(&scc->zlock, irq); udelay(2); zs_spin_lock_cond_irq(&scc->zlock, irq); } return loops; } static void load_zsregs(struct zs_port *zport, u8 *regs, int irq) { /* Let the current transmission finish. */ zs_line_drain(zport, irq); /* Load 'em up. */ write_zsreg(zport, R3, regs[3] & ~RxENABLE); write_zsreg(zport, R5, regs[5] & ~TxENAB); write_zsreg(zport, R4, regs[4]); write_zsreg(zport, R9, regs[9]); write_zsreg(zport, R1, regs[1]); write_zsreg(zport, R2, regs[2]); write_zsreg(zport, R10, regs[10]); write_zsreg(zport, R14, regs[14] & ~BRENABL); write_zsreg(zport, R11, regs[11]); write_zsreg(zport, R12, regs[12]); write_zsreg(zport, R13, regs[13]); write_zsreg(zport, R14, regs[14]); write_zsreg(zport, R15, regs[15]); if (regs[3] & RxENABLE) write_zsreg(zport, R3, regs[3]); if (regs[5] & TxENAB) write_zsreg(zport, R5, regs[5]); return; } /* * Status handling routines. */ /* * zs_tx_empty() -- get the transmitter empty status * * Purpose: Let user call ioctl() to get info when the UART physically * is emptied. On bus types like RS485, the transmitter must * release the bus after transmitting. This must be done when * the transmit shift register is empty, not be done when the * transmit holding register is empty. This functionality * allows an RS485 driver to be written in user space. */ static unsigned int zs_tx_empty(struct uart_port *uport) { struct zs_port *zport = to_zport(uport); struct zs_scc *scc = zport->scc; unsigned long flags; u8 status; spin_lock_irqsave(&scc->zlock, flags); status = read_zsreg(zport, R1); spin_unlock_irqrestore(&scc->zlock, flags); return status & ALL_SNT ? 
TIOCSER_TEMT : 0; } static unsigned int zs_raw_get_ab_mctrl(struct zs_port *zport_a, struct zs_port *zport_b) { u8 status_a, status_b; unsigned int mctrl; status_a = read_zsreg(zport_a, R0); status_b = read_zsreg(zport_b, R0); mctrl = ((status_b & CTS) ? TIOCM_CTS : 0) | ((status_b & DCD) ? TIOCM_CAR : 0) | ((status_a & DCD) ? TIOCM_RNG : 0) | ((status_a & SYNC_HUNT) ? TIOCM_DSR : 0); return mctrl; } static unsigned int zs_raw_get_mctrl(struct zs_port *zport) { struct zs_port *zport_a = &zport->scc->zport[ZS_CHAN_A]; return zport != zport_a ? zs_raw_get_ab_mctrl(zport_a, zport) : 0; } static unsigned int zs_raw_xor_mctrl(struct zs_port *zport) { struct zs_port *zport_a = &zport->scc->zport[ZS_CHAN_A]; unsigned int mmask, mctrl, delta; u8 mask_a, mask_b; if (zport == zport_a) return 0; mask_a = zport_a->regs[15]; mask_b = zport->regs[15]; mmask = ((mask_b & CTSIE) ? TIOCM_CTS : 0) | ((mask_b & DCDIE) ? TIOCM_CAR : 0) | ((mask_a & DCDIE) ? TIOCM_RNG : 0) | ((mask_a & SYNCIE) ? TIOCM_DSR : 0); mctrl = zport->mctrl; if (mmask) { mctrl &= ~mmask; mctrl |= zs_raw_get_ab_mctrl(zport_a, zport) & mmask; } delta = mctrl ^ zport->mctrl; if (delta) zport->mctrl = mctrl; return delta; } static unsigned int zs_get_mctrl(struct uart_port *uport) { struct zs_port *zport = to_zport(uport); struct zs_scc *scc = zport->scc; unsigned int mctrl; spin_lock(&scc->zlock); mctrl = zs_raw_get_mctrl(zport); spin_unlock(&scc->zlock); return mctrl; } static void zs_set_mctrl(struct uart_port *uport, unsigned int mctrl) { struct zs_port *zport = to_zport(uport); struct zs_scc *scc = zport->scc; struct zs_port *zport_a = &scc->zport[ZS_CHAN_A]; u8 oldloop, newloop; spin_lock(&scc->zlock); if (zport != zport_a) { if (mctrl & TIOCM_DTR) zport_a->regs[5] |= DTR; else zport_a->regs[5] &= ~DTR; if (mctrl & TIOCM_RTS) zport_a->regs[5] |= RTS; else zport_a->regs[5] &= ~RTS; write_zsreg(zport_a, R5, zport_a->regs[5]); } /* Rarely modified, so don't poke at hardware unless necessary. 
*/ oldloop = zport->regs[14]; newloop = oldloop; if (mctrl & TIOCM_LOOP) newloop |= LOOPBAK; else newloop &= ~LOOPBAK; if (newloop != oldloop) { zport->regs[14] = newloop; write_zsreg(zport, R14, zport->regs[14]); } spin_unlock(&scc->zlock); } static void zs_raw_stop_tx(struct zs_port *zport) { write_zsreg(zport, R0, RES_Tx_P); zport->tx_stopped = 1; } static void zs_stop_tx(struct uart_port *uport) { struct zs_port *zport = to_zport(uport); struct zs_scc *scc = zport->scc; spin_lock(&scc->zlock); zs_raw_stop_tx(zport); spin_unlock(&scc->zlock); } static void zs_raw_transmit_chars(struct zs_port *); static void zs_start_tx(struct uart_port *uport) { struct zs_port *zport = to_zport(uport); struct zs_scc *scc = zport->scc; spin_lock(&scc->zlock); if (zport->tx_stopped) { zs_transmit_drain(zport, 0); zport->tx_stopped = 0; zs_raw_transmit_chars(zport); } spin_unlock(&scc->zlock); } static void zs_stop_rx(struct uart_port *uport) { struct zs_port *zport = to_zport(uport); struct zs_scc *scc = zport->scc; struct zs_port *zport_a = &scc->zport[ZS_CHAN_A]; spin_lock(&scc->zlock); zport->regs[15] &= ~BRKIE; zport->regs[1] &= ~(RxINT_MASK | TxINT_ENAB); zport->regs[1] |= RxINT_DISAB; if (zport != zport_a) { /* A-side DCD tracks RI and SYNC tracks DSR. */ zport_a->regs[15] &= ~(DCDIE | SYNCIE); write_zsreg(zport_a, R15, zport_a->regs[15]); if (!(zport_a->regs[15] & BRKIE)) { zport_a->regs[1] &= ~EXT_INT_ENAB; write_zsreg(zport_a, R1, zport_a->regs[1]); } /* This-side DCD tracks DCD and CTS tracks CTS. */ zport->regs[15] &= ~(DCDIE | CTSIE); zport->regs[1] &= ~EXT_INT_ENAB; } else { /* DCD tracks RI and SYNC tracks DSR for the B side. 
*/ if (!(zport->regs[15] & (DCDIE | SYNCIE))) zport->regs[1] &= ~EXT_INT_ENAB; } write_zsreg(zport, R15, zport->regs[15]); write_zsreg(zport, R1, zport->regs[1]); spin_unlock(&scc->zlock); } static void zs_enable_ms(struct uart_port *uport) { struct zs_port *zport = to_zport(uport); struct zs_scc *scc = zport->scc; struct zs_port *zport_a = &scc->zport[ZS_CHAN_A]; if (zport == zport_a) return; spin_lock(&scc->zlock); /* Clear Ext interrupts if not being handled already. */ if (!(zport_a->regs[1] & EXT_INT_ENAB)) write_zsreg(zport_a, R0, RES_EXT_INT); /* A-side DCD tracks RI and SYNC tracks DSR. */ zport_a->regs[1] |= EXT_INT_ENAB; zport_a->regs[15] |= DCDIE | SYNCIE; /* This-side DCD tracks DCD and CTS tracks CTS. */ zport->regs[15] |= DCDIE | CTSIE; zs_raw_xor_mctrl(zport); write_zsreg(zport_a, R1, zport_a->regs[1]); write_zsreg(zport_a, R15, zport_a->regs[15]); write_zsreg(zport, R15, zport->regs[15]); spin_unlock(&scc->zlock); } static void zs_break_ctl(struct uart_port *uport, int break_state) { struct zs_port *zport = to_zport(uport); struct zs_scc *scc = zport->scc; unsigned long flags; spin_lock_irqsave(&scc->zlock, flags); if (break_state == -1) zport->regs[5] |= SND_BRK; else zport->regs[5] &= ~SND_BRK; write_zsreg(zport, R5, zport->regs[5]); spin_unlock_irqrestore(&scc->zlock, flags); } /* * Interrupt handling routines. */ #define Rx_BRK 0x0100 /* BREAK event software flag. */ #define Rx_SYS 0x0200 /* SysRq event software flag. 
*/ static void zs_receive_chars(struct zs_port *zport) { struct uart_port *uport = &zport->port; struct zs_scc *scc = zport->scc; struct uart_icount *icount; unsigned int avail, status, ch, flag; int count; for (count = 16; count; count--) { spin_lock(&scc->zlock); avail = read_zsreg(zport, R0) & Rx_CH_AV; spin_unlock(&scc->zlock); if (!avail) break; spin_lock(&scc->zlock); status = read_zsreg(zport, R1) & (Rx_OVR | FRM_ERR | PAR_ERR); ch = read_zsdata(zport); spin_unlock(&scc->zlock); flag = TTY_NORMAL; icount = &uport->icount; icount->rx++; /* Handle the null char got when BREAK is removed. */ if (!ch) status |= zport->tty_break; if (unlikely(status & (Rx_OVR | FRM_ERR | PAR_ERR | Rx_SYS | Rx_BRK))) { zport->tty_break = 0; /* Reset the error indication. */ if (status & (Rx_OVR | FRM_ERR | PAR_ERR)) { spin_lock(&scc->zlock); write_zsreg(zport, R0, ERR_RES); spin_unlock(&scc->zlock); } if (status & (Rx_SYS | Rx_BRK)) { icount->brk++; /* SysRq discards the null char. */ if (status & Rx_SYS) continue; } else if (status & FRM_ERR) icount->frame++; else if (status & PAR_ERR) icount->parity++; if (status & Rx_OVR) icount->overrun++; status &= uport->read_status_mask; if (status & Rx_BRK) flag = TTY_BREAK; else if (status & FRM_ERR) flag = TTY_FRAME; else if (status & PAR_ERR) flag = TTY_PARITY; } if (uart_handle_sysrq_char(uport, ch)) continue; uart_insert_char(uport, status, Rx_OVR, ch, flag); } tty_flip_buffer_push(uport->state->port.tty); } static void zs_raw_transmit_chars(struct zs_port *zport) { struct circ_buf *xmit = &zport->port.state->xmit; /* XON/XOFF chars. */ if (zport->port.x_char) { write_zsdata(zport, zport->port.x_char); zport->port.icount.tx++; zport->port.x_char = 0; return; } /* If nothing to do or stopped or hardware stopped. */ if (uart_circ_empty(xmit) || uart_tx_stopped(&zport->port)) { zs_raw_stop_tx(zport); return; } /* Send char. 
*/ write_zsdata(zport, xmit->buf[xmit->tail]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); zport->port.icount.tx++; if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&zport->port); /* Are we are done? */ if (uart_circ_empty(xmit)) zs_raw_stop_tx(zport); } static void zs_transmit_chars(struct zs_port *zport) { struct zs_scc *scc = zport->scc; spin_lock(&scc->zlock); zs_raw_transmit_chars(zport); spin_unlock(&scc->zlock); } static void zs_status_handle(struct zs_port *zport, struct zs_port *zport_a) { struct uart_port *uport = &zport->port; struct zs_scc *scc = zport->scc; unsigned int delta; u8 status, brk; spin_lock(&scc->zlock); /* Get status from Read Register 0. */ status = read_zsreg(zport, R0); if (zport->regs[15] & BRKIE) { brk = status & BRK_ABRT; if (brk && !zport->brk) { spin_unlock(&scc->zlock); if (uart_handle_break(uport)) zport->tty_break = Rx_SYS; else zport->tty_break = Rx_BRK; spin_lock(&scc->zlock); } zport->brk = brk; } if (zport != zport_a) { delta = zs_raw_xor_mctrl(zport); spin_unlock(&scc->zlock); if (delta & TIOCM_CTS) uart_handle_cts_change(uport, zport->mctrl & TIOCM_CTS); if (delta & TIOCM_CAR) uart_handle_dcd_change(uport, zport->mctrl & TIOCM_CAR); if (delta & TIOCM_RNG) uport->icount.dsr++; if (delta & TIOCM_DSR) uport->icount.rng++; if (delta) wake_up_interruptible(&uport->state->port.delta_msr_wait); spin_lock(&scc->zlock); } /* Clear the status condition... */ write_zsreg(zport, R0, RES_EXT_INT); spin_unlock(&scc->zlock); } /* * This is the Z85C30 driver's generic interrupt routine. */ static irqreturn_t zs_interrupt(int irq, void *dev_id) { struct zs_scc *scc = dev_id; struct zs_port *zport_a = &scc->zport[ZS_CHAN_A]; struct zs_port *zport_b = &scc->zport[ZS_CHAN_B]; irqreturn_t status = IRQ_NONE; u8 zs_intreg; int count; /* * NOTE: The read register 3, which holds the irq status, * does so for both channels on each chip. 
Although * the status value itself must be read from the A * channel and is only valid when read from channel A. * Yes... broken hardware... */ for (count = 16; count; count--) { spin_lock(&scc->zlock); zs_intreg = read_zsreg(zport_a, R3); spin_unlock(&scc->zlock); if (!zs_intreg) break; /* * We do not like losing characters, so we prioritise * interrupt sources a little bit differently than * the SCC would, was it allowed to. */ if (zs_intreg & CHBRxIP) zs_receive_chars(zport_b); if (zs_intreg & CHARxIP) zs_receive_chars(zport_a); if (zs_intreg & CHBEXT) zs_status_handle(zport_b, zport_a); if (zs_intreg & CHAEXT) zs_status_handle(zport_a, zport_a); if (zs_intreg & CHBTxIP) zs_transmit_chars(zport_b); if (zs_intreg & CHATxIP) zs_transmit_chars(zport_a); status = IRQ_HANDLED; } return status; } /* * Finally, routines used to initialize the serial port. */ static int zs_startup(struct uart_port *uport) { struct zs_port *zport = to_zport(uport); struct zs_scc *scc = zport->scc; unsigned long flags; int irq_guard; int ret; irq_guard = atomic_add_return(1, &scc->irq_guard); if (irq_guard == 1) { ret = request_irq(zport->port.irq, zs_interrupt, IRQF_SHARED, "scc", scc); if (ret) { atomic_add(-1, &scc->irq_guard); printk(KERN_ERR "zs: can't get irq %d\n", zport->port.irq); return ret; } } spin_lock_irqsave(&scc->zlock, flags); /* Clear the receive FIFO. */ zs_receive_drain(zport); /* Clear the interrupt registers. */ write_zsreg(zport, R0, ERR_RES); write_zsreg(zport, R0, RES_Tx_P); /* But Ext only if not being handled already. */ if (!(zport->regs[1] & EXT_INT_ENAB)) write_zsreg(zport, R0, RES_EXT_INT); /* Finally, enable sequencing and interrupts. 
*/ zport->regs[1] &= ~RxINT_MASK; zport->regs[1] |= RxINT_ALL | TxINT_ENAB | EXT_INT_ENAB; zport->regs[3] |= RxENABLE; zport->regs[15] |= BRKIE; write_zsreg(zport, R1, zport->regs[1]); write_zsreg(zport, R3, zport->regs[3]); write_zsreg(zport, R5, zport->regs[5]); write_zsreg(zport, R15, zport->regs[15]); /* Record the current state of RR0. */ zport->mctrl = zs_raw_get_mctrl(zport); zport->brk = read_zsreg(zport, R0) & BRK_ABRT; zport->tx_stopped = 1; spin_unlock_irqrestore(&scc->zlock, flags); return 0; } static void zs_shutdown(struct uart_port *uport) { struct zs_port *zport = to_zport(uport); struct zs_scc *scc = zport->scc; unsigned long flags; int irq_guard; spin_lock_irqsave(&scc->zlock, flags); zport->regs[3] &= ~RxENABLE; write_zsreg(zport, R5, zport->regs[5]); write_zsreg(zport, R3, zport->regs[3]); spin_unlock_irqrestore(&scc->zlock, flags); irq_guard = atomic_add_return(-1, &scc->irq_guard); if (!irq_guard) free_irq(zport->port.irq, scc); } static void zs_reset(struct zs_port *zport) { struct zs_scc *scc = zport->scc; int irq; unsigned long flags; spin_lock_irqsave(&scc->zlock, flags); irq = !irqs_disabled_flags(flags); if (!scc->initialised) { /* Reset the pointer first, just in case... */ read_zsreg(zport, R0); /* And let the current transmission finish. */ zs_line_drain(zport, irq); write_zsreg(zport, R9, FHWRES); udelay(10); write_zsreg(zport, R9, 0); scc->initialised = 1; } load_zsregs(zport, zport->regs, irq); spin_unlock_irqrestore(&scc->zlock, flags); } static void zs_set_termios(struct uart_port *uport, struct ktermios *termios, struct ktermios *old_termios) { struct zs_port *zport = to_zport(uport); struct zs_scc *scc = zport->scc; struct zs_port *zport_a = &scc->zport[ZS_CHAN_A]; int irq; unsigned int baud, brg; unsigned long flags; spin_lock_irqsave(&scc->zlock, flags); irq = !irqs_disabled_flags(flags); /* Byte size. 
*/ zport->regs[3] &= ~RxNBITS_MASK; zport->regs[5] &= ~TxNBITS_MASK; switch (termios->c_cflag & CSIZE) { case CS5: zport->regs[3] |= Rx5; zport->regs[5] |= Tx5; break; case CS6: zport->regs[3] |= Rx6; zport->regs[5] |= Tx6; break; case CS7: zport->regs[3] |= Rx7; zport->regs[5] |= Tx7; break; case CS8: default: zport->regs[3] |= Rx8; zport->regs[5] |= Tx8; break; } /* Parity and stop bits. */ zport->regs[4] &= ~(XCLK_MASK | SB_MASK | PAR_ENA | PAR_EVEN); if (termios->c_cflag & CSTOPB) zport->regs[4] |= SB2; else zport->regs[4] |= SB1; if (termios->c_cflag & PARENB) zport->regs[4] |= PAR_ENA; if (!(termios->c_cflag & PARODD)) zport->regs[4] |= PAR_EVEN; switch (zport->clk_mode) { case 64: zport->regs[4] |= X64CLK; break; case 32: zport->regs[4] |= X32CLK; break; case 16: zport->regs[4] |= X16CLK; break; case 1: zport->regs[4] |= X1CLK; break; default: BUG(); } baud = uart_get_baud_rate(uport, termios, old_termios, 0, uport->uartclk / zport->clk_mode / 4); brg = ZS_BPS_TO_BRG(baud, uport->uartclk / zport->clk_mode); zport->regs[12] = brg & 0xff; zport->regs[13] = (brg >> 8) & 0xff; uart_update_timeout(uport, termios->c_cflag, baud); uport->read_status_mask = Rx_OVR; if (termios->c_iflag & INPCK) uport->read_status_mask |= FRM_ERR | PAR_ERR; if (termios->c_iflag & (BRKINT | PARMRK)) uport->read_status_mask |= Rx_BRK; uport->ignore_status_mask = 0; if (termios->c_iflag & IGNPAR) uport->ignore_status_mask |= FRM_ERR | PAR_ERR; if (termios->c_iflag & IGNBRK) { uport->ignore_status_mask |= Rx_BRK; if (termios->c_iflag & IGNPAR) uport->ignore_status_mask |= Rx_OVR; } if (termios->c_cflag & CREAD) zport->regs[3] |= RxENABLE; else zport->regs[3] &= ~RxENABLE; if (zport != zport_a) { if (!(termios->c_cflag & CLOCAL)) { zport->regs[15] |= DCDIE; } else zport->regs[15] &= ~DCDIE; if (termios->c_cflag & CRTSCTS) { zport->regs[15] |= CTSIE; } else zport->regs[15] &= ~CTSIE; zs_raw_xor_mctrl(zport); } /* Load up the new values. 
*/ load_zsregs(zport, zport->regs, irq); spin_unlock_irqrestore(&scc->zlock, flags); } /* * Hack alert! * Required solely so that the initial PROM-based console * works undisturbed in parallel with this one. */ static void zs_pm(struct uart_port *uport, unsigned int state, unsigned int oldstate) { struct zs_port *zport = to_zport(uport); if (state < 3) zport->regs[5] |= TxENAB; else zport->regs[5] &= ~TxENAB; write_zsreg(zport, R5, zport->regs[5]); } static const char *zs_type(struct uart_port *uport) { return "Z85C30 SCC"; } static void zs_release_port(struct uart_port *uport) { iounmap(uport->membase); uport->membase = 0; release_mem_region(uport->mapbase, ZS_CHAN_IO_SIZE); } static int zs_map_port(struct uart_port *uport) { if (!uport->membase) uport->membase = ioremap_nocache(uport->mapbase, ZS_CHAN_IO_SIZE); if (!uport->membase) { printk(KERN_ERR "zs: Cannot map MMIO\n"); return -ENOMEM; } return 0; } static int zs_request_port(struct uart_port *uport) { int ret; if (!request_mem_region(uport->mapbase, ZS_CHAN_IO_SIZE, "scc")) { printk(KERN_ERR "zs: Unable to reserve MMIO resource\n"); return -EBUSY; } ret = zs_map_port(uport); if (ret) { release_mem_region(uport->mapbase, ZS_CHAN_IO_SIZE); return ret; } return 0; } static void zs_config_port(struct uart_port *uport, int flags) { struct zs_port *zport = to_zport(uport); if (flags & UART_CONFIG_TYPE) { if (zs_request_port(uport)) return; uport->type = PORT_ZS; zs_reset(zport); } } static int zs_verify_port(struct uart_port *uport, struct serial_struct *ser) { struct zs_port *zport = to_zport(uport); int ret = 0; if (ser->type != PORT_UNKNOWN && ser->type != PORT_ZS) ret = -EINVAL; if (ser->irq != uport->irq) ret = -EINVAL; if (ser->baud_base != uport->uartclk / zport->clk_mode / 4) ret = -EINVAL; return ret; } static struct uart_ops zs_ops = { .tx_empty = zs_tx_empty, .set_mctrl = zs_set_mctrl, .get_mctrl = zs_get_mctrl, .stop_tx = zs_stop_tx, .start_tx = zs_start_tx, .stop_rx = zs_stop_rx, .enable_ms = 
zs_enable_ms, .break_ctl = zs_break_ctl, .startup = zs_startup, .shutdown = zs_shutdown, .set_termios = zs_set_termios, .pm = zs_pm, .type = zs_type, .release_port = zs_release_port, .request_port = zs_request_port, .config_port = zs_config_port, .verify_port = zs_verify_port, }; /* * Initialize Z85C30 port structures. */ static int __init zs_probe_sccs(void) { static int probed; struct zs_parms zs_parms; int chip, side, irq; int n_chips = 0; int i; if (probed) return 0; irq = dec_interrupt[DEC_IRQ_SCC0]; if (irq >= 0) { zs_parms.scc[n_chips] = IOASIC_SCC0; zs_parms.irq[n_chips] = dec_interrupt[DEC_IRQ_SCC0]; n_chips++; } irq = dec_interrupt[DEC_IRQ_SCC1]; if (irq >= 0) { zs_parms.scc[n_chips] = IOASIC_SCC1; zs_parms.irq[n_chips] = dec_interrupt[DEC_IRQ_SCC1]; n_chips++; } if (!n_chips) return -ENXIO; probed = 1; for (chip = 0; chip < n_chips; chip++) { spin_lock_init(&zs_sccs[chip].zlock); for (side = 0; side < ZS_NUM_CHAN; side++) { struct zs_port *zport = &zs_sccs[chip].zport[side]; struct uart_port *uport = &zport->port; zport->scc = &zs_sccs[chip]; zport->clk_mode = 16; uport->irq = zs_parms.irq[chip]; uport->uartclk = ZS_CLOCK; uport->fifosize = 1; uport->iotype = UPIO_MEM; uport->flags = UPF_BOOT_AUTOCONF; uport->ops = &zs_ops; uport->line = chip * ZS_NUM_CHAN + side; uport->mapbase = dec_kn_slot_base + zs_parms.scc[chip] + (side ^ ZS_CHAN_B) * ZS_CHAN_IO_SIZE; for (i = 0; i < ZS_NUM_REGS; i++) zport->regs[i] = zs_init_regs[i]; } } return 0; } #ifdef CONFIG_SERIAL_ZS_CONSOLE static void zs_console_putchar(struct uart_port *uport, int ch) { struct zs_port *zport = to_zport(uport); struct zs_scc *scc = zport->scc; int irq; unsigned long flags; spin_lock_irqsave(&scc->zlock, flags); irq = !irqs_disabled_flags(flags); if (zs_transmit_drain(zport, irq)) write_zsdata(zport, ch); spin_unlock_irqrestore(&scc->zlock, flags); } /* * Print a string to the serial port trying not to disturb * any possible real use of the port... 
*/ static void zs_console_write(struct console *co, const char *s, unsigned int count) { int chip = co->index / ZS_NUM_CHAN, side = co->index % ZS_NUM_CHAN; struct zs_port *zport = &zs_sccs[chip].zport[side]; struct zs_scc *scc = zport->scc; unsigned long flags; u8 txint, txenb; int irq; /* Disable transmit interrupts and enable the transmitter. */ spin_lock_irqsave(&scc->zlock, flags); txint = zport->regs[1]; txenb = zport->regs[5]; if (txint & TxINT_ENAB) { zport->regs[1] = txint & ~TxINT_ENAB; write_zsreg(zport, R1, zport->regs[1]); } if (!(txenb & TxENAB)) { zport->regs[5] = txenb | TxENAB; write_zsreg(zport, R5, zport->regs[5]); } spin_unlock_irqrestore(&scc->zlock, flags); uart_console_write(&zport->port, s, count, zs_console_putchar); /* Restore transmit interrupts and the transmitter enable. */ spin_lock_irqsave(&scc->zlock, flags); irq = !irqs_disabled_flags(flags); zs_line_drain(zport, irq); if (!(txenb & TxENAB)) { zport->regs[5] &= ~TxENAB; write_zsreg(zport, R5, zport->regs[5]); } if (txint & TxINT_ENAB) { zport->regs[1] |= TxINT_ENAB; write_zsreg(zport, R1, zport->regs[1]); } spin_unlock_irqrestore(&scc->zlock, flags); } /* * Setup serial console baud/bits/parity. We do two things here: * - construct a cflag setting for the first uart_open() * - initialise the serial port * Return non-zero if we didn't find a serial port. 
*/ static int __init zs_console_setup(struct console *co, char *options) { int chip = co->index / ZS_NUM_CHAN, side = co->index % ZS_NUM_CHAN; struct zs_port *zport = &zs_sccs[chip].zport[side]; struct uart_port *uport = &zport->port; int baud = 9600; int bits = 8; int parity = 'n'; int flow = 'n'; int ret; ret = zs_map_port(uport); if (ret) return ret; zs_reset(zport); zs_pm(uport, 0, -1); if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); return uart_set_options(uport, co, baud, parity, bits, flow); } static struct uart_driver zs_reg; static struct console zs_console = { .name = "ttyS", .write = zs_console_write, .device = uart_console_device, .setup = zs_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &zs_reg, }; /* * Register console. */ static int __init zs_serial_console_init(void) { int ret; ret = zs_probe_sccs(); if (ret) return ret; register_console(&zs_console); return 0; } console_initcall(zs_serial_console_init); #define SERIAL_ZS_CONSOLE &zs_console #else #define SERIAL_ZS_CONSOLE NULL #endif /* CONFIG_SERIAL_ZS_CONSOLE */ static struct uart_driver zs_reg = { .owner = THIS_MODULE, .driver_name = "serial", .dev_name = "ttyS", .major = TTY_MAJOR, .minor = 64, .nr = ZS_NUM_SCCS * ZS_NUM_CHAN, .cons = SERIAL_ZS_CONSOLE, }; /* zs_init inits the driver. */ static int __init zs_init(void) { int i, ret; pr_info("%s%s\n", zs_name, zs_version); /* Find out how many Z85C30 SCCs we have. 
*/ ret = zs_probe_sccs(); if (ret) return ret; ret = uart_register_driver(&zs_reg); if (ret) return ret; for (i = 0; i < ZS_NUM_SCCS * ZS_NUM_CHAN; i++) { struct zs_scc *scc = &zs_sccs[i / ZS_NUM_CHAN]; struct zs_port *zport = &scc->zport[i % ZS_NUM_CHAN]; struct uart_port *uport = &zport->port; if (zport->scc) uart_add_one_port(&zs_reg, uport); } return 0; } static void __exit zs_exit(void) { int i; for (i = ZS_NUM_SCCS * ZS_NUM_CHAN - 1; i >= 0; i--) { struct zs_scc *scc = &zs_sccs[i / ZS_NUM_CHAN]; struct zs_port *zport = &scc->zport[i % ZS_NUM_CHAN]; struct uart_port *uport = &zport->port; if (zport->scc) uart_remove_one_port(&zs_reg, uport); } uart_unregister_driver(&zs_reg); } module_init(zs_init); module_exit(zs_exit);
gpl-2.0
kbc-developers/android_kernel_samsung_exynos4210jpn
fs/btrfs/volumes.c
658
94609
/* * Copyright (C) 2007 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/sched.h> #include <linux/bio.h> #include <linux/slab.h> #include <linux/buffer_head.h> #include <linux/blkdev.h> #include <linux/random.h> #include <linux/iocontext.h> #include <linux/capability.h> #include <asm/div64.h> #include "compat.h" #include "ctree.h" #include "extent_map.h" #include "disk-io.h" #include "transaction.h" #include "print-tree.h" #include "volumes.h" #include "async-thread.h" static int init_first_rw_device(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_device *device); static int btrfs_relocate_sys_chunks(struct btrfs_root *root); static DEFINE_MUTEX(uuid_mutex); static LIST_HEAD(fs_uuids); static void lock_chunks(struct btrfs_root *root) { mutex_lock(&root->fs_info->chunk_mutex); } static void unlock_chunks(struct btrfs_root *root) { mutex_unlock(&root->fs_info->chunk_mutex); } static void free_fs_devices(struct btrfs_fs_devices *fs_devices) { struct btrfs_device *device; WARN_ON(fs_devices->opened); while (!list_empty(&fs_devices->devices)) { device = list_entry(fs_devices->devices.next, struct btrfs_device, dev_list); list_del(&device->dev_list); kfree(device->name); kfree(device); } kfree(fs_devices); } int btrfs_cleanup_fs_uuids(void) { struct btrfs_fs_devices *fs_devices; while (!list_empty(&fs_uuids)) { fs_devices = 
list_entry(fs_uuids.next, struct btrfs_fs_devices, list); list_del(&fs_devices->list); free_fs_devices(fs_devices); } return 0; } static noinline struct btrfs_device *__find_device(struct list_head *head, u64 devid, u8 *uuid) { struct btrfs_device *dev; list_for_each_entry(dev, head, dev_list) { if (dev->devid == devid && (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) { return dev; } } return NULL; } static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid) { struct btrfs_fs_devices *fs_devices; list_for_each_entry(fs_devices, &fs_uuids, list) { if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0) return fs_devices; } return NULL; } static void requeue_list(struct btrfs_pending_bios *pending_bios, struct bio *head, struct bio *tail) { struct bio *old_head; old_head = pending_bios->head; pending_bios->head = head; if (pending_bios->tail) tail->bi_next = old_head; else pending_bios->tail = tail; } /* * we try to collect pending bios for a device so we don't get a large * number of procs sending bios down to the same device. This greatly * improves the schedulers ability to collect and merge the bios. * * But, it also turns into a long list of bios to process and that is sure * to eventually make the worker thread block. The solution here is to * make some progress and then put this work struct back at the end of * the list if the block device is congested. This way, multiple devices * can make progress from a single worker thread. */ static noinline int run_scheduled_bios(struct btrfs_device *device) { struct bio *pending; struct backing_dev_info *bdi; struct btrfs_fs_info *fs_info; struct btrfs_pending_bios *pending_bios; struct bio *tail; struct bio *cur; int again = 0; unsigned long num_run; unsigned long batch_run = 0; unsigned long limit; unsigned long last_waited = 0; int force_reg = 0; struct blk_plug plug; /* * this function runs all the bios we've collected for * a particular device. 
We don't want to wander off to * another device without first sending all of these down. * So, setup a plug here and finish it off before we return */ blk_start_plug(&plug); bdi = blk_get_backing_dev_info(device->bdev); fs_info = device->dev_root->fs_info; limit = btrfs_async_submit_limit(fs_info); limit = limit * 2 / 3; loop: spin_lock(&device->io_lock); loop_lock: num_run = 0; /* take all the bios off the list at once and process them * later on (without the lock held). But, remember the * tail and other pointers so the bios can be properly reinserted * into the list if we hit congestion */ if (!force_reg && device->pending_sync_bios.head) { pending_bios = &device->pending_sync_bios; force_reg = 1; } else { pending_bios = &device->pending_bios; force_reg = 0; } pending = pending_bios->head; tail = pending_bios->tail; WARN_ON(pending && !tail); /* * if pending was null this time around, no bios need processing * at all and we can stop. Otherwise it'll loop back up again * and do an additional check so no bios are missed. * * device->running_pending is used to synchronize with the * schedule_bio code. 
*/ if (device->pending_sync_bios.head == NULL && device->pending_bios.head == NULL) { again = 0; device->running_pending = 0; } else { again = 1; device->running_pending = 1; } pending_bios->head = NULL; pending_bios->tail = NULL; spin_unlock(&device->io_lock); while (pending) { rmb(); /* we want to work on both lists, but do more bios on the * sync list than the regular list */ if ((num_run > 32 && pending_bios != &device->pending_sync_bios && device->pending_sync_bios.head) || (num_run > 64 && pending_bios == &device->pending_sync_bios && device->pending_bios.head)) { spin_lock(&device->io_lock); requeue_list(pending_bios, pending, tail); goto loop_lock; } cur = pending; pending = pending->bi_next; cur->bi_next = NULL; atomic_dec(&fs_info->nr_async_bios); if (atomic_read(&fs_info->nr_async_bios) < limit && waitqueue_active(&fs_info->async_submit_wait)) wake_up(&fs_info->async_submit_wait); BUG_ON(atomic_read(&cur->bi_cnt) == 0); submit_bio(cur->bi_rw, cur); num_run++; batch_run++; if (need_resched()) cond_resched(); /* * we made progress, there is more work to do and the bdi * is now congested. Back off and let other work structs * run instead */ if (pending && bdi_write_congested(bdi) && batch_run > 8 && fs_info->fs_devices->open_devices > 1) { struct io_context *ioc; ioc = current->io_context; /* * the main goal here is that we don't want to * block if we're going to be able to submit * more requests without blocking. * * This code does two great things, it pokes into * the elevator code from a filesystem _and_ * it makes assumptions about how batching works. */ if (ioc && ioc->nr_batch_requests > 0 && time_before(jiffies, ioc->last_waited + HZ/50UL) && (last_waited == 0 || ioc->last_waited == last_waited)) { /* * we want to go through our batch of * requests and stop. 
So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				if (need_resched())
					cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
	return 0;
}

/* btrfs_work callback: drain the owning device's pending bio lists. */
static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}

/*
 * Register a device found by a scan in the in-memory lists.
 *
 * Looks up (or allocates) the btrfs_fs_devices entry matching the super
 * block's fsid, then either adds a new btrfs_device for @devid or, when
 * the device is already known, refreshes its recorded path and clears
 * its "missing" state.  The newest transid seen wins latest_devid /
 * latest_trans.  On success *fs_devices_ret points at the fs_devices.
 *
 * Returns 0, -ENOMEM on allocation failure, or -EBUSY when a previously
 * unknown device appears for an fs_devices set that is already opened.
 */
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);
	char *name;

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		mutex_init(&fs_devices->device_list_mutex);
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&device->dev_alloc_list);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	} else if (!device->name || strcmp(device->name, path)) {
		/* known device re-scanned under a different path */
		name = kstrdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		kfree(device->name);
		device->name = name;
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}

/*
 * Make a private copy of @orig and of every device on it (name, devid
 * and uuid only -- no open block devices are duplicated).  Returns the
 * clone or ERR_PTR(-ENOMEM).
 */
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!fs_devices)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	mutex_init(&fs_devices->device_list_mutex);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

	/* We have held the volume lock, it is safe to get the devices.
	 */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			goto error;

		device->name = kstrdup(orig_dev->name, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			goto error;
		}

		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

/*
 * Drop every device (in this fs_devices and all chained seed sets) that
 * is not marked in_fs_metadata: close its block device, unlink it from
 * the lists and free it.
 */
int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *next;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		kfree(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	mutex_unlock(&uuid_mutex);
	return 0;
}

/* Workqueue callback: put the block device and free the btrfs_device. */
static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);

	kfree(device->name);
	kfree(device);
}

/*
 * RCU callback: defer the actual teardown (which may sleep in
 * blkdev_put) to a workqueue.
 */
static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}

/*
 * Drop one open reference on @fs_devices.  When the count reaches zero,
 * close every device: each list entry is replaced (RCU) by a pruned
 * copy with no open bdev, and the old device is freed via call_rcu.
 */
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		struct btrfs_device *new_device;

		if (device->bdev)
			fs_devices->open_devices--;

		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		if (device->can_discard)
			fs_devices->num_can_discard--;

		new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
		BUG_ON(!new_device);
		memcpy(new_device, device, sizeof(*new_device));
		new_device->name = kstrdup(device->name, GFP_NOFS);
		BUG_ON(device->name && !new_device->name);
		new_device->bdev = NULL;
		new_device->writeable = 0;
		new_device->in_fs_metadata = 0;
		new_device->can_discard = 0;
		spin_lock_init(&new_device->io_lock);
		list_replace_rcu(&device->dev_list, &new_device->dev_list);

		call_rcu(&device->rcu, free_device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

/*
 * Close @fs_devices and, once it is fully closed, every seed device set
 * chained behind it.
 */
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	/*
	 * Wait for rcu kworkers under __btrfs_close_devices
	 * to finish all blkdev_puts so device is really
	 * free when umount is done.
	 */
	rcu_barrier();
	return ret;
}

/*
 * Open every device on the list, read and validate its super block
 * (devid + uuid must match what the scan recorded), and remember the
 * device with the highest generation as latest_bdev.  Devices that fail
 * to open or validate are skipped, not fatal; -EIO only if none opened.
 */
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		bdev = blkdev_get_by_path(device->name, flags, holder);
		if (IS_ERR(bdev)) {
			printk(KERN_INFO "open %s failed\n", device->name);
			goto error;
		}
		set_blocksize(bdev, 4096);

		bh = btrfs_read_dev_super(bdev);
		if (!bh) {
			ret = -EINVAL;
			goto error_close;
		}

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
			latest_bdev = bdev;
		}

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q)) {
			device->can_discard = 1;
			fs_devices->num_can_discard++;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
error_close:
		blkdev_put(bdev, flags);
error:
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EIO;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

/* Refcounted open: first opener does the work, later ones just count. */
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}

/*
 * Read the btrfs super block from @path, print the device identity and
 * register the device via device_list_add().
 */
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	mutex_lock(&uuid_mutex);

	flags |= FMODE_EXCL;
	bdev = blkdev_get_by_path(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = btrfs_read_dev_super(bdev);
	if (!bh) {
		ret = -EINVAL;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk(KERN_INFO "device label %s ", disk_super->label);
	else
		printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
	printk(KERN_CONT "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

	brelse(bh);
error_close:
	blkdev_put(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}

/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes)
		return 0;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	/* walk the dev extents and sum up the overlap with [start, end] */
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * find_free_dev_extent - find free space in the specified device
 * @trans:	transaction handler
 * @device:	the device which we search the free space in
 * @num_bytes:	the size of the free space that we need
 * @start:	store the start of the free space.
 * @len:	the size of the free space. that we find, or the size of the max
 * 		free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find. But if we
 * don't find suitable free space, it will be used to store the start position
 * of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_start;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);

	max_hole_start = search_start;
	max_hole_size = 0;

	if (search_start >= search_end) {
		ret = -ENOSPC;
		goto error;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than which we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/* trailing hole between the last extent and the end of the device */
	hole_size = search_end - search_start;
	if (hole_size > max_hole_size) {
		max_hole_start = search_start;
		max_hole_size = hole_size;
	}

	/* See above. */
	if (hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
error:
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

/*
 * Delete the dev extent item covering @start on @device and subtract
 * its length from device->bytes_used.
 */
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		/* no exact match: the previous extent must contain @start */
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start ||
		       found_key.offset + btrfs_dev_extent_length(leaf,
				extent) < start);
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	}
	BUG_ON(ret);

	if (device->bytes_used > 0)
		device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
	ret = btrfs_del_item(trans, root, path);

out:
	btrfs_free_path(path);
	return ret;
}

/* Insert a dev extent item mapping [start, start+num_bytes) to a chunk. */
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device
*device, u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 start, u64 num_bytes) { int ret; struct btrfs_path *path; struct btrfs_root *root = device->dev_root; struct btrfs_dev_extent *extent; struct extent_buffer *leaf; struct btrfs_key key; WARN_ON(!device->in_fs_metadata); path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = device->devid; key.offset = start; key.type = BTRFS_DEV_EXTENT_KEY; ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*extent)); BUG_ON(ret); leaf = path->nodes[0]; extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree); btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid); btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset); write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid, (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent), BTRFS_UUID_SIZE); btrfs_set_dev_extent_length(leaf, extent, num_bytes); btrfs_mark_buffer_dirty(leaf); btrfs_free_path(path); return ret; } static noinline int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset) { struct btrfs_path *path; int ret; struct btrfs_key key; struct btrfs_chunk *chunk; struct btrfs_key found_key; path = btrfs_alloc_path(); BUG_ON(!path); key.objectid = objectid; key.offset = (u64)-1; key.type = BTRFS_CHUNK_ITEM_KEY; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto error; BUG_ON(ret == 0); ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY); if (ret) { *offset = 0; } else { btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); if (found_key.objectid != objectid) *offset = 0; else { chunk = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_chunk); *offset = found_key.offset + btrfs_chunk_length(path->nodes[0], chunk); } } ret = 0; error: btrfs_free_path(path); return ret; } static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid) { int ret; struct btrfs_key 
key; struct btrfs_key found_key; struct btrfs_path *path; root = root->fs_info->chunk_root; path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = BTRFS_DEV_ITEMS_OBJECTID; key.type = BTRFS_DEV_ITEM_KEY; key.offset = (u64)-1; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto error; BUG_ON(ret == 0); ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID, BTRFS_DEV_ITEM_KEY); if (ret) { *objectid = 1; } else { btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); *objectid = found_key.offset + 1; } ret = 0; error: btrfs_free_path(path); return ret; } /* * the device information is stored in the chunk root * the btrfs_device struct should be fully filled in */ int btrfs_add_device(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_device *device) { int ret; struct btrfs_path *path; struct btrfs_dev_item *dev_item; struct extent_buffer *leaf; struct btrfs_key key; unsigned long ptr; root = root->fs_info->chunk_root; path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = BTRFS_DEV_ITEMS_OBJECTID; key.type = BTRFS_DEV_ITEM_KEY; key.offset = device->devid; ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*dev_item)); if (ret) goto out; leaf = path->nodes[0]; dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); btrfs_set_device_id(leaf, dev_item, device->devid); btrfs_set_device_generation(leaf, dev_item, 0); btrfs_set_device_type(leaf, dev_item, device->type); btrfs_set_device_io_align(leaf, dev_item, device->io_align); btrfs_set_device_io_width(leaf, dev_item, device->io_width); btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes); btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used); btrfs_set_device_group(leaf, dev_item, 0); btrfs_set_device_seek_speed(leaf, dev_item, 0); btrfs_set_device_bandwidth(leaf, dev_item, 0); btrfs_set_device_start_offset(leaf, 
dev_item, 0); ptr = (unsigned long)btrfs_device_uuid(dev_item); write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); ptr = (unsigned long)btrfs_device_fsid(dev_item); write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE); btrfs_mark_buffer_dirty(leaf); ret = 0; out: btrfs_free_path(path); return ret; } static int btrfs_rm_dev_item(struct btrfs_root *root, struct btrfs_device *device) { int ret; struct btrfs_path *path; struct btrfs_key key; struct btrfs_trans_handle *trans; root = root->fs_info->chunk_root; path = btrfs_alloc_path(); if (!path) return -ENOMEM; trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { btrfs_free_path(path); return PTR_ERR(trans); } key.objectid = BTRFS_DEV_ITEMS_OBJECTID; key.type = BTRFS_DEV_ITEM_KEY; key.offset = device->devid; lock_chunks(root); ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret < 0) goto out; if (ret > 0) { ret = -ENOENT; goto out; } ret = btrfs_del_item(trans, root, path); if (ret) goto out; out: btrfs_free_path(path); unlock_chunks(root); btrfs_commit_transaction(trans, root); return ret; } int btrfs_rm_device(struct btrfs_root *root, char *device_path) { struct btrfs_device *device; struct btrfs_device *next_device; struct block_device *bdev; struct buffer_head *bh = NULL; struct btrfs_super_block *disk_super; struct btrfs_fs_devices *cur_devices; u64 all_avail; u64 devid; u64 num_devices; u8 *dev_uuid; int ret = 0; bool clear_super = false; mutex_lock(&uuid_mutex); mutex_lock(&root->fs_info->volume_mutex); all_avail = root->fs_info->avail_data_alloc_bits | root->fs_info->avail_system_alloc_bits | root->fs_info->avail_metadata_alloc_bits; if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && root->fs_info->fs_devices->num_devices <= 4) { printk(KERN_ERR "btrfs: unable to go below four devices " "on raid10\n"); ret = -EINVAL; goto out; } if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && root->fs_info->fs_devices->num_devices <= 2) { printk(KERN_ERR "btrfs: unable to go below two 
" "devices on raid1\n"); ret = -EINVAL; goto out; } if (strcmp(device_path, "missing") == 0) { struct list_head *devices; struct btrfs_device *tmp; device = NULL; devices = &root->fs_info->fs_devices->devices; /* * It is safe to read the devices since the volume_mutex * is held. */ list_for_each_entry(tmp, devices, dev_list) { if (tmp->in_fs_metadata && !tmp->bdev) { device = tmp; break; } } bdev = NULL; bh = NULL; disk_super = NULL; if (!device) { printk(KERN_ERR "btrfs: no missing devices found to " "remove\n"); goto out; } } else { bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL, root->fs_info->bdev_holder); if (IS_ERR(bdev)) { ret = PTR_ERR(bdev); goto out; } set_blocksize(bdev, 4096); bh = btrfs_read_dev_super(bdev); if (!bh) { ret = -EINVAL; goto error_close; } disk_super = (struct btrfs_super_block *)bh->b_data; devid = btrfs_stack_device_id(&disk_super->dev_item); dev_uuid = disk_super->dev_item.uuid; device = btrfs_find_device(root, devid, dev_uuid, disk_super->fsid); if (!device) { ret = -ENOENT; goto error_brelse; } } if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) { printk(KERN_ERR "btrfs: unable to remove the only writeable " "device\n"); ret = -EINVAL; goto error_brelse; } if (device->writeable) { lock_chunks(root); list_del_init(&device->dev_alloc_list); unlock_chunks(root); root->fs_info->fs_devices->rw_devices--; clear_super = true; } ret = btrfs_shrink_device(device, 0); if (ret) goto error_undo; ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device); if (ret) goto error_undo; device->in_fs_metadata = 0; btrfs_scrub_cancel_dev(root, device); /* * the device list mutex makes sure that we don't change * the device list while someone else is writing out all * the device supers. 
*/ cur_devices = device->fs_devices; mutex_lock(&root->fs_info->fs_devices->device_list_mutex); list_del_rcu(&device->dev_list); device->fs_devices->num_devices--; if (device->missing) root->fs_info->fs_devices->missing_devices--; next_device = list_entry(root->fs_info->fs_devices->devices.next, struct btrfs_device, dev_list); if (device->bdev == root->fs_info->sb->s_bdev) root->fs_info->sb->s_bdev = next_device->bdev; if (device->bdev == root->fs_info->fs_devices->latest_bdev) root->fs_info->fs_devices->latest_bdev = next_device->bdev; if (device->bdev) device->fs_devices->open_devices--; call_rcu(&device->rcu, free_device); mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1; btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices); if (cur_devices->open_devices == 0) { struct btrfs_fs_devices *fs_devices; fs_devices = root->fs_info->fs_devices; while (fs_devices) { if (fs_devices->seed == cur_devices) break; fs_devices = fs_devices->seed; } fs_devices->seed = cur_devices->seed; cur_devices->seed = NULL; lock_chunks(root); __btrfs_close_devices(cur_devices); unlock_chunks(root); free_fs_devices(cur_devices); } /* * at this point, the device is zero sized. 
We want to * remove it from the devices list and zero out the old super */ if (clear_super) { /* make sure this device isn't detected as part of * the FS anymore */ memset(&disk_super->magic, 0, sizeof(disk_super->magic)); set_buffer_dirty(bh); sync_dirty_buffer(bh); } ret = 0; error_brelse: brelse(bh); error_close: if (bdev) blkdev_put(bdev, FMODE_READ | FMODE_EXCL); out: mutex_unlock(&root->fs_info->volume_mutex); mutex_unlock(&uuid_mutex); return ret; error_undo: if (device->writeable) { lock_chunks(root); list_add(&device->dev_alloc_list, &root->fs_info->fs_devices->alloc_list); unlock_chunks(root); root->fs_info->fs_devices->rw_devices++; } goto error_brelse; } /* * does all the dirty work required for changing file system's UUID. */ static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans, struct btrfs_root *root) { struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; struct btrfs_fs_devices *old_devices; struct btrfs_fs_devices *seed_devices; struct btrfs_super_block *disk_super = &root->fs_info->super_copy; struct btrfs_device *device; u64 super_flags; BUG_ON(!mutex_is_locked(&uuid_mutex)); if (!fs_devices->seeding) return -EINVAL; seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS); if (!seed_devices) return -ENOMEM; old_devices = clone_fs_devices(fs_devices); if (IS_ERR(old_devices)) { kfree(seed_devices); return PTR_ERR(old_devices); } list_add(&old_devices->list, &fs_uuids); memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); seed_devices->opened = 1; INIT_LIST_HEAD(&seed_devices->devices); INIT_LIST_HEAD(&seed_devices->alloc_list); mutex_init(&seed_devices->device_list_mutex); mutex_lock(&root->fs_info->fs_devices->device_list_mutex); list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, synchronize_rcu); mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list); list_for_each_entry(device, &seed_devices->devices, dev_list) { 
device->fs_devices = seed_devices; } fs_devices->seeding = 0; fs_devices->num_devices = 0; fs_devices->open_devices = 0; fs_devices->seed = seed_devices; generate_random_uuid(fs_devices->fsid); memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); super_flags = btrfs_super_flags(disk_super) & ~BTRFS_SUPER_FLAG_SEEDING; btrfs_set_super_flags(disk_super, super_flags); return 0; } /* * strore the expected generation for seed devices in device items. */ static int btrfs_finish_sprout(struct btrfs_trans_handle *trans, struct btrfs_root *root) { struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_dev_item *dev_item; struct btrfs_device *device; struct btrfs_key key; u8 fs_uuid[BTRFS_UUID_SIZE]; u8 dev_uuid[BTRFS_UUID_SIZE]; u64 devid; int ret; path = btrfs_alloc_path(); if (!path) return -ENOMEM; root = root->fs_info->chunk_root; key.objectid = BTRFS_DEV_ITEMS_OBJECTID; key.offset = 0; key.type = BTRFS_DEV_ITEM_KEY; while (1) { ret = btrfs_search_slot(trans, root, &key, path, 0, 1); if (ret < 0) goto error; leaf = path->nodes[0]; next_slot: if (path->slots[0] >= btrfs_header_nritems(leaf)) { ret = btrfs_next_leaf(root, path); if (ret > 0) break; if (ret < 0) goto error; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); btrfs_release_path(path); continue; } btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || key.type != BTRFS_DEV_ITEM_KEY) break; dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); devid = btrfs_device_id(leaf, dev_item); read_extent_buffer(leaf, dev_uuid, (unsigned long)btrfs_device_uuid(dev_item), BTRFS_UUID_SIZE); read_extent_buffer(leaf, fs_uuid, (unsigned long)btrfs_device_fsid(dev_item), BTRFS_UUID_SIZE); device = btrfs_find_device(root, devid, dev_uuid, fs_uuid); BUG_ON(!device); if (device->fs_devices->seeding) { btrfs_set_device_generation(leaf, dev_item, 
/* Tail of a function that begins before this chunk (incomplete here):
 * finishes stamping a dev item with the device generation, advances the
 * path slot and loops, then frees the path and returns. */
			   device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}
		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * Add a new, empty block device to a mounted filesystem.
 *
 * Opens @device_path exclusively, builds an in-memory btrfs_device for it,
 * links it into fs_devices, and accounts its size in the superblock.  When
 * the mounted fs is a seed filesystem, this also "sprouts" a new writable
 * fs on top of the seed (btrfs_prepare_sprout/btrfs_finish_sprout) and the
 * new device receives the first read-write chunks.
 *
 * Locking order visible here: s_umount (seeding only) -> uuid_mutex
 * (seeding only) -> volume_mutex -> chunk lock -> device_list_mutex.
 *
 * Returns 0 on success or a negative errno.
 */
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct request_queue *q;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	u64 total_bytes;
	int seeding_dev = 0;
	int ret = 0;

	/* a read-only mount may only gain devices while sprouting a seed fs */
	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EINVAL;

	bdev = blkdev_get_by_path(device_path, FMODE_EXCL,
				  root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	/* flush any pages cached against the raw block device */
	filemap_write_and_wait(bdev->bd_inode->i_mapping);
	mutex_lock(&root->fs_info->volume_mutex);

	devices = &root->fs_info->fs_devices->devices;
	/*
	 * we have the volume lock, so we don't need the extra
	 * device list mutex while reading the list here.
	 */
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			/* device is already part of this filesystem */
			ret = -EEXIST;
			goto error;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto error;
	}

	device->name = kstrdup(device_path, GFP_NOFS);
	if (!device->name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}

	ret = find_next_devid(root, &device->devid);
	if (ret) {
		kfree(device->name);
		kfree(device);
		goto error;
	}

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		kfree(device->name);
		kfree(device);
		ret = PTR_ERR(trans);
		goto error;
	}

	lock_chunks(root);

	q = bdev_get_queue(bdev);
	if (blk_queue_discard(q))
		device->can_discard = 1;
	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	/* usable size is whatever the block device advertises */
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->mode = FMODE_EXCL;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		/* the sprouted fs becomes writable from here on */
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(trans, root);
		BUG_ON(ret);
	}

	device->fs_devices = root->fs_info->fs_devices;

	/*
	 * we don't want write_supers to jump in here with our device
	 * half setup
	 */
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	if (device->can_discard)
		root->fs_info->fs_devices->num_can_discard++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	/* any rotational member marks the whole pool as rotating */
	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

	total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	btrfs_set_super_total_bytes(&root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
	btrfs_set_super_num_devices(&root->fs_info->super_copy,
				    total_bytes + 1);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (seeding_dev) {
		ret = init_first_rw_device(trans, root, device);
		BUG_ON(ret);
		ret = btrfs_finish_sprout(trans, root);
		BUG_ON(ret);
	} else {
		/*
		 * NOTE(review): the return value of btrfs_add_device() is
		 * stored but never checked before committing; a failure
		 * here would be silently ignored — verify against later
		 * upstream fixes.
		 */
		ret = btrfs_add_device(trans, root, device);
	}

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);

		/* system chunks must live on the sprouted (rw) device */
		ret = btrfs_relocate_sys_chunks(root);
		BUG_ON(ret);
	}
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	return ret;
error:
	blkdev_put(bdev, FMODE_EXCL);
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	goto out;
}

/*
 * Write the in-memory state of @device back into its dev item in the
 * chunk tree.  (Continues on the next chunk line.)
 */
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		/* no exact match: the dev item does not exist */
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	/* continuation of btrfs_update_device() from the previous chunk line */
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	/* on-disk size is disk_total_bytes, not the (possibly shrunk) total_bytes */
	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Grow @device to @new_size.  Caller must hold the chunk lock (see
 * btrfs_grow_device below).  Updates the superblock total, the rw byte
 * accounting, and the on-disk dev item.
 */
static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		&device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	if (!device->writeable)
		return -EACCES;
	if (new_size <= device->total_bytes)
		return -EINVAL;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	device->total_bytes = new_size;
	device->disk_total_bytes = new_size;
	/* more room: allocators may try previously-full space infos again */
	btrfs_clear_space_info_full(device->dev_root->fs_info);

	return btrfs_update_device(trans, device);
}

/* Locked wrapper around __btrfs_grow_device(). */
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;
	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}

/*
 * Delete the chunk item for (chunk_objectid, chunk_offset) from the
 * chunk tree.  The item is expected to exist (BUG_ON on lookup failure).
 */
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	BUG_ON(ret);

	ret = btrfs_del_item(trans, root, path);
	btrfs_free_path(path);
	return ret;
}

/*
 * Remove the copy of a system chunk from the superblock's packed
 * sys_chunk_array.  (Body continues on the next chunk line.)
 */
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
			chunk_offset)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	/* continuation of btrfs_del_sys_chunk() from the previous chunk line */
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);
	ptr = super_copy->sys_chunk_array;
	cur = 0;

	/* walk the packed (disk_key, chunk) pairs in the array */
	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			/* only chunk items may live in the array */
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			/* shift the remainder down; ptr/cur now see new data */
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}

/*
 * Relocate every extent out of one chunk and then delete the chunk:
 * the device extents backing it, its chunk-tree item, its superblock
 * array copy (system chunks), its block group, and its extent mapping.
 *
 * Returns 0 on success, -ENOSPC if the chunk cannot be relocated, or
 * another negative errno from relocation.
 */
static int btrfs_relocate_chunk(struct btrfs_root *root,
			 u64 chunk_tree, u64 chunk_objectid,
			 u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	ret = btrfs_can_relocate(extent_root, chunk_offset);
	if (ret)
		return -ENOSPC;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	BUG_ON(IS_ERR(trans));

	lock_chunks(root);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			BUG_ON(ret);
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);
	BUG_ON(ret);

	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		/* system chunks are mirrored in the superblock array */
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
	BUG_ON(ret);

	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	kfree(map);
	em->bdev = NULL;

	/* once for the tree */
	free_extent_map(em);
	/* once for us */
	free_extent_map(em);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	return 0;
}

/*
 * Walk the chunk tree backwards and relocate every SYSTEM chunk.
 * Chunks that fail with -ENOSPC are retried once after a full pass
 * (relocating other chunks may have freed space); a second failure
 * returns -ENOSPC.
 */
static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_tree = chunk_root->root_key.objectid;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;
		/* key.offset == -1 can never match exactly */
		BUG_ON(ret == 0);

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		/* drop the path before relocation re-enters the trees */
		btrfs_release_path(path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
						   found_key.objectid,
						   found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else if (ret)
				BUG();
		}

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		WARN_ON(1);
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}

/* div_factor(): declaration continues on the next chunk line */
static u64
/*
 * Return num * factor / 10 (i.e. factor tenths of num); factor == 10 is
 * the identity.  Uses the kernel do_div() macro, which divides num in
 * place.  (The "static u64" of this definition is on the previous
 * chunk line.)
 */
div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

/*
 * Rebalance the filesystem: first shrink-then-regrow each device to
 * force a little free space onto every device, then relocate every
 * chunk from highest offset down, which re-allocates it under the
 * current allocation policy.
 *
 * Returns 0 on success or a negative errno.
 */
int btrfs_balance(struct btrfs_root *dev_root)
{
	int ret;
	struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key found_key;

	if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&dev_root->fs_info->volume_mutex);
	dev_root = dev_root->fs_info->dev_root;

	/* step one make some room on all the devices */
	list_for_each_entry(device, devices, dev_list) {
		old_size = device->total_bytes;
		/* free up to 10% of the device, capped at 1MB */
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (!device->writeable ||
		    device->total_bytes - device->bytes_used > size_to_free)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		if (ret == -ENOSPC)
			break;
		BUG_ON(ret);

		trans = btrfs_start_transaction(dev_root, 0);
		BUG_ON(IS_ERR(trans));

		/* grow right back: the shrink/grow cycle migrated extents */
		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	/* NOTE(review): allocation failure crashes instead of erroring out */
	BUG_ON(!path);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			break;

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != key.objectid)
			break;

		/* chunk zero is special */
		if (found_key.offset == 0)
			break;

		btrfs_release_path(path);
		ret = btrfs_relocate_chunk(chunk_root,
					   chunk_root->root_key.objectid,
					   found_key.objectid,
					   found_key.offset);
		if (ret && ret != -ENOSPC)
			goto error;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	mutex_unlock(&dev_root->fs_info->volume_mutex);
	return ret;
}

/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = device->total_bytes;
	u64 diff = device->total_bytes - new_size;

	if (new_size >= device->total_bytes)
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;

	lock_chunks(root);

	/* publish the new size first so the allocator stops using the tail */
	device->total_bytes = new_size;
	if (device->writeable)
		device->fs_devices->total_rw_bytes -= diff;
	unlock_chunks(root);

again:
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	/* walk dev extents from the end of the device downwards */
	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto done;

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			btrfs_release_path(path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			btrfs_release_path(path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		/* extent ends inside the new size: nothing left to move */
		if (key.offset + length <= new_size) {
			btrfs_release_path(path);
			break;
		}

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		/* continues on the next chunk line */
		chunk_objectid =
	/* continuation of btrfs_shrink_device() from the previous chunk line */
			btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(path);

		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
					   chunk_offset);
		if (ret && ret != -ENOSPC)
			goto done;
		if (ret == -ENOSPC)
			failed++;
		key.offset -= 1;
	}

	if (failed && !retried) {
		/* one more pass: earlier relocations may have freed room */
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		/* give up and restore the original size/accounting */
		ret = -ENOSPC;
		lock_chunks(root);

		device->total_bytes = old_size;
		if (device->writeable)
			device->fs_devices->total_rw_bytes += diff;
		unlock_chunks(root);
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto done;
	}

	lock_chunks(root);

	device->disk_total_bytes = new_size;
	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
	if (ret) {
		unlock_chunks(root);
		btrfs_end_transaction(trans, root);
		goto done;
	}
	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
done:
	btrfs_free_path(path);
	return ret;
}

/*
 * Append a (disk_key, chunk) pair for a SYSTEM chunk to the packed
 * sys_chunk_array in the superblock copy.  Returns -EFBIG when the
 * array is full.
 */
static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}

/*
 * sort the devices in descending order by max_avail, total_avail
 */
static int btrfs_cmp_device_info(const void *a, const void *b)
{
	const struct btrfs_device_info *di_a = a;
	const struct btrfs_device_info *di_b = b;

	if (di_a->max_avail > di_b->max_avail)
		return -1;
	if (di_a->max_avail < di_b->max_avail)
		return 1;
	if (di_a->total_avail > di_b->total_avail)
		return -1;
	if (di_a->total_avail < di_b->total_avail)
		return 1;
	return 0;
}

/*
 * Allocate a new chunk of the given @type (RAID profile + data/metadata/
 * system flags) at logical offset @start.
 *
 * Builds a map_lookup describing the stripes, reserves a device extent on
 * each chosen device, inserts the extent mapping and creates the block
 * group.  On success *map_ret, *num_bytes_out and *stripe_size_out are
 * filled in; ownership of the map passes to the extent mapping tree.
 */
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root,
			       struct map_lookup **map_ret,
			       u64 *num_bytes_out, u64 *stripe_size_out,
			       u64 start, u64 type)
{
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct list_head *cur;
	struct map_lookup *map = NULL;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_device_info *devices_info = NULL;
	u64 total_avail;
	int num_stripes;	/* total number of stripes to allocate */
	int sub_stripes;	/* sub_stripes info for map */
	int dev_stripes;	/* stripes per dev */
	int devs_max;		/* max devs to use */
	int devs_min;		/* min devs needed */
	int devs_increment;	/* ndevs has to be a multiple of this */
	int ncopies;		/* how many copies to data has */
	int ret;
	u64 max_stripe_size;
	u64 max_chunk_size;
	u64 stripe_size;
	u64 num_bytes;
	int ndevs;
	int i;
	int j;

	if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
	    (type & BTRFS_BLOCK_GROUP_DUP)) {
		/* RAID1 and DUP are mutually exclusive; prefer RAID1 */
		WARN_ON(1);
		type &= ~BTRFS_BLOCK_GROUP_DUP;
	}

	if (list_empty(&fs_devices->alloc_list))
		return -ENOSPC;

	sub_stripes = 1;
	dev_stripes = 1;
	devs_increment = 1;
	ncopies = 1;
	devs_max = 0;	/* 0 == as many as possible */
	devs_min = 1;

	/*
	 * define the properties of each RAID type.
	 * FIXME: move this to a global table and use it in all RAID
	 * calculation code
	 */
	if (type & (BTRFS_BLOCK_GROUP_DUP)) {
		dev_stripes = 2;
		ncopies = 2;
		devs_max = 1;
	} else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		devs_min = 2;
	} else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
		devs_increment = 2;
		ncopies = 2;
		devs_max = 2;
		devs_min = 2;
	} else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		sub_stripes = 2;
		devs_increment = 2;
		ncopies = 2;
		devs_min = 4;
	} else {
		devs_max = 1;
	}

	/* per-class limits: data 1G/10G, metadata 256M, system 8M/16M */
	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_stripe_size = 1024 * 1024 * 1024;
		max_chunk_size = 10 * max_stripe_size;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		max_stripe_size = 256 * 1024 * 1024;
		max_chunk_size = max_stripe_size;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		max_stripe_size = 8 * 1024 * 1024;
		max_chunk_size = 2 * max_stripe_size;
	} else {
		printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
		       type);
		BUG_ON(1);
	}

	/* we don't want a chunk larger than 10% of writeable space */
	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
			     max_chunk_size);

	devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
			       GFP_NOFS);
	if (!devices_info)
		return -ENOMEM;

	cur = fs_devices->alloc_list.next;

	/*
	 * in the first pass through the devices list, we gather information
	 * about the available holes on each device.
	 */
	ndevs = 0;
	while (cur != &fs_devices->alloc_list) {
		struct btrfs_device *device;
		u64 max_avail;
		u64 dev_offset;

		device = list_entry(cur, struct btrfs_device, dev_alloc_list);

		cur = cur->next;

		if (!device->writeable) {
			printk(KERN_ERR
			       "btrfs: read-only device in alloc_list\n");
			WARN_ON(1);
			continue;
		}

		if (!device->in_fs_metadata)
			continue;

		if (device->total_bytes > device->bytes_used)
			total_avail = device->total_bytes - device->bytes_used;
		else
			total_avail = 0;
		/* avail is off by max(alloc_start, 1MB), but that is the same
		 * for all devices, so it doesn't hurt the sorting later on */

		ret = find_free_dev_extent(trans, device,
					   max_stripe_size * dev_stripes,
					   &dev_offset, &max_avail);
		if (ret && ret != -ENOSPC)
			goto error;

		if (ret == 0)
			max_avail = max_stripe_size * dev_stripes;

		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
			continue;

		devices_info[ndevs].dev_offset = dev_offset;
		devices_info[ndevs].max_avail = max_avail;
		devices_info[ndevs].total_avail = total_avail;
		devices_info[ndevs].dev = device;
		++ndevs;
	}

	/*
	 * now sort the devices by hole size / available space
	 */
	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
	     btrfs_cmp_device_info, NULL);

	/* round down to number of usable stripes */
	ndevs -= ndevs % devs_increment;

	if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
		ret = -ENOSPC;
		goto error;
	}

	if (devs_max && ndevs > devs_max)
		ndevs = devs_max;
	/*
	 * the primary goal is to maximize the number of stripes, so use as many
	 * devices as possible, even if the stripes are not maximum sized.
	 */
	/* the smallest selected hole bounds every stripe (list is sorted) */
	stripe_size = devices_info[ndevs-1].max_avail;
	num_stripes = ndevs * dev_stripes;

	if (stripe_size * num_stripes > max_chunk_size * ncopies) {
		stripe_size = max_chunk_size * ncopies;
		do_div(stripe_size, num_stripes);
	}

	do_div(stripe_size, dev_stripes);
	/* align stripe_size down to a multiple of BTRFS_STRIPE_LEN */
	do_div(stripe_size, BTRFS_STRIPE_LEN);
	stripe_size *= BTRFS_STRIPE_LEN;

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		ret = -ENOMEM;
		goto error;
	}
	map->num_stripes = num_stripes;

	for (i = 0; i < ndevs; ++i) {
		for (j = 0; j < dev_stripes; ++j) {
			int s = i * dev_stripes + j;
			map->stripes[s].dev = devices_info[i].dev;
			map->stripes[s].physical = devices_info[i].dev_offset +
						   j * stripe_size;
		}
	}
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = BTRFS_STRIPE_LEN;
	map->io_align = BTRFS_STRIPE_LEN;
	map->io_width = BTRFS_STRIPE_LEN;
	map->type = type;
	map->sub_stripes = sub_stripes;

	*map_ret = map;
	num_bytes = stripe_size * (num_stripes / ncopies);

	*stripe_size_out = stripe_size;
	*num_bytes_out = num_bytes;

	trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);

	em = alloc_extent_map();
	if (!em) {
		ret = -ENOMEM;
		goto error;
	}
	/* the bdev field is overloaded to carry the map_lookup */
	em->bdev = (struct block_device *)map;
	em->start = start;
	em->len = num_bytes;
	em->block_start = 0;
	em->block_len = em->len;

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);
	BUG_ON(ret);
	/* drop our ref; the tree keeps its own */
	free_extent_map(em);

	ret = btrfs_make_block_group(trans, extent_root, 0, type,
				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				     start, num_bytes);
	BUG_ON(ret);

	for (i = 0; i < map->num_stripes; ++i) {
		struct btrfs_device *device;
		u64 dev_offset;

		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		ret = btrfs_alloc_dev_extent(trans, device,
				info->chunk_root->root_key.objectid,
				BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				start, dev_offset, stripe_size);
		BUG_ON(ret);
	}

	kfree(devices_info);
	return 0;

error:
	kfree(map);
	kfree(devices_info);
	return ret;
}

/* __finish_chunk_alloc(): declaration continues on the next chunk line */
static int
/*
 * Second half of chunk allocation (the "static int" of this definition
 * is on the previous chunk line): charge the new stripes against each
 * device, build the on-disk chunk item and insert it into the chunk
 * tree, mirroring SYSTEM chunks into the superblock array.
 */
__finish_chunk_alloc(struct btrfs_trans_handle *trans,
				struct btrfs_root *extent_root,
				struct map_lookup *map, u64 chunk_offset,
				u64 chunk_size, u64 stripe_size)
{
	u64 dev_offset;
	struct btrfs_key key;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	size_t item_size = btrfs_chunk_item_size(map->num_stripes);
	int index = 0;
	int ret;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	/* first pass: account the used bytes on every member device */
	index = 0;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		device->bytes_used += stripe_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);
		index++;
	}

	/* second pass: fill the stripe records of the chunk item */
	index = 0;
	stripe = &chunk->stripe;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		dev_offset = map->stripes[index].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
		index++;
	}

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	BUG_ON(ret);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
					     item_size);
		BUG_ON(ret);
	}

	kfree(chunk);
	return 0;
}

/*
 * Chunk allocation falls into two parts. The first part does works
 * that make the new allocated chunk useable, but not do any operation
 * that modifies the chunk tree. The second part does the works that
 * require modifying the chunk tree. This division is important for the
 * bootstrap process of adding storage to a seed btrfs.
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 type)
{
	u64 chunk_offset;
	u64 chunk_size;
	u64 stripe_size;
	struct map_lookup *map;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	int ret;

	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &chunk_offset);
	if (ret)
		return ret;

	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, type);
	if (ret)
		return ret;

	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);
	BUG_ON(ret);
	return 0;
}

/*
 * Bootstrap the first read-write device of a sprouted seed fs: allocate
 * one metadata and one system chunk, add the device to the chunk root,
 * and only then finish both chunks (finishing modifies the chunk tree
 * and therefore needs both block groups to exist — see comment below).
 */
static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_device *device)
{
	u64 chunk_offset;
	u64 sys_chunk_offset;
	u64 chunk_size;
	u64 sys_chunk_size;
	u64 stripe_size;
	u64 sys_stripe_size;
	u64 alloc_profile;
	struct map_lookup *map;
	struct map_lookup *sys_map;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;

	ret = find_next_chunk(fs_info->chunk_root,
			      BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
	BUG_ON(ret);

	alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
			(fs_info->metadata_alloc_profile &
			 fs_info->avail_metadata_alloc_bits);
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, alloc_profile);
	BUG_ON(ret);

	sys_chunk_offset = chunk_offset + chunk_size;

	alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
			(fs_info->system_alloc_profile &
			 fs_info->avail_system_alloc_bits);
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

	ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
				  &sys_chunk_size, &sys_stripe_size,
				  sys_chunk_offset, alloc_profile);
	BUG_ON(ret);

	ret = btrfs_add_device(trans, fs_info->chunk_root, device);
	BUG_ON(ret);

	/*
	 * Modifying chunk tree needs allocating new blocks from both
	 * system block group and metadata block group. So we only can
	 * do operations require modifying the chunk tree after both
	 * block groups were created.
	 */
	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);
	BUG_ON(ret);

	ret = __finish_chunk_alloc(trans, extent_root, sys_map,
				   sys_chunk_offset,
				   sys_chunk_size, sys_stripe_size);
	BUG_ON(ret);
	return 0;
}

/*
 * Return 1 if the chunk at @chunk_offset has any non-writeable stripe
 * device (or is unknown), 0 otherwise.  Always 0 on DEGRADED mounts.
 */
int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	int readonly = 0;
	int i;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);
	if (!em)
		return 1;

	if (btrfs_test_opt(root, DEGRADED)) {
		free_extent_map(em);
		return 0;
	}

	map = (struct map_lookup *)em->bdev;
	for (i = 0; i < map->num_stripes; i++) {
		if (!map->stripes[i].dev->writeable) {
			readonly = 1;
			break;
		}
	}
	free_extent_map(em);
	return readonly;
}

/* Initialize the logical->physical mapping tree. */
void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree);
}

/*
 * Tear down the mapping tree, freeing each extent map and the
 * map_lookup smuggled in its bdev field.
 */
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		write_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		write_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		kfree(em->bdev);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}

/*
 * Number of redundant copies of the data at @logical: num_stripes for
 * DUP/RAID1, sub_stripes for RAID10, otherwise 1.  (Continues on the
 * next chunk line.)
 */
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical,
/* continuation of btrfs_num_copies() from the previous chunk line */
				   len);
	read_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else
		ret = 1;
	free_extent_map(em);
	return ret;
}

/*
 * Pick a mirror whose device has an open bdev, preferring @optimal and
 * scanning [first, first + num) otherwise.
 */
static int find_live_mirror(struct map_lookup *map, int first, int num,
			    int optimal)
{
	int i;
	if (map->stripes[optimal].dev->bdev)
		return optimal;
	for (i = first; i < first + num; i++) {
		if (map->stripes[i].dev->bdev)
			return i;
	}
	/* we couldn't find one that doesn't fail.  Just return something
	 * and the io error handling code will clean up eventually
	 */
	return optimal;
}

/*
 * Map a logical byte range to physical stripes.
 *
 * Looks up the chunk containing @logical, clamps *length to what can be
 * serviced in one bio, and when @multi_ret is non-NULL fills a
 * btrfs_multi_bio with one (device, physical offset[, length]) entry per
 * stripe that the request (@rw: read/write/discard, with optional
 * @mirror_num mirror selection) must touch.
 *
 * Note: do_div() divides its first argument IN PLACE and returns the
 * remainder, so e.g. "stripe_index = do_div(stripe_nr, n)" both strides
 * stripe_nr and extracts the device index.
 */
static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
			     u64 logical, u64 *length,
			     struct btrfs_multi_bio **multi_ret,
			     int mirror_num)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_end_offset;
	u64 stripe_nr;
	u64 stripe_nr_orig;
	u64 stripe_nr_end;
	int stripes_allocated = 8;
	int stripes_required = 1;
	int stripe_index;
	int i;
	int num_stripes;
	int max_errors = 0;
	struct btrfs_multi_bio *multi = NULL;

	/* plain reads touch a single stripe */
	if (multi_ret && !(rw & (REQ_WRITE | REQ_DISCARD)))
		stripes_allocated = 1;
again:
	if (multi_ret) {
		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
				GFP_NOFS);
		if (!multi)
			return -ENOMEM;

		atomic_set(&multi->error, 0);
	}

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	read_unlock(&em_tree->lock);

	if (!em) {
		printk(KERN_CRIT "unable to find logical %llu len %llu\n",
		       (unsigned long long)logical,
		       (unsigned long long)*length);
		BUG();
	}

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	offset = logical - em->start;

	if (mirror_num > map->num_stripes)
		mirror_num = 0;

	/* if our multi bio struct is too small, back off and try again */
	if (rw & REQ_WRITE) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			stripes_required = map->num_stripes;
			max_errors = 1;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripes_required = map->sub_stripes;
			max_errors = 1;
		}
	}
	if (rw & REQ_DISCARD) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
				 BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP |
				 BTRFS_BLOCK_GROUP_RAID10)) {
			stripes_required = map->num_stripes;
		}
	}
	if (multi_ret && (rw & (REQ_WRITE | REQ_DISCARD)) &&
	    stripes_allocated < stripes_required) {
		stripes_allocated = map->num_stripes;
		free_extent_map(em);
		kfree(multi);
		goto again;
	}
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	do_div(stripe_nr, map->stripe_len);

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe*/
	stripe_offset = offset - stripe_offset;

	if (rw & REQ_DISCARD)
		*length = min_t(u64, em->len - offset, *length);
	else if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
			      BTRFS_BLOCK_GROUP_RAID1 |
			      BTRFS_BLOCK_GROUP_RAID10 |
			      BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, em->len - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = em->len - offset;
	}

	if (!multi_ret)
		goto out;

	num_stripes = 1;
	stripe_index = 0;
	stripe_nr_orig = stripe_nr;
	/* discard range end, rounded up to a stripe boundary */
	stripe_nr_end = (offset + *length + map->stripe_len - 1) &
			(~(map->stripe_len - 1));
	do_div(stripe_nr_end, map->stripe_len);
	stripe_end_offset = stripe_nr_end * map->stripe_len -
			    (offset + *length);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		if (rw & REQ_DISCARD)
			num_stripes = min_t(u64, map->num_stripes,
					    stripe_nr_end - stripe_nr_orig);
		stripe_index = do_div(stripe_nr, map->num_stripes);
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (rw & (REQ_WRITE | REQ_DISCARD))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else {
			/* spread reads across mirrors by pid */
			stripe_index = find_live_mirror(map, 0,
					    map->num_stripes,
					    current->pid % map->num_stripes);
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw & (REQ_WRITE | REQ_DISCARD))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = do_div(stripe_nr, factor);
		stripe_index *= map->sub_stripes;

		if (rw & REQ_WRITE)
			num_stripes = map->sub_stripes;
		else if (rw & REQ_DISCARD)
			num_stripes = min_t(u64, map->sub_stripes *
					    (stripe_nr_end - stripe_nr_orig),
					    map->num_stripes);
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			stripe_index = find_live_mirror(map, stripe_index,
					      map->sub_stripes, stripe_index +
					      current->pid % map->sub_stripes);
		}
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = do_div(stripe_nr, map->num_stripes);
	}
	BUG_ON(stripe_index >= map->num_stripes);

	if (rw & REQ_DISCARD) {
		/* discards carry a per-stripe length as well */
		for (i = 0; i < num_stripes; i++) {
			multi->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
			multi->stripes[i].dev = map->stripes[stripe_index].dev;

			if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
				u64 stripes;
				u32 last_stripe = 0;
				int j;

				div_u64_rem(stripe_nr_end - 1,
						map->num_stripes,
						&last_stripe);

				/* find how far past stripe_nr_end-1 this
				 * device's last full stripe lies */
				for (j = 0; j < map->num_stripes; j++) {
					u32 test;

					div_u64_rem(stripe_nr_end - 1 - j,
							map->num_stripes, &test);
					if (test == stripe_index)
						break;
				}
				stripes = stripe_nr_end - 1 - j;
				do_div(stripes, map->num_stripes);
				multi->stripes[i].length = map->stripe_len *
					(stripes - stripe_nr + 1);

				if (i == 0) {
					multi->stripes[i].length -=
						stripe_offset;
					stripe_offset = 0;
				}
				if (stripe_index == last_stripe)
					multi->stripes[i].length -=
						stripe_end_offset;
			} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
				u64 stripes;
				int j;
				int factor = map->num_stripes /
					     map->sub_stripes;
				u32 last_stripe = 0;

				div_u64_rem(stripe_nr_end - 1,
						factor, &last_stripe);
				last_stripe *= map->sub_stripes;

				for (j = 0; j < factor; j++) {
					u32 test;

					div_u64_rem(stripe_nr_end - 1 - j,
							factor, &test);

					if (test ==
					    stripe_index / map->sub_stripes)
						break;
				}
				stripes = stripe_nr_end - 1 - j;
				do_div(stripes, factor);
				multi->stripes[i].length = map->stripe_len *
					(stripes - stripe_nr + 1);

				if (i < map->sub_stripes) {
					multi->stripes[i].length -=
						stripe_offset;
					if (i == map->sub_stripes - 1)
						stripe_offset = 0;
				}
				if (stripe_index >= last_stripe &&
				    stripe_index <= (last_stripe +
						     map->sub_stripes - 1)) {
					multi->stripes[i].length -=
						stripe_end_offset;
				}
			} else
				multi->stripes[i].length = *length;

			stripe_index++;
			if (stripe_index == map->num_stripes) {
				/* This could only happen for RAID0/10 */
				stripe_index = 0;
				stripe_nr++;
			}
		}
	} else {
		for (i = 0; i < num_stripes; i++) {
			multi->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset +
				stripe_nr * map->stripe_len;
			multi->stripes[i].dev =
				map->stripes[stripe_index].dev;
			stripe_index++;
		}
	}
	if (multi_ret) {
		*multi_ret = multi;
		multi->num_stripes = num_stripes;
		multi->max_errors = max_errors;
	}
out:
	free_extent_map(em);
	return 0;
}

/* Public wrapper around __btrfs_map_block(). */
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		      u64 logical, u64 *length,
		      struct btrfs_multi_bio **multi_ret, int mirror_num)
{
	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
				 mirror_num);
}

/*
 * Reverse map a physical device byte to the logical addresses that
 * reference it.  (Incomplete here: the function runs past the end of
 * this chunk.)
 */
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	int i, j, nr = 0;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_start, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(!em || em->start != chunk_start);
	map = (struct map_lookup *)em->bdev;

	length = em->len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		do_div(length,
map->num_stripes / map->sub_stripes); else if (map->type & BTRFS_BLOCK_GROUP_RAID0) do_div(length, map->num_stripes); buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS); BUG_ON(!buf); for (i = 0; i < map->num_stripes; i++) { if (devid && map->stripes[i].dev->devid != devid) continue; if (map->stripes[i].physical > physical || map->stripes[i].physical + length <= physical) continue; stripe_nr = physical - map->stripes[i].physical; do_div(stripe_nr, map->stripe_len); if (map->type & BTRFS_BLOCK_GROUP_RAID10) { stripe_nr = stripe_nr * map->num_stripes + i; do_div(stripe_nr, map->sub_stripes); } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) { stripe_nr = stripe_nr * map->num_stripes + i; } bytenr = chunk_start + stripe_nr * map->stripe_len; WARN_ON(nr >= map->num_stripes); for (j = 0; j < nr; j++) { if (buf[j] == bytenr) break; } if (j == nr) { WARN_ON(nr >= map->num_stripes); buf[nr++] = bytenr; } } *logical = buf; *naddrs = nr; *stripe_len = map->stripe_len; free_extent_map(em); return 0; } static void end_bio_multi_stripe(struct bio *bio, int err) { struct btrfs_multi_bio *multi = bio->bi_private; int is_orig_bio = 0; if (err) atomic_inc(&multi->error); if (bio == multi->orig_bio) is_orig_bio = 1; if (atomic_dec_and_test(&multi->stripes_pending)) { if (!is_orig_bio) { bio_put(bio); bio = multi->orig_bio; } bio->bi_private = multi->private; bio->bi_end_io = multi->end_io; /* only send an error to the higher layers if it is * beyond the tolerance of the multi-bio */ if (atomic_read(&multi->error) > multi->max_errors) { err = -EIO; } else if (err) { /* * this bio is actually up to date, we didn't * go over the max number of errors */ set_bit(BIO_UPTODATE, &bio->bi_flags); err = 0; } kfree(multi); bio_endio(bio, err); } else if (!is_orig_bio) { bio_put(bio); } } struct async_sched { struct bio *bio; int rw; struct btrfs_fs_info *info; struct btrfs_work work; }; /* * see run_scheduled_bios for a description of why bios are collected for * async submit. 
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline int schedule_bio(struct btrfs_root *root,
				 struct btrfs_device *device,
				 int rw, struct bio *bio)
{
	int should_queue = 1;
	struct btrfs_pending_bios *pending_bios;

	/* don't bother with additional async steps for reads, right now */
	if (!(rw & REQ_WRITE)) {
		bio_get(bio);
		submit_bio(rw, bio);
		bio_put(bio);
		return 0;
	}

	/*
	 * nr_async_bios allows us to reliably return congestion to the
	 * higher layers.  Otherwise, the async bio makes it appear we have
	 * made progress against dirty pages when we've really just put it
	 * on a queue for later
	 */
	atomic_inc(&root->fs_info->nr_async_bios);
	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;
	bio->bi_rw |= rw;

	spin_lock(&device->io_lock);
	/* REQ_SYNC bios go on a separate per-device list. */
	if (bio->bi_rw & REQ_SYNC)
		pending_bios = &device->pending_sync_bios;
	else
		pending_bios = &device->pending_bios;

	/* Append to the tail of the chosen singly-linked pending list. */
	if (pending_bios->tail)
		pending_bios->tail->bi_next = bio;

	pending_bios->tail = bio;
	if (!pending_bios->head)
		pending_bios->head = bio;
	/* If the worker is already draining this device, don't requeue it. */
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_worker(&root->fs_info->submit_workers,
				   &device->work);
	return 0;
}

/*
 * Map the logical address of @bio through the chunk mapping and dispatch
 * it to the underlying device stripe(s).  (Body continues on the next
 * span of the file.)
 */
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num, int async_submit)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_sector << 9; /* sector -> byte offset */
	u64 length = 0;
	u64 map_length;
	struct btrfs_multi_bio *multi = NULL;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;

	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
			      mirror_num);
	BUG_ON(ret);

	total_devs = multi->num_stripes;
	/* The mapping must cover the whole bio; a short mapping is fatal. */
	if (map_length < length) {
		printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
		       "len %llu\n", (unsigned long long)logical,
		       (unsigned long long)length,
		       (unsigned long long)map_length);
		BUG();
	}
	multi->end_io =
first_bio->bi_end_io; multi->private = first_bio->bi_private; multi->orig_bio = first_bio; atomic_set(&multi->stripes_pending, multi->num_stripes); while (dev_nr < total_devs) { if (total_devs > 1) { if (dev_nr < total_devs - 1) { bio = bio_clone(first_bio, GFP_NOFS); BUG_ON(!bio); } else { bio = first_bio; } bio->bi_private = multi; bio->bi_end_io = end_bio_multi_stripe; } bio->bi_sector = multi->stripes[dev_nr].physical >> 9; dev = multi->stripes[dev_nr].dev; if (dev && dev->bdev && (rw != WRITE || dev->writeable)) { bio->bi_bdev = dev->bdev; if (async_submit) schedule_bio(root, dev, rw, bio); else submit_bio(rw, bio); } else { bio->bi_bdev = root->fs_info->fs_devices->latest_bdev; bio->bi_sector = logical >> 9; bio_endio(bio, -EIO); } dev_nr++; } if (total_devs == 1) kfree(multi); return 0; } struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid, u8 *uuid, u8 *fsid) { struct btrfs_device *device; struct btrfs_fs_devices *cur_devices; cur_devices = root->fs_info->fs_devices; while (cur_devices) { if (!fsid || !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) { device = __find_device(&cur_devices->devices, devid, uuid); if (device) return device; } cur_devices = cur_devices->seed; } return NULL; } static struct btrfs_device *add_missing_dev(struct btrfs_root *root, u64 devid, u8 *dev_uuid) { struct btrfs_device *device; struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; device = kzalloc(sizeof(*device), GFP_NOFS); if (!device) return NULL; list_add(&device->dev_list, &fs_devices->devices); device->dev_root = root->fs_info->dev_root; device->devid = devid; device->work.func = pending_bios_fn; device->fs_devices = fs_devices; device->missing = 1; fs_devices->num_devices++; fs_devices->missing_devices++; spin_lock_init(&device->io_lock); INIT_LIST_HEAD(&device->dev_alloc_list); memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE); return device; } static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, struct 
extent_buffer *leaf, struct btrfs_chunk *chunk) { struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree; struct map_lookup *map; struct extent_map *em; u64 logical; u64 length; u64 devid; u8 uuid[BTRFS_UUID_SIZE]; int num_stripes; int ret; int i; logical = key->offset; length = btrfs_chunk_length(leaf, chunk); read_lock(&map_tree->map_tree.lock); em = lookup_extent_mapping(&map_tree->map_tree, logical, 1); read_unlock(&map_tree->map_tree.lock); /* already mapped? */ if (em && em->start <= logical && em->start + em->len > logical) { free_extent_map(em); return 0; } else if (em) { free_extent_map(em); } em = alloc_extent_map(); if (!em) return -ENOMEM; num_stripes = btrfs_chunk_num_stripes(leaf, chunk); map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); if (!map) { free_extent_map(em); return -ENOMEM; } em->bdev = (struct block_device *)map; em->start = logical; em->len = length; em->block_start = 0; em->block_len = em->len; map->num_stripes = num_stripes; map->io_width = btrfs_chunk_io_width(leaf, chunk); map->io_align = btrfs_chunk_io_align(leaf, chunk); map->sector_size = btrfs_chunk_sector_size(leaf, chunk); map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); map->type = btrfs_chunk_type(leaf, chunk); map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); for (i = 0; i < num_stripes; i++) { map->stripes[i].physical = btrfs_stripe_offset_nr(leaf, chunk, i); devid = btrfs_stripe_devid_nr(leaf, chunk, i); read_extent_buffer(leaf, uuid, (unsigned long) btrfs_stripe_dev_uuid_nr(chunk, i), BTRFS_UUID_SIZE); map->stripes[i].dev = btrfs_find_device(root, devid, uuid, NULL); if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) { kfree(map); free_extent_map(em); return -EIO; } if (!map->stripes[i].dev) { map->stripes[i].dev = add_missing_dev(root, devid, uuid); if (!map->stripes[i].dev) { kfree(map); free_extent_map(em); return -EIO; } } map->stripes[i].dev->in_fs_metadata = 1; } write_lock(&map_tree->map_tree.lock); ret = 
add_extent_mapping(&map_tree->map_tree, em); write_unlock(&map_tree->map_tree.lock); BUG_ON(ret); free_extent_map(em); return 0; } static int fill_device_from_item(struct extent_buffer *leaf, struct btrfs_dev_item *dev_item, struct btrfs_device *device) { unsigned long ptr; device->devid = btrfs_device_id(leaf, dev_item); device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); device->total_bytes = device->disk_total_bytes; device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); device->type = btrfs_device_type(leaf, dev_item); device->io_align = btrfs_device_io_align(leaf, dev_item); device->io_width = btrfs_device_io_width(leaf, dev_item); device->sector_size = btrfs_device_sector_size(leaf, dev_item); ptr = (unsigned long)btrfs_device_uuid(dev_item); read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); return 0; } static int open_seed_devices(struct btrfs_root *root, u8 *fsid) { struct btrfs_fs_devices *fs_devices; int ret; mutex_lock(&uuid_mutex); fs_devices = root->fs_info->fs_devices->seed; while (fs_devices) { if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) { ret = 0; goto out; } fs_devices = fs_devices->seed; } fs_devices = find_fsid(fsid); if (!fs_devices) { ret = -ENOENT; goto out; } fs_devices = clone_fs_devices(fs_devices); if (IS_ERR(fs_devices)) { ret = PTR_ERR(fs_devices); goto out; } ret = __btrfs_open_devices(fs_devices, FMODE_READ, root->fs_info->bdev_holder); if (ret) goto out; if (!fs_devices->seeding) { __btrfs_close_devices(fs_devices); free_fs_devices(fs_devices); ret = -EINVAL; goto out; } fs_devices->seed = root->fs_info->fs_devices->seed; root->fs_info->fs_devices->seed = fs_devices; out: mutex_unlock(&uuid_mutex); return ret; } static int read_one_dev(struct btrfs_root *root, struct extent_buffer *leaf, struct btrfs_dev_item *dev_item) { struct btrfs_device *device; u64 devid; int ret; u8 fs_uuid[BTRFS_UUID_SIZE]; u8 dev_uuid[BTRFS_UUID_SIZE]; devid = btrfs_device_id(leaf, dev_item); 
read_extent_buffer(leaf, dev_uuid, (unsigned long)btrfs_device_uuid(dev_item), BTRFS_UUID_SIZE); read_extent_buffer(leaf, fs_uuid, (unsigned long)btrfs_device_fsid(dev_item), BTRFS_UUID_SIZE); if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) { ret = open_seed_devices(root, fs_uuid); if (ret && !btrfs_test_opt(root, DEGRADED)) return ret; } device = btrfs_find_device(root, devid, dev_uuid, fs_uuid); if (!device || !device->bdev) { if (!btrfs_test_opt(root, DEGRADED)) return -EIO; if (!device) { printk(KERN_WARNING "warning devid %llu missing\n", (unsigned long long)devid); device = add_missing_dev(root, devid, dev_uuid); if (!device) return -ENOMEM; } else if (!device->missing) { /* * this happens when a device that was properly setup * in the device info lists suddenly goes bad. * device->bdev is NULL, and so we have to set * device->missing to one here */ root->fs_info->fs_devices->missing_devices++; device->missing = 1; } } if (device->fs_devices != root->fs_info->fs_devices) { BUG_ON(device->writeable); if (device->generation != btrfs_device_generation(leaf, dev_item)) return -EINVAL; } fill_device_from_item(leaf, dev_item, device); device->dev_root = root->fs_info->dev_root; device->in_fs_metadata = 1; if (device->writeable) device->fs_devices->total_rw_bytes += device->total_bytes; ret = 0; return ret; } int btrfs_read_sys_array(struct btrfs_root *root) { struct btrfs_super_block *super_copy = &root->fs_info->super_copy; struct extent_buffer *sb; struct btrfs_disk_key *disk_key; struct btrfs_chunk *chunk; u8 *ptr; unsigned long sb_ptr; int ret = 0; u32 num_stripes; u32 array_size; u32 len = 0; u32 cur; struct btrfs_key key; sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET, BTRFS_SUPER_INFO_SIZE); if (!sb) return -ENOMEM; btrfs_set_buffer_uptodate(sb); btrfs_set_buffer_lockdep_class(sb, 0); write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); array_size = btrfs_super_sys_array_size(super_copy); ptr = 
super_copy->sys_chunk_array; sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array); cur = 0; while (cur < array_size) { disk_key = (struct btrfs_disk_key *)ptr; btrfs_disk_key_to_cpu(&key, disk_key); len = sizeof(*disk_key); ptr += len; sb_ptr += len; cur += len; if (key.type == BTRFS_CHUNK_ITEM_KEY) { chunk = (struct btrfs_chunk *)sb_ptr; ret = read_one_chunk(root, &key, sb, chunk); if (ret) break; num_stripes = btrfs_chunk_num_stripes(sb, chunk); len = btrfs_chunk_item_size(num_stripes); } else { ret = -EIO; break; } ptr += len; sb_ptr += len; cur += len; } free_extent_buffer(sb); return ret; } int btrfs_read_chunk_tree(struct btrfs_root *root) { struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_key key; struct btrfs_key found_key; int ret; int slot; root = root->fs_info->chunk_root; path = btrfs_alloc_path(); if (!path) return -ENOMEM; /* first we search for all of the device items, and then we * read in all of the chunk items. This way we can create chunk * mappings that reference all of the devices that are afound */ key.objectid = BTRFS_DEV_ITEMS_OBJECTID; key.offset = 0; key.type = 0; again: ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto error; while (1) { leaf = path->nodes[0]; slot = path->slots[0]; if (slot >= btrfs_header_nritems(leaf)) { ret = btrfs_next_leaf(root, path); if (ret == 0) continue; if (ret < 0) goto error; break; } btrfs_item_key_to_cpu(leaf, &found_key, slot); if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) { if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID) break; if (found_key.type == BTRFS_DEV_ITEM_KEY) { struct btrfs_dev_item *dev_item; dev_item = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item); ret = read_one_dev(root, leaf, dev_item); if (ret) goto error; } } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { struct btrfs_chunk *chunk; chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); ret = read_one_chunk(root, &found_key, leaf, chunk); if (ret) goto error; } path->slots[0]++; 
} if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) { key.objectid = 0; btrfs_release_path(path); goto again; } ret = 0; error: btrfs_free_path(path); return ret; }
gpl-2.0
YogeshNain/linux
net/irda/irlan/irlan_filter.c
1426
6352
/*********************************************************************
 *
 * Filename:      irlan_filter.c
 * Version:
 * Description:
 * Status:        Experimental.
 * Author:        Dag Brattli <dagb@cs.uit.no>
 * Created at:    Fri Jan 29 11:16:38 1999
 * Modified at:   Sat Oct 30 12:58:45 1999
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 *
 *     Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
 *
 *     This program is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of
 *     the License, or (at your option) any later version.
 *
 *     Neither Dag Brattli nor University of Tromsø admit liability nor
 *     provide warranty for any of this software. This material is
 *     provided "AS-IS" and at no charge.
 *
 ********************************************************************/

#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/seq_file.h>

#include <net/irda/irlan_common.h>
#include <net/irda/irlan_filter.h>

/*
 * Function irlan_filter_request (self, skb)
 *
 *    Handle filter request from client peer device
 *
 *    Inspects the filter type/mode/operation previously parsed into
 *    self->provider and writes the two-byte response code (result byte,
 *    then parameter count) plus any response parameters directly into
 *    skb->data.
 */
void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);

	if ((self->provider.filter_type == IRLAN_DIRECTED) &&
	    (self->provider.filter_operation == DYNAMIC))
	{
		pr_debug("Giving peer a dynamic Ethernet address\n");
		/* Locally-administered address prefix 40:00:00:00 */
		self->provider.mac_address[0] = 0x40;
		self->provider.mac_address[1] = 0x00;
		self->provider.mac_address[2] = 0x00;
		self->provider.mac_address[3] = 0x00;

		/* Use arbitration value to generate MAC address */
		if (self->provider.access_type == ACCESS_PEER) {
			self->provider.mac_address[4] =
				self->provider.send_arb_val & 0xff;
			self->provider.mac_address[5] =
				(self->provider.send_arb_val >> 8) & 0xff;
		} else {
			/* Just generate something for now */
			get_random_bytes(self->provider.mac_address+4, 1);
			get_random_bytes(self->provider.mac_address+5, 1);
		}

		skb->data[0] = 0x00; /* Success */
		skb->data[1] = 0x03; /* Three response parameters follow */
		irlan_insert_string_param(skb, "FILTER_MODE", "NONE");
		irlan_insert_short_param(skb, "MAX_ENTRY", 0x0001);
		irlan_insert_array_param(skb, "FILTER_ENTRY",
					 self->provider.mac_address, 6);
		return;
	}

	if ((self->provider.filter_type == IRLAN_DIRECTED) &&
	    (self->provider.filter_mode == FILTER))
	{
		pr_debug("Directed filter on\n");
		skb->data[0] = 0x00; /* Success */
		skb->data[1] = 0x00; /* No response parameters */
		return;
	}
	if ((self->provider.filter_type == IRLAN_DIRECTED) &&
	    (self->provider.filter_mode == NONE))
	{
		pr_debug("Directed filter off\n");
		skb->data[0] = 0x00; /* Success */
		skb->data[1] = 0x00; /* No response parameters */
		return;
	}

	if ((self->provider.filter_type == IRLAN_BROADCAST) &&
	    (self->provider.filter_mode == FILTER))
	{
		pr_debug("Broadcast filter on\n");
		skb->data[0] = 0x00; /* Success */
		skb->data[1] = 0x00; /* No response parameters */
		return;
	}
	if ((self->provider.filter_type == IRLAN_BROADCAST) &&
	    (self->provider.filter_mode == NONE))
	{
		pr_debug("Broadcast filter off\n");
		skb->data[0] = 0x00; /* Success */
		skb->data[1] = 0x00; /* No response parameters */
		return;
	}

	if ((self->provider.filter_type == IRLAN_MULTICAST) &&
	    (self->provider.filter_mode == FILTER))
	{
		pr_debug("Multicast filter on\n");
		skb->data[0] = 0x00; /* Success */
		skb->data[1] = 0x00; /* No response parameters */
		return;
	}
	if ((self->provider.filter_type == IRLAN_MULTICAST) &&
	    (self->provider.filter_mode == NONE))
	{
		pr_debug("Multicast filter off\n");
		skb->data[0] = 0x00; /* Success */
		skb->data[1] = 0x00; /* No response parameters */
		return;
	}

	if ((self->provider.filter_type == IRLAN_MULTICAST) &&
	    (self->provider.filter_operation == GET))
	{
		pr_debug("Multicast filter get\n");
		skb->data[0] = 0x00; /* Success? */
		skb->data[1] = 0x02; /* Two response parameters follow */
		irlan_insert_string_param(skb, "FILTER_MODE", "NONE");
		irlan_insert_short_param(skb, "MAX_ENTRY", 16);
		return;
	}

	/* Fall-through: unrecognized combination of type/mode/operation. */
	skb->data[0] = 0x00; /* Command not supported */
	skb->data[1] = 0x00;
	pr_debug("Not implemented!\n");
}

/*
 * Function check_request_param (self, param, value)
 *
 *    Check parameters in request from peer device
 *
 *    Parses one "PARAM=value" pair from the peer's command frame and
 *    records it in self->provider (or self->use_udata for "MODE").
 *    Unknown parameters and values are silently ignored.
 */
void irlan_check_command_param(struct irlan_cb *self, char *param, char *value)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);

	pr_debug("%s, %s\n", param, value);

	/*
	 * This is experimental!! DB.
	 */
	if (strcmp(param, "MODE") == 0) {
		self->use_udata = TRUE;
		return;
	}

	/*
	 * FILTER_TYPE
	 */
	if (strcmp(param, "FILTER_TYPE") == 0) {
		if (strcmp(value, "DIRECTED") == 0) {
			self->provider.filter_type = IRLAN_DIRECTED;
			return;
		}
		if (strcmp(value, "MULTICAST") == 0) {
			self->provider.filter_type = IRLAN_MULTICAST;
			return;
		}
		if (strcmp(value, "BROADCAST") == 0) {
			self->provider.filter_type = IRLAN_BROADCAST;
			return;
		}
	}
	/*
	 * FILTER_MODE
	 */
	if (strcmp(param, "FILTER_MODE") == 0) {
		if (strcmp(value, "ALL") == 0) {
			self->provider.filter_mode = ALL;
			return;
		}
		if (strcmp(value, "FILTER") == 0) {
			self->provider.filter_mode = FILTER;
			return;
		}
		if (strcmp(value, "NONE") == 0) {
			/* NOTE(review): "NONE" is mapped to FILTER rather
			 * than NONE; this matches the historical upstream
			 * source, but confirm it is intentional. */
			self->provider.filter_mode = FILTER;
			return;
		}
	}
	/*
	 * FILTER_OPERATION
	 */
	if (strcmp(param, "FILTER_OPERATION") == 0) {
		if (strcmp(value, "DYNAMIC") == 0) {
			self->provider.filter_operation = DYNAMIC;
			return;
		}
		if (strcmp(value, "GET") == 0) {
			self->provider.filter_operation = GET;
			return;
		}
	}
}

/*
 * Function irlan_print_filter (filter_type, buf)
 *
 *    Print status of filter.
Used by /proc file system
 *
 */
#ifdef CONFIG_PROC_FS
#define MASK2STR(m,s) { .mask = m, .str = s }

/* Emit the name of every filter bit set in filter_type, space-separated,
 * followed by a newline, into the given seq_file. */
void irlan_print_filter(struct seq_file *seq, int filter_type)
{
	/* Bit-to-name table; the { 0, NULL } entry terminates the scan. */
	static struct {
		int mask;
		const char *str;
	} filter_mask2str[] = {
		MASK2STR(IRLAN_DIRECTED,	"DIRECTED"),
		MASK2STR(IRLAN_FUNCTIONAL,	"FUNCTIONAL"),
		MASK2STR(IRLAN_GROUP,		"GROUP"),
		MASK2STR(IRLAN_MAC_FRAME,	"MAC_FRAME"),
		MASK2STR(IRLAN_MULTICAST,	"MULTICAST"),
		MASK2STR(IRLAN_BROADCAST,	"BROADCAST"),
		MASK2STR(IRLAN_IPX_SOCKET,	"IPX_SOCKET"),
		MASK2STR(0,			NULL)
	}, *p;

	for (p = filter_mask2str; p->str; p++) {
		if (filter_type & p->mask)
			seq_printf(seq, "%s ", p->str);
	}
	seq_putc(seq, '\n');
}
#undef MASK2STR
#endif
gpl-2.0
varund7726/android_kernel_oneplus_msm8974
drivers/staging/vt6656/usbpipe.c
2194
20854
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * * File: usbpipe.c * * Purpose: Handle USB control endpoint * * Author: Warren Hsu * * Date: Mar. 29, 2005 * * Functions: * CONTROLnsRequestOut - Write variable length bytes to MEM/BB/MAC/EEPROM * CONTROLnsRequestIn - Read variable length bytes from MEM/BB/MAC/EEPROM * ControlvWriteByte - Write one byte to MEM/BB/MAC/EEPROM * ControlvReadByte - Read one byte from MEM/BB/MAC/EEPROM * ControlvMaskByte - Read one byte from MEM/BB/MAC/EEPROM and clear/set some bits in the same address * * Revision History: * 04-05-2004 Jerry Chen: Initial release * 11-24-2004 Warren Hsu: Add ControlvWriteByte,ControlvReadByte,ControlvMaskByte * */ #include "int.h" #include "rxtx.h" #include "dpc.h" #include "control.h" #include "desc.h" #include "device.h" /*--------------------- Static Definitions -------------------------*/ //endpoint def //endpoint 0: control //endpoint 1: interrupt //endpoint 2: read bulk //endpoint 3: write bulk //RequestType: //#define REQUEST_OUT (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE) // 0x40 //#define REQUEST_IN (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE ) //0xc0 //static int msglevel =MSG_LEVEL_DEBUG; static int msglevel =MSG_LEVEL_INFO; #define USB_CTL_WAIT 500 
//ms #ifndef URB_ASYNC_UNLINK #define URB_ASYNC_UNLINK 0 #endif /*--------------------- Static Classes ----------------------------*/ /*--------------------- Static Variables --------------------------*/ /*--------------------- Static Functions --------------------------*/ static void s_nsInterruptUsbIoCompleteRead( struct urb *urb ); static void s_nsBulkInUsbIoCompleteRead( struct urb *urb ); static void s_nsBulkOutIoCompleteWrite( struct urb *urb ); static void s_nsControlInUsbIoCompleteRead( struct urb *urb ); static void s_nsControlInUsbIoCompleteWrite( struct urb *urb ); /*--------------------- Export Variables --------------------------*/ /*--------------------- Export Functions --------------------------*/ int PIPEnsControlOutAsyn( PSDevice pDevice, BYTE byRequest, WORD wValue, WORD wIndex, WORD wLength, PBYTE pbyBuffer ) { int ntStatus; if (pDevice->Flags & fMP_DISCONNECTED) return STATUS_FAILURE; if (pDevice->Flags & fMP_CONTROL_WRITES) return STATUS_FAILURE; if (in_interrupt()) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"in_interrupt return ..byRequest %x\n", byRequest); return STATUS_FAILURE; } ntStatus = usb_control_msg( pDevice->usb, usb_sndctrlpipe(pDevice->usb , 0), byRequest, 0x40, // RequestType wValue, wIndex, (void *) pbyBuffer, wLength, HZ ); if (ntStatus >= 0) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"usb_sndctrlpipe ntStatus= %d\n", ntStatus); ntStatus = 0; } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"usb_sndctrlpipe fail, ntStatus= %d\n", ntStatus); } return ntStatus; } int PIPEnsControlOut( PSDevice pDevice, BYTE byRequest, WORD wValue, WORD wIndex, WORD wLength, PBYTE pbyBuffer ) { int ntStatus = 0; int ii; if (pDevice->Flags & fMP_DISCONNECTED) return STATUS_FAILURE; if (pDevice->Flags & fMP_CONTROL_WRITES) return STATUS_FAILURE; if (pDevice->Flags & fMP_CONTROL_READS) return STATUS_FAILURE; MP_SET_FLAG(pDevice, fMP_CONTROL_WRITES); pDevice->sUsbCtlRequest.bRequestType = 0x40; pDevice->sUsbCtlRequest.bRequest = byRequest; pDevice->sUsbCtlRequest.wValue 
= cpu_to_le16p(&wValue); pDevice->sUsbCtlRequest.wIndex = cpu_to_le16p(&wIndex); pDevice->sUsbCtlRequest.wLength = cpu_to_le16p(&wLength); pDevice->pControlURB->transfer_flags |= URB_ASYNC_UNLINK; pDevice->pControlURB->actual_length = 0; // Notice, pbyBuffer limited point to variable buffer, can't be constant. usb_fill_control_urb(pDevice->pControlURB, pDevice->usb, usb_sndctrlpipe(pDevice->usb , 0), (char *) &pDevice->sUsbCtlRequest, pbyBuffer, wLength, s_nsControlInUsbIoCompleteWrite, pDevice); ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC); if (ntStatus != 0) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "control send request submission failed: %d\n", ntStatus); MP_CLEAR_FLAG(pDevice, fMP_CONTROL_WRITES); return STATUS_FAILURE; } spin_unlock_irq(&pDevice->lock); for (ii = 0; ii <= USB_CTL_WAIT; ii ++) { if (pDevice->Flags & fMP_CONTROL_WRITES) mdelay(1); else break; if (ii >= USB_CTL_WAIT) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "control send request submission timeout\n"); spin_lock_irq(&pDevice->lock); MP_CLEAR_FLAG(pDevice, fMP_CONTROL_WRITES); return STATUS_FAILURE; } } spin_lock_irq(&pDevice->lock); return STATUS_SUCCESS; } int PIPEnsControlIn( PSDevice pDevice, BYTE byRequest, WORD wValue, WORD wIndex, WORD wLength, PBYTE pbyBuffer ) { int ntStatus = 0; int ii; if (pDevice->Flags & fMP_DISCONNECTED) return STATUS_FAILURE; if (pDevice->Flags & fMP_CONTROL_READS) return STATUS_FAILURE; if (pDevice->Flags & fMP_CONTROL_WRITES) return STATUS_FAILURE; MP_SET_FLAG(pDevice, fMP_CONTROL_READS); pDevice->sUsbCtlRequest.bRequestType = 0xC0; pDevice->sUsbCtlRequest.bRequest = byRequest; pDevice->sUsbCtlRequest.wValue = cpu_to_le16p(&wValue); pDevice->sUsbCtlRequest.wIndex = cpu_to_le16p(&wIndex); pDevice->sUsbCtlRequest.wLength = cpu_to_le16p(&wLength); pDevice->pControlURB->transfer_flags |= URB_ASYNC_UNLINK; pDevice->pControlURB->actual_length = 0; usb_fill_control_urb(pDevice->pControlURB, pDevice->usb, usb_rcvctrlpipe(pDevice->usb , 0), (char *) 
&pDevice->sUsbCtlRequest,
			 pbyBuffer, wLength,
			 s_nsControlInUsbIoCompleteRead, pDevice);

	ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC);
	if (ntStatus != 0) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "control request submission failed: %d\n", ntStatus);
		MP_CLEAR_FLAG(pDevice, fMP_CONTROL_READS);
		return STATUS_FAILURE;
	}

	/* Drop the lock and poll (up to USB_CTL_WAIT ms) for the completion
	 * handler to clear fMP_CONTROL_READS. */
	spin_unlock_irq(&pDevice->lock);
	for (ii = 0; ii <= USB_CTL_WAIT; ii ++) {
		if (pDevice->Flags & fMP_CONTROL_READS)
			mdelay(1);
		else
			break;
		if (ii >= USB_CTL_WAIT) {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "control rcv request submission timeout\n");
			spin_lock_irq(&pDevice->lock);
			MP_CLEAR_FLAG(pDevice, fMP_CONTROL_READS);
			return STATUS_FAILURE;
		}
	}
	spin_lock_irq(&pDevice->lock);

	return ntStatus;
}

/*
 * Completion handler for the async control-out URB.  Logs the URB status
 * and clears fMP_CONTROL_WRITES so PIPEnsControlOut's wait loop can exit.
 */
static void s_nsControlInUsbIoCompleteWrite(
	struct urb *urb
	)
{
	PSDevice pDevice;

	pDevice = urb->context;
	switch (urb->status) {
	case 0:
		break;
	case -EINPROGRESS:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ctrl write urb status EINPROGRESS%d\n", urb->status);
		break;
	case -ENOENT:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ctrl write urb status ENOENT %d\n", urb->status);
		break;
	default:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ctrl write urb status %d\n", urb->status);
	}

	MP_CLEAR_FLAG(pDevice, fMP_CONTROL_WRITES);
}

/*
 * Description:
 *      Complete function of usb Control callback
 *
 * Parameters:
 *  In:
 *      pDevice     - Pointer to the adapter
 *
 *  Out:
 *      none
 *
 * Return Value: STATUS_INSUFFICIENT_RESOURCES or result of IoCallDriver
 *
 */
/* Completion handler for the async control-in URB: logs status and clears
 * fMP_CONTROL_READS so PIPEnsControlIn's wait loop can exit. */
static void s_nsControlInUsbIoCompleteRead(
	struct urb *urb
	)
{
	PSDevice pDevice;

	pDevice = urb->context;
	switch (urb->status) {
	case 0:
		break;
	case -EINPROGRESS:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ctrl read urb status EINPROGRESS%d\n", urb->status);
		break;
	case -ENOENT:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ctrl read urb status = ENOENT %d\n", urb->status);
		break;
	default:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ctrl read urb status %d\n", urb->status);
	}

	MP_CLEAR_FLAG(pDevice, fMP_CONTROL_READS);
}

/*
 * Description:
 *      Allocates an usb interrupt in irp and calls
USBD.
 *
 * Parameters:
 *  In:
 *      pDevice     - Pointer to the adapter
 *  Out:
 *      none
 *
 * Return Value: STATUS_INSUFFICIENT_RESOURCES or result of IoCallDriver
 *
 */
/* Post the single shared interrupt-in URB.  Returns STATUS_FAILURE if a
 * previous read is still outstanding (intBuf.bInUse acts as the in-flight
 * flag), otherwise the usb_submit_urb() result. */
int PIPEnsInterruptRead(PSDevice pDevice)
{
	int ntStatus = STATUS_FAILURE;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsStartInterruptUsbRead()\n");

	/* Only one interrupt-in buffer exists; refuse re-entry while it is
	 * owned by an in-flight URB or by the data handler. */
	if(pDevice->intBuf.bInUse == TRUE){
		return (STATUS_FAILURE);
	}
	pDevice->intBuf.bInUse = TRUE;
//	pDevice->bEventAvailable = FALSE;
	pDevice->ulIntInPosted++;

	//
	// Now that we have created the urb, we will send a
	// request to the USB device object.
	//
	pDevice->pInterruptURB->interval = pDevice->int_interval;

	/* NOTE(review): endpoint 1 is read with a *bulk* pipe even though it
	 * carries interrupt data -- presumably deliberate for this device;
	 * confirm against the hardware documentation. */
	usb_fill_bulk_urb(pDevice->pInterruptURB,
			  pDevice->usb,
			  usb_rcvbulkpipe(pDevice->usb, 1),
			  (void *) pDevice->intBuf.pDataBuf,
			  MAX_INTERRUPT_SIZE,
			  s_nsInterruptUsbIoCompleteRead,
			  pDevice);

	ntStatus = usb_submit_urb(pDevice->pInterruptURB, GFP_ATOMIC);
	if (ntStatus != 0) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Submit int URB failed %d\n", ntStatus);
	}

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"<----s_nsStartInterruptUsbRead Return(%x)\n",ntStatus);
	return ntStatus;
}

/*
 * Description:
 *      Complete function of usb interrupt in irp.
 *
 * Parameters:
 *  In:
 *      pDevice     - Pointer to the adapter
 *  Out:
 *      none
 *
 * Return Value: STATUS_INSUFFICIENT_RESOURCES or result of IoCallDriver
 *
 */
/* Interrupt-in URB completion: on success hand the data to
 * INTnsProcessData() (which releases intBuf), on failure release intBuf
 * here and stop the polling thread.  Resubmits the URB unless the polling
 * thread is being killed. */
static void s_nsInterruptUsbIoCompleteRead(struct urb *urb)
{
	PSDevice pDevice;
	int ntStatus;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsInterruptUsbIoCompleteRead\n");
	//
	// The context given to IoSetCompletionRoutine is the receive buffer object
	//
	pDevice = (PSDevice)urb->context;

	//
	// We have a number of cases:
	//      1) The USB read timed out and we received no data.
	//      2) The USB read timed out and we received some data.
	//      3) The USB read was successful and fully filled our irp buffer.
	//      4) The irp was cancelled.
	//      5) Some other failure from the USB device object.
	//
	ntStatus = urb->status;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_nsInterruptUsbIoCompleteRead Status %d\n", ntStatus);

	// if we were not successful, we need to free the int buffer for future use right here
	// otherwise interrupt data handler will free int buffer after it handle it.
	if (( ntStatus != STATUS_SUCCESS )) {
		pDevice->ulBulkInError++;
		pDevice->intBuf.bInUse = FALSE;

//		if (ntStatus == USBD_STATUS_CRC) {
//			pDevice->ulIntInContCRCError++;
//		}

//		if (ntStatus == STATUS_NOT_CONNECTED )
//		{
			/* NOTE(review): any error kills the polling thread, not
			 * just disconnect -- the commented-out guard suggests a
			 * narrower condition was once intended. */
			pDevice->fKillEventPollingThread = TRUE;
//		}
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"IntUSBIoCompleteControl STATUS = %d\n", ntStatus );
	} else {
		pDevice->ulIntInBytesRead += (unsigned long) urb->actual_length;
		pDevice->ulIntInContCRCError = 0;
		pDevice->bEventAvailable = TRUE;
		INTnsProcessData(pDevice);
	}

	STAvUpdateUSBCounter(&pDevice->scStatistic.USB_InterruptStat, ntStatus);

	/* Keep the interrupt pipe polling unless we are shutting down. */
	if (pDevice->fKillEventPollingThread != TRUE) {
		usb_fill_bulk_urb(pDevice->pInterruptURB,
				  pDevice->usb,
				  usb_rcvbulkpipe(pDevice->usb, 1),
				  (void *) pDevice->intBuf.pDataBuf,
				  MAX_INTERRUPT_SIZE,
				  s_nsInterruptUsbIoCompleteRead,
				  pDevice);

		ntStatus = usb_submit_urb(pDevice->pInterruptURB, GFP_ATOMIC);
		if (ntStatus != 0) {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Submit int URB failed %d\n", ntStatus);
		}
	}
	//
	// We return STATUS_MORE_PROCESSING_REQUIRED so that the completion
	// routine (IofCompleteRequest) will stop working on the irp.
	//
	return ;
}

/*
 * Description:
 *      Allocates an usb BulkIn irp and calls USBD.
 *
 * Parameters:
 *  In:
 *      pDevice     - Pointer to the adapter
 *  Out:
 *      none
 *
 * Return Value: STATUS_INSUFFICIENT_RESOURCES or result of IoCallDriver
 *
 */
/* Submit a bulk-in URB for one receive control block (RCB).  On success the
 * RCB reference count is set to 1 and the RCB is marked in-use. */
int PIPEnsBulkInUsbRead(PSDevice pDevice, PRCB pRCB)
{
	int ntStatus = 0;
	struct urb *pUrb;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsStartBulkInUsbRead\n");

	if (pDevice->Flags & fMP_DISCONNECTED)
		return STATUS_FAILURE;

	pDevice->ulBulkInPosted++;

	pUrb = pRCB->pUrb;
	//
	// Now that we have created the urb, we will send a
	// request to the USB device object.
	//
	if (pRCB->skb == NULL) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pRCB->skb is null \n");
		return ntStatus;
	}

	usb_fill_bulk_urb(pUrb,
			  pDevice->usb,
			  usb_rcvbulkpipe(pDevice->usb, 2),
			  (void *) (pRCB->skb->data),
			  MAX_TOTAL_SIZE_WITH_ALL_HEADERS,
			  s_nsBulkInUsbIoCompleteRead,
			  pRCB);

	ntStatus = usb_submit_urb(pUrb, GFP_ATOMIC);
	if (ntStatus != 0) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Submit Rx URB failed %d\n", ntStatus);
		return STATUS_FAILURE ;
	}
	pRCB->Ref = 1;
	pRCB->bBoolInUse= TRUE;

	return ntStatus;
}

/*
 * Description:
 *      Complete function of usb BulkIn irp.
 *
 * Parameters:
 *  In:
 *      pDevice     - Pointer to the adapter
 *  Out:
 *      none
 *
 * Return Value: STATUS_INSUFFICIENT_RESOURCES or result of IoCallDriver
 *
 */
/* Bulk-in URB completion: account statistics, hand good data to
 * RXbBulkInProcessData() under the device lock, and free the RCB when its
 * reference count drops to zero. */
static void s_nsBulkInUsbIoCompleteRead(struct urb *urb)
{
	PRCB pRCB = (PRCB)urb->context;
	PSDevice pDevice = (PSDevice)pRCB->pDevice;
	unsigned long bytesRead;
	BOOL bIndicateReceive = FALSE;
	BOOL bReAllocSkb = FALSE;
	int status;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsBulkInUsbIoCompleteRead\n");
	status = urb->status;
	bytesRead = urb->actual_length;

	if (status) {
		pDevice->ulBulkInError++;
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BULK In failed %d\n", status);

		pDevice->scStatistic.RxFcsErrCnt ++;
//todo...xxxxxx
//		if (status == USBD_STATUS_CRC) {
//			pDevice->ulBulkInContCRCError++;
//		}
//		if (status == STATUS_DEVICE_NOT_CONNECTED )
//		{
//			MP_SET_FLAG(pDevice, fMP_DISCONNECTED);
//		}
	} else {
		bIndicateReceive = TRUE;
		pDevice->ulBulkInContCRCError = 0;
		pDevice->ulBulkInBytesRead += bytesRead;

		pDevice->scStatistic.RxOkCnt ++;
	}

	STAvUpdateUSBCounter(&pDevice->scStatistic.USB_BulkInStat, status);

	if (bIndicateReceive) {
		spin_lock(&pDevice->lock);
		/* RXbBulkInProcessData() consuming the skb means a fresh one
		 * must be allocated when the RCB is recycled. */
		if (RXbBulkInProcessData(pDevice, pRCB, bytesRead) == TRUE)
			bReAllocSkb = TRUE;
		spin_unlock(&pDevice->lock);
	}
	pRCB->Ref--;
	if (pRCB->Ref == 0) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"RxvFreeNormal %d \n",pDevice->NumRecvFreeList);
		spin_lock(&pDevice->lock);
		RXvFreeRCB(pRCB, bReAllocSkb);
		spin_unlock(&pDevice->lock);
	}

	return;
}

/*
 *
 Description:
 *      Allocates an usb BulkOut irp and calls USBD.
 *
 * Parameters:
 *  In:
 *      pDevice     - Pointer to the adapter
 *  Out:
 *      none
 *
 * Return Value: STATUS_INSUFFICIENT_RESOURCES or result of IoCallDriver
 *
 */
/* Submit one pre-built send context on the bulk-out pipe (endpoint 3).
 * Returns STATUS_PENDING on a successful submit, STATUS_FAILURE if the URB
 * submit fails, STATUS_RESOURCES if the adapter is not ready for writes. */
int PIPEnsSendBulkOut(PSDevice pDevice, PUSB_SEND_CONTEXT pContext)
{
	int status;
	struct urb *pUrb;

	pDevice->bPWBitOn = FALSE;

/*
	if (pDevice->pPendingBulkOutContext != NULL) {
		pDevice->NumContextsQueued++;
		EnqueueContext(pDevice->FirstTxContextQueue, pDevice->LastTxContextQueue, pContext);
		status = STATUS_PENDING;
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Send pending!\n");
		return status;
	}
*/

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_nsSendBulkOut\n");

	if (MP_IS_READY(pDevice) && (pDevice->Flags & fMP_POST_WRITES)) {

		pUrb = pContext->pUrb;
		pDevice->ulBulkOutPosted++;
//		pDevice->pPendingBulkOutContext = pContext;
		usb_fill_bulk_urb(
			pUrb,
			pDevice->usb,
			usb_sndbulkpipe(pDevice->usb, 3),
			(void *) &(pContext->Data[0]),
			pContext->uBufLen,
			s_nsBulkOutIoCompleteWrite,
			pContext);

		status = usb_submit_urb(pUrb, GFP_ATOMIC);
		if (status != 0) {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Submit Tx URB failed %d\n", status);
			return STATUS_FAILURE;
		}
		return STATUS_PENDING;
	} else {
		/* Not ready for writes: hand the context back unused. */
		pContext->bBoolInUse = FALSE;
		return STATUS_RESOURCES;
	}
}

/*
 * Description: s_nsBulkOutIoCompleteWrite
 *     1a) Indicate to the protocol the status of the write.
 *     1b) Return ownership of the packet to the protocol.
 *
 *     2) If any more packets are queue for sending, send another packet
 *        to USBD.
 *        If the attempt to send the packet to the driver fails,
 *        return ownership of the packet to the protocol and
 *        try another packet (until one succeeds).
 *
 * Parameters:
 *  In:
 *      pdoUsbDevObj  - pointer to the USB device object which
 *                      completed the irp
 *      pIrp          - the irp which was completed by the
 *                      device object
 *      pContext      - the context given to IoSetCompletionRoutine
 *                      before calling IoCallDriver on the irp
 *                      The pContext is a pointer to the USB device object.
 *  Out:
 *      none
 *
 * Return Value: STATUS_MORE_PROCESSING_REQUIRED - allows the completion routine
 *               (IofCompleteRequest) to stop working on the irp.
 *
 */
/* Bulk-out URB completion: update statistics, free the data skb for data
 * packets, wake the netif queue if the link is up, and release the send
 * context. */
static void s_nsBulkOutIoCompleteWrite(struct urb *urb)
{
	PSDevice pDevice;
	int status;
	CONTEXT_TYPE ContextType;
	unsigned long ulBufLen;
	PUSB_SEND_CONTEXT pContext;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsBulkOutIoCompleteWrite\n");
	//
	// The context given to IoSetCompletionRoutine is an USB_CONTEXT struct
	//
	pContext = (PUSB_SEND_CONTEXT) urb->context;
	ASSERT( NULL != pContext );

	pDevice = pContext->pDevice;
	ContextType = pContext->Type;
	ulBufLen = pContext->uBufLen;

	/* NOTE(review): returning here leaves pContext->bBoolInUse set --
	 * the context is never reclaimed after a device unplug. */
	if (!netif_device_present(pDevice->dev))
		return;

	//
	// Perform various IRP, URB, and buffer 'sanity checks'
	//
	status = urb->status;
	//we should have failed, succeeded, or cancelled, but NOT be pending
	STAvUpdateUSBCounter(&pDevice->scStatistic.USB_BulkOutStat, status);

	if(status == STATUS_SUCCESS) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Write %d bytes\n",(int)ulBufLen);
		pDevice->ulBulkOutBytesWrite += ulBufLen;
		pDevice->ulBulkOutContCRCError = 0;
		pDevice->nTxDataTimeCout = 0;

	} else {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BULK Out failed %d\n", status);
		pDevice->ulBulkOutError++;
	}

//	pDevice->ulCheckForHangCount = 0;
//	pDevice->pPendingBulkOutContext = NULL;

	if ( CONTEXT_DATA_PACKET == ContextType ) {
		// Indicate to the protocol the status of the sent packet and return
		// ownership of the packet.
		if (pContext->pPacket != NULL) {
			dev_kfree_skb_irq(pContext->pPacket);
			pContext->pPacket = NULL;
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"tx %d bytes\n",(int)ulBufLen);
		}

		pDevice->dev->trans_start = jiffies;

		if (status == STATUS_SUCCESS) {
			pDevice->packetsSent++;
		} else {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Send USB error! [%08xh]\n", status);
			pDevice->packetsSentDropped++;
		}
	}
	if (pDevice->bLinkPass == TRUE) {
		if (netif_queue_stopped(pDevice->dev))
			netif_wake_queue(pDevice->dev);
	}
	pContext->bBoolInUse = FALSE;

	return;
}
gpl-2.0
nutterpc/demonkernel-I9505-TW
drivers/gpio/gpio-mpc8xxx.c
2706
10168
/* * GPIOs on MPC512x/8349/8572/8610 and compatible * * Copyright (C) 2008 Peter Korsgaard <jacmet@sunsite.dk> * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_gpio.h> #include <linux/gpio.h> #include <linux/slab.h> #include <linux/irq.h> #define MPC8XXX_GPIO_PINS 32 #define GPIO_DIR 0x00 #define GPIO_ODR 0x04 #define GPIO_DAT 0x08 #define GPIO_IER 0x0c #define GPIO_IMR 0x10 #define GPIO_ICR 0x14 #define GPIO_ICR2 0x18 struct mpc8xxx_gpio_chip { struct of_mm_gpio_chip mm_gc; spinlock_t lock; /* * shadowed data register to be able to clear/set output pins in * open drain mode safely */ u32 data; struct irq_domain *irq; void *of_dev_id_data; }; static inline u32 mpc8xxx_gpio2mask(unsigned int gpio) { return 1u << (MPC8XXX_GPIO_PINS - 1 - gpio); } static inline struct mpc8xxx_gpio_chip * to_mpc8xxx_gpio_chip(struct of_mm_gpio_chip *mm) { return container_of(mm, struct mpc8xxx_gpio_chip, mm_gc); } static void mpc8xxx_gpio_save_regs(struct of_mm_gpio_chip *mm) { struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); mpc8xxx_gc->data = in_be32(mm->regs + GPIO_DAT); } /* Workaround GPIO 1 errata on MPC8572/MPC8536. The status of GPIOs * defined as output cannot be determined by reading GPDAT register, * so we use shadow data register instead. The status of input pins * is determined by reading GPDAT register. 
*/ static int mpc8572_gpio_get(struct gpio_chip *gc, unsigned int gpio) { u32 val; struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); val = in_be32(mm->regs + GPIO_DAT) & ~in_be32(mm->regs + GPIO_DIR); return (val | mpc8xxx_gc->data) & mpc8xxx_gpio2mask(gpio); } static int mpc8xxx_gpio_get(struct gpio_chip *gc, unsigned int gpio) { struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); return in_be32(mm->regs + GPIO_DAT) & mpc8xxx_gpio2mask(gpio); } static void mpc8xxx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); unsigned long flags; spin_lock_irqsave(&mpc8xxx_gc->lock, flags); if (val) mpc8xxx_gc->data |= mpc8xxx_gpio2mask(gpio); else mpc8xxx_gc->data &= ~mpc8xxx_gpio2mask(gpio); out_be32(mm->regs + GPIO_DAT, mpc8xxx_gc->data); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); } static int mpc8xxx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) { struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); unsigned long flags; spin_lock_irqsave(&mpc8xxx_gc->lock, flags); clrbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio)); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); return 0; } static int mpc8xxx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); unsigned long flags; mpc8xxx_gpio_set(gc, gpio, val); spin_lock_irqsave(&mpc8xxx_gc->lock, flags); setbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio)); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); return 0; } static int mpc5121_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { /* GPIO 28..31 are input only on MPC5121 */ if (gpio >= 28) return -EINVAL; return mpc8xxx_gpio_dir_out(gc, gpio, val); } static 
int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
{
	struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
	struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);

	/* Only valid when this bank registered an irq domain. */
	if (mpc8xxx_gc->irq && offset < MPC8XXX_GPIO_PINS)
		return irq_create_mapping(mpc8xxx_gc->irq, offset);
	else
		return -ENXIO;
}

/* Chained handler for the bank's parent interrupt: dispatch the highest
 * pending, unmasked GPIO bit to its mapped virq. */
static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
	unsigned int mask;

	mask = in_be32(mm->regs + GPIO_IER) & in_be32(mm->regs + GPIO_IMR);
	if (mask)
		/* 32 - ffs(mask) converts the big-endian register bit back
		 * to a gpio/hwirq number (see mpc8xxx_gpio2mask). */
		generic_handle_irq(irq_linear_revmap(mpc8xxx_gc->irq,
						     32 - ffs(mask)));
	chip->irq_eoi(&desc->irq_data);
}

static void mpc8xxx_irq_unmask(struct irq_data *d)
{
	struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
	struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
	unsigned long flags;

	spin_lock_irqsave(&mpc8xxx_gc->lock, flags);

	setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));

	spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
}

static void mpc8xxx_irq_mask(struct irq_data *d)
{
	struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
	struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
	unsigned long flags;

	spin_lock_irqsave(&mpc8xxx_gc->lock, flags);

	clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));

	spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
}

/* Acknowledge by writing the event bit to GPIO_IER (write-1-to-clear). */
static void mpc8xxx_irq_ack(struct irq_data *d)
{
	struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
	struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;

	out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
}

/* One ICR bit per pin: set = falling edge only, clear = both edges. */
static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
	struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
	struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
	unsigned long flags;

	switch (flow_type) {
	case IRQ_TYPE_EDGE_FALLING:
		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
		setbits32(mm->regs + GPIO_ICR,
			  mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
		break;

	case IRQ_TYPE_EDGE_BOTH:
		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
		clrbits32(mm->regs + GPIO_ICR,
			  mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/* MPC5121 uses two bits per pin, split across ICR (pins 0-15) and ICR2
 * (pins 16-31): 01 = rising/high, 10 = falling/low, 00 = both edges. */
static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
	struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
	struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
	unsigned long gpio = irqd_to_hwirq(d);
	void __iomem *reg;
	unsigned int shift;
	unsigned long flags;

	if (gpio < 16) {
		reg = mm->regs + GPIO_ICR;
		shift = (15 - gpio) * 2;
	} else {
		reg = mm->regs + GPIO_ICR2;
		shift = (15 - (gpio % 16)) * 2;
	}

	switch (flow_type) {
	case IRQ_TYPE_EDGE_FALLING:
	case IRQ_TYPE_LEVEL_LOW:
		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
		clrsetbits_be32(reg, 3 << shift, 2 << shift);
		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
		break;

	case IRQ_TYPE_EDGE_RISING:
	case IRQ_TYPE_LEVEL_HIGH:
		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
		clrsetbits_be32(reg, 3 << shift, 1 << shift);
		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
		break;

	case IRQ_TYPE_EDGE_BOTH:
		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
		clrbits32(reg, 3 << shift);
		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static struct irq_chip mpc8xxx_irq_chip = {
	.name		= "mpc8xxx-gpio",
	.irq_unmask	= mpc8xxx_irq_unmask,
	.irq_mask	= mpc8xxx_irq_mask,
	.irq_ack	= mpc8xxx_irq_ack,
	.irq_set_type	= mpc8xxx_irq_set_type,
};

static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int virq,
				irq_hw_number_t hw)
{
	struct mpc8xxx_gpio_chip *mpc8xxx_gc = h->host_data;

	/* Some compatibles (mpc5121) install their own set_type handler. */
	if (mpc8xxx_gc->of_dev_id_data)
		mpc8xxx_irq_chip.irq_set_type = mpc8xxx_gc->of_dev_id_data;

	irq_set_chip_data(virq, h->host_data);
	irq_set_chip_and_handler(virq, &mpc8xxx_irq_chip,
				 handle_level_irq);
	irq_set_irq_type(virq, IRQ_TYPE_NONE);

	return 0;
}

static struct irq_domain_ops mpc8xxx_gpio_irq_ops = {
	.map	= mpc8xxx_gpio_irq_map,
	.xlate	= irq_domain_xlate_twocell,
};

static struct of_device_id mpc8xxx_gpio_ids[] __initdata = {
	{ .compatible = "fsl,mpc8349-gpio", },
	{ .compatible = "fsl,mpc8572-gpio", },
	{ .compatible = "fsl,mpc8610-gpio", },
	{ .compatible = "fsl,mpc5121-gpio", .data = mpc512x_irq_set_type, },
	{ .compatible = "fsl,pq3-gpio", },
	{ .compatible = "fsl,qoriq-gpio", },
	{}
};

/* Register one GPIO bank from its device-tree node: set up the gpio_chip
 * callbacks (picking errata/SoC-specific variants by compatible), add the
 * memory-mapped chip, then optionally wire up the bank's cascaded irq
 * domain.  IRQ setup failures degrade to a gpio-only bank (skip_irq). */
static void __init mpc8xxx_add_controller(struct device_node *np)
{
	struct mpc8xxx_gpio_chip *mpc8xxx_gc;
	struct of_mm_gpio_chip *mm_gc;
	struct gpio_chip *gc;
	const struct of_device_id *id;
	unsigned hwirq;
	int ret;

	mpc8xxx_gc = kzalloc(sizeof(*mpc8xxx_gc), GFP_KERNEL);
	if (!mpc8xxx_gc) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock_init(&mpc8xxx_gc->lock);

	mm_gc = &mpc8xxx_gc->mm_gc;
	gc = &mm_gc->gc;

	mm_gc->save_regs = mpc8xxx_gpio_save_regs;
	gc->ngpio = MPC8XXX_GPIO_PINS;
	gc->direction_input = mpc8xxx_gpio_dir_in;
	gc->direction_output = of_device_is_compatible(np, "fsl,mpc5121-gpio") ?
		mpc5121_gpio_dir_out : mpc8xxx_gpio_dir_out;
	gc->get = of_device_is_compatible(np, "fsl,mpc8572-gpio") ?
		mpc8572_gpio_get : mpc8xxx_gpio_get;
	gc->set = mpc8xxx_gpio_set;
	gc->to_irq = mpc8xxx_gpio_to_irq;

	ret = of_mm_gpiochip_add(np, mm_gc);
	if (ret)
		goto err;

	hwirq = irq_of_parse_and_map(np, 0);
	if (hwirq == NO_IRQ)
		goto skip_irq;

	mpc8xxx_gc->irq =
		irq_domain_add_linear(np, MPC8XXX_GPIO_PINS,
				      &mpc8xxx_gpio_irq_ops, mpc8xxx_gc);
	if (!mpc8xxx_gc->irq)
		goto skip_irq;

	id = of_match_node(mpc8xxx_gpio_ids, np);
	if (id)
		mpc8xxx_gc->of_dev_id_data = id->data;

	/* ack and mask all irqs */
	out_be32(mm_gc->regs + GPIO_IER, 0xffffffff);
	out_be32(mm_gc->regs + GPIO_IMR, 0);

	irq_set_handler_data(hwirq, mpc8xxx_gc);
	irq_set_chained_handler(hwirq, mpc8xxx_gpio_irq_cascade);

skip_irq:
	return;

err:
	pr_err("%s: registration failed with status %d\n",
	       np->full_name, ret);
	kfree(mpc8xxx_gc);

	return;
}

static int __init mpc8xxx_add_gpiochips(void)
{
	struct device_node *np;

	for_each_matching_node(np, mpc8xxx_gpio_ids)
		mpc8xxx_add_controller(np);

	return 0;
}
arch_initcall(mpc8xxx_add_gpiochips);
gpl-2.0
mialwe/mnics2
arch/arm/plat-samsung/dev-keypad.c
2706
1294
/*
 * linux/arch/arm/plat-samsung/dev-keypad.c
 *
 * Copyright (C) 2010 Samsung Electronics Co.Ltd
 * Author: Joonyoung Shim <jy0922.shim@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/platform_device.h>
#include <mach/irqs.h>
#include <mach/map.h>
#include <plat/cpu.h>
#include <plat/devs.h>
#include <plat/keypad.h>

static struct resource samsung_keypad_resources[] = {
	[0] = {
		.start	= SAMSUNG_PA_KEYPAD,
		.end	= SAMSUNG_PA_KEYPAD + 0x20 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= IRQ_KEYPAD,
		.end	= IRQ_KEYPAD,
		.flags	= IORESOURCE_IRQ,
	},
};

struct platform_device samsung_device_keypad = {
	.name		= "samsung-keypad",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(samsung_keypad_resources),
	.resource	= samsung_keypad_resources,
};

/*
 * samsung_keypad_set_platdata - attach board keypad platform data
 * @pd: board-supplied platform data (copied, caller's copy may be __initdata)
 *
 * Copies @pd into the keypad platform device and fills in the default
 * GPIO configuration callback when the board did not provide one.
 */
void __init samsung_keypad_set_platdata(struct samsung_keypad_platdata *pd)
{
	struct samsung_keypad_platdata *npd;

	npd = s3c_set_platdata(pd, sizeof(struct samsung_keypad_platdata),
			&samsung_device_keypad);

	/* BUG FIX: s3c_set_platdata() returns NULL when the copy cannot be
	 * allocated; the original dereferenced npd unconditionally and
	 * would oops on that (already-logged) failure. */
	if (!npd)
		return;

	if (!npd->cfg_gpio)
		npd->cfg_gpio = samsung_keypad_cfg_gpio;
}
gpl-2.0
somcom3x/kernel_samsung_msm8660-common
drivers/sbus/char/openprom.c
3474
16248
/* * Linux/SPARC PROM Configuration Driver
 * Copyright (C) 1996 Thomas K. Dyas (tdyas@noc.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost  (ecd@skynet.be)
 *
 * This character device driver allows user programs to access the
 * PROM device tree. It is compatible with the SunOS /dev/openprom
 * driver and the NetBSD /dev/openprom driver. The SunOS eeprom
 * utility works without any modifications.
 *
 * The driver uses a minor number under the misc device major. The
 * file read/write mode determines the type of access to the PROM.
 * Interrupts are disabled whenever the driver calls into the PROM for
 * sanity's sake.
 */

/* This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/miscdevice.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/openpromio.h>
#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

MODULE_AUTHOR("Thomas K. Dyas (tdyas@noc.rutgers.edu) and Eddie C. Dost (ecd@skynet.be)");
MODULE_DESCRIPTION("OPENPROM Configuration Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
MODULE_ALIAS_MISCDEV(SUN_OPENPROM_MINOR);

/* Private data kept by the driver for each descriptor.
 */
typedef struct openprom_private_data
{
	struct device_node *current_node; /* Current node for SunOS ioctls. */
	struct device_node *lastnode; /* Last valid node used by BSD ioctls. */
} DATA;

/* ID of the PROM node containing all of the EEPROM options. */
static DEFINE_MUTEX(openprom_mutex);
static struct device_node *options_node;

/*
 * Copy an openpromio structure into kernel space from user space.
 * This routine does error checking to make sure that all memory
 * accesses are within bounds. A pointer to the allocated openpromio
 * structure will be placed in "*opp_p". Return value is the length
 * of the user supplied buffer.
 */
static int copyin(struct openpromio __user *info, struct openpromio **opp_p)
{
	unsigned int bufsize;

	if (!info || !opp_p)
		return -EFAULT;

	if (get_user(bufsize, &info->oprom_size))
		return -EFAULT;

	if (bufsize == 0)
		return -EINVAL;

	/* If the bufsize is too large, just limit it.
	 * Fix from Jason Rappleye.
	 */
	if (bufsize > OPROMMAXPARAM)
		bufsize = OPROMMAXPARAM;

	/* +1 so the array is always NUL-terminable; kzalloc zeroes it. */
	if (!(*opp_p = kzalloc(sizeof(int) + bufsize + 1, GFP_KERNEL)))
		return -ENOMEM;

	if (copy_from_user(&(*opp_p)->oprom_array,
			   &info->oprom_array, bufsize)) {
		kfree(*opp_p);
		return -EFAULT;
	}
	return bufsize;
}

/* Copy in up to two NUL-terminated strings (property name + value) from
 * the user's oprom_array, bounded by OPROMMAXPARAM.  Used by OPROMSETOPT.
 * Returns the number of bytes consumed, or a negative errno. */
static int getstrings(struct openpromio __user *info, struct openpromio **opp_p)
{
	int n, bufsize;
	char c;

	if (!info || !opp_p)
		return -EFAULT;

	if (!(*opp_p = kzalloc(sizeof(int) + OPROMMAXPARAM + 1, GFP_KERNEL)))
		return -ENOMEM;

	(*opp_p)->oprom_size = 0;

	n = bufsize = 0;
	while ((n < 2) && (bufsize < OPROMMAXPARAM)) {
		if (get_user(c, &info->oprom_array[bufsize])) {
			kfree(*opp_p);
			return -EFAULT;
		}
		if (c == '\0')
			n++;
		(*opp_p)->oprom_array[bufsize++] = c;
	}
	if (!n) {
		kfree(*opp_p);
		return -EINVAL;
	}
	return bufsize;
}

/*
 * Copy an openpromio structure in kernel space back to user space.
*/ static int copyout(void __user *info, struct openpromio *opp, int len) { if (copy_to_user(info, opp, len)) return -EFAULT; return 0; } static int opromgetprop(void __user *argp, struct device_node *dp, struct openpromio *op, int bufsize) { const void *pval; int len; if (!dp || !(pval = of_get_property(dp, op->oprom_array, &len)) || len <= 0 || len > bufsize) return copyout(argp, op, sizeof(int)); memcpy(op->oprom_array, pval, len); op->oprom_array[len] = '\0'; op->oprom_size = len; return copyout(argp, op, sizeof(int) + bufsize); } static int opromnxtprop(void __user *argp, struct device_node *dp, struct openpromio *op, int bufsize) { struct property *prop; int len; if (!dp) return copyout(argp, op, sizeof(int)); if (op->oprom_array[0] == '\0') { prop = dp->properties; if (!prop) return copyout(argp, op, sizeof(int)); len = strlen(prop->name); } else { prop = of_find_property(dp, op->oprom_array, NULL); if (!prop || !prop->next || (len = strlen(prop->next->name)) + 1 > bufsize) return copyout(argp, op, sizeof(int)); prop = prop->next; } memcpy(op->oprom_array, prop->name, len); op->oprom_array[len] = '\0'; op->oprom_size = ++len; return copyout(argp, op, sizeof(int) + bufsize); } static int opromsetopt(struct device_node *dp, struct openpromio *op, int bufsize) { char *buf = op->oprom_array + strlen(op->oprom_array) + 1; int len = op->oprom_array + bufsize - buf; return of_set_property(options_node, op->oprom_array, buf, len); } static int opromnext(void __user *argp, unsigned int cmd, struct device_node *dp, struct openpromio *op, int bufsize, DATA *data) { phandle ph; BUILD_BUG_ON(sizeof(phandle) != sizeof(int)); if (bufsize < sizeof(phandle)) return -EINVAL; ph = *((int *) op->oprom_array); if (ph) { dp = of_find_node_by_phandle(ph); if (!dp) return -EINVAL; switch (cmd) { case OPROMNEXT: dp = dp->sibling; break; case OPROMCHILD: dp = dp->child; break; case OPROMSETCUR: default: break; }; } else { /* Sibling of node zero is the root node. 
*/ if (cmd != OPROMNEXT) return -EINVAL; dp = of_find_node_by_path("/"); } ph = 0; if (dp) ph = dp->phandle; data->current_node = dp; *((int *) op->oprom_array) = ph; op->oprom_size = sizeof(phandle); return copyout(argp, op, bufsize + sizeof(int)); } static int oprompci2node(void __user *argp, struct device_node *dp, struct openpromio *op, int bufsize, DATA *data) { int err = -EINVAL; if (bufsize >= 2*sizeof(int)) { #ifdef CONFIG_PCI struct pci_dev *pdev; struct device_node *dp; pdev = pci_get_bus_and_slot (((int *) op->oprom_array)[0], ((int *) op->oprom_array)[1]); dp = pci_device_to_OF_node(pdev); data->current_node = dp; *((int *)op->oprom_array) = dp->phandle; op->oprom_size = sizeof(int); err = copyout(argp, op, bufsize + sizeof(int)); pci_dev_put(pdev); #endif } return err; } static int oprompath2node(void __user *argp, struct device_node *dp, struct openpromio *op, int bufsize, DATA *data) { phandle ph = 0; dp = of_find_node_by_path(op->oprom_array); if (dp) ph = dp->phandle; data->current_node = dp; *((int *)op->oprom_array) = ph; op->oprom_size = sizeof(int); return copyout(argp, op, bufsize + sizeof(int)); } static int opromgetbootargs(void __user *argp, struct openpromio *op, int bufsize) { char *buf = saved_command_line; int len = strlen(buf); if (len > bufsize) return -EINVAL; strcpy(op->oprom_array, buf); op->oprom_size = len; return copyout(argp, op, bufsize + sizeof(int)); } /* * SunOS and Solaris /dev/openprom ioctl calls. 
 */
/* Dispatch one SunOS-style ioctl: copy in the user's openpromio buffer
 * (two NUL-terminated strings for OPROMSETOPT), run the handler under the
 * driver mutex, then free the kernel copy. */
static long openprom_sunos_ioctl(struct file * file,
				 unsigned int cmd, unsigned long arg,
				 struct device_node *dp)
{
	DATA *data = file->private_data;
	struct openpromio *opp = NULL;
	int bufsize, error = 0;
	static int cnt;	/* rate-limits the "unimplemented" printks */
	void __user *argp = (void __user *)arg;

	if (cmd == OPROMSETOPT)
		bufsize = getstrings(argp, &opp);
	else
		bufsize = copyin(argp, &opp);

	if (bufsize < 0)
		return bufsize;

	mutex_lock(&openprom_mutex);

	switch (cmd) {
	case OPROMGETOPT:
	case OPROMGETPROP:
		error = opromgetprop(argp, dp, opp, bufsize);
		break;

	case OPROMNXTOPT:
	case OPROMNXTPROP:
		error = opromnxtprop(argp, dp, opp, bufsize);
		break;

	case OPROMSETOPT:
	case OPROMSETOPT2:
		error = opromsetopt(dp, opp, bufsize);
		break;

	case OPROMNEXT:
	case OPROMCHILD:
	case OPROMSETCUR:
		error = opromnext(argp, cmd, dp, opp, bufsize, data);
		break;

	case OPROMPCI2NODE:
		error = oprompci2node(argp, dp, opp, bufsize, data);
		break;

	case OPROMPATH2NODE:
		error = oprompath2node(argp, dp, opp, bufsize, data);
		break;

	case OPROMGETBOOTARGS:
		error = opromgetbootargs(argp, opp, bufsize);
		break;

	case OPROMU2P:
	case OPROMGETCONS:
	case OPROMGETFBNAME:
		if (cnt++ < 10)
			printk(KERN_INFO "openprom_sunos_ioctl: unimplemented ioctl\n");
		error = -EINVAL;
		break;
	default:
		if (cnt++ < 10)
			printk(KERN_INFO "openprom_sunos_ioctl: cmd 0x%X, arg 0x%lX\n", cmd, arg);
		error = -EINVAL;
		break;
	}

	kfree(opp);
	mutex_unlock(&openprom_mutex);

	return error;
}

/* Look up a node by phandle and remember it as the BSD ioctls' last
 * valid node.  May return NULL for an unknown phandle. */
static struct device_node *get_node(phandle n, DATA *data)
{
	struct device_node *dp = of_find_node_by_phandle(n);

	if (dp)
		data->lastnode = dp;

	return dp;
}

/* Copy in a whole string from userspace into kernelspace. */
static int copyin_string(char __user *user, size_t len, char **ptr)
{
	char *tmp;

	/* Reject lengths that would overflow the len + 1 allocation. */
	if ((ssize_t)len < 0 || (ssize_t)(len + 1) < 0)
		return -EINVAL;

	tmp = kmalloc(len + 1, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	if (copy_from_user(tmp, user, len)) {
		kfree(tmp);
		return -EFAULT;
	}

	tmp[len] = '\0';

	*ptr = tmp;

	return 0;
}

/*
 * NetBSD /dev/openprom ioctl calls.
 */
/* OPIOCGET: read one named property of the node identified by op_nodeid
 * into the caller's op_buf. */
static int opiocget(void __user *argp, DATA *data)
{
	struct opiocdesc op;
	struct device_node *dp;
	char *str;
	const void *pval;
	int err, len;

	if (copy_from_user(&op, argp, sizeof(op)))
		return -EFAULT;

	dp = get_node(op.op_nodeid, data);

	err = copyin_string(op.op_name, op.op_namelen, &str);
	if (err)
		return err;

	pval = of_get_property(dp, str, &len);
	err = 0;
	if (!pval || len > op.op_buflen) {
		err = -EINVAL;
	} else {
		op.op_buflen = len;
		if (copy_to_user(argp, &op, sizeof(op)) ||
		    copy_to_user(op.op_buf, pval, len))
			err = -EFAULT;
	}
	kfree(str);

	return err;
}

/* OPIOCNEXTPROP: return the name of the property following op_name (or
 * the first property when op_name is empty), truncated to op_buflen. */
static int opiocnextprop(void __user *argp, DATA *data)
{
	struct opiocdesc op;
	struct device_node *dp;
	struct property *prop;
	char *str;
	int err, len;

	if (copy_from_user(&op, argp, sizeof(op)))
		return -EFAULT;

	dp = get_node(op.op_nodeid, data);
	if (!dp)
		return -EINVAL;

	err = copyin_string(op.op_name, op.op_namelen, &str);
	if (err)
		return err;

	if (str[0] == '\0') {
		prop = dp->properties;
	} else {
		prop = of_find_property(dp, str, NULL);
		if (prop)
			prop = prop->next;
	}
	kfree(str);

	if (!prop)
		len = 0;
	else
		len = prop->length;

	if (len > op.op_buflen)
		len = op.op_buflen;

	if (copy_to_user(argp, &op, sizeof(op)))
		return -EFAULT;

	/* len == 0 when prop is NULL, so prop is never dereferenced here */
	if (len &&
	    copy_to_user(op.op_buf, prop->value, len))
		return -EFAULT;

	return 0;
}

/* OPIOCSET: write op_buf as the value of property op_name on the node
 * identified by op_nodeid. */
static int opiocset(void __user *argp, DATA *data)
{
	struct opiocdesc op;
	struct device_node *dp;
	char *str, *tmp;
	int err;

	if (copy_from_user(&op, argp, sizeof(op)))
		return -EFAULT;

	dp = get_node(op.op_nodeid, data);
	if (!dp)
		return -EINVAL;

	err = copyin_string(op.op_name, op.op_namelen, &str);
	if (err)
		return err;

	err = copyin_string(op.op_buf, op.op_buflen, &tmp);
	if (err) {
		kfree(str);
		return err;
	}

	err = of_set_property(dp, str, tmp, op.op_buflen);

	kfree(str);
	kfree(tmp);

	return err;
}

/* OPIOCGETNEXT/OPIOCGETCHILD: replace the user's phandle with that of the
 * node's next sibling or first child (0 when there is none).  A zero
 * input phandle with OPIOCGETNEXT yields the root node. */
static int opiocgetnext(unsigned int cmd, void __user *argp)
{
	struct device_node *dp;
	phandle nd;

	BUILD_BUG_ON(sizeof(phandle) != sizeof(int));

	if (copy_from_user(&nd, argp, sizeof(phandle)))
		return -EFAULT;

	if (nd == 0) {
		if (cmd != OPIOCGETNEXT)
			return -EINVAL;
		dp = of_find_node_by_path("/");
	} else {
		dp = of_find_node_by_phandle(nd);
		nd = 0;
		if (dp) {
			if (cmd == OPIOCGETNEXT)
				dp = dp->sibling;
			else
				dp = dp->child;
		}
	}
	if (dp)
		nd = dp->phandle;
	if (copy_to_user(argp, &nd, sizeof(phandle)))
		return -EFAULT;

	return 0;
}

/* Dispatch one NetBSD-style ioctl under the driver mutex. */
static int openprom_bsd_ioctl(struct file * file,
			      unsigned int cmd, unsigned long arg)
{
	DATA *data = file->private_data;
	void __user *argp = (void __user *)arg;
	int err;

	mutex_lock(&openprom_mutex);
	switch (cmd) {
	case OPIOCGET:
		err = opiocget(argp, data);
		break;

	case OPIOCNEXTPROP:
		err = opiocnextprop(argp, data);
		break;

	case OPIOCSET:
		err = opiocset(argp, data);
		break;

	case OPIOCGETOPTNODE:
		BUILD_BUG_ON(sizeof(phandle) != sizeof(int));

		err = 0;
		if (copy_to_user(argp, &options_node->phandle, sizeof(phandle)))
			err = -EFAULT;
		break;

	case OPIOCGETNEXT:
	case OPIOCGETCHILD:
		err = opiocgetnext(cmd, argp);
		break;

	default:
		err = -EINVAL;
		break;
	};
	mutex_unlock(&openprom_mutex);

	return err;
}

/*
 * Handoff control to the correct ioctl handler.
 */
/* Top-level ioctl dispatcher: routes SunOS vs NetBSD command sets,
 * enforces the open mode (read for queries, write for setters), and
 * selects which device node the SunOS handlers operate on. */
static long openprom_ioctl(struct file * file,
			   unsigned int cmd, unsigned long arg)
{
	DATA *data = file->private_data;

	switch (cmd) {
	case OPROMGETOPT:
	case OPROMNXTOPT:
		if ((file->f_mode & FMODE_READ) == 0)
			return -EPERM;
		return openprom_sunos_ioctl(file, cmd, arg,
					    options_node);

	case OPROMSETOPT:
	case OPROMSETOPT2:
		if ((file->f_mode & FMODE_WRITE) == 0)
			return -EPERM;
		return openprom_sunos_ioctl(file, cmd, arg,
					    options_node);

	case OPROMNEXT:
	case OPROMCHILD:
	case OPROMGETPROP:
	case OPROMNXTPROP:
		if ((file->f_mode & FMODE_READ) == 0)
			return -EPERM;
		return openprom_sunos_ioctl(file, cmd, arg,
					    data->current_node);

	case OPROMU2P:
	case OPROMGETCONS:
	case OPROMGETFBNAME:
	case OPROMGETBOOTARGS:
	case OPROMSETCUR:
	case OPROMPCI2NODE:
	case OPROMPATH2NODE:
		if ((file->f_mode & FMODE_READ) == 0)
			return -EPERM;
		return openprom_sunos_ioctl(file, cmd, arg, NULL);

	case OPIOCGET:
	case OPIOCNEXTPROP:
	case OPIOCGETOPTNODE:
	case OPIOCGETNEXT:
	case OPIOCGETCHILD:
		if ((file->f_mode & FMODE_READ) == 0)
			return -EBADF;
		return openprom_bsd_ioctl(file,cmd,arg);

	case OPIOCSET:
		if ((file->f_mode & FMODE_WRITE) == 0)
			return -EBADF;
		return openprom_bsd_ioctl(file,cmd,arg);

	default:
		return -EINVAL;
	};
}

static long openprom_compat_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	long rval = -ENOTTY;

	/*
	 * SunOS/Solaris only, the NetBSD one's have embedded pointers in
	 * the arg which we'd need to clean up...
	 */
	switch (cmd) {
	case OPROMGETOPT:
	case OPROMSETOPT:
	case OPROMNXTOPT:
	case OPROMSETOPT2:
	case OPROMNEXT:
	case OPROMCHILD:
	case OPROMGETPROP:
	case OPROMNXTPROP:
	case OPROMU2P:
	case OPROMGETCONS:
	case OPROMGETFBNAME:
	case OPROMGETBOOTARGS:
	case OPROMSETCUR:
	case OPROMPCI2NODE:
	case OPROMPATH2NODE:
		rval = openprom_ioctl(file, cmd, arg);
		break;
	}

	return rval;
}

/* Allocate per-descriptor state; both node cursors start at the root. */
static int openprom_open(struct inode * inode, struct file * file)
{
	DATA *data;

	data = kmalloc(sizeof(DATA), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	mutex_lock(&openprom_mutex);
	data->current_node = of_find_node_by_path("/");
	data->lastnode = data->current_node;
	file->private_data = (void *) data;
	mutex_unlock(&openprom_mutex);

	return 0;
}

static int openprom_release(struct inode * inode, struct file * file)
{
	kfree(file->private_data);
	return 0;
}

static const struct file_operations openprom_fops = {
	.owner =	THIS_MODULE,
	.llseek =	no_llseek,
	.unlocked_ioctl = openprom_ioctl,
	.compat_ioctl =	openprom_compat_ioctl,
	.open =		openprom_open,
	.release =	openprom_release,
};

static struct miscdevice openprom_dev = {
	.minor		= SUN_OPENPROM_MINOR,
	.name		= "openprom",
	.fops		= &openprom_fops,
};

/* Register the misc device and cache the PROM "options" node; fail the
 * load if the node does not exist. */
static int __init openprom_init(void)
{
	struct device_node *dp;
	int err;

	err = misc_register(&openprom_dev);
	if (err)
		return err;

	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (!strcmp(dp->name, "options"))
			break;

		dp = dp->sibling;
	}
	options_node = dp;

	if (!options_node) {
		misc_deregister(&openprom_dev);
		return -EIO;
	}

	return 0;
}

static void __exit openprom_cleanup(void)
{
	misc_deregister(&openprom_dev);
}

module_init(openprom_init);
module_exit(openprom_cleanup);
gpl-2.0
jazzk/I9505GUEUCNF3
fs/gfs2/dir.c
4754
48466
/* * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License version 2. */ /* * Implements Extendible Hashing as described in: * "Extendible Hashing" by Fagin, et al in * __ACM Trans. on Database Systems__, Sept 1979. * * * Here's the layout of dirents which is essentially the same as that of ext2 * within a single block. The field de_name_len is the number of bytes * actually required for the name (no null terminator). The field de_rec_len * is the number of bytes allocated to the dirent. The offset of the next * dirent in the block is (dirent + dirent->de_rec_len). When a dirent is * deleted, the preceding dirent inherits its allocated space, ie * prev->de_rec_len += deleted->de_rec_len. Since the next dirent is obtained * by adding de_rec_len to the current dirent, this essentially causes the * deleted dirent to get jumped over when iterating through all the dirents. * * When deleting the first dirent in a block, there is no previous dirent so * the field de_ino is set to zero to designate it as deleted. When allocating * a dirent, gfs2_dirent_alloc iterates through the dirents in a block. If the * first dirent has (de_ino == 0) and de_rec_len is large enough, this first * dirent is allocated. Otherwise it must go through all the 'used' dirents * searching for one in which the amount of total space minus the amount of * used space will provide enough space for the new dirent. * * There are two types of blocks in which dirents reside. In a stuffed dinode, * the dirents begin at offset sizeof(struct gfs2_dinode) from the beginning of * the block. In leaves, they begin at offset sizeof(struct gfs2_leaf) from the * beginning of the leaf block. 
The dirents reside in leaves when * * dip->i_diskflags & GFS2_DIF_EXHASH is true * * Otherwise, the dirents are "linear", within a single stuffed dinode block. * * When the dirents are in leaves, the actual contents of the directory file are * used as an array of 64-bit block pointers pointing to the leaf blocks. The * dirents are NOT in the directory file itself. There can be more than one * block pointer in the array that points to the same leaf. In fact, when a * directory is first converted from linear to exhash, all of the pointers * point to the same leaf. * * When a leaf is completely full, the size of the hash table can be * doubled unless it is already at the maximum size which is hard coded into * GFS2_DIR_MAX_DEPTH. After that, leaves are chained together in a linked list, * but never before the maximum hash table size has been reached. */ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/buffer_head.h> #include <linux/sort.h> #include <linux/gfs2_ondisk.h> #include <linux/crc32.h> #include <linux/vmalloc.h> #include "gfs2.h" #include "incore.h" #include "dir.h" #include "glock.h" #include "inode.h" #include "meta_io.h" #include "quota.h" #include "rgrp.h" #include "trans.h" #include "bmap.h" #include "util.h" #define IS_LEAF 1 /* Hashed (leaf) directory */ #define IS_DINODE 2 /* Linear (stuffed dinode block) directory */ #define MAX_RA_BLOCKS 32 /* max read-ahead blocks */ #define gfs2_disk_hash2offset(h) (((u64)(h)) >> 1) #define gfs2_dir_offset2hash(p) ((u32)(((u64)(p)) << 1)) struct qstr gfs2_qdot __read_mostly; struct qstr gfs2_qdotdot __read_mostly; typedef int (*gfs2_dscan_t)(const struct gfs2_dirent *dent, const struct qstr *name, void *opaque); int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block, struct buffer_head **bhp) { struct buffer_head *bh; bh = gfs2_meta_new(ip->i_gl, block); gfs2_trans_add_bh(ip->i_gl, bh, 1); gfs2_metatype_set(bh, GFS2_METATYPE_JD, GFS2_FORMAT_JD); gfs2_buffer_clear_tail(bh, sizeof(struct 
gfs2_meta_header)); *bhp = bh; return 0; } static int gfs2_dir_get_existing_buffer(struct gfs2_inode *ip, u64 block, struct buffer_head **bhp) { struct buffer_head *bh; int error; error = gfs2_meta_read(ip->i_gl, block, DIO_WAIT, &bh); if (error) return error; if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_JD)) { brelse(bh); return -EIO; } *bhp = bh; return 0; } static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf, unsigned int offset, unsigned int size) { struct buffer_head *dibh; int error; error = gfs2_meta_inode_buffer(ip, &dibh); if (error) return error; gfs2_trans_add_bh(ip->i_gl, dibh, 1); memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size); if (ip->i_inode.i_size < offset + size) i_size_write(&ip->i_inode, offset + size); ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; gfs2_dinode_out(ip, dibh->b_data); brelse(dibh); return size; } /** * gfs2_dir_write_data - Write directory information to the inode * @ip: The GFS2 inode * @buf: The buffer containing information to be written * @offset: The file offset to start writing at * @size: The amount of data to write * * Returns: The number of bytes correctly written or error code */ static int gfs2_dir_write_data(struct gfs2_inode *ip, const char *buf, u64 offset, unsigned int size) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct buffer_head *dibh; u64 lblock, dblock; u32 extlen = 0; unsigned int o; int copied = 0; int error = 0; int new = 0; if (!size) return 0; if (gfs2_is_stuffed(ip) && offset + size <= sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) return gfs2_dir_write_stuffed(ip, buf, (unsigned int)offset, size); if (gfs2_assert_warn(sdp, gfs2_is_jdata(ip))) return -EINVAL; if (gfs2_is_stuffed(ip)) { error = gfs2_unstuff_dinode(ip, NULL); if (error) return error; } lblock = offset; o = do_div(lblock, sdp->sd_jbsize) + sizeof(struct gfs2_meta_header); while (copied < size) { unsigned int amount; struct buffer_head *bh; amount = size - 
copied; if (amount > sdp->sd_sb.sb_bsize - o) amount = sdp->sd_sb.sb_bsize - o; if (!extlen) { new = 1; error = gfs2_extent_map(&ip->i_inode, lblock, &new, &dblock, &extlen); if (error) goto fail; error = -EIO; if (gfs2_assert_withdraw(sdp, dblock)) goto fail; } if (amount == sdp->sd_jbsize || new) error = gfs2_dir_get_new_buffer(ip, dblock, &bh); else error = gfs2_dir_get_existing_buffer(ip, dblock, &bh); if (error) goto fail; gfs2_trans_add_bh(ip->i_gl, bh, 1); memcpy(bh->b_data + o, buf, amount); brelse(bh); buf += amount; copied += amount; lblock++; dblock++; extlen--; o = sizeof(struct gfs2_meta_header); } out: error = gfs2_meta_inode_buffer(ip, &dibh); if (error) return error; if (ip->i_inode.i_size < offset + copied) i_size_write(&ip->i_inode, offset + copied); ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; gfs2_trans_add_bh(ip->i_gl, dibh, 1); gfs2_dinode_out(ip, dibh->b_data); brelse(dibh); return copied; fail: if (copied) goto out; return error; } static int gfs2_dir_read_stuffed(struct gfs2_inode *ip, __be64 *buf, unsigned int size) { struct buffer_head *dibh; int error; error = gfs2_meta_inode_buffer(ip, &dibh); if (!error) { memcpy(buf, dibh->b_data + sizeof(struct gfs2_dinode), size); brelse(dibh); } return (error) ? 
error : size; } /** * gfs2_dir_read_data - Read a data from a directory inode * @ip: The GFS2 Inode * @buf: The buffer to place result into * @size: Amount of data to transfer * * Returns: The amount of data actually copied or the error */ static int gfs2_dir_read_data(struct gfs2_inode *ip, __be64 *buf, unsigned int size) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); u64 lblock, dblock; u32 extlen = 0; unsigned int o; int copied = 0; int error = 0; if (gfs2_is_stuffed(ip)) return gfs2_dir_read_stuffed(ip, buf, size); if (gfs2_assert_warn(sdp, gfs2_is_jdata(ip))) return -EINVAL; lblock = 0; o = do_div(lblock, sdp->sd_jbsize) + sizeof(struct gfs2_meta_header); while (copied < size) { unsigned int amount; struct buffer_head *bh; int new; amount = size - copied; if (amount > sdp->sd_sb.sb_bsize - o) amount = sdp->sd_sb.sb_bsize - o; if (!extlen) { new = 0; error = gfs2_extent_map(&ip->i_inode, lblock, &new, &dblock, &extlen); if (error || !dblock) goto fail; BUG_ON(extlen < 1); bh = gfs2_meta_ra(ip->i_gl, dblock, extlen); } else { error = gfs2_meta_read(ip->i_gl, dblock, DIO_WAIT, &bh); if (error) goto fail; } error = gfs2_metatype_check(sdp, bh, GFS2_METATYPE_JD); if (error) { brelse(bh); goto fail; } dblock++; extlen--; memcpy(buf, bh->b_data + o, amount); brelse(bh); buf += (amount/sizeof(__be64)); copied += amount; lblock++; o = sizeof(struct gfs2_meta_header); } return copied; fail: return (copied) ? 
copied : error; } /** * gfs2_dir_get_hash_table - Get pointer to the dir hash table * @ip: The inode in question * * Returns: The hash table or an error */ static __be64 *gfs2_dir_get_hash_table(struct gfs2_inode *ip) { struct inode *inode = &ip->i_inode; int ret; u32 hsize; __be64 *hc; BUG_ON(!(ip->i_diskflags & GFS2_DIF_EXHASH)); hc = ip->i_hash_cache; if (hc) return hc; hsize = 1 << ip->i_depth; hsize *= sizeof(__be64); if (hsize != i_size_read(&ip->i_inode)) { gfs2_consist_inode(ip); return ERR_PTR(-EIO); } hc = kmalloc(hsize, GFP_NOFS); ret = -ENOMEM; if (hc == NULL) return ERR_PTR(-ENOMEM); ret = gfs2_dir_read_data(ip, hc, hsize); if (ret < 0) { kfree(hc); return ERR_PTR(ret); } spin_lock(&inode->i_lock); if (ip->i_hash_cache) kfree(hc); else ip->i_hash_cache = hc; spin_unlock(&inode->i_lock); return ip->i_hash_cache; } /** * gfs2_dir_hash_inval - Invalidate dir hash * @ip: The directory inode * * Must be called with an exclusive glock, or during glock invalidation. */ void gfs2_dir_hash_inval(struct gfs2_inode *ip) { __be64 *hc = ip->i_hash_cache; ip->i_hash_cache = NULL; kfree(hc); } static inline int gfs2_dirent_sentinel(const struct gfs2_dirent *dent) { return dent->de_inum.no_addr == 0 || dent->de_inum.no_formal_ino == 0; } static inline int __gfs2_dirent_find(const struct gfs2_dirent *dent, const struct qstr *name, int ret) { if (!gfs2_dirent_sentinel(dent) && be32_to_cpu(dent->de_hash) == name->hash && be16_to_cpu(dent->de_name_len) == name->len && memcmp(dent+1, name->name, name->len) == 0) return ret; return 0; } static int gfs2_dirent_find(const struct gfs2_dirent *dent, const struct qstr *name, void *opaque) { return __gfs2_dirent_find(dent, name, 1); } static int gfs2_dirent_prev(const struct gfs2_dirent *dent, const struct qstr *name, void *opaque) { return __gfs2_dirent_find(dent, name, 2); } /* * name->name holds ptr to start of block. * name->len holds size of block. 
*/ static int gfs2_dirent_last(const struct gfs2_dirent *dent, const struct qstr *name, void *opaque) { const char *start = name->name; const char *end = (const char *)dent + be16_to_cpu(dent->de_rec_len); if (name->len == (end - start)) return 1; return 0; } static int gfs2_dirent_find_space(const struct gfs2_dirent *dent, const struct qstr *name, void *opaque) { unsigned required = GFS2_DIRENT_SIZE(name->len); unsigned actual = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len)); unsigned totlen = be16_to_cpu(dent->de_rec_len); if (gfs2_dirent_sentinel(dent)) actual = 0; if (totlen - actual >= required) return 1; return 0; } struct dirent_gather { const struct gfs2_dirent **pdent; unsigned offset; }; static int gfs2_dirent_gather(const struct gfs2_dirent *dent, const struct qstr *name, void *opaque) { struct dirent_gather *g = opaque; if (!gfs2_dirent_sentinel(dent)) { g->pdent[g->offset++] = dent; } return 0; } /* * Other possible things to check: * - Inode located within filesystem size (and on valid block) * - Valid directory entry type * Not sure how heavy-weight we want to make this... could also check * hash is correct for example, but that would take a lot of extra time. * For now the most important thing is to check that the various sizes * are correct. 
*/ static int gfs2_check_dirent(struct gfs2_dirent *dent, unsigned int offset, unsigned int size, unsigned int len, int first) { const char *msg = "gfs2_dirent too small"; if (unlikely(size < sizeof(struct gfs2_dirent))) goto error; msg = "gfs2_dirent misaligned"; if (unlikely(offset & 0x7)) goto error; msg = "gfs2_dirent points beyond end of block"; if (unlikely(offset + size > len)) goto error; msg = "zero inode number"; if (unlikely(!first && gfs2_dirent_sentinel(dent))) goto error; msg = "name length is greater than space in dirent"; if (!gfs2_dirent_sentinel(dent) && unlikely(sizeof(struct gfs2_dirent)+be16_to_cpu(dent->de_name_len) > size)) goto error; return 0; error: printk(KERN_WARNING "gfs2_check_dirent: %s (%s)\n", msg, first ? "first in block" : "not first in block"); return -EIO; } static int gfs2_dirent_offset(const void *buf) { const struct gfs2_meta_header *h = buf; int offset; BUG_ON(buf == NULL); switch(be32_to_cpu(h->mh_type)) { case GFS2_METATYPE_LF: offset = sizeof(struct gfs2_leaf); break; case GFS2_METATYPE_DI: offset = sizeof(struct gfs2_dinode); break; default: goto wrong_type; } return offset; wrong_type: printk(KERN_WARNING "gfs2_scan_dirent: wrong block type %u\n", be32_to_cpu(h->mh_type)); return -1; } static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf, unsigned int len, gfs2_dscan_t scan, const struct qstr *name, void *opaque) { struct gfs2_dirent *dent, *prev; unsigned offset; unsigned size; int ret = 0; ret = gfs2_dirent_offset(buf); if (ret < 0) goto consist_inode; offset = ret; prev = NULL; dent = buf + offset; size = be16_to_cpu(dent->de_rec_len); if (gfs2_check_dirent(dent, offset, size, len, 1)) goto consist_inode; do { ret = scan(dent, name, opaque); if (ret) break; offset += size; if (offset == len) break; prev = dent; dent = buf + offset; size = be16_to_cpu(dent->de_rec_len); if (gfs2_check_dirent(dent, offset, size, len, 0)) goto consist_inode; } while(1); switch(ret) { case 0: return NULL; case 1: 
return dent; case 2: return prev ? prev : dent; default: BUG_ON(ret > 0); return ERR_PTR(ret); } consist_inode: gfs2_consist_inode(GFS2_I(inode)); return ERR_PTR(-EIO); } static int dirent_check_reclen(struct gfs2_inode *dip, const struct gfs2_dirent *d, const void *end_p) { const void *ptr = d; u16 rec_len = be16_to_cpu(d->de_rec_len); if (unlikely(rec_len < sizeof(struct gfs2_dirent))) goto broken; ptr += rec_len; if (ptr < end_p) return rec_len; if (ptr == end_p) return -ENOENT; broken: gfs2_consist_inode(dip); return -EIO; } /** * dirent_next - Next dirent * @dip: the directory * @bh: The buffer * @dent: Pointer to list of dirents * * Returns: 0 on success, error code otherwise */ static int dirent_next(struct gfs2_inode *dip, struct buffer_head *bh, struct gfs2_dirent **dent) { struct gfs2_dirent *cur = *dent, *tmp; char *bh_end = bh->b_data + bh->b_size; int ret; ret = dirent_check_reclen(dip, cur, bh_end); if (ret < 0) return ret; tmp = (void *)cur + ret; ret = dirent_check_reclen(dip, tmp, bh_end); if (ret == -EIO) return ret; /* Only the first dent could ever have de_inum.no_addr == 0 */ if (gfs2_dirent_sentinel(tmp)) { gfs2_consist_inode(dip); return -EIO; } *dent = tmp; return 0; } /** * dirent_del - Delete a dirent * @dip: The GFS2 inode * @bh: The buffer * @prev: The previous dirent * @cur: The current dirent * */ static void dirent_del(struct gfs2_inode *dip, struct buffer_head *bh, struct gfs2_dirent *prev, struct gfs2_dirent *cur) { u16 cur_rec_len, prev_rec_len; if (gfs2_dirent_sentinel(cur)) { gfs2_consist_inode(dip); return; } gfs2_trans_add_bh(dip->i_gl, bh, 1); /* If there is no prev entry, this is the first entry in the block. The de_rec_len is already as big as it needs to be. Just zero out the inode number and return. */ if (!prev) { cur->de_inum.no_addr = 0; cur->de_inum.no_formal_ino = 0; return; } /* Combine this dentry with the previous one. 
*/ prev_rec_len = be16_to_cpu(prev->de_rec_len); cur_rec_len = be16_to_cpu(cur->de_rec_len); if ((char *)prev + prev_rec_len != (char *)cur) gfs2_consist_inode(dip); if ((char *)cur + cur_rec_len > bh->b_data + bh->b_size) gfs2_consist_inode(dip); prev_rec_len += cur_rec_len; prev->de_rec_len = cpu_to_be16(prev_rec_len); } /* * Takes a dent from which to grab space as an argument. Returns the * newly created dent. */ static struct gfs2_dirent *gfs2_init_dirent(struct inode *inode, struct gfs2_dirent *dent, const struct qstr *name, struct buffer_head *bh) { struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_dirent *ndent; unsigned offset = 0, totlen; if (!gfs2_dirent_sentinel(dent)) offset = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len)); totlen = be16_to_cpu(dent->de_rec_len); BUG_ON(offset + name->len > totlen); gfs2_trans_add_bh(ip->i_gl, bh, 1); ndent = (struct gfs2_dirent *)((char *)dent + offset); dent->de_rec_len = cpu_to_be16(offset); gfs2_qstr2dirent(name, totlen - offset, ndent); return ndent; } static struct gfs2_dirent *gfs2_dirent_alloc(struct inode *inode, struct buffer_head *bh, const struct qstr *name) { struct gfs2_dirent *dent; dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size, gfs2_dirent_find_space, name, NULL); if (!dent || IS_ERR(dent)) return dent; return gfs2_init_dirent(inode, dent, name, bh); } static int get_leaf(struct gfs2_inode *dip, u64 leaf_no, struct buffer_head **bhp) { int error; error = gfs2_meta_read(dip->i_gl, leaf_no, DIO_WAIT, bhp); if (!error && gfs2_metatype_check(GFS2_SB(&dip->i_inode), *bhp, GFS2_METATYPE_LF)) { /* printk(KERN_INFO "block num=%llu\n", leaf_no); */ error = -EIO; } return error; } /** * get_leaf_nr - Get a leaf number associated with the index * @dip: The GFS2 inode * @index: * @leaf_out: * * Returns: 0 on success, error code otherwise */ static int get_leaf_nr(struct gfs2_inode *dip, u32 index, u64 *leaf_out) { __be64 *hash; hash = gfs2_dir_get_hash_table(dip); if (IS_ERR(hash)) return PTR_ERR(hash); 
*leaf_out = be64_to_cpu(*(hash + index)); return 0; } static int get_first_leaf(struct gfs2_inode *dip, u32 index, struct buffer_head **bh_out) { u64 leaf_no; int error; error = get_leaf_nr(dip, index, &leaf_no); if (!error) error = get_leaf(dip, leaf_no, bh_out); return error; } static struct gfs2_dirent *gfs2_dirent_search(struct inode *inode, const struct qstr *name, gfs2_dscan_t scan, struct buffer_head **pbh) { struct buffer_head *bh; struct gfs2_dirent *dent; struct gfs2_inode *ip = GFS2_I(inode); int error; if (ip->i_diskflags & GFS2_DIF_EXHASH) { struct gfs2_leaf *leaf; unsigned hsize = 1 << ip->i_depth; unsigned index; u64 ln; if (hsize * sizeof(u64) != i_size_read(inode)) { gfs2_consist_inode(ip); return ERR_PTR(-EIO); } index = name->hash >> (32 - ip->i_depth); error = get_first_leaf(ip, index, &bh); if (error) return ERR_PTR(error); do { dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size, scan, name, NULL); if (dent) goto got_dent; leaf = (struct gfs2_leaf *)bh->b_data; ln = be64_to_cpu(leaf->lf_next); brelse(bh); if (!ln) break; error = get_leaf(ip, ln, &bh); } while(!error); return error ? 
ERR_PTR(error) : NULL; } error = gfs2_meta_inode_buffer(ip, &bh); if (error) return ERR_PTR(error); dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size, scan, name, NULL); got_dent: if (unlikely(dent == NULL || IS_ERR(dent))) { brelse(bh); bh = NULL; } *pbh = bh; return dent; } static struct gfs2_leaf *new_leaf(struct inode *inode, struct buffer_head **pbh, u16 depth) { struct gfs2_inode *ip = GFS2_I(inode); unsigned int n = 1; u64 bn; int error; struct buffer_head *bh; struct gfs2_leaf *leaf; struct gfs2_dirent *dent; struct qstr name = { .name = "", .len = 0, .hash = 0 }; error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL); if (error) return NULL; bh = gfs2_meta_new(ip->i_gl, bn); if (!bh) return NULL; gfs2_trans_add_unrevoke(GFS2_SB(inode), bn, 1); gfs2_trans_add_bh(ip->i_gl, bh, 1); gfs2_metatype_set(bh, GFS2_METATYPE_LF, GFS2_FORMAT_LF); leaf = (struct gfs2_leaf *)bh->b_data; leaf->lf_depth = cpu_to_be16(depth); leaf->lf_entries = 0; leaf->lf_dirent_format = cpu_to_be32(GFS2_FORMAT_DE); leaf->lf_next = 0; memset(leaf->lf_reserved, 0, sizeof(leaf->lf_reserved)); dent = (struct gfs2_dirent *)(leaf+1); gfs2_qstr2dirent(&name, bh->b_size - sizeof(struct gfs2_leaf), dent); *pbh = bh; return leaf; } /** * dir_make_exhash - Convert a stuffed directory into an ExHash directory * @dip: The GFS2 inode * * Returns: 0 on success, error code otherwise */ static int dir_make_exhash(struct inode *inode) { struct gfs2_inode *dip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_dirent *dent; struct qstr args; struct buffer_head *bh, *dibh; struct gfs2_leaf *leaf; int y; u32 x; __be64 *lp; u64 bn; int error; error = gfs2_meta_inode_buffer(dip, &dibh); if (error) return error; /* Turn over a new leaf */ leaf = new_leaf(inode, &bh, 0); if (!leaf) return -ENOSPC; bn = bh->b_blocknr; gfs2_assert(sdp, dip->i_entries < (1 << 16)); leaf->lf_entries = cpu_to_be16(dip->i_entries); /* Copy dirents */ gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_leaf), dibh, sizeof(struct 
gfs2_dinode)); /* Find last entry */ x = 0; args.len = bh->b_size - sizeof(struct gfs2_dinode) + sizeof(struct gfs2_leaf); args.name = bh->b_data; dent = gfs2_dirent_scan(&dip->i_inode, bh->b_data, bh->b_size, gfs2_dirent_last, &args, NULL); if (!dent) { brelse(bh); brelse(dibh); return -EIO; } if (IS_ERR(dent)) { brelse(bh); brelse(dibh); return PTR_ERR(dent); } /* Adjust the last dirent's record length (Remember that dent still points to the last entry.) */ dent->de_rec_len = cpu_to_be16(be16_to_cpu(dent->de_rec_len) + sizeof(struct gfs2_dinode) - sizeof(struct gfs2_leaf)); brelse(bh); /* We're done with the new leaf block, now setup the new hash table. */ gfs2_trans_add_bh(dip->i_gl, dibh, 1); gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode)); lp = (__be64 *)(dibh->b_data + sizeof(struct gfs2_dinode)); for (x = sdp->sd_hash_ptrs; x--; lp++) *lp = cpu_to_be64(bn); i_size_write(inode, sdp->sd_sb.sb_bsize / 2); gfs2_add_inode_blocks(&dip->i_inode, 1); dip->i_diskflags |= GFS2_DIF_EXHASH; for (x = sdp->sd_hash_ptrs, y = -1; x; x >>= 1, y++) ; dip->i_depth = y; gfs2_dinode_out(dip, dibh->b_data); brelse(dibh); return 0; } /** * dir_split_leaf - Split a leaf block into two * @dip: The GFS2 inode * @index: * @leaf_no: * * Returns: 0 on success, error code on failure */ static int dir_split_leaf(struct inode *inode, const struct qstr *name) { struct gfs2_inode *dip = GFS2_I(inode); struct buffer_head *nbh, *obh, *dibh; struct gfs2_leaf *nleaf, *oleaf; struct gfs2_dirent *dent = NULL, *prev = NULL, *next = NULL, *new; u32 start, len, half_len, divider; u64 bn, leaf_no; __be64 *lp; u32 index; int x, moved = 0; int error; index = name->hash >> (32 - dip->i_depth); error = get_leaf_nr(dip, index, &leaf_no); if (error) return error; /* Get the old leaf block */ error = get_leaf(dip, leaf_no, &obh); if (error) return error; oleaf = (struct gfs2_leaf *)obh->b_data; if (dip->i_depth == be16_to_cpu(oleaf->lf_depth)) { brelse(obh); return 1; /* can't split */ } 
gfs2_trans_add_bh(dip->i_gl, obh, 1); nleaf = new_leaf(inode, &nbh, be16_to_cpu(oleaf->lf_depth) + 1); if (!nleaf) { brelse(obh); return -ENOSPC; } bn = nbh->b_blocknr; /* Compute the start and len of leaf pointers in the hash table. */ len = 1 << (dip->i_depth - be16_to_cpu(oleaf->lf_depth)); half_len = len >> 1; if (!half_len) { printk(KERN_WARNING "i_depth %u lf_depth %u index %u\n", dip->i_depth, be16_to_cpu(oleaf->lf_depth), index); gfs2_consist_inode(dip); error = -EIO; goto fail_brelse; } start = (index & ~(len - 1)); /* Change the pointers. Don't bother distinguishing stuffed from non-stuffed. This code is complicated enough already. */ lp = kmalloc(half_len * sizeof(__be64), GFP_NOFS); if (!lp) { error = -ENOMEM; goto fail_brelse; } /* Change the pointers */ for (x = 0; x < half_len; x++) lp[x] = cpu_to_be64(bn); gfs2_dir_hash_inval(dip); error = gfs2_dir_write_data(dip, (char *)lp, start * sizeof(u64), half_len * sizeof(u64)); if (error != half_len * sizeof(u64)) { if (error >= 0) error = -EIO; goto fail_lpfree; } kfree(lp); /* Compute the divider */ divider = (start + half_len) << (32 - dip->i_depth); /* Copy the entries */ dent = (struct gfs2_dirent *)(obh->b_data + sizeof(struct gfs2_leaf)); do { next = dent; if (dirent_next(dip, obh, &next)) next = NULL; if (!gfs2_dirent_sentinel(dent) && be32_to_cpu(dent->de_hash) < divider) { struct qstr str; str.name = (char*)(dent+1); str.len = be16_to_cpu(dent->de_name_len); str.hash = be32_to_cpu(dent->de_hash); new = gfs2_dirent_alloc(inode, nbh, &str); if (IS_ERR(new)) { error = PTR_ERR(new); break; } new->de_inum = dent->de_inum; /* No endian worries */ new->de_type = dent->de_type; /* No endian worries */ be16_add_cpu(&nleaf->lf_entries, 1); dirent_del(dip, obh, prev, dent); if (!oleaf->lf_entries) gfs2_consist_inode(dip); be16_add_cpu(&oleaf->lf_entries, -1); if (!prev) prev = dent; moved = 1; } else { prev = dent; } dent = next; } while (dent); oleaf->lf_depth = nleaf->lf_depth; error = 
gfs2_meta_inode_buffer(dip, &dibh); if (!gfs2_assert_withdraw(GFS2_SB(&dip->i_inode), !error)) { gfs2_trans_add_bh(dip->i_gl, dibh, 1); gfs2_add_inode_blocks(&dip->i_inode, 1); gfs2_dinode_out(dip, dibh->b_data); brelse(dibh); } brelse(obh); brelse(nbh); return error; fail_lpfree: kfree(lp); fail_brelse: brelse(obh); brelse(nbh); return error; } /** * dir_double_exhash - Double size of ExHash table * @dip: The GFS2 dinode * * Returns: 0 on success, error code on failure */ static int dir_double_exhash(struct gfs2_inode *dip) { struct buffer_head *dibh; u32 hsize; u32 hsize_bytes; __be64 *hc; __be64 *hc2, *h; int x; int error = 0; hsize = 1 << dip->i_depth; hsize_bytes = hsize * sizeof(__be64); hc = gfs2_dir_get_hash_table(dip); if (IS_ERR(hc)) return PTR_ERR(hc); h = hc2 = kmalloc(hsize_bytes * 2, GFP_NOFS); if (!hc2) return -ENOMEM; error = gfs2_meta_inode_buffer(dip, &dibh); if (error) goto out_kfree; for (x = 0; x < hsize; x++) { *h++ = *hc; *h++ = *hc; hc++; } error = gfs2_dir_write_data(dip, (char *)hc2, 0, hsize_bytes * 2); if (error != (hsize_bytes * 2)) goto fail; gfs2_dir_hash_inval(dip); dip->i_hash_cache = hc2; dip->i_depth++; gfs2_dinode_out(dip, dibh->b_data); brelse(dibh); return 0; fail: /* Replace original hash table & size */ gfs2_dir_write_data(dip, (char *)hc, 0, hsize_bytes); i_size_write(&dip->i_inode, hsize_bytes); gfs2_dinode_out(dip, dibh->b_data); brelse(dibh); out_kfree: kfree(hc2); return error; } /** * compare_dents - compare directory entries by hash value * @a: first dent * @b: second dent * * When comparing the hash entries of @a to @b: * gt: returns 1 * lt: returns -1 * eq: returns 0 */ static int compare_dents(const void *a, const void *b) { const struct gfs2_dirent *dent_a, *dent_b; u32 hash_a, hash_b; int ret = 0; dent_a = *(const struct gfs2_dirent **)a; hash_a = be32_to_cpu(dent_a->de_hash); dent_b = *(const struct gfs2_dirent **)b; hash_b = be32_to_cpu(dent_b->de_hash); if (hash_a > hash_b) ret = 1; else if (hash_a < hash_b) 
ret = -1; else { unsigned int len_a = be16_to_cpu(dent_a->de_name_len); unsigned int len_b = be16_to_cpu(dent_b->de_name_len); if (len_a > len_b) ret = 1; else if (len_a < len_b) ret = -1; else ret = memcmp(dent_a + 1, dent_b + 1, len_a); } return ret; } /** * do_filldir_main - read out directory entries * @dip: The GFS2 inode * @offset: The offset in the file to read from * @opaque: opaque data to pass to filldir * @filldir: The function to pass entries to * @darr: an array of struct gfs2_dirent pointers to read * @entries: the number of entries in darr * @copied: pointer to int that's non-zero if a entry has been copied out * * Jump through some hoops to make sure that if there are hash collsions, * they are read out at the beginning of a buffer. We want to minimize * the possibility that they will fall into different readdir buffers or * that someone will want to seek to that location. * * Returns: errno, >0 on exception from filldir */ static int do_filldir_main(struct gfs2_inode *dip, u64 *offset, void *opaque, filldir_t filldir, const struct gfs2_dirent **darr, u32 entries, int *copied) { const struct gfs2_dirent *dent, *dent_next; u64 off, off_next; unsigned int x, y; int run = 0; int error = 0; sort(darr, entries, sizeof(struct gfs2_dirent *), compare_dents, NULL); dent_next = darr[0]; off_next = be32_to_cpu(dent_next->de_hash); off_next = gfs2_disk_hash2offset(off_next); for (x = 0, y = 1; x < entries; x++, y++) { dent = dent_next; off = off_next; if (y < entries) { dent_next = darr[y]; off_next = be32_to_cpu(dent_next->de_hash); off_next = gfs2_disk_hash2offset(off_next); if (off < *offset) continue; *offset = off; if (off_next == off) { if (*copied && !run) return 1; run = 1; } else run = 0; } else { if (off < *offset) continue; *offset = off; } error = filldir(opaque, (const char *)(dent + 1), be16_to_cpu(dent->de_name_len), off, be64_to_cpu(dent->de_inum.no_addr), be16_to_cpu(dent->de_type)); if (error) return 1; *copied = 1; } /* Increment the *offset 
by one, so the next time we come into the do_filldir fxn, we get the next entry instead of the last one in the current leaf */ (*offset)++; return 0; } static void *gfs2_alloc_sort_buffer(unsigned size) { void *ptr = NULL; if (size < KMALLOC_MAX_SIZE) ptr = kmalloc(size, GFP_NOFS | __GFP_NOWARN); if (!ptr) ptr = __vmalloc(size, GFP_NOFS, PAGE_KERNEL); return ptr; } static void gfs2_free_sort_buffer(void *ptr) { if (is_vmalloc_addr(ptr)) vfree(ptr); else kfree(ptr); } static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque, filldir_t filldir, int *copied, unsigned *depth, u64 leaf_no) { struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); struct buffer_head *bh; struct gfs2_leaf *lf; unsigned entries = 0, entries2 = 0; unsigned leaves = 0; const struct gfs2_dirent **darr, *dent; struct dirent_gather g; struct buffer_head **larr; int leaf = 0; int error, i; u64 lfn = leaf_no; do { error = get_leaf(ip, lfn, &bh); if (error) goto out; lf = (struct gfs2_leaf *)bh->b_data; if (leaves == 0) *depth = be16_to_cpu(lf->lf_depth); entries += be16_to_cpu(lf->lf_entries); leaves++; lfn = be64_to_cpu(lf->lf_next); brelse(bh); } while(lfn); if (!entries) return 0; error = -ENOMEM; /* * The extra 99 entries are not normally used, but are a buffer * zone in case the number of entries in the leaf is corrupt. * 99 is the maximum number of entries that can fit in a single * leaf block. 
*/ larr = gfs2_alloc_sort_buffer((leaves + entries + 99) * sizeof(void *)); if (!larr) goto out; darr = (const struct gfs2_dirent **)(larr + leaves); g.pdent = darr; g.offset = 0; lfn = leaf_no; do { error = get_leaf(ip, lfn, &bh); if (error) goto out_free; lf = (struct gfs2_leaf *)bh->b_data; lfn = be64_to_cpu(lf->lf_next); if (lf->lf_entries) { entries2 += be16_to_cpu(lf->lf_entries); dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size, gfs2_dirent_gather, NULL, &g); error = PTR_ERR(dent); if (IS_ERR(dent)) goto out_free; if (entries2 != g.offset) { fs_warn(sdp, "Number of entries corrupt in dir " "leaf %llu, entries2 (%u) != " "g.offset (%u)\n", (unsigned long long)bh->b_blocknr, entries2, g.offset); error = -EIO; goto out_free; } error = 0; larr[leaf++] = bh; } else { brelse(bh); } } while(lfn); BUG_ON(entries2 != entries); error = do_filldir_main(ip, offset, opaque, filldir, darr, entries, copied); out_free: for(i = 0; i < leaf; i++) brelse(larr[i]); gfs2_free_sort_buffer(larr); out: return error; } /** * gfs2_dir_readahead - Issue read-ahead requests for leaf blocks. * * Note: we can't calculate each index like dir_e_read can because we don't * have the leaf, and therefore we don't have the depth, and therefore we * don't have the length. So we have to just read enough ahead to make up * for the loss of information. */ static void gfs2_dir_readahead(struct inode *inode, unsigned hsize, u32 index, struct file_ra_state *f_ra) { struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_glock *gl = ip->i_gl; struct buffer_head *bh; u64 blocknr = 0, last; unsigned count; /* First check if we've already read-ahead for the whole range. 
*/ if (index + MAX_RA_BLOCKS < f_ra->start) return; f_ra->start = max((pgoff_t)index, f_ra->start); for (count = 0; count < MAX_RA_BLOCKS; count++) { if (f_ra->start >= hsize) /* if exceeded the hash table */ break; last = blocknr; blocknr = be64_to_cpu(ip->i_hash_cache[f_ra->start]); f_ra->start++; if (blocknr == last) continue; bh = gfs2_getbuf(gl, blocknr, 1); if (trylock_buffer(bh)) { if (buffer_uptodate(bh)) { unlock_buffer(bh); brelse(bh); continue; } bh->b_end_io = end_buffer_read_sync; submit_bh(READA | REQ_META, bh); continue; } brelse(bh); } } /** * dir_e_read - Reads the entries from a directory into a filldir buffer * @dip: dinode pointer * @offset: the hash of the last entry read shifted to the right once * @opaque: buffer for the filldir function to fill * @filldir: points to the filldir function to use * * Returns: errno */ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque, filldir_t filldir, struct file_ra_state *f_ra) { struct gfs2_inode *dip = GFS2_I(inode); u32 hsize, len = 0; u32 hash, index; __be64 *lp; int copied = 0; int error = 0; unsigned depth = 0; hsize = 1 << dip->i_depth; hash = gfs2_dir_offset2hash(*offset); index = hash >> (32 - dip->i_depth); if (dip->i_hash_cache == NULL) f_ra->start = 0; lp = gfs2_dir_get_hash_table(dip); if (IS_ERR(lp)) return PTR_ERR(lp); gfs2_dir_readahead(inode, hsize, index, f_ra); while (index < hsize) { error = gfs2_dir_read_leaf(inode, offset, opaque, filldir, &copied, &depth, be64_to_cpu(lp[index])); if (error) break; len = 1 << (dip->i_depth - depth); index = (index & ~(len - 1)) + len; } if (error > 0) error = 0; return error; } int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque, filldir_t filldir, struct file_ra_state *f_ra) { struct gfs2_inode *dip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); struct dirent_gather g; const struct gfs2_dirent **darr, *dent; struct buffer_head *dibh; int copied = 0; int error; if (!dip->i_entries) return 0; if (dip->i_diskflags 
& GFS2_DIF_EXHASH) return dir_e_read(inode, offset, opaque, filldir, f_ra); if (!gfs2_is_stuffed(dip)) { gfs2_consist_inode(dip); return -EIO; } error = gfs2_meta_inode_buffer(dip, &dibh); if (error) return error; error = -ENOMEM; /* 96 is max number of dirents which can be stuffed into an inode */ darr = kmalloc(96 * sizeof(struct gfs2_dirent *), GFP_NOFS); if (darr) { g.pdent = darr; g.offset = 0; dent = gfs2_dirent_scan(inode, dibh->b_data, dibh->b_size, gfs2_dirent_gather, NULL, &g); if (IS_ERR(dent)) { error = PTR_ERR(dent); goto out; } if (dip->i_entries != g.offset) { fs_warn(sdp, "Number of entries corrupt in dir %llu, " "ip->i_entries (%u) != g.offset (%u)\n", (unsigned long long)dip->i_no_addr, dip->i_entries, g.offset); error = -EIO; goto out; } error = do_filldir_main(dip, offset, opaque, filldir, darr, dip->i_entries, &copied); out: kfree(darr); } if (error > 0) error = 0; brelse(dibh); return error; } /** * gfs2_dir_search - Search a directory * @dip: The GFS2 inode * @filename: * @inode: * * This routine searches a directory for a file or another directory. * Assumes a glock is held on dip. 
 *
 * Returns: errno
 */

struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name)
{
	struct buffer_head *bh;
	struct gfs2_dirent *dent;
	struct inode *inode;

	dent = gfs2_dirent_search(dir, name, gfs2_dirent_find, &bh);
	if (dent) {
		/* gfs2_dirent_search may return an ERR_PTR instead of NULL */
		if (IS_ERR(dent))
			return ERR_CAST(dent);
		inode = gfs2_inode_lookup(dir->i_sb,
				be16_to_cpu(dent->de_type),
				be64_to_cpu(dent->de_inum.no_addr),
				be64_to_cpu(dent->de_inum.no_formal_ino), 0);
		brelse(bh);
		return inode;
	}
	return ERR_PTR(-ENOENT);
}

/**
 * gfs2_dir_check - Verify that a directory entry matches a given inode
 * @dir: The directory to search
 * @name: The name to look up
 * @ip: The inode the entry is expected to refer to (may be NULL to only
 *      test for existence)
 *
 * Returns: 0 if the entry exists (and, when @ip is non-NULL, its block
 *          address, formal inode number and type all match @ip), -ENOENT
 *          if no such entry exists or it points at a different inode,
 *          -EIO on an on-disk type mismatch, or the error returned by
 *          the dirent search.
 */
int gfs2_dir_check(struct inode *dir, const struct qstr *name,
		   const struct gfs2_inode *ip)
{
	struct buffer_head *bh;
	struct gfs2_dirent *dent;
	int ret = -ENOENT;

	dent = gfs2_dirent_search(dir, name, gfs2_dirent_find, &bh);
	if (dent) {
		if (IS_ERR(dent))
			return PTR_ERR(dent);
		if (ip) {
			if (be64_to_cpu(dent->de_inum.no_addr) != ip->i_no_addr)
				goto out;
			if (be64_to_cpu(dent->de_inum.no_formal_ino) !=
			    ip->i_no_formal_ino)
				goto out;
			/* Address/formal-ino match but type doesn't: the
			   directory itself is inconsistent on disk */
			if (unlikely(IF2DT(ip->i_inode.i_mode) !=
			    be16_to_cpu(dent->de_type))) {
				gfs2_consist_inode(GFS2_I(dir));
				ret = -EIO;
				goto out;
			}
		}
		ret = 0;
out:
		brelse(bh);
	}
	return ret;
}

/**
 * dir_new_leaf - Append a new leaf block to a hash bucket's leaf chain
 * @inode: The directory inode
 * @name: The name being inserted (its hash selects the bucket)
 *
 * Walks the lf_next chain from the bucket's first leaf to its last leaf,
 * allocates a fresh leaf at the same depth, links it onto the end of the
 * chain, and accounts the new block in the dinode.
 *
 * Returns: 0 on success, or a negative errno
 */
static int dir_new_leaf(struct inode *inode, const struct qstr *name)
{
	struct buffer_head *bh, *obh;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_leaf *leaf, *oleaf;
	int error;
	u32 index;
	u64 bn;

	index = name->hash >> (32 - ip->i_depth);
	error = get_first_leaf(ip, index, &obh);
	if (error)
		return error;
	/* Walk to the final leaf in the chain (lf_next == 0) */
	do {
		oleaf = (struct gfs2_leaf *)obh->b_data;
		bn = be64_to_cpu(oleaf->lf_next);
		if (!bn)
			break;
		brelse(obh);
		error = get_leaf(ip, bn, &obh);
		if (error)
			return error;
	} while(1);

	/* The old tail leaf is modified below (lf_next), so add it to
	   the transaction before allocating the new leaf */
	gfs2_trans_add_bh(ip->i_gl, obh, 1);

	leaf = new_leaf(inode, &bh, be16_to_cpu(oleaf->lf_depth));
	if (!leaf) {
		brelse(obh);
		return -ENOSPC;
	}
	/* Link the new leaf after the old tail */
	oleaf->lf_next = cpu_to_be64(bh->b_blocknr);
	brelse(bh);
	brelse(obh);

	/* Account the newly allocated block in the dinode */
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		return error;
	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	gfs2_add_inode_blocks(&ip->i_inode, 1);
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	return 0;
}

/**
 *
gfs2_dir_add - Add new filename into directory * @dip: The GFS2 inode * @filename: The new name * @inode: The inode number of the entry * @type: The type of the entry * * Returns: 0 on success, error code on failure */ int gfs2_dir_add(struct inode *inode, const struct qstr *name, const struct gfs2_inode *nip) { struct gfs2_inode *ip = GFS2_I(inode); struct buffer_head *bh; struct gfs2_dirent *dent; struct gfs2_leaf *leaf; int error; while(1) { dent = gfs2_dirent_search(inode, name, gfs2_dirent_find_space, &bh); if (dent) { if (IS_ERR(dent)) return PTR_ERR(dent); dent = gfs2_init_dirent(inode, dent, name, bh); gfs2_inum_out(nip, dent); dent->de_type = cpu_to_be16(IF2DT(nip->i_inode.i_mode)); if (ip->i_diskflags & GFS2_DIF_EXHASH) { leaf = (struct gfs2_leaf *)bh->b_data; be16_add_cpu(&leaf->lf_entries, 1); } brelse(bh); error = gfs2_meta_inode_buffer(ip, &bh); if (error) break; gfs2_trans_add_bh(ip->i_gl, bh, 1); ip->i_entries++; ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; if (S_ISDIR(nip->i_inode.i_mode)) inc_nlink(&ip->i_inode); gfs2_dinode_out(ip, bh->b_data); brelse(bh); error = 0; break; } if (!(ip->i_diskflags & GFS2_DIF_EXHASH)) { error = dir_make_exhash(inode); if (error) break; continue; } error = dir_split_leaf(inode, name); if (error == 0) continue; if (error < 0) break; if (ip->i_depth < GFS2_DIR_MAX_DEPTH) { error = dir_double_exhash(ip); if (error) break; error = dir_split_leaf(inode, name); if (error < 0) break; if (error == 0) continue; } error = dir_new_leaf(inode, name); if (!error) continue; error = -ENOSPC; break; } return error; } /** * gfs2_dir_del - Delete a directory entry * @dip: The GFS2 inode * @filename: The filename * * Returns: 0 on success, error code on failure */ int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry) { const struct qstr *name = &dentry->d_name; struct gfs2_dirent *dent, *prev = NULL; struct buffer_head *bh; /* Returns _either_ the entry (if its first in block) or the previous entry 
	   otherwise */
	dent = gfs2_dirent_search(&dip->i_inode, name, gfs2_dirent_prev, &bh);
	if (!dent) {
		/* The entry we were asked to delete doesn't exist */
		gfs2_consist_inode(dip);
		return -EIO;
	}
	if (IS_ERR(dent)) {
		gfs2_consist_inode(dip);
		return PTR_ERR(dent);
	}
	/* If not first in block, adjust pointers accordingly */
	if (gfs2_dirent_find(dent, name, NULL) == 0) {
		prev = dent;
		dent = (struct gfs2_dirent *)((char *)dent +
					      be16_to_cpu(prev->de_rec_len));
	}

	dirent_del(dip, bh, prev, dent);
	if (dip->i_diskflags & GFS2_DIF_EXHASH) {
		/* Exhash directory: keep the leaf's entry count in step */
		struct gfs2_leaf *leaf = (struct gfs2_leaf *)bh->b_data;
		u16 entries = be16_to_cpu(leaf->lf_entries);
		if (!entries)
			gfs2_consist_inode(dip);
		leaf->lf_entries = cpu_to_be16(--entries);
	}
	brelse(bh);

	if (!dip->i_entries)
		gfs2_consist_inode(dip);
	dip->i_entries--;
	dip->i_inode.i_mtime = dip->i_inode.i_ctime = CURRENT_TIME;
	/* Removing a subdirectory drops the ".." backlink on the parent */
	if (S_ISDIR(dentry->d_inode->i_mode))
		drop_nlink(&dip->i_inode);
	mark_inode_dirty(&dip->i_inode);

	return 0;
}

/**
 * gfs2_dir_mvino - Change inode number of directory entry
 * @dip: The GFS2 inode
 * @filename:
 * @new_inode:
 *
 * This routine changes the inode number of a directory entry. It's used
 * by rename to change ".." when a directory is moved.
 * Assumes a glock is held on dvp.
* * Returns: errno */ int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename, const struct gfs2_inode *nip, unsigned int new_type) { struct buffer_head *bh; struct gfs2_dirent *dent; int error; dent = gfs2_dirent_search(&dip->i_inode, filename, gfs2_dirent_find, &bh); if (!dent) { gfs2_consist_inode(dip); return -EIO; } if (IS_ERR(dent)) return PTR_ERR(dent); gfs2_trans_add_bh(dip->i_gl, bh, 1); gfs2_inum_out(nip, dent); dent->de_type = cpu_to_be16(new_type); if (dip->i_diskflags & GFS2_DIF_EXHASH) { brelse(bh); error = gfs2_meta_inode_buffer(dip, &bh); if (error) return error; gfs2_trans_add_bh(dip->i_gl, bh, 1); } dip->i_inode.i_mtime = dip->i_inode.i_ctime = CURRENT_TIME; gfs2_dinode_out(dip, bh->b_data); brelse(bh); return 0; } /** * leaf_dealloc - Deallocate a directory leaf * @dip: the directory * @index: the hash table offset in the directory * @len: the number of pointers to this leaf * @leaf_no: the leaf number * @leaf_bh: buffer_head for the starting leaf * last_dealloc: 1 if this is the final dealloc for the leaf, else 0 * * Returns: errno */ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len, u64 leaf_no, struct buffer_head *leaf_bh, int last_dealloc) { struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); struct gfs2_leaf *tmp_leaf; struct gfs2_rgrp_list rlist; struct buffer_head *bh, *dibh; u64 blk, nblk; unsigned int rg_blocks = 0, l_blocks = 0; char *ht; unsigned int x, size = len * sizeof(u64); int error; error = gfs2_rindex_update(sdp); if (error) return error; memset(&rlist, 0, sizeof(struct gfs2_rgrp_list)); ht = kzalloc(size, GFP_NOFS); if (!ht) return -ENOMEM; if (!gfs2_qadata_get(dip)) { error = -ENOMEM; goto out; } error = gfs2_quota_hold(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE); if (error) goto out_put; /* Count the number of leaves */ bh = leaf_bh; for (blk = leaf_no; blk; blk = nblk) { if (blk != leaf_no) { error = get_leaf(dip, blk, &bh); if (error) goto out_rlist; } tmp_leaf = (struct gfs2_leaf *)bh->b_data; nblk 
= be64_to_cpu(tmp_leaf->lf_next); if (blk != leaf_no) brelse(bh); gfs2_rlist_add(dip, &rlist, blk); l_blocks++; } gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE); for (x = 0; x < rlist.rl_rgrps; x++) { struct gfs2_rgrpd *rgd; rgd = rlist.rl_ghs[x].gh_gl->gl_object; rg_blocks += rgd->rd_length; } error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs); if (error) goto out_rlist; error = gfs2_trans_begin(sdp, rg_blocks + (DIV_ROUND_UP(size, sdp->sd_jbsize) + 1) + RES_DINODE + RES_STATFS + RES_QUOTA, l_blocks); if (error) goto out_rg_gunlock; bh = leaf_bh; for (blk = leaf_no; blk; blk = nblk) { if (blk != leaf_no) { error = get_leaf(dip, blk, &bh); if (error) goto out_end_trans; } tmp_leaf = (struct gfs2_leaf *)bh->b_data; nblk = be64_to_cpu(tmp_leaf->lf_next); if (blk != leaf_no) brelse(bh); gfs2_free_meta(dip, blk, 1); gfs2_add_inode_blocks(&dip->i_inode, -1); } error = gfs2_dir_write_data(dip, ht, index * sizeof(u64), size); if (error != size) { if (error >= 0) error = -EIO; goto out_end_trans; } error = gfs2_meta_inode_buffer(dip, &dibh); if (error) goto out_end_trans; gfs2_trans_add_bh(dip->i_gl, dibh, 1); /* On the last dealloc, make this a regular file in case we crash. (We don't want to free these blocks a second time.) 
*/ if (last_dealloc) dip->i_inode.i_mode = S_IFREG; gfs2_dinode_out(dip, dibh->b_data); brelse(dibh); out_end_trans: gfs2_trans_end(sdp); out_rg_gunlock: gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs); out_rlist: gfs2_rlist_free(&rlist); gfs2_quota_unhold(dip); out_put: gfs2_qadata_put(dip); out: kfree(ht); return error; } /** * gfs2_dir_exhash_dealloc - free all the leaf blocks in a directory * @dip: the directory * * Dealloc all on-disk directory leaves to FREEMETA state * Change on-disk inode type to "regular file" * * Returns: errno */ int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip) { struct buffer_head *bh; struct gfs2_leaf *leaf; u32 hsize, len; u32 index = 0, next_index; __be64 *lp; u64 leaf_no; int error = 0, last; hsize = 1 << dip->i_depth; lp = gfs2_dir_get_hash_table(dip); if (IS_ERR(lp)) return PTR_ERR(lp); while (index < hsize) { leaf_no = be64_to_cpu(lp[index]); if (leaf_no) { error = get_leaf(dip, leaf_no, &bh); if (error) goto out; leaf = (struct gfs2_leaf *)bh->b_data; len = 1 << (dip->i_depth - be16_to_cpu(leaf->lf_depth)); next_index = (index & ~(len - 1)) + len; last = ((next_index >= hsize) ? 1 : 0); error = leaf_dealloc(dip, index, len, leaf_no, bh, last); brelse(bh); if (error) goto out; index = next_index; } else index++; } if (index != hsize) { gfs2_consist_inode(dip); error = -EIO; } out: return error; } /** * gfs2_diradd_alloc_required - find if adding entry will require an allocation * @ip: the file being written to * @filname: the filename that's going to be added * * Returns: 1 if alloc required, 0 if not, -ve on error */ int gfs2_diradd_alloc_required(struct inode *inode, const struct qstr *name) { struct gfs2_dirent *dent; struct buffer_head *bh; dent = gfs2_dirent_search(inode, name, gfs2_dirent_find_space, &bh); if (!dent) { return 1; } if (IS_ERR(dent)) return PTR_ERR(dent); brelse(bh); return 0; }
gpl-2.0
chil360/chil360-kernel
arch/arm/mach-imx/gpc.c
5266
2603
/*
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2011 Linaro Ltd.
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/hardware/gic.h>

/* Register offsets within the GPC block */
#define GPC_IMR1		0x008
#define GPC_PGC_CPU_PDN		0x2a0

#define IMR_NUM			4	/* number of 32-bit IMR registers */

static void __iomem *gpc_base;
static u32 gpc_wake_irqs[IMR_NUM];	/* wake sources set via irq_set_wake */
static u32 gpc_saved_imrs[IMR_NUM];	/* IMRs saved across suspend */

/*
 * Prepare the GPC for suspend: power the ARM core down on suspend entry
 * and unmask exactly the interrupts recorded as wakeup sources, saving
 * the previous mask state for imx_gpc_post_resume().
 */
void imx_gpc_pre_suspend(void)
{
	void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
	int i;

	/* Tell GPC to power off ARM core when suspend */
	writel_relaxed(0x1, gpc_base + GPC_PGC_CPU_PDN);

	for (i = 0; i < IMR_NUM; i++) {
		gpc_saved_imrs[i] = readl_relaxed(reg_imr1 + i * 4);
		writel_relaxed(~gpc_wake_irqs[i], reg_imr1 + i * 4);
	}
}

/*
 * Undo imx_gpc_pre_suspend(): keep the ARM core powered and restore the
 * interrupt masks that were saved before entering suspend.
 */
void imx_gpc_post_resume(void)
{
	void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
	int i;

	/* Keep ARM core powered on for other low-power modes */
	writel_relaxed(0x0, gpc_base + GPC_PGC_CPU_PDN);

	for (i = 0; i < IMR_NUM; i++)
		writel_relaxed(gpc_saved_imrs[i], reg_imr1 + i * 4);
}

/*
 * Record (on != 0) or clear a shared peripheral interrupt as a wakeup
 * source. The accumulated mask is applied in imx_gpc_pre_suspend().
 * Returns -EINVAL for non-SPI (irq < 32) interrupts.
 */
static int imx_gpc_irq_set_wake(struct irq_data *d, unsigned int on)
{
	unsigned int idx;
	u32 mask;

	/* Sanity check for SPI irq */
	if (d->irq < 32)
		return -EINVAL;

	/*
	 * Compute the IMR index only after the range check; evaluating
	 * "d->irq / 32 - 1" for irq < 32 wraps to UINT_MAX (the mask/unmask
	 * helpers below already follow this check-then-compute order).
	 */
	idx = d->irq / 32 - 1;
	mask = 1 << d->irq % 32;
	gpc_wake_irqs[idx] = on ? gpc_wake_irqs[idx] | mask :
				  gpc_wake_irqs[idx] & ~mask;

	return 0;
}

/* Clear the IMR bit for an SPI irq, allowing it to reach the core. */
static void imx_gpc_irq_unmask(struct irq_data *d)
{
	void __iomem *reg;
	u32 val;

	/* Sanity check for SPI irq */
	if (d->irq < 32)
		return;

	reg = gpc_base + GPC_IMR1 + (d->irq / 32 - 1) * 4;
	val = readl_relaxed(reg);
	val &= ~(1 << d->irq % 32);
	writel_relaxed(val, reg);
}

/* Set the IMR bit for an SPI irq, masking it at the GPC. */
static void imx_gpc_irq_mask(struct irq_data *d)
{
	void __iomem *reg;
	u32 val;

	/* Sanity check for SPI irq */
	if (d->irq < 32)
		return;

	reg = gpc_base + GPC_IMR1 + (d->irq / 32 - 1) * 4;
	val = readl_relaxed(reg);
	val |= 1 << (d->irq % 32);
	writel_relaxed(val, reg);
}

/*
 * Map the GPC from the device tree and hook its mask/unmask/set_wake
 * operations in behind the GIC via gic_arch_extn.
 */
void __init imx_gpc_init(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpc");
	gpc_base = of_iomap(np, 0);
	WARN_ON(!gpc_base);

	/* Register GPC as the secondary interrupt controller behind GIC */
	gic_arch_extn.irq_mask = imx_gpc_irq_mask;
	gic_arch_extn.irq_unmask = imx_gpc_irq_unmask;
	gic_arch_extn.irq_set_wake = imx_gpc_irq_set_wake;
}
gpl-2.0
SmartisanTech/T1Kernel
sound/pci/ice1712/prodigy_hifi.c
5522
32421
/* * ALSA driver for ICEnsemble VT1724 (Envy24HT) * * Lowlevel functions for Audiotrak Prodigy 7.1 Hifi * based on pontis.c * * Copyright (c) 2007 Julian Scheel <julian@jusst.de> * Copyright (c) 2007 allank * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <asm/io.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/mutex.h> #include <sound/core.h> #include <sound/info.h> #include <sound/tlv.h> #include "ice1712.h" #include "envy24ht.h" #include "prodigy_hifi.h" struct prodigy_hifi_spec { unsigned short master[2]; unsigned short vol[8]; }; /* I2C addresses */ #define WM_DEV 0x34 /* WM8776 registers */ #define WM_HP_ATTEN_L 0x00 /* headphone left attenuation */ #define WM_HP_ATTEN_R 0x01 /* headphone left attenuation */ #define WM_HP_MASTER 0x02 /* headphone master (both channels), override LLR */ #define WM_DAC_ATTEN_L 0x03 /* digital left attenuation */ #define WM_DAC_ATTEN_R 0x04 #define WM_DAC_MASTER 0x05 #define WM_PHASE_SWAP 0x06 /* DAC phase swap */ #define WM_DAC_CTRL1 0x07 #define WM_DAC_MUTE 0x08 #define WM_DAC_CTRL2 0x09 #define WM_DAC_INT 0x0a #define WM_ADC_INT 0x0b #define WM_MASTER_CTRL 0x0c #define WM_POWERDOWN 0x0d #define WM_ADC_ATTEN_L 0x0e #define WM_ADC_ATTEN_R 0x0f #define 
WM_ALC_CTRL1 0x10 #define WM_ALC_CTRL2 0x11 #define WM_ALC_CTRL3 0x12 #define WM_NOISE_GATE 0x13 #define WM_LIMITER 0x14 #define WM_ADC_MUX 0x15 #define WM_OUT_MUX 0x16 #define WM_RESET 0x17 /* Analog Recording Source :- Mic, LineIn, CD/Video, */ /* implement capture source select control for WM8776 */ #define WM_AIN1 "AIN1" #define WM_AIN2 "AIN2" #define WM_AIN3 "AIN3" #define WM_AIN4 "AIN4" #define WM_AIN5 "AIN5" /* GPIO pins of envy24ht connected to wm8766 */ #define WM8766_SPI_CLK (1<<17) /* CLK, Pin97 on ICE1724 */ #define WM8766_SPI_MD (1<<16) /* DATA VT1724 -> WM8766, Pin96 */ #define WM8766_SPI_ML (1<<18) /* Latch, Pin98 */ /* WM8766 registers */ #define WM8766_DAC_CTRL 0x02 /* DAC Control */ #define WM8766_INT_CTRL 0x03 /* Interface Control */ #define WM8766_DAC_CTRL2 0x09 #define WM8766_DAC_CTRL3 0x0a #define WM8766_RESET 0x1f #define WM8766_LDA1 0x00 #define WM8766_LDA2 0x04 #define WM8766_LDA3 0x06 #define WM8766_RDA1 0x01 #define WM8766_RDA2 0x05 #define WM8766_RDA3 0x07 #define WM8766_MUTE1 0x0C #define WM8766_MUTE2 0x0F /* * Prodigy HD2 */ #define AK4396_ADDR 0x00 #define AK4396_CSN (1 << 8) /* CSN->GPIO8, pin 75 */ #define AK4396_CCLK (1 << 9) /* CCLK->GPIO9, pin 76 */ #define AK4396_CDTI (1 << 10) /* CDTI->GPIO10, pin 77 */ /* ak4396 registers */ #define AK4396_CTRL1 0x00 #define AK4396_CTRL2 0x01 #define AK4396_CTRL3 0x02 #define AK4396_LCH_ATT 0x03 #define AK4396_RCH_ATT 0x04 /* * get the current register value of WM codec */ static unsigned short wm_get(struct snd_ice1712 *ice, int reg) { reg <<= 1; return ((unsigned short)ice->akm[0].images[reg] << 8) | ice->akm[0].images[reg + 1]; } /* * set the register value of WM codec and remember it */ static void wm_put_nocache(struct snd_ice1712 *ice, int reg, unsigned short val) { unsigned short cval; cval = (reg << 9) | val; snd_vt1724_write_i2c(ice, WM_DEV, cval >> 8, cval & 0xff); } static void wm_put(struct snd_ice1712 *ice, int reg, unsigned short val) { wm_put_nocache(ice, reg, val); reg <<= 1; 
ice->akm[0].images[reg] = val >> 8; ice->akm[0].images[reg + 1] = val; } /* * write data in the SPI mode */ static void set_gpio_bit(struct snd_ice1712 *ice, unsigned int bit, int val) { unsigned int tmp = snd_ice1712_gpio_read(ice); if (val) tmp |= bit; else tmp &= ~bit; snd_ice1712_gpio_write(ice, tmp); } /* * SPI implementation for WM8766 codec - only writing supported, no readback */ static void wm8766_spi_send_word(struct snd_ice1712 *ice, unsigned int data) { int i; for (i = 0; i < 16; i++) { set_gpio_bit(ice, WM8766_SPI_CLK, 0); udelay(1); set_gpio_bit(ice, WM8766_SPI_MD, data & 0x8000); udelay(1); set_gpio_bit(ice, WM8766_SPI_CLK, 1); udelay(1); data <<= 1; } } static void wm8766_spi_write(struct snd_ice1712 *ice, unsigned int reg, unsigned int data) { unsigned int block; snd_ice1712_gpio_set_dir(ice, WM8766_SPI_MD| WM8766_SPI_CLK|WM8766_SPI_ML); snd_ice1712_gpio_set_mask(ice, ~(WM8766_SPI_MD| WM8766_SPI_CLK|WM8766_SPI_ML)); /* latch must be low when writing */ set_gpio_bit(ice, WM8766_SPI_ML, 0); block = (reg << 9) | (data & 0x1ff); wm8766_spi_send_word(ice, block); /* REGISTER ADDRESS */ /* release latch */ set_gpio_bit(ice, WM8766_SPI_ML, 1); udelay(1); /* restore */ snd_ice1712_gpio_set_mask(ice, ice->gpio.write_mask); snd_ice1712_gpio_set_dir(ice, ice->gpio.direction); } /* * serial interface for ak4396 - only writing supported, no readback */ static void ak4396_send_word(struct snd_ice1712 *ice, unsigned int data) { int i; for (i = 0; i < 16; i++) { set_gpio_bit(ice, AK4396_CCLK, 0); udelay(1); set_gpio_bit(ice, AK4396_CDTI, data & 0x8000); udelay(1); set_gpio_bit(ice, AK4396_CCLK, 1); udelay(1); data <<= 1; } } static void ak4396_write(struct snd_ice1712 *ice, unsigned int reg, unsigned int data) { unsigned int block; snd_ice1712_gpio_set_dir(ice, AK4396_CSN|AK4396_CCLK|AK4396_CDTI); snd_ice1712_gpio_set_mask(ice, ~(AK4396_CSN|AK4396_CCLK|AK4396_CDTI)); /* latch must be low when writing */ set_gpio_bit(ice, AK4396_CSN, 0); block = ((AK4396_ADDR & 
0x03) << 14) | (1 << 13) | ((reg & 0x1f) << 8) | (data & 0xff); ak4396_send_word(ice, block); /* REGISTER ADDRESS */ /* release latch */ set_gpio_bit(ice, AK4396_CSN, 1); udelay(1); /* restore */ snd_ice1712_gpio_set_mask(ice, ice->gpio.write_mask); snd_ice1712_gpio_set_dir(ice, ice->gpio.direction); } /* * ak4396 mixers */ /* * DAC volume attenuation mixer control (-64dB to 0dB) */ static int ak4396_dac_vol_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; /* mute */ uinfo->value.integer.max = 0xFF; /* linear */ return 0; } static int ak4396_dac_vol_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); struct prodigy_hifi_spec *spec = ice->spec; int i; for (i = 0; i < 2; i++) ucontrol->value.integer.value[i] = spec->vol[i]; return 0; } static int ak4396_dac_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); struct prodigy_hifi_spec *spec = ice->spec; int i; int change = 0; mutex_lock(&ice->gpio_mutex); for (i = 0; i < 2; i++) { if (ucontrol->value.integer.value[i] != spec->vol[i]) { spec->vol[i] = ucontrol->value.integer.value[i]; ak4396_write(ice, AK4396_LCH_ATT + i, spec->vol[i] & 0xff); change = 1; } } mutex_unlock(&ice->gpio_mutex); return change; } static const DECLARE_TLV_DB_SCALE(db_scale_wm_dac, -12700, 100, 1); static struct snd_kcontrol_new prodigy_hd2_controls[] __devinitdata = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .name = "Front Playback Volume", .info = ak4396_dac_vol_info, .get = ak4396_dac_vol_get, .put = ak4396_dac_vol_put, .tlv = { .p = db_scale_wm_dac }, }, }; /* --------------- */ /* * Logarithmic volume values for WM87*6 * Computed as 20 * Log10(255 / x) */ static const unsigned char wm_vol[256] = { 
127, 48, 42, 39, 36, 34, 33, 31, 30, 29, 28, 27, 27, 26, 25, 25, 24, 24, 23, 23, 22, 22, 21, 21, 21, 20, 20, 20, 19, 19, 19, 18, 18, 18, 18, 17, 17, 17, 17, 16, 16, 16, 16, 15, 15, 15, 15, 15, 15, 14, 14, 14, 14, 14, 13, 13, 13, 13, 13, 13, 13, 12, 12, 12, 12, 12, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 10, 10, 10, 10, 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; #define WM_VOL_MAX (sizeof(wm_vol) - 1) #define WM_VOL_MUTE 0x8000 #define DAC_0dB 0xff #define DAC_RES 128 #define DAC_MIN (DAC_0dB - DAC_RES) static void wm_set_vol(struct snd_ice1712 *ice, unsigned int index, unsigned short vol, unsigned short master) { unsigned char nvol; if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE)) nvol = 0; else { nvol = (((vol & ~WM_VOL_MUTE) * (master & ~WM_VOL_MUTE)) / 128) & WM_VOL_MAX; nvol = (nvol ? (nvol + DAC_MIN) : 0) & 0xff; } wm_put(ice, index, nvol); wm_put_nocache(ice, index, 0x100 | nvol); } static void wm8766_set_vol(struct snd_ice1712 *ice, unsigned int index, unsigned short vol, unsigned short master) { unsigned char nvol; if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE)) nvol = 0; else { nvol = (((vol & ~WM_VOL_MUTE) * (master & ~WM_VOL_MUTE)) / 128) & WM_VOL_MAX; nvol = (nvol ? 
(nvol + DAC_MIN) : 0) & 0xff; } wm8766_spi_write(ice, index, (0x0100 | nvol)); } /* * DAC volume attenuation mixer control (-64dB to 0dB) */ static int wm_dac_vol_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; /* mute */ uinfo->value.integer.max = DAC_RES; /* 0dB, 0.5dB step */ return 0; } static int wm_dac_vol_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); struct prodigy_hifi_spec *spec = ice->spec; int i; for (i = 0; i < 2; i++) ucontrol->value.integer.value[i] = spec->vol[2 + i] & ~WM_VOL_MUTE; return 0; } static int wm_dac_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); struct prodigy_hifi_spec *spec = ice->spec; int i, idx, change = 0; mutex_lock(&ice->gpio_mutex); for (i = 0; i < 2; i++) { if (ucontrol->value.integer.value[i] != spec->vol[2 + i]) { idx = WM_DAC_ATTEN_L + i; spec->vol[2 + i] &= WM_VOL_MUTE; spec->vol[2 + i] |= ucontrol->value.integer.value[i]; wm_set_vol(ice, idx, spec->vol[2 + i], spec->master[i]); change = 1; } } mutex_unlock(&ice->gpio_mutex); return change; } /* * WM8766 DAC volume attenuation mixer control */ static int wm8766_vol_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { int voices = kcontrol->private_value >> 8; uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = voices; uinfo->value.integer.min = 0; /* mute */ uinfo->value.integer.max = DAC_RES; /* 0dB */ return 0; } static int wm8766_vol_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); struct prodigy_hifi_spec *spec = ice->spec; int i, ofs, voices; voices = kcontrol->private_value >> 8; ofs = kcontrol->private_value & 0xff; for (i = 0; i < voices; i++) ucontrol->value.integer.value[i] = 
spec->vol[ofs + i]; return 0; } static int wm8766_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); struct prodigy_hifi_spec *spec = ice->spec; int i, idx, ofs, voices; int change = 0; voices = kcontrol->private_value >> 8; ofs = kcontrol->private_value & 0xff; mutex_lock(&ice->gpio_mutex); for (i = 0; i < voices; i++) { if (ucontrol->value.integer.value[i] != spec->vol[ofs + i]) { idx = WM8766_LDA1 + ofs + i; spec->vol[ofs + i] &= WM_VOL_MUTE; spec->vol[ofs + i] |= ucontrol->value.integer.value[i]; wm8766_set_vol(ice, idx, spec->vol[ofs + i], spec->master[i]); change = 1; } } mutex_unlock(&ice->gpio_mutex); return change; } /* * Master volume attenuation mixer control / applied to WM8776+WM8766 */ static int wm_master_vol_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = DAC_RES; return 0; } static int wm_master_vol_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); struct prodigy_hifi_spec *spec = ice->spec; int i; for (i = 0; i < 2; i++) ucontrol->value.integer.value[i] = spec->master[i]; return 0; } static int wm_master_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); struct prodigy_hifi_spec *spec = ice->spec; int ch, change = 0; mutex_lock(&ice->gpio_mutex); for (ch = 0; ch < 2; ch++) { if (ucontrol->value.integer.value[ch] != spec->master[ch]) { spec->master[ch] = ucontrol->value.integer.value[ch]; /* Apply to front DAC */ wm_set_vol(ice, WM_DAC_ATTEN_L + ch, spec->vol[2 + ch], spec->master[ch]); wm8766_set_vol(ice, WM8766_LDA1 + ch, spec->vol[0 + ch], spec->master[ch]); wm8766_set_vol(ice, WM8766_LDA2 + ch, spec->vol[4 + ch], spec->master[ch]); wm8766_set_vol(ice, WM8766_LDA3 + 
ch, spec->vol[6 + ch], spec->master[ch]); change = 1; } } mutex_unlock(&ice->gpio_mutex); return change; } /* KONSTI */ static int wm_adc_mux_enum_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { static char* texts[32] = { "NULL", WM_AIN1, WM_AIN2, WM_AIN1 "+" WM_AIN2, WM_AIN3, WM_AIN1 "+" WM_AIN3, WM_AIN2 "+" WM_AIN3, WM_AIN1 "+" WM_AIN2 "+" WM_AIN3, WM_AIN4, WM_AIN1 "+" WM_AIN4, WM_AIN2 "+" WM_AIN4, WM_AIN1 "+" WM_AIN2 "+" WM_AIN4, WM_AIN3 "+" WM_AIN4, WM_AIN1 "+" WM_AIN3 "+" WM_AIN4, WM_AIN2 "+" WM_AIN3 "+" WM_AIN4, WM_AIN1 "+" WM_AIN2 "+" WM_AIN3 "+" WM_AIN4, WM_AIN5, WM_AIN1 "+" WM_AIN5, WM_AIN2 "+" WM_AIN5, WM_AIN1 "+" WM_AIN2 "+" WM_AIN5, WM_AIN3 "+" WM_AIN5, WM_AIN1 "+" WM_AIN3 "+" WM_AIN5, WM_AIN2 "+" WM_AIN3 "+" WM_AIN5, WM_AIN1 "+" WM_AIN2 "+" WM_AIN3 "+" WM_AIN5, WM_AIN4 "+" WM_AIN5, WM_AIN1 "+" WM_AIN4 "+" WM_AIN5, WM_AIN2 "+" WM_AIN4 "+" WM_AIN5, WM_AIN1 "+" WM_AIN2 "+" WM_AIN4 "+" WM_AIN5, WM_AIN3 "+" WM_AIN4 "+" WM_AIN5, WM_AIN1 "+" WM_AIN3 "+" WM_AIN4 "+" WM_AIN5, WM_AIN2 "+" WM_AIN3 "+" WM_AIN4 "+" WM_AIN5, WM_AIN1 "+" WM_AIN2 "+" WM_AIN3 "+" WM_AIN4 "+" WM_AIN5 }; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = 32; if (uinfo->value.enumerated.item > 31) uinfo->value.enumerated.item = 31; strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); return 0; } static int wm_adc_mux_enum_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); mutex_lock(&ice->gpio_mutex); ucontrol->value.integer.value[0] = wm_get(ice, WM_ADC_MUX) & 0x1f; mutex_unlock(&ice->gpio_mutex); return 0; } static int wm_adc_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned short oval, nval; int change = 0; mutex_lock(&ice->gpio_mutex); oval = wm_get(ice, WM_ADC_MUX); nval = (oval & 0xe0) | ucontrol->value.integer.value[0]; 
if (nval != oval) { wm_put(ice, WM_ADC_MUX, nval); change = 1; } mutex_unlock(&ice->gpio_mutex); return change; } /* KONSTI */ /* * ADC gain mixer control (-64dB to 0dB) */ #define ADC_0dB 0xcf #define ADC_RES 128 #define ADC_MIN (ADC_0dB - ADC_RES) static int wm_adc_vol_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; /* mute (-64dB) */ uinfo->value.integer.max = ADC_RES; /* 0dB, 0.5dB step */ return 0; } static int wm_adc_vol_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned short val; int i; mutex_lock(&ice->gpio_mutex); for (i = 0; i < 2; i++) { val = wm_get(ice, WM_ADC_ATTEN_L + i) & 0xff; val = val > ADC_MIN ? (val - ADC_MIN) : 0; ucontrol->value.integer.value[i] = val; } mutex_unlock(&ice->gpio_mutex); return 0; } static int wm_adc_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned short ovol, nvol; int i, idx, change = 0; mutex_lock(&ice->gpio_mutex); for (i = 0; i < 2; i++) { nvol = ucontrol->value.integer.value[i]; nvol = nvol ? (nvol + ADC_MIN) : 0; idx = WM_ADC_ATTEN_L + i; ovol = wm_get(ice, idx) & 0xff; if (ovol != nvol) { wm_put(ice, idx, nvol); change = 1; } } mutex_unlock(&ice->gpio_mutex); return change; } /* * ADC input mux mixer control */ #define wm_adc_mux_info snd_ctl_boolean_mono_info static int wm_adc_mux_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); int bit = kcontrol->private_value; mutex_lock(&ice->gpio_mutex); ucontrol->value.integer.value[0] = (wm_get(ice, WM_ADC_MUX) & (1 << bit)) ? 
1 : 0; mutex_unlock(&ice->gpio_mutex); return 0; } static int wm_adc_mux_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); int bit = kcontrol->private_value; unsigned short oval, nval; int change; mutex_lock(&ice->gpio_mutex); nval = oval = wm_get(ice, WM_ADC_MUX); if (ucontrol->value.integer.value[0]) nval |= (1 << bit); else nval &= ~(1 << bit); change = nval != oval; if (change) { wm_put(ice, WM_ADC_MUX, nval); } mutex_unlock(&ice->gpio_mutex); return 0; } /* * Analog bypass (In -> Out) */ #define wm_bypass_info snd_ctl_boolean_mono_info static int wm_bypass_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); mutex_lock(&ice->gpio_mutex); ucontrol->value.integer.value[0] = (wm_get(ice, WM_OUT_MUX) & 0x04) ? 1 : 0; mutex_unlock(&ice->gpio_mutex); return 0; } static int wm_bypass_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned short val, oval; int change = 0; mutex_lock(&ice->gpio_mutex); val = oval = wm_get(ice, WM_OUT_MUX); if (ucontrol->value.integer.value[0]) val |= 0x04; else val &= ~0x04; if (val != oval) { wm_put(ice, WM_OUT_MUX, val); change = 1; } mutex_unlock(&ice->gpio_mutex); return change; } /* * Left/Right swap */ #define wm_chswap_info snd_ctl_boolean_mono_info static int wm_chswap_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); mutex_lock(&ice->gpio_mutex); ucontrol->value.integer.value[0] = (wm_get(ice, WM_DAC_CTRL1) & 0xf0) != 0x90; mutex_unlock(&ice->gpio_mutex); return 0; } static int wm_chswap_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned short val, oval; int change = 0; mutex_lock(&ice->gpio_mutex); oval = wm_get(ice, 
WM_DAC_CTRL1); val = oval & 0x0f; if (ucontrol->value.integer.value[0]) val |= 0x60; else val |= 0x90; if (val != oval) { wm_put(ice, WM_DAC_CTRL1, val); wm_put_nocache(ice, WM_DAC_CTRL1, val); change = 1; } mutex_unlock(&ice->gpio_mutex); return change; } /* * mixers */ static struct snd_kcontrol_new prodigy_hifi_controls[] __devinitdata = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .name = "Master Playback Volume", .info = wm_master_vol_info, .get = wm_master_vol_get, .put = wm_master_vol_put, .tlv = { .p = db_scale_wm_dac } }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .name = "Front Playback Volume", .info = wm_dac_vol_info, .get = wm_dac_vol_get, .put = wm_dac_vol_put, .tlv = { .p = db_scale_wm_dac }, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .name = "Rear Playback Volume", .info = wm8766_vol_info, .get = wm8766_vol_get, .put = wm8766_vol_put, .private_value = (2 << 8) | 0, .tlv = { .p = db_scale_wm_dac }, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .name = "Center Playback Volume", .info = wm8766_vol_info, .get = wm8766_vol_get, .put = wm8766_vol_put, .private_value = (1 << 8) | 4, .tlv = { .p = db_scale_wm_dac } }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .name = "LFE Playback Volume", .info = wm8766_vol_info, .get = wm8766_vol_get, .put = wm8766_vol_put, .private_value = (1 << 8) | 5, .tlv = { .p = db_scale_wm_dac } }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .name = "Side Playback Volume", .info = wm8766_vol_info, .get = wm8766_vol_get, .put = wm8766_vol_put, .private_value = (2 << 8) | 6, .tlv = { .p = 
db_scale_wm_dac }, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .name = "Capture Volume", .info = wm_adc_vol_info, .get = wm_adc_vol_get, .put = wm_adc_vol_put, .tlv = { .p = db_scale_wm_dac }, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "CD Capture Switch", .info = wm_adc_mux_info, .get = wm_adc_mux_get, .put = wm_adc_mux_put, .private_value = 0, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Line Capture Switch", .info = wm_adc_mux_info, .get = wm_adc_mux_get, .put = wm_adc_mux_put, .private_value = 1, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Analog Bypass Switch", .info = wm_bypass_info, .get = wm_bypass_get, .put = wm_bypass_put, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Swap Output Channels", .info = wm_chswap_info, .get = wm_chswap_get, .put = wm_chswap_put, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Analog Capture Source", .info = wm_adc_mux_enum_info, .get = wm_adc_mux_enum_get, .put = wm_adc_mux_enum_put, }, }; /* * WM codec registers */ static void wm_proc_regs_write(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_ice1712 *ice = entry->private_data; char line[64]; unsigned int reg, val; mutex_lock(&ice->gpio_mutex); while (!snd_info_get_line(buffer, line, sizeof(line))) { if (sscanf(line, "%x %x", &reg, &val) != 2) continue; if (reg <= 0x17 && val <= 0xffff) wm_put(ice, reg, val); } mutex_unlock(&ice->gpio_mutex); } static void wm_proc_regs_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_ice1712 *ice = entry->private_data; int reg, val; mutex_lock(&ice->gpio_mutex); for (reg = 0; reg <= 0x17; reg++) { val = wm_get(ice, reg); snd_iprintf(buffer, "%02x = %04x\n", reg, val); } mutex_unlock(&ice->gpio_mutex); } static void wm_proc_init(struct snd_ice1712 *ice) { struct snd_info_entry *entry; if (!snd_card_proc_new(ice->card, "wm_codec", &entry)) { snd_info_set_text_ops(entry, ice, 
wm_proc_regs_read); entry->mode |= S_IWUSR; entry->c.text.write = wm_proc_regs_write; } } static int __devinit prodigy_hifi_add_controls(struct snd_ice1712 *ice) { unsigned int i; int err; for (i = 0; i < ARRAY_SIZE(prodigy_hifi_controls); i++) { err = snd_ctl_add(ice->card, snd_ctl_new1(&prodigy_hifi_controls[i], ice)); if (err < 0) return err; } wm_proc_init(ice); return 0; } static int __devinit prodigy_hd2_add_controls(struct snd_ice1712 *ice) { unsigned int i; int err; for (i = 0; i < ARRAY_SIZE(prodigy_hd2_controls); i++) { err = snd_ctl_add(ice->card, snd_ctl_new1(&prodigy_hd2_controls[i], ice)); if (err < 0) return err; } wm_proc_init(ice); return 0; } /* * initialize the chip */ static int __devinit prodigy_hifi_init(struct snd_ice1712 *ice) { static unsigned short wm_inits[] = { /* These come first to reduce init pop noise */ WM_ADC_MUX, 0x0003, /* ADC mute */ /* 0x00c0 replaced by 0x0003 */ WM_DAC_MUTE, 0x0001, /* DAC softmute */ WM_DAC_CTRL1, 0x0000, /* DAC mute */ WM_POWERDOWN, 0x0008, /* All power-up except HP */ WM_RESET, 0x0000, /* reset */ }; static unsigned short wm_inits2[] = { WM_MASTER_CTRL, 0x0022, /* 256fs, slave mode */ WM_DAC_INT, 0x0022, /* I2S, normal polarity, 24bit */ WM_ADC_INT, 0x0022, /* I2S, normal polarity, 24bit */ WM_DAC_CTRL1, 0x0090, /* DAC L/R */ WM_OUT_MUX, 0x0001, /* OUT DAC */ WM_HP_ATTEN_L, 0x0179, /* HP 0dB */ WM_HP_ATTEN_R, 0x0179, /* HP 0dB */ WM_DAC_ATTEN_L, 0x0000, /* DAC 0dB */ WM_DAC_ATTEN_L, 0x0100, /* DAC 0dB */ WM_DAC_ATTEN_R, 0x0000, /* DAC 0dB */ WM_DAC_ATTEN_R, 0x0100, /* DAC 0dB */ WM_PHASE_SWAP, 0x0000, /* phase normal */ #if 0 WM_DAC_MASTER, 0x0100, /* DAC master muted */ #endif WM_DAC_CTRL2, 0x0000, /* no deemphasis, no ZFLG */ WM_ADC_ATTEN_L, 0x0000, /* ADC muted */ WM_ADC_ATTEN_R, 0x0000, /* ADC muted */ #if 1 WM_ALC_CTRL1, 0x007b, /* */ WM_ALC_CTRL2, 0x0000, /* */ WM_ALC_CTRL3, 0x0000, /* */ WM_NOISE_GATE, 0x0000, /* */ #endif WM_DAC_MUTE, 0x0000, /* DAC unmute */ WM_ADC_MUX, 0x0003, /* ADC unmute, both 
CD/Line On */ }; static unsigned short wm8766_inits[] = { WM8766_RESET, 0x0000, WM8766_DAC_CTRL, 0x0120, WM8766_INT_CTRL, 0x0022, /* I2S Normal Mode, 24 bit */ WM8766_DAC_CTRL2, 0x0001, WM8766_DAC_CTRL3, 0x0080, WM8766_LDA1, 0x0100, WM8766_LDA2, 0x0100, WM8766_LDA3, 0x0100, WM8766_RDA1, 0x0100, WM8766_RDA2, 0x0100, WM8766_RDA3, 0x0100, WM8766_MUTE1, 0x0000, WM8766_MUTE2, 0x0000, }; struct prodigy_hifi_spec *spec; unsigned int i; ice->vt1720 = 0; ice->vt1724 = 1; ice->num_total_dacs = 8; ice->num_total_adcs = 1; /* HACK - use this as the SPDIF source. * don't call snd_ice1712_gpio_get/put(), otherwise it's overwritten */ ice->gpio.saved[0] = 0; /* to remember the register values */ ice->akm = kzalloc(sizeof(struct snd_akm4xxx), GFP_KERNEL); if (! ice->akm) return -ENOMEM; ice->akm_codecs = 1; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) return -ENOMEM; ice->spec = spec; /* initialize WM8776 codec */ for (i = 0; i < ARRAY_SIZE(wm_inits); i += 2) wm_put(ice, wm_inits[i], wm_inits[i+1]); schedule_timeout_uninterruptible(1); for (i = 0; i < ARRAY_SIZE(wm_inits2); i += 2) wm_put(ice, wm_inits2[i], wm_inits2[i+1]); /* initialize WM8766 codec */ for (i = 0; i < ARRAY_SIZE(wm8766_inits); i += 2) wm8766_spi_write(ice, wm8766_inits[i], wm8766_inits[i+1]); return 0; } /* * initialize the chip */ static void ak4396_init(struct snd_ice1712 *ice) { static unsigned short ak4396_inits[] = { AK4396_CTRL1, 0x87, /* I2S Normal Mode, 24 bit */ AK4396_CTRL2, 0x02, AK4396_CTRL3, 0x00, AK4396_LCH_ATT, 0x00, AK4396_RCH_ATT, 0x00, }; unsigned int i; /* initialize ak4396 codec */ /* reset codec */ ak4396_write(ice, AK4396_CTRL1, 0x86); msleep(100); ak4396_write(ice, AK4396_CTRL1, 0x87); for (i = 0; i < ARRAY_SIZE(ak4396_inits); i += 2) ak4396_write(ice, ak4396_inits[i], ak4396_inits[i+1]); } #ifdef CONFIG_PM static int prodigy_hd2_resume(struct snd_ice1712 *ice) { /* initialize ak4396 codec and restore previous mixer volumes */ struct prodigy_hifi_spec *spec = ice->spec; int i; 
mutex_lock(&ice->gpio_mutex); ak4396_init(ice); for (i = 0; i < 2; i++) ak4396_write(ice, AK4396_LCH_ATT + i, spec->vol[i] & 0xff); mutex_unlock(&ice->gpio_mutex); return 0; } #endif static int __devinit prodigy_hd2_init(struct snd_ice1712 *ice) { struct prodigy_hifi_spec *spec; ice->vt1720 = 0; ice->vt1724 = 1; ice->num_total_dacs = 1; ice->num_total_adcs = 1; /* HACK - use this as the SPDIF source. * don't call snd_ice1712_gpio_get/put(), otherwise it's overwritten */ ice->gpio.saved[0] = 0; /* to remember the register values */ ice->akm = kzalloc(sizeof(struct snd_akm4xxx), GFP_KERNEL); if (! ice->akm) return -ENOMEM; ice->akm_codecs = 1; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) return -ENOMEM; ice->spec = spec; #ifdef CONFIG_PM ice->pm_resume = &prodigy_hd2_resume; ice->pm_suspend_enabled = 1; #endif ak4396_init(ice); return 0; } static unsigned char prodigy71hifi_eeprom[] __devinitdata = { 0x4b, /* SYSCONF: clock 512, spdif-in/ADC, 4DACs */ 0x80, /* ACLINK: I2S */ 0xfc, /* I2S: vol, 96k, 24bit, 192k */ 0xc3, /* SPDIF: out-en, out-int, spdif-in */ 0xff, /* GPIO_DIR */ 0xff, /* GPIO_DIR1 */ 0x5f, /* GPIO_DIR2 */ 0x00, /* GPIO_MASK */ 0x00, /* GPIO_MASK1 */ 0x00, /* GPIO_MASK2 */ 0x00, /* GPIO_STATE */ 0x00, /* GPIO_STATE1 */ 0x00, /* GPIO_STATE2 */ }; static unsigned char prodigyhd2_eeprom[] __devinitdata = { 0x4b, /* SYSCONF: clock 512, spdif-in/ADC, 4DACs */ 0x80, /* ACLINK: I2S */ 0xfc, /* I2S: vol, 96k, 24bit, 192k */ 0xc3, /* SPDIF: out-en, out-int, spdif-in */ 0xff, /* GPIO_DIR */ 0xff, /* GPIO_DIR1 */ 0x5f, /* GPIO_DIR2 */ 0x00, /* GPIO_MASK */ 0x00, /* GPIO_MASK1 */ 0x00, /* GPIO_MASK2 */ 0x00, /* GPIO_STATE */ 0x00, /* GPIO_STATE1 */ 0x00, /* GPIO_STATE2 */ }; static unsigned char fortissimo4_eeprom[] __devinitdata = { 0x43, /* SYSCONF: clock 512, ADC, 4DACs */ 0x80, /* ACLINK: I2S */ 0xfc, /* I2S: vol, 96k, 24bit, 192k */ 0xc1, /* SPDIF: out-en, out-int */ 0xff, /* GPIO_DIR */ 0xff, /* GPIO_DIR1 */ 0x5f, /* GPIO_DIR2 */ 0x00, /* GPIO_MASK 
*/ 0x00, /* GPIO_MASK1 */ 0x00, /* GPIO_MASK2 */ 0x00, /* GPIO_STATE */ 0x00, /* GPIO_STATE1 */ 0x00, /* GPIO_STATE2 */ }; /* entry point */ struct snd_ice1712_card_info snd_vt1724_prodigy_hifi_cards[] __devinitdata = { { .subvendor = VT1724_SUBDEVICE_PRODIGY_HIFI, .name = "Audiotrak Prodigy 7.1 HiFi", .model = "prodigy71hifi", .chip_init = prodigy_hifi_init, .build_controls = prodigy_hifi_add_controls, .eeprom_size = sizeof(prodigy71hifi_eeprom), .eeprom_data = prodigy71hifi_eeprom, .driver = "Prodigy71HIFI", }, { .subvendor = VT1724_SUBDEVICE_PRODIGY_HD2, .name = "Audiotrak Prodigy HD2", .model = "prodigyhd2", .chip_init = prodigy_hd2_init, .build_controls = prodigy_hd2_add_controls, .eeprom_size = sizeof(prodigyhd2_eeprom), .eeprom_data = prodigyhd2_eeprom, .driver = "Prodigy71HD2", }, { .subvendor = VT1724_SUBDEVICE_FORTISSIMO4, .name = "Hercules Fortissimo IV", .model = "fortissimo4", .chip_init = prodigy_hifi_init, .build_controls = prodigy_hifi_add_controls, .eeprom_size = sizeof(fortissimo4_eeprom), .eeprom_data = fortissimo4_eeprom, .driver = "Fortissimo4", }, { } /* terminator */ };
gpl-2.0
invisiblek/android_kernel_lge_dory
arch/mips/lasat/interrupt.c
9874
3483
/*
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Routines for generic manipulation of the interrupts found on the
 * Lasat boards.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <asm/irq_cpu.h>
#include <asm/lasat/lasat.h>
#include <asm/lasat/lasatint.h>

#include <irq.h>

/*
 * Memory-mapped interrupt status and mask registers, plus the bit offset
 * of the mask field within the mask register.  All three are selected per
 * board revision in arch_init_irq() below.
 */
static volatile int *lasat_int_status;
static volatile int *lasat_int_mask;
static volatile int lasat_int_mask_shift;

/* irq_chip .irq_mask: clear this IRQ's bit in the hardware mask field. */
void disable_lasat_irq(struct irq_data *d)
{
	unsigned int irq_nr = d->irq - LASAT_IRQ_BASE;

	/*
	 * NOTE(review): C precedence makes this
	 * (~(1 << irq_nr)) << lasat_int_mask_shift.  With a non-zero
	 * mask shift (LASAT 200, shift 16) the left shift zero-fills the
	 * low 16 bits, so the AND also writes zeroes to every bit below
	 * the mask field — not just the one IRQ bit.  That may be
	 * deliberate if the low bits are write-one-to-clear status bits
	 * sharing this register (get_int_status_200() reads status and
	 * mask from one register); confirm against the board docs before
	 * re-parenthesizing to ~((1 << irq_nr) << lasat_int_mask_shift).
	 */
	*lasat_int_mask &= ~(1 << irq_nr) << lasat_int_mask_shift;
}

/* irq_chip .irq_unmask: set this IRQ's bit in the hardware mask field. */
void enable_lasat_irq(struct irq_data *d)
{
	unsigned int irq_nr = d->irq - LASAT_IRQ_BASE;

	*lasat_int_mask |= (1 << irq_nr) << lasat_int_mask_shift;
}

static struct irq_chip lasat_irq_type = {
	.name = "Lasat",
	.irq_mask = disable_lasat_irq,
	.irq_unmask = enable_lasat_irq,
};

/*
 * Return the index (0..31) of the least significant set bit of x via a
 * branchy binary search; caller must pass a non-zero value.
 */
static inline int ls1bit32(unsigned int x)
{
	int b = 31, s;

	s = 16; if (x << 16 == 0) s = 0; b -= s; x <<= s;
	s = 8;  if (x << 8 == 0) s = 0;  b -= s; x <<= s;
	s = 4;  if (x << 4 == 0) s = 0;  b -= s; x <<= s;
	s = 2;  if (x << 2 == 0) s = 0;  b -= s; x <<= s;
	s = 1;  if (x << 1 == 0) s = 0;  b -= s;

	return b;
}

/* Board-specific "pending & enabled" reader, chosen in arch_init_irq(). */
static unsigned long (*get_int_status)(void);

/* LASAT 100: status and mask live in separate registers. */
static unsigned long get_int_status_100(void)
{
	return *lasat_int_status & *lasat_int_mask;
}

/*
 * LASAT 200: a single register holds status in the low 16 bits and the
 * mask in the high 16 bits, so one read yields both.
 */
static unsigned long get_int_status_200(void)
{
	unsigned long int_status;

	int_status = *lasat_int_status;
	int_status &= (int_status >> LASATINT_MASK_SHIFT_200) & 0xffff;
	return int_status;
}

/* Top-level MIPS interrupt dispatch: CPU timer first, then board IRQs. */
asmlinkage void plat_irq_dispatch(void)
{
	unsigned long int_status;
	unsigned int cause = read_c0_cause();
	int irq;

	if (cause & CAUSEF_IP7) {	/* R4000 count / compare IRQ */
		do_IRQ(7);
		return;
	}

	int_status = get_int_status();

	/* if int_status == 0, then the interrupt has already been cleared */
	if (int_status) {
		/* Dispatch only the lowest pending IRQ per invocation. */
		irq = LASAT_IRQ_BASE + ls1bit32(int_status);
		do_IRQ(irq);
	}
}

static struct irqaction cascade = {
	.handler = no_action,
	.name = "cascade",
	.flags = IRQF_NO_THREAD,
};

/*
 * Select the register layout for this board revision, mask all board
 * IRQs, and register the Lasat irq_chip for every board IRQ line.
 */
void __init arch_init_irq(void)
{
	int i;

	if (IS_LASAT_200()) {
		lasat_int_status = (void *)LASAT_INT_STATUS_REG_200;
		lasat_int_mask = (void *)LASAT_INT_MASK_REG_200;
		lasat_int_mask_shift = LASATINT_MASK_SHIFT_200;
		get_int_status = get_int_status_200;
		/* Clear the high-half mask bits; keep the status half. */
		*lasat_int_mask &= 0xffff;
	} else {
		lasat_int_status = (void *)LASAT_INT_STATUS_REG_100;
		lasat_int_mask = (void *)LASAT_INT_MASK_REG_100;
		lasat_int_mask_shift = LASATINT_MASK_SHIFT_100;
		get_int_status = get_int_status_100;
		*lasat_int_mask = 0;
	}

	mips_cpu_irq_init();

	for (i = LASAT_IRQ_BASE; i <= LASAT_IRQ_END; i++)
		irq_set_chip_and_handler(i, &lasat_irq_type,
					 handle_level_irq);

	setup_irq(LASAT_CASCADE_IRQ, &cascade);
}
gpl-2.0
1N4148/android_kernel_samsung_msm7x27a
drivers/ide/ide-generic.c
14482
3963
/*
 * generic/default IDE host driver
 *
 * Copyright (C) 2004, 2008-2009 Bartlomiej Zolnierkiewicz
 * This code was split off from ide.c. See it for original copyrights.
 *
 * May be copied or modified under the terms of the GNU General Public License.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/ide.h>
#include <linux/pci_ids.h>

/* FIXME: convert arm and m32r to use ide_platform host driver */
#ifdef CONFIG_ARM
#include <asm/irq.h>
#endif
#ifdef CONFIG_M32R
#include <asm/m32r.h>
#endif

#define DRV_NAME "ide_generic"

/* Bit i of probe_mask enables probing of legacy_bases[i]/legacy_irqs[i]. */
static int probe_mask;
module_param(probe_mask, int, 0);
MODULE_PARM_DESC(probe_mask, "probe mask for legacy ISA IDE ports");

static const struct ide_port_info ide_generic_port_info = {
	.host_flags = IDE_HFLAG_NO_DMA,
	.chipset = ide_generic,
};

/* Per-architecture table of legacy I/O bases and their IRQ lines. */
#ifdef CONFIG_ARM
static const u16 legacy_bases[] = { 0x1f0 };
static const int legacy_irqs[] = { IRQ_HARDDISK };
#elif defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_MAPPI2) || \
      defined(CONFIG_PLAT_OPSPUT)
static const u16 legacy_bases[] = { 0x1f0 };
static const int legacy_irqs[] = { PLD_IRQ_CFIREQ };
#elif defined(CONFIG_PLAT_MAPPI3)
static const u16 legacy_bases[] = { 0x1f0, 0x170 };
static const int legacy_irqs[] = { PLD_IRQ_CFIREQ, PLD_IRQ_IDEIREQ };
#elif defined(CONFIG_ALPHA)
static const u16 legacy_bases[] = { 0x1f0, 0x170, 0x1e8, 0x168 };
static const int legacy_irqs[] = { 14, 15, 11, 10 };
#else
static const u16 legacy_bases[] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
static const int legacy_irqs[] = { 14, 15, 11, 10, 8, 12 };
#endif

/*
 * Detect whether any PCI device already owns the legacy primary (0x1f0)
 * or secondary (0x170) I/O range, so those ports are skipped by default.
 * Sets *primary / *secondary to 1 when claimed; no-op without CONFIG_PCI.
 */
static void ide_generic_check_pci_legacy_iobases(int *primary, int *secondary)
{
#ifdef CONFIG_PCI
	struct pci_dev *p = NULL;
	u16 val;

	for_each_pci_dev(p) {
		if (pci_resource_start(p, 0) == 0x1f0)
			*primary = 1;
		if (pci_resource_start(p, 2) == 0x170)
			*secondary = 1;

		/* Cyrix CS55{1,2}0 pre SFF MWDMA ATA on the bridge */
		if (p->vendor == PCI_VENDOR_ID_CYRIX &&
		    (p->device == PCI_DEVICE_ID_CYRIX_5510 ||
		     p->device == PCI_DEVICE_ID_CYRIX_5520))
			*primary = *secondary = 1;

		/* Intel MPIIX - PIO ATA on non PCI side of bridge */
		if (p->vendor == PCI_VENDOR_ID_INTEL &&
		    p->device == PCI_DEVICE_ID_INTEL_82371MX) {
			pci_read_config_word(p, 0x6C, &val);
			if (val & 0x8000) {
				/* ATA port enabled */
				if (val & 0x4000)
					*secondary = 1;
				else
					*primary = 1;
			}
		}
	}
#endif
}

/*
 * Probe each enabled legacy port: reserve its 8-byte command block and
 * the single byte at base + 0x206 (used as the control port by
 * ide_std_init_ports), then register an IDE host on it.
 *
 * NOTE(review): rc holds only the outcome of the *last* attempted port;
 * an earlier -EBUSY is overwritten if a later port succeeds — presumably
 * acceptable for this best-effort legacy driver, but worth confirming.
 */
static int __init ide_generic_init(void)
{
	struct ide_hw hw, *hws[] = { &hw };
	unsigned long io_addr;
	int i, rc = 0, primary = 0, secondary = 0;

	ide_generic_check_pci_legacy_iobases(&primary, &secondary);

	if (!probe_mask) {
		printk(KERN_INFO DRV_NAME ": please use \"probe_mask=0x3f\" "
		     "module parameter for probing all legacy ISA IDE ports\n");

		/* Default: probe only ranges no PCI device has claimed. */
		if (primary == 0)
			probe_mask |= 0x1;

		if (secondary == 0)
			probe_mask |= 0x2;
	} else
		printk(KERN_INFO DRV_NAME ": enforcing probing of I/O ports "
			"upon user request\n");

	for (i = 0; i < ARRAY_SIZE(legacy_bases); i++) {
		io_addr = legacy_bases[i];

		if ((probe_mask & (1 << i)) && io_addr) {
			if (!request_region(io_addr, 8, DRV_NAME)) {
				printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX "
						"not free.\n",
						DRV_NAME, io_addr,
						io_addr + 7);
				rc = -EBUSY;
				continue;
			}

			if (!request_region(io_addr + 0x206, 1, DRV_NAME)) {
				printk(KERN_ERR "%s: I/O resource 0x%lX "
						"not free.\n",
						DRV_NAME, io_addr + 0x206);
				/* Undo the command-block reservation. */
				release_region(io_addr, 8);
				rc = -EBUSY;
				continue;
			}

			memset(&hw, 0, sizeof(hw));
			ide_std_init_ports(&hw, io_addr, io_addr + 0x206);
#ifdef CONFIG_IA64
			hw.irq = isa_irq_to_vector(legacy_irqs[i]);
#else
			hw.irq = legacy_irqs[i];
#endif
			rc = ide_host_add(&ide_generic_port_info, hws, 1,
					  NULL);
			if (rc) {
				/* Host registration failed: drop both regions. */
				release_region(io_addr + 0x206, 1);
				release_region(io_addr, 8);
			}
		}
	}

	return rc;
}
module_init(ide_generic_init);

MODULE_LICENSE("GPL");
gpl-2.0
cfriedt/bluetooth-next
drivers/gpio/gpio-sx150x.c
147
16731
/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ #include <linux/gpio.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/i2c/sx150x.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_gpio.h> #include <linux/of_device.h> #define NO_UPDATE_PENDING -1 /* The chip models of sx150x */ #define SX150X_456 0 #define SX150X_789 1 struct sx150x_456_pri { u8 reg_pld_mode; u8 reg_pld_table0; u8 reg_pld_table1; u8 reg_pld_table2; u8 reg_pld_table3; u8 reg_pld_table4; u8 reg_advance; }; struct sx150x_789_pri { u8 reg_drain; u8 reg_polarity; u8 reg_clock; u8 reg_misc; u8 reg_reset; u8 ngpios; }; struct sx150x_device_data { u8 model; u8 reg_pullup; u8 reg_pulldn; u8 reg_dir; u8 reg_data; u8 reg_irq_mask; u8 reg_irq_src; u8 reg_sense; u8 ngpios; union { struct sx150x_456_pri x456; struct sx150x_789_pri x789; } pri; }; struct sx150x_chip { struct gpio_chip gpio_chip; struct i2c_client *client; const struct sx150x_device_data *dev_cfg; int irq_summary; int irq_base; int irq_update; u32 irq_sense; u32 irq_masked; u32 dev_sense; u32 dev_masked; struct irq_chip irq_chip; struct mutex lock; }; static const struct sx150x_device_data 
sx150x_devices[] = { [0] = { /* sx1508q */ .model = SX150X_789, .reg_pullup = 0x03, .reg_pulldn = 0x04, .reg_dir = 0x07, .reg_data = 0x08, .reg_irq_mask = 0x09, .reg_irq_src = 0x0c, .reg_sense = 0x0b, .pri.x789 = { .reg_drain = 0x05, .reg_polarity = 0x06, .reg_clock = 0x0f, .reg_misc = 0x10, .reg_reset = 0x7d, }, .ngpios = 8, }, [1] = { /* sx1509q */ .model = SX150X_789, .reg_pullup = 0x07, .reg_pulldn = 0x09, .reg_dir = 0x0f, .reg_data = 0x11, .reg_irq_mask = 0x13, .reg_irq_src = 0x19, .reg_sense = 0x17, .pri.x789 = { .reg_drain = 0x0b, .reg_polarity = 0x0d, .reg_clock = 0x1e, .reg_misc = 0x1f, .reg_reset = 0x7d, }, .ngpios = 16 }, [2] = { /* sx1506q */ .model = SX150X_456, .reg_pullup = 0x05, .reg_pulldn = 0x07, .reg_dir = 0x03, .reg_data = 0x01, .reg_irq_mask = 0x09, .reg_irq_src = 0x0f, .reg_sense = 0x0d, .pri.x456 = { .reg_pld_mode = 0x21, .reg_pld_table0 = 0x23, .reg_pld_table1 = 0x25, .reg_pld_table2 = 0x27, .reg_pld_table3 = 0x29, .reg_pld_table4 = 0x2b, .reg_advance = 0xad, }, .ngpios = 16 }, }; static const struct i2c_device_id sx150x_id[] = { {"sx1508q", 0}, {"sx1509q", 1}, {"sx1506q", 2}, {} }; MODULE_DEVICE_TABLE(i2c, sx150x_id); static const struct of_device_id sx150x_of_match[] = { { .compatible = "semtech,sx1508q" }, { .compatible = "semtech,sx1509q" }, { .compatible = "semtech,sx1506q" }, {}, }; MODULE_DEVICE_TABLE(of, sx150x_of_match); struct sx150x_chip *to_sx150x(struct gpio_chip *gc) { return container_of(gc, struct sx150x_chip, gpio_chip); } static s32 sx150x_i2c_write(struct i2c_client *client, u8 reg, u8 val) { s32 err = i2c_smbus_write_byte_data(client, reg, val); if (err < 0) dev_warn(&client->dev, "i2c write fail: can't write %02x to %02x: %d\n", val, reg, err); return err; } static s32 sx150x_i2c_read(struct i2c_client *client, u8 reg, u8 *val) { s32 err = i2c_smbus_read_byte_data(client, reg); if (err >= 0) *val = err; else dev_warn(&client->dev, "i2c read fail: can't read from %02x: %d\n", reg, err); return err; } static inline bool 
offset_is_oscio(struct sx150x_chip *chip, unsigned offset) { return (chip->dev_cfg->ngpios == offset); } /* * These utility functions solve the common problem of locating and setting * configuration bits. Configuration bits are grouped into registers * whose indexes increase downwards. For example, with eight-bit registers, * sixteen gpios would have their config bits grouped in the following order: * REGISTER N-1 [ f e d c b a 9 8 ] * N [ 7 6 5 4 3 2 1 0 ] * * For multi-bit configurations, the pattern gets wider: * REGISTER N-3 [ f f e e d d c c ] * N-2 [ b b a a 9 9 8 8 ] * N-1 [ 7 7 6 6 5 5 4 4 ] * N [ 3 3 2 2 1 1 0 0 ] * * Given the address of the starting register 'N', the index of the gpio * whose configuration we seek to change, and the width in bits of that * configuration, these functions allow us to locate the correct * register and mask the correct bits. */ static inline void sx150x_find_cfg(u8 offset, u8 width, u8 *reg, u8 *mask, u8 *shift) { *reg -= offset * width / 8; *mask = (1 << width) - 1; *shift = (offset * width) % 8; *mask <<= *shift; } static s32 sx150x_write_cfg(struct sx150x_chip *chip, u8 offset, u8 width, u8 reg, u8 val) { u8 mask; u8 data; u8 shift; s32 err; sx150x_find_cfg(offset, width, &reg, &mask, &shift); err = sx150x_i2c_read(chip->client, reg, &data); if (err < 0) return err; data &= ~mask; data |= (val << shift) & mask; return sx150x_i2c_write(chip->client, reg, data); } static int sx150x_get_io(struct sx150x_chip *chip, unsigned offset) { u8 reg = chip->dev_cfg->reg_data; u8 mask; u8 data; u8 shift; s32 err; sx150x_find_cfg(offset, 1, &reg, &mask, &shift); err = sx150x_i2c_read(chip->client, reg, &data); if (err >= 0) err = (data & mask) != 0 ? 1 : 0; return err; } static void sx150x_set_oscio(struct sx150x_chip *chip, int val) { sx150x_i2c_write(chip->client, chip->dev_cfg->pri.x789.reg_clock, (val ? 
0x1f : 0x10)); } static void sx150x_set_io(struct sx150x_chip *chip, unsigned offset, int val) { sx150x_write_cfg(chip, offset, 1, chip->dev_cfg->reg_data, (val ? 1 : 0)); } static int sx150x_io_input(struct sx150x_chip *chip, unsigned offset) { return sx150x_write_cfg(chip, offset, 1, chip->dev_cfg->reg_dir, 1); } static int sx150x_io_output(struct sx150x_chip *chip, unsigned offset, int val) { int err; err = sx150x_write_cfg(chip, offset, 1, chip->dev_cfg->reg_data, (val ? 1 : 0)); if (err >= 0) err = sx150x_write_cfg(chip, offset, 1, chip->dev_cfg->reg_dir, 0); return err; } static int sx150x_gpio_get(struct gpio_chip *gc, unsigned offset) { struct sx150x_chip *chip = to_sx150x(gc); int status = -EINVAL; if (!offset_is_oscio(chip, offset)) { mutex_lock(&chip->lock); status = sx150x_get_io(chip, offset); mutex_unlock(&chip->lock); } return status; } static void sx150x_gpio_set(struct gpio_chip *gc, unsigned offset, int val) { struct sx150x_chip *chip = to_sx150x(gc); mutex_lock(&chip->lock); if (offset_is_oscio(chip, offset)) sx150x_set_oscio(chip, val); else sx150x_set_io(chip, offset, val); mutex_unlock(&chip->lock); } static int sx150x_gpio_direction_input(struct gpio_chip *gc, unsigned offset) { struct sx150x_chip *chip = to_sx150x(gc); int status = -EINVAL; if (!offset_is_oscio(chip, offset)) { mutex_lock(&chip->lock); status = sx150x_io_input(chip, offset); mutex_unlock(&chip->lock); } return status; } static int sx150x_gpio_direction_output(struct gpio_chip *gc, unsigned offset, int val) { struct sx150x_chip *chip = to_sx150x(gc); int status = 0; if (!offset_is_oscio(chip, offset)) { mutex_lock(&chip->lock); status = sx150x_io_output(chip, offset, val); mutex_unlock(&chip->lock); } return status; } static void sx150x_irq_mask(struct irq_data *d) { struct sx150x_chip *chip = to_sx150x(irq_data_get_irq_chip_data(d)); unsigned n = d->hwirq; chip->irq_masked |= (1 << n); chip->irq_update = n; } static void sx150x_irq_unmask(struct irq_data *d) { struct 
sx150x_chip *chip = to_sx150x(irq_data_get_irq_chip_data(d)); unsigned n = d->hwirq; chip->irq_masked &= ~(1 << n); chip->irq_update = n; } static int sx150x_irq_set_type(struct irq_data *d, unsigned int flow_type) { struct sx150x_chip *chip = to_sx150x(irq_data_get_irq_chip_data(d)); unsigned n, val = 0; if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) return -EINVAL; n = d->hwirq; if (flow_type & IRQ_TYPE_EDGE_RISING) val |= 0x1; if (flow_type & IRQ_TYPE_EDGE_FALLING) val |= 0x2; chip->irq_sense &= ~(3UL << (n * 2)); chip->irq_sense |= val << (n * 2); chip->irq_update = n; return 0; } static irqreturn_t sx150x_irq_thread_fn(int irq, void *dev_id) { struct sx150x_chip *chip = (struct sx150x_chip *)dev_id; unsigned nhandled = 0; unsigned sub_irq; unsigned n; s32 err; u8 val; int i; for (i = (chip->dev_cfg->ngpios / 8) - 1; i >= 0; --i) { err = sx150x_i2c_read(chip->client, chip->dev_cfg->reg_irq_src - i, &val); if (err < 0) continue; sx150x_i2c_write(chip->client, chip->dev_cfg->reg_irq_src - i, val); for (n = 0; n < 8; ++n) { if (val & (1 << n)) { sub_irq = irq_find_mapping( chip->gpio_chip.irqdomain, (i * 8) + n); handle_nested_irq(sub_irq); ++nhandled; } } } return (nhandled > 0 ? 
IRQ_HANDLED : IRQ_NONE); } static void sx150x_irq_bus_lock(struct irq_data *d) { struct sx150x_chip *chip = to_sx150x(irq_data_get_irq_chip_data(d)); mutex_lock(&chip->lock); } static void sx150x_irq_bus_sync_unlock(struct irq_data *d) { struct sx150x_chip *chip = to_sx150x(irq_data_get_irq_chip_data(d)); unsigned n; if (chip->irq_update == NO_UPDATE_PENDING) goto out; n = chip->irq_update; chip->irq_update = NO_UPDATE_PENDING; /* Avoid updates if nothing changed */ if (chip->dev_sense == chip->irq_sense && chip->dev_masked == chip->irq_masked) goto out; chip->dev_sense = chip->irq_sense; chip->dev_masked = chip->irq_masked; if (chip->irq_masked & (1 << n)) { sx150x_write_cfg(chip, n, 1, chip->dev_cfg->reg_irq_mask, 1); sx150x_write_cfg(chip, n, 2, chip->dev_cfg->reg_sense, 0); } else { sx150x_write_cfg(chip, n, 1, chip->dev_cfg->reg_irq_mask, 0); sx150x_write_cfg(chip, n, 2, chip->dev_cfg->reg_sense, chip->irq_sense >> (n * 2)); } out: mutex_unlock(&chip->lock); } static void sx150x_init_chip(struct sx150x_chip *chip, struct i2c_client *client, kernel_ulong_t driver_data, struct sx150x_platform_data *pdata) { mutex_init(&chip->lock); chip->client = client; chip->dev_cfg = &sx150x_devices[driver_data]; chip->gpio_chip.dev = &client->dev; chip->gpio_chip.label = client->name; chip->gpio_chip.direction_input = sx150x_gpio_direction_input; chip->gpio_chip.direction_output = sx150x_gpio_direction_output; chip->gpio_chip.get = sx150x_gpio_get; chip->gpio_chip.set = sx150x_gpio_set; chip->gpio_chip.base = pdata->gpio_base; chip->gpio_chip.can_sleep = true; chip->gpio_chip.ngpio = chip->dev_cfg->ngpios; #ifdef CONFIG_OF_GPIO chip->gpio_chip.of_node = client->dev.of_node; chip->gpio_chip.of_gpio_n_cells = 2; #endif if (pdata->oscio_is_gpo) ++chip->gpio_chip.ngpio; chip->irq_chip.name = client->name; chip->irq_chip.irq_mask = sx150x_irq_mask; chip->irq_chip.irq_unmask = sx150x_irq_unmask; chip->irq_chip.irq_set_type = sx150x_irq_set_type; chip->irq_chip.irq_bus_lock = 
sx150x_irq_bus_lock; chip->irq_chip.irq_bus_sync_unlock = sx150x_irq_bus_sync_unlock; chip->irq_summary = -1; chip->irq_base = -1; chip->irq_masked = ~0; chip->irq_sense = 0; chip->dev_masked = ~0; chip->dev_sense = 0; chip->irq_update = NO_UPDATE_PENDING; } static int sx150x_init_io(struct sx150x_chip *chip, u8 base, u16 cfg) { int err = 0; unsigned n; for (n = 0; err >= 0 && n < (chip->dev_cfg->ngpios / 8); ++n) err = sx150x_i2c_write(chip->client, base - n, cfg >> (n * 8)); return err; } static int sx150x_reset(struct sx150x_chip *chip) { int err; err = i2c_smbus_write_byte_data(chip->client, chip->dev_cfg->pri.x789.reg_reset, 0x12); if (err < 0) return err; err = i2c_smbus_write_byte_data(chip->client, chip->dev_cfg->pri.x789.reg_reset, 0x34); return err; } static int sx150x_init_hw(struct sx150x_chip *chip, struct sx150x_platform_data *pdata) { int err = 0; if (pdata->reset_during_probe) { err = sx150x_reset(chip); if (err < 0) return err; } if (chip->dev_cfg->model == SX150X_789) err = sx150x_i2c_write(chip->client, chip->dev_cfg->pri.x789.reg_misc, 0x01); else err = sx150x_i2c_write(chip->client, chip->dev_cfg->pri.x456.reg_advance, 0x04); if (err < 0) return err; err = sx150x_init_io(chip, chip->dev_cfg->reg_pullup, pdata->io_pullup_ena); if (err < 0) return err; err = sx150x_init_io(chip, chip->dev_cfg->reg_pulldn, pdata->io_pulldn_ena); if (err < 0) return err; if (chip->dev_cfg->model == SX150X_789) { err = sx150x_init_io(chip, chip->dev_cfg->pri.x789.reg_drain, pdata->io_open_drain_ena); if (err < 0) return err; err = sx150x_init_io(chip, chip->dev_cfg->pri.x789.reg_polarity, pdata->io_polarity); if (err < 0) return err; } else { /* Set all pins to work in normal mode */ err = sx150x_init_io(chip, chip->dev_cfg->pri.x456.reg_pld_mode, 0); if (err < 0) return err; } if (pdata->oscio_is_gpo) sx150x_set_oscio(chip, 0); return err; } static int sx150x_install_irq_chip(struct sx150x_chip *chip, int irq_summary, int irq_base) { int err; chip->irq_summary = 
irq_summary; chip->irq_base = irq_base; /* Add gpio chip to irq subsystem */ err = gpiochip_irqchip_add(&chip->gpio_chip, &chip->irq_chip, chip->irq_base, handle_edge_irq, IRQ_TYPE_EDGE_BOTH); if (err) { dev_err(&chip->client->dev, "could not connect irqchip to gpiochip\n"); return err; } err = devm_request_threaded_irq(&chip->client->dev, irq_summary, NULL, sx150x_irq_thread_fn, IRQF_ONESHOT | IRQF_SHARED | IRQF_TRIGGER_FALLING, chip->irq_chip.name, chip); if (err < 0) { chip->irq_summary = -1; chip->irq_base = -1; } return err; } static int sx150x_probe(struct i2c_client *client, const struct i2c_device_id *id) { static const u32 i2c_funcs = I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WRITE_WORD_DATA; struct sx150x_platform_data *pdata; struct sx150x_chip *chip; int rc; pdata = dev_get_platdata(&client->dev); if (!pdata) return -EINVAL; if (!i2c_check_functionality(client->adapter, i2c_funcs)) return -ENOSYS; chip = devm_kzalloc(&client->dev, sizeof(struct sx150x_chip), GFP_KERNEL); if (!chip) return -ENOMEM; sx150x_init_chip(chip, client, id->driver_data, pdata); rc = sx150x_init_hw(chip, pdata); if (rc < 0) return rc; rc = gpiochip_add(&chip->gpio_chip); if (rc) return rc; if (pdata->irq_summary >= 0) { rc = sx150x_install_irq_chip(chip, pdata->irq_summary, pdata->irq_base); if (rc < 0) goto probe_fail_post_gpiochip_add; } i2c_set_clientdata(client, chip); return 0; probe_fail_post_gpiochip_add: gpiochip_remove(&chip->gpio_chip); return rc; } static int sx150x_remove(struct i2c_client *client) { struct sx150x_chip *chip; chip = i2c_get_clientdata(client); gpiochip_remove(&chip->gpio_chip); return 0; } static struct i2c_driver sx150x_driver = { .driver = { .name = "sx150x", .owner = THIS_MODULE, .of_match_table = of_match_ptr(sx150x_of_match), }, .probe = sx150x_probe, .remove = sx150x_remove, .id_table = sx150x_id, }; static int __init sx150x_init(void) { return i2c_add_driver(&sx150x_driver); } subsys_initcall(sx150x_init); static void __exit sx150x_exit(void) { 
return i2c_del_driver(&sx150x_driver); } module_exit(sx150x_exit); MODULE_AUTHOR("Gregory Bean <gbean@codeaurora.org>"); MODULE_DESCRIPTION("Driver for Semtech SX150X I2C GPIO Expanders"); MODULE_LICENSE("GPL v2");
gpl-2.0
agrabren/android_kernel_htc_shooter
arch/arm/mach-msm/qdsp6v2_1x/snddev_ecodec.c
147
9893
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/io.h> #include <asm/uaccess.h> #include <asm/io.h> #include <mach/clk.h> #include <mach/qdsp6v2_1x/audio_dev_ctl.h> #include <mach/qdsp6v2_1x/apr_audio.h> #include <mach/qdsp6v2_1x/snddev_ecodec.h> #include <mach/qdsp6v2_1x/q6afe.h> #define ECODEC_SAMPLE_RATE 8000 static struct q6v2audio_ecodec_ops default_audio_ops; static struct q6v2audio_ecodec_ops *audio_ops = &default_audio_ops; /* Context for each external codec device */ struct snddev_ecodec_state { struct snddev_ecodec_data *data; u32 sample_rate; }; /* Global state for the driver */ struct snddev_ecodec_drv_state { struct mutex dev_lock; int ref_cnt; /* ensure one rx device at a time */ struct clk *ecodec_clk; }; static struct snddev_ecodec_drv_state snddev_ecodec_drv; struct aux_pcm_state { unsigned int dout; unsigned int din; unsigned int syncout; unsigned int clkin_a; }; static struct aux_pcm_state the_aux_pcm_state; static int aux_pcm_gpios_request(void) { int rc = 0; uint32_t bt_config_gpio[] = { GPIO_CFG(the_aux_pcm_state.dout, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), 
GPIO_CFG(the_aux_pcm_state.din, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG(the_aux_pcm_state.syncout, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG(the_aux_pcm_state.clkin_a, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), }; pr_debug("%s\n", __func__); gpio_tlmm_config(bt_config_gpio[0], GPIO_CFG_ENABLE); gpio_tlmm_config(bt_config_gpio[1], GPIO_CFG_ENABLE); gpio_tlmm_config(bt_config_gpio[2], GPIO_CFG_ENABLE); gpio_tlmm_config(bt_config_gpio[3], GPIO_CFG_ENABLE); return rc; } static void aux_pcm_gpios_free(void) { uint32_t bt_config_gpio[] = { GPIO_CFG(the_aux_pcm_state.dout, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG(the_aux_pcm_state.din, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), GPIO_CFG(the_aux_pcm_state.syncout, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG(the_aux_pcm_state.clkin_a, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), }; pr_debug("%s\n", __func__); gpio_tlmm_config(bt_config_gpio[0], GPIO_CFG_DISABLE); gpio_tlmm_config(bt_config_gpio[1], GPIO_CFG_DISABLE); gpio_tlmm_config(bt_config_gpio[2], GPIO_CFG_DISABLE); gpio_tlmm_config(bt_config_gpio[3], GPIO_CFG_DISABLE); } static int get_aux_pcm_gpios(struct platform_device *pdev) { int rc = 0; struct resource *res; /* Claim all of the GPIOs. 
*/ res = platform_get_resource_byname(pdev, IORESOURCE_IO, "aux_pcm_dout"); if (!res) { pr_aud_err("%s: failed to get gpio AUX PCM DOUT\n", __func__); return -ENODEV; } the_aux_pcm_state.dout = res->start; res = platform_get_resource_byname(pdev, IORESOURCE_IO, "aux_pcm_din"); if (!res) { pr_aud_err("%s: failed to get gpio AUX PCM DIN\n", __func__); return -ENODEV; } the_aux_pcm_state.din = res->start; res = platform_get_resource_byname(pdev, IORESOURCE_IO, "aux_pcm_syncout"); if (!res) { pr_aud_err("%s: failed to get gpio AUX PCM SYNC OUT\n", __func__); return -ENODEV; } the_aux_pcm_state.syncout = res->start; res = platform_get_resource_byname(pdev, IORESOURCE_IO, "aux_pcm_clkin_a"); if (!res) { pr_aud_err("%s: failed to get gpio AUX PCM CLKIN A\n", __func__); return -ENODEV; } the_aux_pcm_state.clkin_a = res->start; pr_aud_info("%s: dout = %u, din = %u , syncout = %u, clkin_a =%u\n", __func__, the_aux_pcm_state.dout, the_aux_pcm_state.din, the_aux_pcm_state.syncout, the_aux_pcm_state.clkin_a); return rc; } static int aux_pcm_probe(struct platform_device *pdev) { int rc = 0; pr_aud_info("%s:\n", __func__); rc = get_aux_pcm_gpios(pdev); if (rc < 0) { pr_aud_err("%s: GPIO configuration failed\n", __func__); return -ENODEV; } return rc; } static struct platform_driver aux_pcm_driver = { .probe = aux_pcm_probe, .driver = { .name = "msm_aux_pcm"} }; static int snddev_ecodec_open(struct msm_snddev_info *dev_info) { int rc; struct snddev_ecodec_drv_state *drv = &snddev_ecodec_drv; union afe_port_config afe_config; pr_debug("%s\n", __func__); mutex_lock(&drv->dev_lock); if (dev_info->opened) { pr_aud_err("%s: ERROR: %s already opened\n", __func__, dev_info->name); mutex_unlock(&drv->dev_lock); return -EBUSY; } if (drv->ref_cnt != 0) { pr_debug("%s: opened %s\n", __func__, dev_info->name); drv->ref_cnt++; mutex_unlock(&drv->dev_lock); return 0; } pr_aud_info("%s: opening %s\n", __func__, dev_info->name); rc = aux_pcm_gpios_request(); if (rc < 0) { pr_aud_err("%s: GPIO 
request failed\n", __func__); return rc; } clk_reset(drv->ecodec_clk, CLK_RESET_ASSERT); afe_config.pcm.mode = AFE_PCM_CFG_MODE_PCM; afe_config.pcm.sync = AFE_PCM_CFG_SYNC_INT; afe_config.pcm.frame = AFE_PCM_CFG_FRM_256BPF; afe_config.pcm.quant = AFE_PCM_CFG_QUANT_LINEAR_NOPAD; afe_config.pcm.slot = 0; afe_config.pcm.data = AFE_PCM_CFG_CDATAOE_MASTER; rc = afe_open(PCM_RX, &afe_config, ECODEC_SAMPLE_RATE); if (rc < 0) { pr_aud_err("%s: afe open failed for PCM_RX\n", __func__); goto err_rx_afe; } rc = afe_open(PCM_TX, &afe_config, ECODEC_SAMPLE_RATE); if (rc < 0) { pr_aud_err("%s: afe open failed for PCM_TX\n", __func__); goto err_tx_afe; } rc = clk_set_rate(drv->ecodec_clk, 2048000); if (rc < 0) { pr_aud_err("%s: clk_set_rate failed\n", __func__); goto err_clk; } clk_enable(drv->ecodec_clk); clk_reset(drv->ecodec_clk, CLK_RESET_DEASSERT); drv->ref_cnt++; mutex_unlock(&drv->dev_lock); return 0; err_clk: afe_close(PCM_TX); err_tx_afe: afe_close(PCM_RX); err_rx_afe: aux_pcm_gpios_free(); mutex_unlock(&drv->dev_lock); return -ENODEV; } int snddev_ecodec_close(struct msm_snddev_info *dev_info) { struct snddev_ecodec_drv_state *drv = &snddev_ecodec_drv; pr_debug("%s: closing %s\n", __func__, dev_info->name); mutex_lock(&drv->dev_lock); if (!dev_info->opened) { pr_aud_err("%s: ERROR: %s is not opened\n", __func__, dev_info->name); mutex_unlock(&drv->dev_lock); return -EPERM; } drv->ref_cnt--; if (drv->ref_cnt == 0) { pr_aud_info("%s: closing all devices\n", __func__); clk_disable(drv->ecodec_clk); aux_pcm_gpios_free(); afe_close(PCM_RX); afe_close(PCM_TX); } mutex_unlock(&drv->dev_lock); return 0; } int snddev_ecodec_set_freq(struct msm_snddev_info *dev_info, u32 rate) { int rc = 0; if (!dev_info) { rc = -EINVAL; goto error; } return ECODEC_SAMPLE_RATE; error: return rc; } void htc_8x60_register_ecodec_ops(struct q6v2audio_ecodec_ops *ops) { audio_ops = ops; } static int snddev_ecodec_probe(struct platform_device *pdev) { int rc = 0; struct snddev_ecodec_data *pdata; 
struct msm_snddev_info *dev_info; struct snddev_ecodec_state *ecodec; pr_aud_info("%s:\n", __func__); if (!pdev || !pdev->dev.platform_data) { printk(KERN_ALERT "Invalid caller\n"); rc = -1; goto error; } pdata = pdev->dev.platform_data; ecodec = kzalloc(sizeof(struct snddev_ecodec_state), GFP_KERNEL); if (!ecodec) { rc = -ENOMEM; goto error; } dev_info = kzalloc(sizeof(struct msm_snddev_info), GFP_KERNEL); if (!dev_info) { kfree(ecodec); rc = -ENOMEM; goto error; } dev_info->name = pdata->name; dev_info->copp_id = pdata->copp_id; dev_info->private_data = (void *)ecodec; dev_info->dev_ops.open = snddev_ecodec_open; dev_info->dev_ops.close = snddev_ecodec_close; dev_info->dev_ops.set_freq = snddev_ecodec_set_freq; dev_info->dev_ops.enable_sidetone = NULL; dev_info->capability = pdata->capability; dev_info->opened = 0; msm_snddev_register(dev_info); ecodec->data = pdata; ecodec->sample_rate = ECODEC_SAMPLE_RATE; /* Default to 8KHz */ error: return rc; } struct platform_driver snddev_ecodec_driver = { .probe = snddev_ecodec_probe, .driver = {.name = "msm_snddev_ecodec"} }; int __init snddev_ecodec_init(void) { int rc = 0; struct snddev_ecodec_drv_state *drv = &snddev_ecodec_drv; pr_aud_info("%s:\n", __func__); mutex_init(&drv->dev_lock); drv->ref_cnt = 0; drv->ecodec_clk = clk_get(NULL, "pcm_clk"); if (IS_ERR(drv->ecodec_clk)) { pr_aud_err("%s: could not get pcm_clk\n", __func__); return PTR_ERR(drv->ecodec_clk); } rc = platform_driver_register(&aux_pcm_driver); if (IS_ERR_VALUE(rc)) { pr_aud_err("%s: platform_driver_register for aux pcm failed\n", __func__); goto error_aux_pcm_platform_driver; } rc = platform_driver_register(&snddev_ecodec_driver); if (IS_ERR_VALUE(rc)) { pr_aud_err("%s: platform_driver_register for ecodec failed\n", __func__); goto error_ecodec_platform_driver; } pr_aud_info("%s: done\n", __func__); return 0; error_ecodec_platform_driver: platform_driver_unregister(&aux_pcm_driver); error_aux_pcm_platform_driver: clk_put(drv->ecodec_clk); 
pr_aud_err("%s: encounter error\n", __func__); return -ENODEV; } device_initcall(snddev_ecodec_init); MODULE_DESCRIPTION("ECodec Sound Device driver"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL v2");
gpl-2.0
Puri321/lge-kernel-bssq
arch/sparc/kernel/irq_64.c
147
24640
/* irq.c: UltraSparc IRQ handling/init/registry. * * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net) * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be) * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz) */ #include <linux/module.h> #include <linux/sched.h> #include <linux/linkage.h> #include <linux/ptrace.h> #include <linux/errno.h> #include <linux/kernel_stat.h> #include <linux/signal.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/random.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/ftrace.h> #include <linux/irq.h> #include <linux/kmemleak.h> #include <asm/ptrace.h> #include <asm/processor.h> #include <asm/atomic.h> #include <asm/system.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/iommu.h> #include <asm/upa.h> #include <asm/oplib.h> #include <asm/prom.h> #include <asm/timer.h> #include <asm/smp.h> #include <asm/starfire.h> #include <asm/uaccess.h> #include <asm/cache.h> #include <asm/cpudata.h> #include <asm/auxio.h> #include <asm/head.h> #include <asm/hypervisor.h> #include <asm/cacheflush.h> #include "entry.h" #include "cpumap.h" #include "kstack.h" #define NUM_IVECS (IMAP_INR + 1) struct ino_bucket *ivector_table; unsigned long ivector_table_pa; /* On several sun4u processors, it is illegal to mix bypass and * non-bypass accesses. Therefore we access all INO buckets * using bypass accesses only. 
*/ static unsigned long bucket_get_chain_pa(unsigned long bucket_pa) { unsigned long ret; __asm__ __volatile__("ldxa [%1] %2, %0" : "=&r" (ret) : "r" (bucket_pa + offsetof(struct ino_bucket, __irq_chain_pa)), "i" (ASI_PHYS_USE_EC)); return ret; } static void bucket_clear_chain_pa(unsigned long bucket_pa) { __asm__ __volatile__("stxa %%g0, [%0] %1" : /* no outputs */ : "r" (bucket_pa + offsetof(struct ino_bucket, __irq_chain_pa)), "i" (ASI_PHYS_USE_EC)); } static unsigned int bucket_get_irq(unsigned long bucket_pa) { unsigned int ret; __asm__ __volatile__("lduwa [%1] %2, %0" : "=&r" (ret) : "r" (bucket_pa + offsetof(struct ino_bucket, __irq)), "i" (ASI_PHYS_USE_EC)); return ret; } static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq) { __asm__ __volatile__("stwa %0, [%1] %2" : /* no outputs */ : "r" (irq), "r" (bucket_pa + offsetof(struct ino_bucket, __irq)), "i" (ASI_PHYS_USE_EC)); } #define irq_work_pa(__cpu) &(trap_block[(__cpu)].irq_worklist_pa) static struct { unsigned int dev_handle; unsigned int dev_ino; unsigned int in_use; } irq_table[NR_IRQS]; static DEFINE_SPINLOCK(irq_alloc_lock); unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino) { unsigned long flags; unsigned char ent; BUILD_BUG_ON(NR_IRQS >= 256); spin_lock_irqsave(&irq_alloc_lock, flags); for (ent = 1; ent < NR_IRQS; ent++) { if (!irq_table[ent].in_use) break; } if (ent >= NR_IRQS) { printk(KERN_ERR "IRQ: Out of virtual IRQs.\n"); ent = 0; } else { irq_table[ent].dev_handle = dev_handle; irq_table[ent].dev_ino = dev_ino; irq_table[ent].in_use = 1; } spin_unlock_irqrestore(&irq_alloc_lock, flags); return ent; } #ifdef CONFIG_PCI_MSI void irq_free(unsigned int irq) { unsigned long flags; if (irq >= NR_IRQS) return; spin_lock_irqsave(&irq_alloc_lock, flags); irq_table[irq].in_use = 0; spin_unlock_irqrestore(&irq_alloc_lock, flags); } #endif /* * /proc/interrupts printing: */ int arch_show_interrupts(struct seq_file *p, int prec) { int j; seq_printf(p, "NMI: "); 
for_each_online_cpu(j) seq_printf(p, "%10u ", cpu_data(j).__nmi_count); seq_printf(p, " Non-maskable interrupts\n"); return 0; } static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid) { unsigned int tid; if (this_is_starfire) { tid = starfire_translate(imap, cpuid); tid <<= IMAP_TID_SHIFT; tid &= IMAP_TID_UPA; } else { if (tlb_type == cheetah || tlb_type == cheetah_plus) { unsigned long ver; __asm__ ("rdpr %%ver, %0" : "=r" (ver)); if ((ver >> 32UL) == __JALAPENO_ID || (ver >> 32UL) == __SERRANO_ID) { tid = cpuid << IMAP_TID_SHIFT; tid &= IMAP_TID_JBUS; } else { unsigned int a = cpuid & 0x1f; unsigned int n = (cpuid >> 5) & 0x1f; tid = ((a << IMAP_AID_SHIFT) | (n << IMAP_NID_SHIFT)); tid &= (IMAP_AID_SAFARI | IMAP_NID_SAFARI); } } else { tid = cpuid << IMAP_TID_SHIFT; tid &= IMAP_TID_UPA; } } return tid; } struct irq_handler_data { unsigned long iclr; unsigned long imap; void (*pre_handler)(unsigned int, void *, void *); void *arg1; void *arg2; }; #ifdef CONFIG_SMP static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity) { cpumask_t mask; int cpuid; cpumask_copy(&mask, affinity); if (cpus_equal(mask, cpu_online_map)) { cpuid = map_to_cpu(irq); } else { cpumask_t tmp; cpus_and(tmp, cpu_online_map, mask); cpuid = cpus_empty(tmp) ? 
map_to_cpu(irq) : first_cpu(tmp); } return cpuid; } #else #define irq_choose_cpu(irq, affinity) \ real_hard_smp_processor_id() #endif static void sun4u_irq_enable(struct irq_data *data) { struct irq_handler_data *handler_data = data->handler_data; if (likely(handler_data)) { unsigned long cpuid, imap, val; unsigned int tid; cpuid = irq_choose_cpu(data->irq, data->affinity); imap = handler_data->imap; tid = sun4u_compute_tid(imap, cpuid); val = upa_readq(imap); val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS | IMAP_AID_SAFARI | IMAP_NID_SAFARI); val |= tid | IMAP_VALID; upa_writeq(val, imap); upa_writeq(ICLR_IDLE, handler_data->iclr); } } static int sun4u_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { struct irq_handler_data *handler_data = data->handler_data; if (likely(handler_data)) { unsigned long cpuid, imap, val; unsigned int tid; cpuid = irq_choose_cpu(data->irq, mask); imap = handler_data->imap; tid = sun4u_compute_tid(imap, cpuid); val = upa_readq(imap); val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS | IMAP_AID_SAFARI | IMAP_NID_SAFARI); val |= tid | IMAP_VALID; upa_writeq(val, imap); upa_writeq(ICLR_IDLE, handler_data->iclr); } return 0; } /* Don't do anything. The desc->status check for IRQ_DISABLED in * handler_irq() will skip the handler call and that will leave the * interrupt in the sent state. The next ->enable() call will hit the * ICLR register to reset the state machine. * * This scheme is necessary, instead of clearing the Valid bit in the * IMAP register, to handle the case of IMAP registers being shared by * multiple INOs (and thus ICLR registers). Since we use a different * virtual IRQ for each shared IMAP instance, the generic code thinks * there is only one user so it prematurely calls ->disable() on * free_irq(). * * We have to provide an explicit ->disable() method instead of using * NULL to get the default. 
The reason is that if the generic code * sees that, it also hooks up a default ->shutdown method which * invokes ->mask() which we do not want. See irq_chip_set_defaults(). */ static void sun4u_irq_disable(struct irq_data *data) { } static void sun4u_irq_eoi(struct irq_data *data) { struct irq_handler_data *handler_data = data->handler_data; if (likely(handler_data)) upa_writeq(ICLR_IDLE, handler_data->iclr); } static void sun4v_irq_enable(struct irq_data *data) { unsigned int ino = irq_table[data->irq].dev_ino; unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity); int err; err = sun4v_intr_settarget(ino, cpuid); if (err != HV_EOK) printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): " "err(%d)\n", ino, cpuid, err); err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE); if (err != HV_EOK) printk(KERN_ERR "sun4v_intr_setstate(%x): " "err(%d)\n", ino, err); err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED); if (err != HV_EOK) printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n", ino, err); } static int sun4v_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { unsigned int ino = irq_table[data->irq].dev_ino; unsigned long cpuid = irq_choose_cpu(data->irq, mask); int err; err = sun4v_intr_settarget(ino, cpuid); if (err != HV_EOK) printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): " "err(%d)\n", ino, cpuid, err); return 0; } static void sun4v_irq_disable(struct irq_data *data) { unsigned int ino = irq_table[data->irq].dev_ino; int err; err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED); if (err != HV_EOK) printk(KERN_ERR "sun4v_intr_setenabled(%x): " "err(%d)\n", ino, err); } static void sun4v_irq_eoi(struct irq_data *data) { unsigned int ino = irq_table[data->irq].dev_ino; int err; err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE); if (err != HV_EOK) printk(KERN_ERR "sun4v_intr_setstate(%x): " "err(%d)\n", ino, err); } static void sun4v_virq_enable(struct irq_data *data) { unsigned long cpuid, dev_handle, dev_ino; int err; cpuid = 
irq_choose_cpu(data->irq, data->affinity); dev_handle = irq_table[data->irq].dev_handle; dev_ino = irq_table[data->irq].dev_ino; err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid); if (err != HV_EOK) printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): " "err(%d)\n", dev_handle, dev_ino, cpuid, err); err = sun4v_vintr_set_state(dev_handle, dev_ino, HV_INTR_STATE_IDLE); if (err != HV_EOK) printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx," "HV_INTR_STATE_IDLE): err(%d)\n", dev_handle, dev_ino, err); err = sun4v_vintr_set_valid(dev_handle, dev_ino, HV_INTR_ENABLED); if (err != HV_EOK) printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx," "HV_INTR_ENABLED): err(%d)\n", dev_handle, dev_ino, err); } static int sun4v_virt_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { unsigned long cpuid, dev_handle, dev_ino; int err; cpuid = irq_choose_cpu(data->irq, mask); dev_handle = irq_table[data->irq].dev_handle; dev_ino = irq_table[data->irq].dev_ino; err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid); if (err != HV_EOK) printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): " "err(%d)\n", dev_handle, dev_ino, cpuid, err); return 0; } static void sun4v_virq_disable(struct irq_data *data) { unsigned long dev_handle, dev_ino; int err; dev_handle = irq_table[data->irq].dev_handle; dev_ino = irq_table[data->irq].dev_ino; err = sun4v_vintr_set_valid(dev_handle, dev_ino, HV_INTR_DISABLED); if (err != HV_EOK) printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx," "HV_INTR_DISABLED): err(%d)\n", dev_handle, dev_ino, err); } static void sun4v_virq_eoi(struct irq_data *data) { unsigned long dev_handle, dev_ino; int err; dev_handle = irq_table[data->irq].dev_handle; dev_ino = irq_table[data->irq].dev_ino; err = sun4v_vintr_set_state(dev_handle, dev_ino, HV_INTR_STATE_IDLE); if (err != HV_EOK) printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx," "HV_INTR_STATE_IDLE): err(%d)\n", dev_handle, dev_ino, err); } static struct irq_chip sun4u_irq = { .name = "sun4u", 
.irq_enable = sun4u_irq_enable, .irq_disable = sun4u_irq_disable, .irq_eoi = sun4u_irq_eoi, .irq_set_affinity = sun4u_set_affinity, .flags = IRQCHIP_EOI_IF_HANDLED, }; static struct irq_chip sun4v_irq = { .name = "sun4v", .irq_enable = sun4v_irq_enable, .irq_disable = sun4v_irq_disable, .irq_eoi = sun4v_irq_eoi, .irq_set_affinity = sun4v_set_affinity, .flags = IRQCHIP_EOI_IF_HANDLED, }; static struct irq_chip sun4v_virq = { .name = "vsun4v", .irq_enable = sun4v_virq_enable, .irq_disable = sun4v_virq_disable, .irq_eoi = sun4v_virq_eoi, .irq_set_affinity = sun4v_virt_set_affinity, .flags = IRQCHIP_EOI_IF_HANDLED, }; static void pre_flow_handler(struct irq_data *d) { struct irq_handler_data *handler_data = irq_data_get_irq_handler_data(d); unsigned int ino = irq_table[d->irq].dev_ino; handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2); } void irq_install_pre_handler(int irq, void (*func)(unsigned int, void *, void *), void *arg1, void *arg2) { struct irq_handler_data *handler_data = irq_get_handler_data(irq); handler_data->pre_handler = func; handler_data->arg1 = arg1; handler_data->arg2 = arg2; __irq_set_preflow_handler(irq, pre_flow_handler); } unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap) { struct ino_bucket *bucket; struct irq_handler_data *handler_data; unsigned int irq; int ino; BUG_ON(tlb_type == hypervisor); ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup; bucket = &ivector_table[ino]; irq = bucket_get_irq(__pa(bucket)); if (!irq) { irq = irq_alloc(0, ino); bucket_set_irq(__pa(bucket), irq); irq_set_chip_and_handler_name(irq, &sun4u_irq, handle_fasteoi_irq, "IVEC"); } handler_data = irq_get_handler_data(irq); if (unlikely(handler_data)) goto out; handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); if (unlikely(!handler_data)) { prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n"); prom_halt(); } irq_set_handler_data(irq, handler_data); handler_data->imap = imap; 
handler_data->iclr = iclr; out: return irq; } static unsigned int sun4v_build_common(unsigned long sysino, struct irq_chip *chip) { struct ino_bucket *bucket; struct irq_handler_data *handler_data; unsigned int irq; BUG_ON(tlb_type != hypervisor); bucket = &ivector_table[sysino]; irq = bucket_get_irq(__pa(bucket)); if (!irq) { irq = irq_alloc(0, sysino); bucket_set_irq(__pa(bucket), irq); irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq, "IVEC"); } handler_data = irq_get_handler_data(irq); if (unlikely(handler_data)) goto out; handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); if (unlikely(!handler_data)) { prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n"); prom_halt(); } irq_set_handler_data(irq, handler_data); /* Catch accidental accesses to these things. IMAP/ICLR handling * is done by hypervisor calls on sun4v platforms, not by direct * register accesses. */ handler_data->imap = ~0UL; handler_data->iclr = ~0UL; out: return irq; } unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino) { unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino); return sun4v_build_common(sysino, &sun4v_irq); } unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino) { struct irq_handler_data *handler_data; unsigned long hv_err, cookie; struct ino_bucket *bucket; unsigned int irq; bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC); if (unlikely(!bucket)) return 0; /* The only reference we store to the IRQ bucket is * by physical address which kmemleak can't see, tell * it that this object explicitly is not a leak and * should be scanned. 
*/ kmemleak_not_leak(bucket); __flush_dcache_range((unsigned long) bucket, ((unsigned long) bucket + sizeof(struct ino_bucket))); irq = irq_alloc(devhandle, devino); bucket_set_irq(__pa(bucket), irq); irq_set_chip_and_handler_name(irq, &sun4v_virq, handle_fasteoi_irq, "IVEC"); handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); if (unlikely(!handler_data)) return 0; /* In order to make the LDC channel startup sequence easier, * especially wrt. locking, we do not let request_irq() enable * the interrupt. */ irq_set_status_flags(irq, IRQ_NOAUTOEN); irq_set_handler_data(irq, handler_data); /* Catch accidental accesses to these things. IMAP/ICLR handling * is done by hypervisor calls on sun4v platforms, not by direct * register accesses. */ handler_data->imap = ~0UL; handler_data->iclr = ~0UL; cookie = ~__pa(bucket); hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie); if (hv_err) { prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] " "err=%lu\n", devhandle, devino, hv_err); prom_halt(); } return irq; } void ack_bad_irq(unsigned int irq) { unsigned int ino = irq_table[irq].dev_ino; if (!ino) ino = 0xdeadbeef; printk(KERN_CRIT "Unexpected IRQ from ino[%x] irq[%u]\n", ino, irq); } void *hardirq_stack[NR_CPUS]; void *softirq_stack[NR_CPUS]; void __irq_entry handler_irq(int pil, struct pt_regs *regs) { unsigned long pstate, bucket_pa; struct pt_regs *old_regs; void *orig_sp; clear_softint(1 << pil); old_regs = set_irq_regs(regs); irq_enter(); /* Grab an atomic snapshot of the pending IVECs. 
*/ __asm__ __volatile__("rdpr %%pstate, %0\n\t" "wrpr %0, %3, %%pstate\n\t" "ldx [%2], %1\n\t" "stx %%g0, [%2]\n\t" "wrpr %0, 0x0, %%pstate\n\t" : "=&r" (pstate), "=&r" (bucket_pa) : "r" (irq_work_pa(smp_processor_id())), "i" (PSTATE_IE) : "memory"); orig_sp = set_hardirq_stack(); while (bucket_pa) { unsigned long next_pa; unsigned int irq; next_pa = bucket_get_chain_pa(bucket_pa); irq = bucket_get_irq(bucket_pa); bucket_clear_chain_pa(bucket_pa); generic_handle_irq(irq); bucket_pa = next_pa; } restore_hardirq_stack(orig_sp); irq_exit(); set_irq_regs(old_regs); } void do_softirq(void) { unsigned long flags; if (in_interrupt()) return; local_irq_save(flags); if (local_softirq_pending()) { void *orig_sp, *sp = softirq_stack[smp_processor_id()]; sp += THREAD_SIZE - 192 - STACK_BIAS; __asm__ __volatile__("mov %%sp, %0\n\t" "mov %1, %%sp" : "=&r" (orig_sp) : "r" (sp)); __do_softirq(); __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp)); } local_irq_restore(flags); } #ifdef CONFIG_HOTPLUG_CPU void fixup_irqs(void) { unsigned int irq; for (irq = 0; irq < NR_IRQS; irq++) { struct irq_desc *desc = irq_to_desc(irq); struct irq_data *data = irq_desc_get_irq_data(desc); unsigned long flags; raw_spin_lock_irqsave(&desc->lock, flags); if (desc->action && !irqd_is_per_cpu(data)) { if (data->chip->irq_set_affinity) data->chip->irq_set_affinity(data, data->affinity, false); } raw_spin_unlock_irqrestore(&desc->lock, flags); } tick_ops->disable_irq(); } #endif struct sun5_timer { u64 count0; u64 limit0; u64 count1; u64 limit1; }; static struct sun5_timer *prom_timers; static u64 prom_limit0, prom_limit1; static void map_prom_timers(void) { struct device_node *dp; const unsigned int *addr; /* PROM timer node hangs out in the top level of device siblings... 
*/ dp = of_find_node_by_path("/"); dp = dp->child; while (dp) { if (!strcmp(dp->name, "counter-timer")) break; dp = dp->sibling; } /* Assume if node is not present, PROM uses different tick mechanism * which we should not care about. */ if (!dp) { prom_timers = (struct sun5_timer *) 0; return; } /* If PROM is really using this, it must be mapped by him. */ addr = of_get_property(dp, "address", NULL); if (!addr) { prom_printf("PROM does not have timer mapped, trying to continue.\n"); prom_timers = (struct sun5_timer *) 0; return; } prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]); } static void kill_prom_timer(void) { if (!prom_timers) return; /* Save them away for later. */ prom_limit0 = prom_timers->limit0; prom_limit1 = prom_timers->limit1; /* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14. * We turn both off here just to be paranoid. */ prom_timers->limit0 = 0; prom_timers->limit1 = 0; /* Wheee, eat the interrupt packet too... */ __asm__ __volatile__( " mov 0x40, %%g2\n" " ldxa [%%g0] %0, %%g1\n" " ldxa [%%g2] %1, %%g1\n" " stxa %%g0, [%%g0] %0\n" " membar #Sync\n" : /* no outputs */ : "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R) : "g1", "g2"); } void notrace init_irqwork_curcpu(void) { int cpu = hard_smp_processor_id(); trap_block[cpu].irq_worklist_pa = 0UL; } /* Please be very careful with register_one_mondo() and * sun4v_register_mondo_queues(). * * On SMP this gets invoked from the CPU trampoline before * the cpu has fully taken over the trap table from OBP, * and it's kernel stack + %g6 thread register state is * not fully cooked yet. * * Therefore you cannot make any OBP calls, not even prom_printf, * from these two routines. 
*/ static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask) { unsigned long num_entries = (qmask + 1) / 64; unsigned long status; status = sun4v_cpu_qconf(type, paddr, num_entries); if (status != HV_EOK) { prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, " "err %lu\n", type, paddr, num_entries, status); prom_halt(); } } void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu) { struct trap_per_cpu *tb = &trap_block[this_cpu]; register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO, tb->cpu_mondo_qmask); register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO, tb->dev_mondo_qmask); register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR, tb->resum_qmask); register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR, tb->nonresum_qmask); } /* Each queue region must be a power of 2 multiple of 64 bytes in * size. The base real address must be aligned to the size of the * region. Thus, an 8KB queue must be 8KB aligned, for example. */ static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask) { unsigned long size = PAGE_ALIGN(qmask + 1); unsigned long order = get_order(size); unsigned long p; p = __get_free_pages(GFP_KERNEL, order); if (!p) { prom_printf("SUN4V: Error, cannot allocate queue.\n"); prom_halt(); } *pa_ptr = __pa(p); } static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb) { #ifdef CONFIG_SMP unsigned long page; BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64)); page = get_zeroed_page(GFP_KERNEL); if (!page) { prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n"); prom_halt(); } tb->cpu_mondo_block_pa = __pa(page); tb->cpu_list_pa = __pa(page + 64); #endif } /* Allocate mondo and error queues for all possible cpus. 
*/ static void __init sun4v_init_mondo_queues(void) { int cpu; for_each_possible_cpu(cpu) { struct trap_per_cpu *tb = &trap_block[cpu]; alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask); alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask); alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask); alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask); alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask); alloc_one_queue(&tb->nonresum_kernel_buf_pa, tb->nonresum_qmask); } } static void __init init_send_mondo_info(void) { int cpu; for_each_possible_cpu(cpu) { struct trap_per_cpu *tb = &trap_block[cpu]; init_cpu_send_mondo_info(tb); } } static struct irqaction timer_irq_action = { .name = "timer", }; /* Only invoked on boot processor. */ void __init init_IRQ(void) { unsigned long size; map_prom_timers(); kill_prom_timer(); size = sizeof(struct ino_bucket) * NUM_IVECS; ivector_table = kzalloc(size, GFP_KERNEL); if (!ivector_table) { prom_printf("Fatal error, cannot allocate ivector_table\n"); prom_halt(); } __flush_dcache_range((unsigned long) ivector_table, ((unsigned long) ivector_table) + size); ivector_table_pa = __pa(ivector_table); if (tlb_type == hypervisor) sun4v_init_mondo_queues(); init_send_mondo_info(); if (tlb_type == hypervisor) { /* Load up the boot cpu's entries. */ sun4v_register_mondo_queues(hard_smp_processor_id()); } /* We need to clear any IRQ's pending in the soft interrupt * registers, a spurious one could be left around from the * PROM timer which we just disabled. */ clear_softint(get_softint()); /* Now that ivector table is initialized, it is safe * to receive IRQ vector traps. We will normally take * one or two right now, in case some device PROM used * to boot us wants to speak to us. We just ignore them. */ __asm__ __volatile__("rdpr %%pstate, %%g1\n\t" "or %%g1, %0, %%g1\n\t" "wrpr %%g1, 0x0, %%pstate" : /* No outputs */ : "i" (PSTATE_IE) : "g1"); irq_to_desc(0)->action = &timer_irq_action; }
gpl-2.0
devmapal/linux
drivers/iio/pressure/mpl115.c
147
4644
/*
 * mpl115.c - Support for Freescale MPL115A pressure/temperature sensor
 *
 * Copyright (c) 2014 Peter Meerwald <pmeerw@pmeerw.net>
 *
 * This file is subject to the terms and conditions of version 2 of
 * the GNU General Public License.  See the file COPYING in the main
 * directory of this archive for more details.
 *
 * TODO: shutdown pin
 *
 */

#include <linux/module.h>
#include <linux/iio/iio.h>
#include <linux/delay.h>

#include "mpl115.h"

#define MPL115_PADC 0x00 /* pressure ADC output value, MSB first, 10 bit */
#define MPL115_TADC 0x02 /* temperature ADC output value, MSB first, 10 bit */
#define MPL115_A0 0x04 /* 12 bit integer, 3 bit fraction */
#define MPL115_B1 0x06 /* 2 bit integer, 13 bit fraction */
#define MPL115_B2 0x08 /* 1 bit integer, 14 bit fraction */
#define MPL115_C12 0x0a /* 0 bit integer, 13 bit fraction */
#define MPL115_CONVERT 0x12 /* convert temperature and pressure */

struct mpl115_data {
	struct device *dev;
	struct mutex lock;	/* serializes convert-then-read sequences */
	/* factory calibration coefficients, cached once at probe */
	s16 a0;
	s16 b1, b2;
	s16 c12;
	/* bus-specific register accessors (I2C or SPI front end) */
	const struct mpl115_ops *ops;
};

/* Start a pressure+temperature conversion and wait for it to complete.
 * Returns 0 on success or a negative errno from the bus write.
 */
static int mpl115_request(struct mpl115_data *data)
{
	int ret = data->ops->write(data->dev, MPL115_CONVERT, 0);

	if (ret < 0)
		return ret;

	/* conversion takes at most 3 ms per the datasheet */
	usleep_range(3000, 4000);

	return 0;
}

/* Read raw ADC values and apply the AN3785 compensation to produce
 * pressure in kPa as integer (*val) plus microunits (*val2).
 * Returns a negative errno on failure; on success returns the (>= 0)
 * value of the last bus read, which callers only test with "< 0".
 */
static int mpl115_comp_pressure(struct mpl115_data *data, int *val, int *val2)
{
	int ret;
	u16 padc, tadc;
	int a1, y1, pcomp;
	unsigned kpa;

	mutex_lock(&data->lock);
	ret = mpl115_request(data);
	if (ret < 0)
		goto done;

	ret = data->ops->read(data->dev, MPL115_PADC);
	if (ret < 0)
		goto done;
	/* 10-bit result is left-justified in the 16-bit register */
	padc = ret >> 6;

	ret = data->ops->read(data->dev, MPL115_TADC);
	if (ret < 0)
		goto done;
	tadc = ret >> 6;

	/* see Freescale AN3785 */
	a1 = data->b1 + ((data->c12 * tadc) >> 11);
	y1 = (data->a0 << 10) + a1 * padc;

	/* compensated pressure with 4 fractional bits */
	pcomp = (y1 + ((data->b2 * (int) tadc) >> 1)) >> 9;

	/* map full-scale pcomp (0..1023) onto the 50..115 kPa range */
	kpa = pcomp * (115 - 50) / 1023 + (50 << 4);
	*val = kpa >> 4;
	*val2 = (kpa & 15) * (1000000 >> 4);
done:
	mutex_unlock(&data->lock);
	return ret;
}

/* Convert and read the raw temperature ADC value (left-justified). */
static int mpl115_read_temp(struct mpl115_data *data)
{
	int ret;

	mutex_lock(&data->lock);
	ret = mpl115_request(data);
	if (ret < 0)
		goto done;
	ret = data->ops->read(data->dev, MPL115_TADC);
done:
	mutex_unlock(&data->lock);
	return ret;
}

static int mpl115_read_raw(struct iio_dev *indio_dev,
			   struct iio_chan_spec const *chan,
			   int *val, int *val2, long mask)
{
	struct mpl115_data *data = iio_priv(indio_dev);
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_PROCESSED:
		/* pressure channel: fully compensated value in kPa */
		ret = mpl115_comp_pressure(data, val, val2);
		if (ret < 0)
			return ret;
		return IIO_VAL_INT_PLUS_MICRO;
	case IIO_CHAN_INFO_RAW:
		/* temperature -5.35 C / LSB, 472 LSB is 25 C */
		ret = mpl115_read_temp(data);
		if (ret < 0)
			return ret;
		*val = ret >> 6;
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_OFFSET:
		/* offset/scale chosen so (raw + offset) * scale = m deg C */
		*val = -605;
		*val2 = 750000;
		return IIO_VAL_INT_PLUS_MICRO;
	case IIO_CHAN_INFO_SCALE:
		*val = -186;
		*val2 = 915888;
		return IIO_VAL_INT_PLUS_MICRO;
	}
	return -EINVAL;
}

static const struct iio_chan_spec mpl115_channels[] = {
	{
		.type = IIO_PRESSURE,
		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
	},
	{
		.type = IIO_TEMP,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
		.info_mask_shared_by_type =
			BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE),
	},
};

static const struct iio_info mpl115_info = {
	.read_raw = &mpl115_read_raw,
	.driver_module = THIS_MODULE,
};

/* Common probe for the I2C/SPI front ends: allocate the IIO device,
 * run the bus init hook, cache the four calibration coefficients and
 * register the device.  Fully devm-managed; no remove counterpart needed.
 */
int mpl115_probe(struct device *dev, const char *name,
			const struct mpl115_ops *ops)
{
	struct mpl115_data *data;
	struct iio_dev *indio_dev;
	int ret;

	indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
	if (!indio_dev)
		return -ENOMEM;

	data = iio_priv(indio_dev);
	data->dev = dev;
	data->ops = ops;
	mutex_init(&data->lock);

	indio_dev->info = &mpl115_info;
	indio_dev->name = name;
	indio_dev->dev.parent = dev;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->channels = mpl115_channels;
	indio_dev->num_channels = ARRAY_SIZE(mpl115_channels);

	ret = data->ops->init(data->dev);
	if (ret)
		return ret;

	/* coefficient registers are signed fixed-point; s16 keeps the sign */
	ret = data->ops->read(data->dev, MPL115_A0);
	if (ret < 0)
		return ret;
	data->a0 = ret;
	ret = data->ops->read(data->dev, MPL115_B1);
	if (ret < 0)
		return ret;
	data->b1 = ret;
	ret = data->ops->read(data->dev, MPL115_B2);
	if (ret < 0)
		return ret;
	data->b2 = ret;
	ret = data->ops->read(data->dev, MPL115_C12);
	if (ret < 0)
		return ret;
	data->c12 = ret;

	return devm_iio_device_register(dev, indio_dev);
}
EXPORT_SYMBOL_GPL(mpl115_probe);

MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
MODULE_DESCRIPTION("Freescale MPL115 pressure/temperature driver");
MODULE_LICENSE("GPL");
gpl-2.0
mythos234/AndromedaB-LL-N910F
net/sctp/output.c
915
23504
/* SCTP kernel implementation * (C) Copyright IBM Corp. 2001, 2004 * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * * This file is part of the SCTP kernel implementation * * These functions handle output processing. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, write to * the Free Software Foundation, 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <lksctp-developers@lists.sourceforge.net> * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Karl Knutson <karl@athena.chicago.il.us> * Jon Grimm <jgrimm@austin.ibm.com> * Sridhar Samudrala <sri@us.ibm.com> * * Any bugs reported given to us we will try to fix... any fixes shared will * be incorporated into the next SCTP release. 
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/net_namespace.h>

#include <linux/socket.h> /* for sa_family_t */
#include <net/sock.h>

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/checksum.h>

/* Forward declarations for private helpers. */
static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
					      struct sctp_chunk *chunk);
static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
					       struct sctp_chunk *chunk);
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk);
static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
					struct sctp_chunk *chunk,
					u16 chunk_len);

/* Reset a packet to "empty": only the transport/port/overhead fields
 * survive; all bundling state is cleared.
 */
static void sctp_packet_reset(struct sctp_packet *packet)
{
	packet->size = packet->overhead;
	packet->has_cookie_echo = 0;
	packet->has_sack = 0;
	packet->has_data = 0;
	packet->has_auth = 0;
	packet->ipfragok = 0;
	packet->auth = NULL;
}

/* Config a packet.
 * This appears to be a followup set of initializations.
 */
struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
				       __u32 vtag, int ecn_capable)
{
	struct sctp_chunk *chunk = NULL;

	SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__,
			  packet, vtag);

	packet->vtag = vtag;

	if (ecn_capable && sctp_packet_empty(packet)) {
		chunk = sctp_get_ecne_prepend(packet->transport->asoc);

		/* If there a is a prepend chunk stick it on the list before
		 * any other chunks get appended.
		 */
		if (chunk)
			sctp_packet_append_chunk(packet, chunk);
	}

	return packet;
}

/* Initialize the packet structure.
 */
struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
				     struct sctp_transport *transport,
				     __u16 sport, __u16 dport)
{
	struct sctp_association *asoc = transport->asoc;
	size_t overhead;

	SCTP_DEBUG_PRINTK("%s: packet:%p transport:%p\n", __func__,
			  packet, transport);

	packet->transport = transport;
	packet->source_port = sport;
	packet->destination_port = dport;
	INIT_LIST_HEAD(&packet->chunk_list);
	if (asoc) {
		struct sctp_sock *sp = sctp_sk(asoc->base.sk);
		overhead = sp->pf->af->net_header_len;
	} else {
		/* No association yet: assume the larger (IPv6) header. */
		overhead = sizeof(struct ipv6hdr);
	}
	overhead += sizeof(struct sctphdr);
	packet->overhead = overhead;
	sctp_packet_reset(packet);
	packet->vtag = 0;

	return packet;
}

/* Free a packet.  */
void sctp_packet_free(struct sctp_packet *packet)
{
	struct sctp_chunk *chunk, *tmp;

	SCTP_DEBUG_PRINTK("%s: packet:%p\n", __func__, packet);

	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}
}

/* This routine tries to append the chunk to the offered packet. If adding
 * the chunk causes the packet to exceed the path MTU and COOKIE_ECHO chunk
 * is not present in the packet, it transmits the input packet.
 * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long
 * as it can fit in the packet, but any more data that does not fit in this
 * packet can be sent only after receiving the COOKIE_ACK.
 */
sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
				       struct sctp_chunk *chunk,
				       int one_packet)
{
	sctp_xmit_t retval;
	int error = 0;

	SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__,
			  packet, chunk);

	switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
	case SCTP_XMIT_PMTU_FULL:
		if (!packet->has_cookie_echo) {
			/* Flush the full packet, then retry the chunk on
			 * the now-empty one (unless caller wants one packet).
			 */
			error = sctp_packet_transmit(packet);
			if (error < 0)
				chunk->skb->sk->sk_err = -error;

			/* If we have an empty packet, then we can NOT ever
			 * return PMTU_FULL.
			 */
			if (!one_packet)
				retval = sctp_packet_append_chunk(packet,
								  chunk);
		}
		break;

	case SCTP_XMIT_RWND_FULL:
	case SCTP_XMIT_OK:
	case SCTP_XMIT_NAGLE_DELAY:
		break;
	}

	return retval;
}

/* Try to bundle an auth chunk into the packet. */
static sctp_xmit_t sctp_packet_bundle_auth(struct sctp_packet *pkt,
					   struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = pkt->transport->asoc;
	struct sctp_chunk *auth;
	sctp_xmit_t retval = SCTP_XMIT_OK;

	/* if we don't have an association, we can't do authentication */
	if (!asoc)
		return retval;

	/* See if this is an auth chunk we are bundling or if
	 * auth is already bundled.
	 */
	if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
		return retval;

	/* if the peer did not request this chunk to be authenticated,
	 * don't do it
	 */
	if (!chunk->auth)
		return retval;

	auth = sctp_make_auth(asoc);
	if (!auth)
		return retval;

	retval = __sctp_packet_append_chunk(pkt, auth);

	if (retval != SCTP_XMIT_OK)
		sctp_chunk_free(auth);

	return retval;
}

/* Try to bundle a SACK with the packet. */
static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
					   struct sctp_chunk *chunk)
{
	sctp_xmit_t retval = SCTP_XMIT_OK;

	/* If sending DATA and haven't already bundled a SACK, try to
	 * bundle one in to the packet.
	 */
	if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
	    !pkt->has_cookie_echo) {
		struct sctp_association *asoc;
		struct timer_list *timer;
		asoc = pkt->transport->asoc;
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];

		/* If the SACK timer is running, we have a pending SACK */
		if (timer_pending(timer)) {
			struct sctp_chunk *sack;

			/* Don't emit a SACK the peer has already seen
			 * (stale sack_generation).
			 */
			if (pkt->transport->sack_generation !=
			    pkt->transport->asoc->peer.sack_generation)
				return retval;

			asoc->a_rwnd = asoc->rwnd;
			sack = sctp_make_sack(asoc);
			if (sack) {
				retval = __sctp_packet_append_chunk(pkt, sack);
				if (retval != SCTP_XMIT_OK) {
					sctp_chunk_free(sack);
					goto out;
				}
				asoc->peer.sack_needed = 0;
				if (del_timer(timer))
					sctp_association_put(asoc);
			}
		}
	}
out:
	return retval;
}

/* Append a chunk to the offered packet reporting back any inability to do
 * so.
 */
static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
					      struct sctp_chunk *chunk)
{
	sctp_xmit_t retval = SCTP_XMIT_OK;
	__u16 chunk_len = WORD_ROUND(ntohs(chunk->chunk_hdr->length));

	/* Check to see if this chunk will fit into the packet */
	retval = sctp_packet_will_fit(packet, chunk, chunk_len);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* We believe that this chunk is OK to add to the packet */
	switch (chunk->chunk_hdr->type) {
	    case SCTP_CID_DATA:
		/* Account for the data being in the packet */
		sctp_packet_append_data(packet, chunk);
		/* Disallow SACK bundling after DATA. */
		packet->has_sack = 1;
		/* Disallow AUTH bundling after DATA */
		packet->has_auth = 1;
		/* Let it be known that packet has DATA in it */
		packet->has_data = 1;
		/* timestamp the chunk for rtx purposes */
		chunk->sent_at = jiffies;
		break;
	    case SCTP_CID_COOKIE_ECHO:
		packet->has_cookie_echo = 1;
		break;

	    case SCTP_CID_SACK:
		packet->has_sack = 1;
		if (chunk->asoc)
			chunk->asoc->stats.osacks++;
		break;

	    case SCTP_CID_AUTH:
		packet->has_auth = 1;
		packet->auth = chunk;
		break;
	}

	/* It is OK to send this chunk.
	 */
	list_add_tail(&chunk->list, &packet->chunk_list);
	packet->size += chunk_len;
	chunk->transport = packet->transport;
finish:
	return retval;
}

/* Append a chunk to the offered packet reporting back any inability to do
 * so.
 */
sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
				     struct sctp_chunk *chunk)
{
	sctp_xmit_t retval = SCTP_XMIT_OK;

	SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__, packet,
			  chunk);

	/* Data chunks are special.  Before seeing what else we can
	 * bundle into this packet, check to see if we are allowed to
	 * send this DATA.
	 */
	if (sctp_chunk_is_data(chunk)) {
		retval = sctp_packet_can_append_data(packet, chunk);
		if (retval != SCTP_XMIT_OK)
			goto finish;
	}

	/* Try to bundle AUTH chunk */
	retval = sctp_packet_bundle_auth(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* Try to bundle SACK chunk */
	retval = sctp_packet_bundle_sack(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	retval = __sctp_packet_append_chunk(packet, chunk);

finish:
	return retval;
}

/* skb destructor: drop the socket reference taken in
 * sctp_packet_set_owner_w().
 */
static void sctp_packet_release_owner(struct sk_buff *skb)
{
	sk_free(skb->sk);
}

static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sctp_packet_release_owner;

	/*
	 * The data chunks have already been accounted for in sctp_sendmsg(),
	 * therefore only reserve a single byte to keep socket around until
	 * the packet has been transmitted.
	 */
	atomic_inc(&sk->sk_wmem_alloc);
}

/* All packets are sent to the network through this function from
 * sctp_outq_tail().
 *
 * The return value is a normal kernel error return value.
 */
int sctp_packet_transmit(struct sctp_packet *packet)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sctphdr *sh;
	struct sk_buff *nskb;
	struct sctp_chunk *chunk, *tmp;
	struct sock *sk;
	int err = 0;
	int padding;		/* How much padding do we need?  */
	__u8 has_data = 0;
	struct dst_entry *dst = tp->dst;
	unsigned char *auth = NULL;	/* pointer to auth in skb data */
	__u32 cksum_buf_len = sizeof(struct sctphdr);

	SCTP_DEBUG_PRINTK("%s: packet:%p\n", __func__, packet);

	/* Do NOT generate a chunkless packet. */
	if (list_empty(&packet->chunk_list))
		return err;

	/* Set up convenience variables... */
	chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
	sk = chunk->skb->sk;

	/* Allocate the new skb.  */
	nskb = alloc_skb(packet->size + LL_MAX_HEADER, GFP_ATOMIC);
	if (!nskb)
		goto nomem;

	/* Make sure the outbound skb has enough header room reserved. */
	skb_reserve(nskb, packet->overhead + LL_MAX_HEADER);

	/* Set the owning socket so that we know where to get the
	 * destination IP address.
	 */
	sctp_packet_set_owner_w(nskb, sk);

	if (!sctp_transport_dst_check(tp)) {
		/* Route is stale: re-route and re-sync the path MTU. */
		sctp_transport_route(tp, NULL, sctp_sk(sk));
		if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) {
			sctp_assoc_sync_pmtu(sk, asoc);
		}
	}
	dst = dst_clone(tp->dst);
	skb_dst_set(nskb, dst);
	if (!dst)
		goto no_route;

	/* Build the SCTP header.  */
	sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr));
	skb_reset_transport_header(nskb);
	sh->source = htons(packet->source_port);
	sh->dest   = htons(packet->destination_port);

	/* From 6.8 Adler-32 Checksum Calculation:
	 * After the packet is constructed (containing the SCTP common
	 * header and one or more control or DATA chunks), the
	 * transmitter shall:
	 *
	 * 1) Fill in the proper Verification Tag in the SCTP common
	 *    header and initialize the checksum field to 0's.
	 */
	sh->vtag     = htonl(packet->vtag);
	sh->checksum = 0;

	/**
	 * 6.10 Bundling
	 *
	 *    An endpoint bundles chunks by simply including multiple
	 *    chunks in one outbound SCTP packet.  ...
	 */

	/**
	 * 3.2  Chunk Field Descriptions
	 *
	 * The total length of a chunk (including Type, Length and
	 * Value fields) MUST be a multiple of 4 bytes.  If the length
	 * of the chunk is not a multiple of 4 bytes, the sender MUST
	 * pad the chunk with all zero bytes and this padding is not
	 * included in the chunk length field.  The sender should
	 * never pad with more than 3 bytes.
	 *
	 * [This whole comment explains WORD_ROUND() below.]
	 */
	SCTP_DEBUG_PRINTK("***sctp_transmit_packet***\n");
	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		if (sctp_chunk_is_data(chunk)) {
			/* 6.3.1 C4) When data is in flight and when allowed
			 * by rule C5, a new RTT measurement MUST be made each
			 * round trip.  Furthermore, new RTT measurements
			 * SHOULD be made no more than once per round-trip
			 * for a given destination transport address.
			 */

			if (!tp->rto_pending) {
				chunk->rtt_in_progress = 1;
				tp->rto_pending = 1;
			}
			has_data = 1;
		}

		padding = WORD_ROUND(chunk->skb->len) - chunk->skb->len;
		if (padding)
			memset(skb_put(chunk->skb, padding), 0, padding);

		/* if this is the auth chunk that we are adding,
		 * store pointer where it will be added and put
		 * the auth into the packet.
		 */
		if (chunk == packet->auth)
			auth = skb_tail_pointer(nskb);

		cksum_buf_len += chunk->skb->len;
		memcpy(skb_put(nskb, chunk->skb->len),
			       chunk->skb->data, chunk->skb->len);

		SCTP_DEBUG_PRINTK("%s %p[%s] %s 0x%x, %s %d, %s %d, %s %d\n",
				  "*** Chunk", chunk,
				  sctp_cname(SCTP_ST_CHUNK(
					  chunk->chunk_hdr->type)),
				  chunk->has_tsn ? "TSN" : "No TSN",
				  chunk->has_tsn ?
				  ntohl(chunk->subh.data_hdr->tsn) : 0,
				  "length", ntohs(chunk->chunk_hdr->length),
				  "chunk->skb->len", chunk->skb->len,
				  "rtt_in_progress", chunk->rtt_in_progress);

		/*
		 * If this is a control chunk, this is our last
		 * reference. Free data chunks after they've been
		 * acknowledged or have failed.
		 */
		if (!sctp_chunk_is_data(chunk))
			sctp_chunk_free(chunk);
	}

	/* SCTP-AUTH, Section 6.2
	 *    The sender MUST calculate the MAC as described in RFC2104 [2]
	 *    using the hash function H as described by the MAC Identifier and
	 *    the shared association key K based on the endpoint pair shared key
	 *    described by the shared key identifier.  The 'data' used for the
	 *    computation of the AUTH-chunk is given by the AUTH chunk with its
	 *    HMAC field set to zero (as shown in Figure 6) followed by all
	 *    chunks that are placed after the AUTH chunk in the SCTP packet.
	 */
	if (auth)
		sctp_auth_calculate_hmac(asoc, nskb,
					(struct sctp_auth_chunk *)auth,
					GFP_ATOMIC);

	/* 2) Calculate the Adler-32 checksum of the whole packet,
	 *    including the SCTP common header and all the
	 *    chunks.
	 *
	 * Note: Adler-32 is no longer applicable, as has been replaced
	 * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
	 */
	if (!sctp_checksum_disable) {
		/* Only checksum in software when hardware offload is
		 * unavailable, an xfrm transform applies, or the packet
		 * will be IP-fragmented.
		 */
		if (!(dst->dev->features & NETIF_F_SCTP_CSUM) ||
		    (dst_xfrm(dst) != NULL) || packet->ipfragok) {
			__u32 crc32 = sctp_start_cksum((__u8 *)sh,
						       cksum_buf_len);

			/* 3) Put the resultant value into the checksum field in the
			 *    common header, and leave the rest of the bits unchanged.
			 */
			sh->checksum = sctp_end_cksum(crc32);
		} else {
			/* no need to seed pseudo checksum for SCTP */
			nskb->ip_summed = CHECKSUM_PARTIAL;
			nskb->csum_start = (skb_transport_header(nskb) -
			                    nskb->head);
			nskb->csum_offset = offsetof(struct sctphdr, checksum);
		}
	}

	/* IP layer ECN support
	 * From RFC 2481
	 *  "The ECN-Capable Transport (ECT) bit would be set by the
	 *   data sender to indicate that the end-points of the
	 *   transport protocol are ECN-capable."
	 *
	 * Now setting the ECT bit all the time, as it should not cause
	 * any problems protocol-wise even if our peer ignores it.
	 *
	 * Note: The works for IPv6 layer checks this bit too later
	 * in transmission.  See IP6_ECN_flow_xmit().
	 */
	(*tp->af_specific->ecn_capable)(nskb->sk);

	/* Set up the IP options.  */
	/* BUG: not implemented
	 * For v4 this all lives somewhere in sk->sk_opt...
	 */

	/* Dump that on IP!  */
	if (asoc) {
		asoc->stats.opackets++;
		if (asoc->peer.last_sent_to != tp)
			/* Considering the multiple CPU scenario, this is a
			 * "correcter" place for last_sent_to.  --xguo
			 */
			asoc->peer.last_sent_to = tp;
	}

	if (has_data) {
		struct timer_list *timer;
		unsigned long timeout;

		/* Restart the AUTOCLOSE timer when sending data. */
		if (sctp_state(asoc, ESTABLISHED) && asoc->autoclose) {
			timer = &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
			timeout = asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];

			if (!mod_timer(timer, jiffies + timeout))
				sctp_association_hold(asoc);
		}
	}

	SCTP_DEBUG_PRINTK("***sctp_transmit_packet*** skb len %d\n",
			  nskb->len);

	nskb->local_df = packet->ipfragok;
	(*tp->af_specific->sctp_xmit)(nskb, tp);

out:
	sctp_packet_reset(packet);
	return err;
no_route:
	kfree_skb(nskb);
	IP_INC_STATS_BH(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);

	/* FIXME: Returning the 'err' will effect all the associations
	 * associated with a socket, although only one of the paths of the
	 * association is unreachable.
	 * The real failure of a transport or association can be passed on
	 * to the user via notifications. So setting this error may not be
	 * required.
	 */
	 /* err = -EHOSTUNREACH; */
err:
	/* Control chunks are unreliable so just drop them.  DATA chunks
	 * will get resent or dropped later.
	 */

	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		if (!sctp_chunk_is_data(chunk))
			sctp_chunk_free(chunk);
	}
	goto out;
nomem:
	err = -ENOMEM;
	goto err;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* This private function check to see if a chunk can be added */
static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
					       struct sctp_chunk *chunk)
{
	sctp_xmit_t retval = SCTP_XMIT_OK;
	size_t datasize, rwnd, inflight, flight_size;
	struct sctp_transport *transport = packet->transport;
	struct sctp_association *asoc = transport->asoc;
	struct sctp_outq *q = &asoc->outqueue;

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * A) At any given time, the data sender MUST NOT transmit new data to
	 * any destination transport address if its peer's rwnd indicates
	 * that the peer has no buffer space (i.e. rwnd is 0, see Section
	 * 6.2.1).  However, regardless of the value of rwnd (including if it
	 * is 0), the data sender can always have one DATA chunk in flight to
	 * the receiver if allowed by cwnd (see rule B below).  This rule
	 * allows the sender to probe for a change in rwnd that the sender
	 * missed due to the SACK having been lost in transit from the data
	 * receiver to the data sender.
	 */

	rwnd = asoc->peer.rwnd;
	inflight = q->outstanding_bytes;
	flight_size = transport->flight_size;

	datasize = sctp_data_size(chunk);

	if (datasize > rwnd) {
		if (inflight > 0) {
			/* We have (at least) one data chunk in flight,
			 * so we can't fall back to rule 6.1 B).
			 */
			retval = SCTP_XMIT_RWND_FULL;
			goto finish;
		}
	}

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * B) At any given time, the sender MUST NOT transmit new data
	 * to a given transport address if it has cwnd or more bytes
	 * of data outstanding to that transport address.
	 */
	/* RFC 7.2.4 & the Implementers Guide 2.8.
	 *
	 * 3) ...
	 *    When a Fast Retransmit is being performed the sender SHOULD
	 *    ignore the value of cwnd and SHOULD NOT delay retransmission.
	 */
	if (chunk->fast_retransmit != SCTP_NEED_FRTX)
		if (flight_size >= transport->cwnd) {
			retval = SCTP_XMIT_RWND_FULL;
			goto finish;
		}

	/* Nagle's algorithm to solve small-packet problem:
	 * Inhibit the sending of new chunks when new outgoing data arrives
	 * if any previously transmitted data on the connection remains
	 * unacknowledged.
	 */
	if (!sctp_sk(asoc->base.sk)->nodelay && sctp_packet_empty(packet) &&
	    inflight && sctp_state(asoc, ESTABLISHED)) {
		unsigned int max = transport->pathmtu - packet->overhead;
		unsigned int len = chunk->skb->len + q->out_qlen;

		/* Check whether this chunk and all the rest of pending
		 * data will fit or delay in hopes of bundling a full
		 * sized packet.
		 * Don't delay large message writes that may have been
		 * fragmeneted into small peices.
		 */
		if ((len < max) && chunk->msg->can_delay) {
			retval = SCTP_XMIT_NAGLE_DELAY;
			goto finish;
		}
	}

finish:
	return retval;
}

/* This private function does management things when adding DATA chunk */
static void sctp_packet_append_data(struct sctp_packet *packet,
				struct sctp_chunk *chunk)
{
	struct sctp_transport *transport = packet->transport;
	size_t datasize = sctp_data_size(chunk);
	struct sctp_association *asoc = transport->asoc;
	u32 rwnd = asoc->peer.rwnd;

	/* Keep track of how many bytes are in flight over this transport. */
	transport->flight_size += datasize;

	/* Keep track of how many bytes are in flight to the receiver. */
	asoc->outqueue.outstanding_bytes += datasize;

	/* Update our view of the receiver's rwnd. */
	if (datasize < rwnd)
		rwnd -= datasize;
	else
		rwnd = 0;

	asoc->peer.rwnd = rwnd;
	/* Has been accepted for transmission.
	 */
	if (!asoc->peer.prsctp_capable)
		chunk->msg->can_abandon = 0;
	sctp_chunk_assign_tsn(chunk);
	sctp_chunk_assign_ssn(chunk);
}

/* Decide whether @chunk (of wire length @chunk_len) fits in @packet,
 * or whether we must IP-fragment or report the packet as full.
 */
static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
					struct sctp_chunk *chunk,
					u16 chunk_len)
{
	size_t psize;
	size_t pmtu;
	int too_big;
	sctp_xmit_t retval = SCTP_XMIT_OK;

	psize = packet->size;
	pmtu  = ((packet->transport->asoc) ?
		(packet->transport->asoc->pathmtu) :
		(packet->transport->pathmtu));

	too_big = (psize + chunk_len > pmtu);

	/* Decide if we need to fragment or resubmit later. */
	if (too_big) {
		/* It's OK to fragmet at IP level if any one of the following
		 * is true:
		 * 	1. The packet is empty (meaning this chunk is greater
		 * 	   the MTU)
		 * 	2. The chunk we are adding is a control chunk
		 * 	3. The packet doesn't have any data in it yet and data
		 * 	   requires authentication.
		 */
		if (sctp_packet_empty(packet) || !sctp_chunk_is_data(chunk) ||
		    (!packet->has_data && chunk->auth)) {
			/* We no longer do re-fragmentation.
			 * Just fragment at the IP layer, if we
			 * actually hit this condition
			 */
			packet->ipfragok = 1;
		} else {
			retval = SCTP_XMIT_PMTU_FULL;
		}
	}

	return retval;
}
gpl-2.0
redfuture/linux-kernel
drivers/video/console/newport_con.c
1427
20596
/*
 * newport_con.c: Abscon for newport hardware
 *
 * (C) 1998 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
 * (C) 1999 Ulf Carlsson (ulfc@thepuffingruop.com)
 *
 * This driver is based on sgicons.c and cons_newport.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 Miguel de Icaza (miguel@nuclecu.unam.mx)
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/kd.h>
#include <linux/selection.h>
#include <linux/console.h>
#include <linux/vt_kern.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/gio_device.h>

#include <video/newport.h>

#include <linux/linux_logo.h>
#include <linux/font.h>

#define FONT_DATA ((unsigned char *)font_vga_8x16.data)

/* borrowed from fbcon.c */
#define REFCOUNT(fd)	(((int *)(fd))[-1])
#define FNTSIZE(fd)	(((int *)(fd))[-2])
#define FNTCHARCNT(fd)	(((int *)(fd))[-3])
#define FONT_EXTRA_WORDS 3

static unsigned char *font_data[MAX_NR_CONSOLES];

static struct newport_regs *npregs;

static int logo_active;		/* suppress drawing while the boot logo shows */
static int topscan;		/* current hardware scroll offset (pixels) */
static int xcurs_correction = 29;
static int newport_xsize;
static int newport_ysize;
static int newport_has_init;

static int newport_set_def_font(int unit, struct console_font *op);

#define BMASK(c) (c << 24)

/* Feed one 8x16 glyph (16 bytes at cp) to the REX3 zpattern register;
 * each write renders one scanline of the pending bitmap operation.
 */
#define RENDER(regs, cp) do { \
	(regs)->go.zpattern = BMASK((cp)[0x0]); (regs)->go.zpattern = BMASK((cp)[0x1]); \
	(regs)->go.zpattern = BMASK((cp)[0x2]); (regs)->go.zpattern = BMASK((cp)[0x3]); \
	(regs)->go.zpattern = BMASK((cp)[0x4]); (regs)->go.zpattern = BMASK((cp)[0x5]); \
	(regs)->go.zpattern = BMASK((cp)[0x6]); (regs)->go.zpattern = BMASK((cp)[0x7]); \
	(regs)->go.zpattern = BMASK((cp)[0x8]); (regs)->go.zpattern = BMASK((cp)[0x9]); \
	(regs)->go.zpattern = BMASK((cp)[0xa]); (regs)->go.zpattern = BMASK((cp)[0xb]); \
	(regs)->go.zpattern = BMASK((cp)[0xc]); (regs)->go.zpattern = BMASK((cp)[0xd]); \
	(regs)->go.zpattern = BMASK((cp)[0xe]); (regs)->go.zpattern = BMASK((cp)[0xf]); \
} while(0)

#define TESTVAL 0xdeadbeef
#define XSTI_TO_FXSTART(val) (((val) & 0xffff) << 11)

/* Fill the pixel rectangle behind a character cell with color index ci,
 * adjusted for the current topscan scroll offset.
 */
static inline void newport_render_background(int xstart, int ystart,
					     int xend, int yend, int ci)
{
	newport_wait(npregs);
	npregs->set.wrmask = 0xffffffff;
	npregs->set.drawmode0 = (NPORT_DMODE0_DRAW | NPORT_DMODE0_BLOCK |
				 NPORT_DMODE0_DOSETUP | NPORT_DMODE0_STOPX |
				 NPORT_DMODE0_STOPY);
	npregs->set.colori = ci;
	npregs->set.xystarti =
	    (xstart << 16) | ((ystart + topscan) & 0x3ff);
	npregs->go.xyendi =
	    ((xend + 7) << 16) | ((yend + topscan + 15) & 0x3ff);
}

/* Program the first 16 CMAP entries with the standard console palette. */
static inline void newport_init_cmap(void)
{
	unsigned short i;

	for (i = 0; i < 16; i++) {
		newport_bfwait(npregs);
		newport_cmap_setaddr(npregs, color_table[i]);
		newport_cmap_setrgb(npregs,
				    default_red[i],
				    default_grn[i], default_blu[i]);
	}
}

/* NOTE(review): when CONFIG_LOGO_SGI_CLUT224 is not set this function
 * falls off the end without returning a value (undefined behavior for a
 * non-void function) — upstream later added an "#else return NULL;" leg.
 */
static const struct linux_logo *newport_show_logo(void)
{
#ifdef CONFIG_LOGO_SGI_CLUT224
	const struct linux_logo *logo = fb_find_logo(8);
	const unsigned char *clut;
	const unsigned char *data;
	unsigned long i;

	if (!logo)
		return NULL;
	clut = logo->clut;
	data = logo->data;

	/* load the logo palette above the console colors (base 0x20) */
	for (i = 0; i < logo->clutsize; i++) {
		newport_bfwait(npregs);
		newport_cmap_setaddr(npregs, i + 0x20);
		newport_cmap_setrgb(npregs, clut[0], clut[1], clut[2]);
		clut += 3;
	}

	newport_wait(npregs);

	npregs->set.drawmode0 = (NPORT_DMODE0_DRAW | NPORT_DMODE0_BLOCK |
				 NPORT_DMODE0_CHOST);

	npregs->set.xystarti = ((newport_xsize - logo->width) << 16) | (0);
	npregs->set.xyendi = ((newport_xsize - 1) << 16);
	newport_wait(npregs);

	/* push logo pixels through the host interface register */
	for (i = 0; i < logo->width*logo->height; i++)
		npregs->go.hostrw0 = *data++ << 24;

	return logo;
#endif /* CONFIG_LOGO_SGI_CLUT224 */
}

/* Clear a pixel rectangle (absolute coordinates, no topscan adjust). */
static inline void newport_clear_screen(int xstart, int ystart, int xend,
					int yend, int ci)
{
	if (logo_active)
		return;

	newport_wait(npregs);
	npregs->set.wrmask = 0xffffffff;
	npregs->set.drawmode0 = (NPORT_DMODE0_DRAW | NPORT_DMODE0_BLOCK |
				 NPORT_DMODE0_DOSETUP | NPORT_DMODE0_STOPX |
				 NPORT_DMODE0_STOPY);
	npregs->set.colori = ci;
	npregs->set.xystarti = (xstart << 16) | ystart;
	npregs->go.xyendi = (xend << 16) | yend;
}

/* Clear whole character rows [ystart, yend], topscan-adjusted. */
static inline void newport_clear_lines(int ystart, int yend, int ci)
{
	ystart = ((ystart << 4) + topscan) & 0x3ff;
	yend = ((yend << 4) + topscan + 15) & 0x3ff;
	newport_clear_screen(0, ystart, 1280 + 63, yend, ci);
}

/* Bring the hardware to a known state: enable video, clear cursor RAM,
 * reload the palette, disable the popup plane and clear the framebuffer.
 */
static void newport_reset(void)
{
	unsigned short treg;
	int i;

	newport_wait(npregs);
	treg = newport_vc2_get(npregs, VC2_IREG_CONTROL);
	newport_vc2_set(npregs, VC2_IREG_CONTROL,
			(treg | VC2_CTRL_EVIDEO));

	treg = newport_vc2_get(npregs, VC2_IREG_CENTRY);
	newport_vc2_set(npregs, VC2_IREG_RADDR, treg);
	npregs->set.dcbmode = (NPORT_DMODE_AVC2 | VC2_REGADDR_RAM |
			       NPORT_DMODE_W2 | VC2_PROTOCOL);
	for (i = 0; i < 128; i++) {
		newport_bfwait(npregs);
		if (i == 92 || i == 94)
			npregs->set.dcbdata0.byshort.s1 = 0xff00;
		else
			npregs->set.dcbdata0.byshort.s1 = 0x0000;
	}

	newport_init_cmap();

	/* turn off popup plane */
	npregs->set.dcbmode = (DCB_XMAP0 | R_DCB_XMAP9_PROTOCOL |
			       XM9_CRS_CONFIG | NPORT_DMODE_W1);
	npregs->set.dcbdata0.bybytes.b3 &= ~XM9_PUPMODE;
	npregs->set.dcbmode = (DCB_XMAP1 | R_DCB_XMAP9_PROTOCOL |
			       XM9_CRS_CONFIG | NPORT_DMODE_W1);
	npregs->set.dcbdata0.bybytes.b3 &= ~XM9_PUPMODE;

	topscan = 0;
	npregs->cset.topscan = 0x3ff;
	npregs->cset.xywin = (4096 << 16) | 4096;

	/* Clear the screen.  */
	newport_clear_screen(0, 0, 1280 + 63, 1024, 0);
}

/*
 * calculate the actual screen size by reading
 * the video timing out of the VC2
 */
static void newport_get_screensize(void)
{
	int i, cols;
	unsigned short ventry, treg;
	unsigned short linetable[128];	/* should be enough */

	ventry = newport_vc2_get(npregs, VC2_IREG_VENTRY);
	newport_vc2_set(npregs, VC2_IREG_RADDR, ventry);
	npregs->set.dcbmode = (NPORT_DMODE_AVC2 | VC2_REGADDR_RAM |
			       NPORT_DMODE_W2 | VC2_PROTOCOL);
	for (i = 0; i < 128; i++) {
		newport_bfwait(npregs);
		linetable[i] = npregs->set.dcbdata0.byshort.s1;
	}

	newport_xsize = newport_ysize = 0;
	/* walk (address, linecount) pairs; stop at a zero linecount */
	for (i = 0; i < ARRAY_SIZE(linetable) - 1 && linetable[i + 1]; i += 2) {
		cols = 0;
		newport_vc2_set(npregs, VC2_IREG_RADDR, linetable[i]);
		npregs->set.dcbmode = (NPORT_DMODE_AVC2 | VC2_REGADDR_RAM |
				       NPORT_DMODE_W2 | VC2_PROTOCOL);
		do {
			newport_bfwait(npregs);
			treg = npregs->set.dcbdata0.byshort.s1;
			if ((treg & 1) == 0)
				cols += (treg >> 7) & 0xfe;
			if ((treg & 0x80) == 0) {
				newport_bfwait(npregs);
				treg = npregs->set.dcbdata0.byshort.s1;
			}
		} while ((treg & 0x8000) == 0);
		if (cols) {
			if (cols > newport_xsize)
				newport_xsize = cols;
			newport_ysize += linetable[i + 1];
		}
	}
	printk("NG1: Screensize %dx%d\n", newport_xsize, newport_ysize);
}

/* Probe and report the revisions of the board and its sub-chips
 * (REX3, VC2, xmap9, cmap, bt445); also derives xcurs_correction.
 */
static void newport_get_revisions(void)
{
	unsigned int tmp;
	unsigned int board_rev;
	unsigned int rex3_rev;
	unsigned int vc2_rev;
	unsigned int cmap_rev;
	unsigned int xmap9_rev;
	unsigned int bt445_rev;
	unsigned int bitplanes;

	rex3_rev = npregs->cset.status & NPORT_STAT_VERS;

	npregs->set.dcbmode = (DCB_CMAP0 | NCMAP_PROTOCOL |
			       NCMAP_REGADDR_RREG | NPORT_DMODE_W1);
	tmp = npregs->set.dcbdata0.bybytes.b3;
	cmap_rev = tmp & 7;
	board_rev = (tmp >> 4) & 7;
	bitplanes = ((board_rev > 1) && (tmp & 0x80)) ? 8 : 24;

	npregs->set.dcbmode = (DCB_CMAP1 | NCMAP_PROTOCOL |
			       NCMAP_REGADDR_RREG | NPORT_DMODE_W1);
	tmp = npregs->set.dcbdata0.bybytes.b3;
	/* report the lower of the two cmap revisions */
	if ((tmp & 7) < cmap_rev)
		cmap_rev = (tmp & 7);

	vc2_rev = (newport_vc2_get(npregs, VC2_IREG_CONFIG) >> 5) & 7;

	npregs->set.dcbmode = (DCB_XMAP0 | R_DCB_XMAP9_PROTOCOL |
			       XM9_CRS_REVISION | NPORT_DMODE_W1);
	xmap9_rev = npregs->set.dcbdata0.bybytes.b3 & 7;

	npregs->set.dcbmode = (DCB_BT445 | BT445_PROTOCOL |
			       BT445_CSR_ADDR_REG | NPORT_DMODE_W1);
	npregs->set.dcbdata0.bybytes.b3 = BT445_REVISION_REG;
	npregs->set.dcbmode = (DCB_BT445 | BT445_PROTOCOL |
			       BT445_CSR_REVISION | NPORT_DMODE_W1);
	bt445_rev = (npregs->set.dcbdata0.bybytes.b3 >> 4) - 0x0a;

#define L(a)     (char)('A'+(a))
	printk
	    ("NG1: Revision %d, %d bitplanes, REX3 revision %c, VC2 revision %c, xmap9 revision %c, cmap revision %c, bt445 revision %c\n",
	     board_rev, bitplanes, L(rex3_rev), L(vc2_rev), L(xmap9_rev),
	     L(cmap_rev ? (cmap_rev + 1) : 0), L(bt445_rev));
#undef L

	if (board_rev == 3)	/* I don't know all affected revisions */
		xcurs_correction = 21;
}

static void newport_exit(void)
{
	int i;

	/* free memory used by user font */
	for (i = 0; i < MAX_NR_CONSOLES; i++)
		newport_set_def_font(i, NULL);
}

/* Can't be __init, do_take_over_console may call it later */
static const char *newport_startup(void)
{
	int i;

	npregs->cset.config = NPORT_CFG_GD0;

	if (newport_wait(npregs))
		goto out_unmap;

	/* sanity-check that register writes round-trip as expected */
	npregs->set.xstarti = TESTVAL;
	if (npregs->set._xstart.word != XSTI_TO_FXSTART(TESTVAL))
		goto out_unmap;

	for (i = 0; i < MAX_NR_CONSOLES; i++)
		font_data[i] = FONT_DATA;

	newport_reset();
	newport_get_revisions();
	newport_get_screensize();
	newport_has_init = 1;

	return "SGI Newport";

out_unmap:
	return NULL;
}

static void newport_init(struct vc_data *vc, int init)
{
	int cols, rows;

	/* 8x16 font: character grid size derives from the pixel size */
	cols = newport_xsize / 8;
	rows = newport_ysize / 16;
	vc->vc_can_do_color = 1;
	if (init) {
		vc->vc_cols = cols;
		vc->vc_rows = rows;
	} else
		vc_resize(vc, cols, rows);
}

static void newport_deinit(struct vc_data *c)
{
	/* tear down only when no VT is bound to this console anymore */
	if (!con_is_bound(&newport_con) && newport_has_init) {
		newport_exit();
		newport_has_init = 0;
	}
}

static void newport_clear(struct vc_data *vc, int sy, int sx, int height,
			  int width)
{
	int xend = ((sx + width) << 3) - 1;
	int ystart = ((sy << 4) + topscan) & 0x3ff;
	int yend = (((sy + height) << 4) + topscan - 1) & 0x3ff;

	if (logo_active)
		return;

	if (ystart < yend) {
		newport_clear_screen(sx << 3, ystart, xend, yend,
				     (vc->vc_color & 0xf0) >> 4);
	} else {
		/* region wraps around the 1024-line scroll boundary:
		 * clear it in two pieces
		 */
		newport_clear_screen(sx << 3, ystart, xend, 1023,
				     (vc->vc_color & 0xf0) >> 4);
		newport_clear_screen(sx << 3, 0, xend, yend,
				     (vc->vc_color & 0xf0) >> 4);
	}
}

static void newport_putc(struct vc_data *vc, int charattr, int ypos,
			 int xpos)
{
	unsigned char *p;

	/* low byte selects the glyph, high byte carries the attributes */
	p = &font_data[vc->vc_num][(charattr & 0xff) << 4];
	charattr = (charattr >> 8) & 0xff;
	xpos <<= 3;
	ypos <<= 4;

	newport_render_background(xpos, ypos, xpos, ypos,
				  (charattr & 0xf0) >> 4);

	/* Set the color and drawing mode. */
	newport_wait(npregs);
	npregs->set.colori = charattr & 0xf;
	npregs->set.drawmode0 = (NPORT_DMODE0_DRAW | NPORT_DMODE0_BLOCK |
				 NPORT_DMODE0_STOPX | NPORT_DMODE0_ZPENAB |
				 NPORT_DMODE0_L32);

	/* Set coordinates for bitmap operation. */
	npregs->set.xystarti = (xpos << 16) | ((ypos + topscan) & 0x3ff);
	npregs->set.xyendi = ((xpos + 7) << 16);
	newport_wait(npregs);

	/* Go, baby, go... */
	RENDER(npregs, p);
}

/* NOTE(review): continues beyond this chunk of the file. */
static void newport_putcs(struct vc_data *vc, const unsigned short *s,
			  int count, int ypos, int xpos)
{
	int i;
	int charattr;
	unsigned char *p;

	charattr = (scr_readw(s) >> 8) & 0xff;

	xpos <<= 3;
	ypos <<= 4;

	if (!logo_active)
		/* Clear the area behing the string */
		newport_render_background(xpos, ypos,
					  xpos + ((count - 1) << 3), ypos,
					  (charattr & 0xf0) >> 4);

	newport_wait(npregs);

	/* Set the color and drawing mode.
*/ npregs->set.colori = charattr & 0xf; npregs->set.drawmode0 = (NPORT_DMODE0_DRAW | NPORT_DMODE0_BLOCK | NPORT_DMODE0_STOPX | NPORT_DMODE0_ZPENAB | NPORT_DMODE0_L32); for (i = 0; i < count; i++, xpos += 8) { p = &font_data[vc->vc_num][(scr_readw(s++) & 0xff) << 4]; newport_wait(npregs); /* Set coordinates for bitmap operation. */ npregs->set.xystarti = (xpos << 16) | ((ypos + topscan) & 0x3ff); npregs->set.xyendi = ((xpos + 7) << 16); /* Go, baby, go... */ RENDER(npregs, p); } } static void newport_cursor(struct vc_data *vc, int mode) { unsigned short treg; int xcurs, ycurs; switch (mode) { case CM_ERASE: treg = newport_vc2_get(npregs, VC2_IREG_CONTROL); newport_vc2_set(npregs, VC2_IREG_CONTROL, (treg & ~(VC2_CTRL_ECDISP))); break; case CM_MOVE: case CM_DRAW: treg = newport_vc2_get(npregs, VC2_IREG_CONTROL); newport_vc2_set(npregs, VC2_IREG_CONTROL, (treg | VC2_CTRL_ECDISP)); xcurs = (vc->vc_pos - vc->vc_visible_origin) / 2; ycurs = ((xcurs / vc->vc_cols) << 4) + 31; xcurs = ((xcurs % vc->vc_cols) << 3) + xcurs_correction; newport_vc2_set(npregs, VC2_IREG_CURSX, xcurs); newport_vc2_set(npregs, VC2_IREG_CURSY, ycurs); } } static int newport_switch(struct vc_data *vc) { static int logo_drawn = 0; topscan = 0; npregs->cset.topscan = 0x3ff; if (!logo_drawn) { if (newport_show_logo()) { logo_drawn = 1; logo_active = 1; } } return 1; } static int newport_blank(struct vc_data *c, int blank, int mode_switch) { unsigned short treg; if (blank == 0) { /* unblank console */ treg = newport_vc2_get(npregs, VC2_IREG_CONTROL); newport_vc2_set(npregs, VC2_IREG_CONTROL, (treg | VC2_CTRL_EDISP)); } else { /* blank console */ treg = newport_vc2_get(npregs, VC2_IREG_CONTROL); newport_vc2_set(npregs, VC2_IREG_CONTROL, (treg & ~(VC2_CTRL_EDISP))); } return 1; } static int newport_set_font(int unit, struct console_font *op) { int w = op->width; int h = op->height; int size = h * op->charcount; int i; unsigned char *new_data, *data = op->data, *p; /* ladis: when I grow up, there will be a 
day... and more sizes will * be supported ;-) */ if ((w != 8) || (h != 16) || (op->charcount != 256 && op->charcount != 512)) return -EINVAL; if (!(new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER))) return -ENOMEM; new_data += FONT_EXTRA_WORDS * sizeof(int); FNTSIZE(new_data) = size; FNTCHARCNT(new_data) = op->charcount; REFCOUNT(new_data) = 0; /* usage counter */ p = new_data; for (i = 0; i < op->charcount; i++) { memcpy(p, data, h); data += 32; p += h; } /* check if font is already used by other console */ for (i = 0; i < MAX_NR_CONSOLES; i++) { if (font_data[i] != FONT_DATA && FNTSIZE(font_data[i]) == size && !memcmp(font_data[i], new_data, size)) { kfree(new_data - FONT_EXTRA_WORDS * sizeof(int)); /* current font is the same as the new one */ if (i == unit) return 0; new_data = font_data[i]; break; } } /* old font is user font */ if (font_data[unit] != FONT_DATA) { if (--REFCOUNT(font_data[unit]) == 0) kfree(font_data[unit] - FONT_EXTRA_WORDS * sizeof(int)); } REFCOUNT(new_data)++; font_data[unit] = new_data; return 0; } static int newport_set_def_font(int unit, struct console_font *op) { if (font_data[unit] != FONT_DATA) { if (--REFCOUNT(font_data[unit]) == 0) kfree(font_data[unit] - FONT_EXTRA_WORDS * sizeof(int)); font_data[unit] = FONT_DATA; } return 0; } static int newport_font_default(struct vc_data *vc, struct console_font *op, char *name) { return newport_set_def_font(vc->vc_num, op); } static int newport_font_set(struct vc_data *vc, struct console_font *font, unsigned flags) { return newport_set_font(vc->vc_num, font); } static int newport_set_palette(struct vc_data *vc, unsigned char *table) { return -EINVAL; } static int newport_scrolldelta(struct vc_data *vc, int lines) { /* there is (nearly) no off-screen memory, so we can't scroll back */ return 0; } static int newport_scroll(struct vc_data *vc, int t, int b, int dir, int lines) { int count, x, y; unsigned short *s, *d; unsigned short chattr; logo_active = 0; /* it's time to 
disable the logo now.. */ if (t == 0 && b == vc->vc_rows) { if (dir == SM_UP) { topscan = (topscan + (lines << 4)) & 0x3ff; newport_clear_lines(vc->vc_rows - lines, vc->vc_rows - 1, (vc->vc_color & 0xf0) >> 4); } else { topscan = (topscan + (-lines << 4)) & 0x3ff; newport_clear_lines(0, lines - 1, (vc->vc_color & 0xf0) >> 4); } npregs->cset.topscan = (topscan - 1) & 0x3ff; return 0; } count = (b - t - lines) * vc->vc_cols; if (dir == SM_UP) { x = 0; y = t; s = (unsigned short *) (vc->vc_origin + vc->vc_size_row * (t + lines)); d = (unsigned short *) (vc->vc_origin + vc->vc_size_row * t); while (count--) { chattr = scr_readw(s++); if (chattr != scr_readw(d)) { newport_putc(vc, chattr, y, x); scr_writew(chattr, d); } d++; if (++x == vc->vc_cols) { x = 0; y++; } } d = (unsigned short *) (vc->vc_origin + vc->vc_size_row * (b - lines)); x = 0; y = b - lines; for (count = 0; count < (lines * vc->vc_cols); count++) { if (scr_readw(d) != vc->vc_video_erase_char) { newport_putc(vc, vc->vc_video_erase_char, y, x); scr_writew(vc->vc_video_erase_char, d); } d++; if (++x == vc->vc_cols) { x = 0; y++; } } } else { x = vc->vc_cols - 1; y = b - 1; s = (unsigned short *) (vc->vc_origin + vc->vc_size_row * (b - lines) - 2); d = (unsigned short *) (vc->vc_origin + vc->vc_size_row * b - 2); while (count--) { chattr = scr_readw(s--); if (chattr != scr_readw(d)) { newport_putc(vc, chattr, y, x); scr_writew(chattr, d); } d--; if (x-- == 0) { x = vc->vc_cols - 1; y--; } } d = (unsigned short *) (vc->vc_origin + vc->vc_size_row * t); x = 0; y = t; for (count = 0; count < (lines * vc->vc_cols); count++) { if (scr_readw(d) != vc->vc_video_erase_char) { newport_putc(vc, vc->vc_video_erase_char, y, x); scr_writew(vc->vc_video_erase_char, d); } d++; if (++x == vc->vc_cols) { x = 0; y++; } } } return 1; } static void newport_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx, int h, int w) { short xs, ys, xe, ye, xoffs, yoffs, tmp; xs = sx << 3; xe = ((sx + w) << 3) - 1; /* * as bmove is 
only used to move stuff around in the same line * (h == 1), we don't care about wrap arounds caused by topscan != 0 */ ys = ((sy << 4) + topscan) & 0x3ff; ye = (((sy + h) << 4) - 1 + topscan) & 0x3ff; xoffs = (dx - sx) << 3; yoffs = (dy - sy) << 4; if (xoffs > 0) { /* move to the right, exchange starting points */ tmp = xe; xe = xs; xs = tmp; } newport_wait(npregs); npregs->set.drawmode0 = (NPORT_DMODE0_S2S | NPORT_DMODE0_BLOCK | NPORT_DMODE0_DOSETUP | NPORT_DMODE0_STOPX | NPORT_DMODE0_STOPY); npregs->set.xystarti = (xs << 16) | ys; npregs->set.xyendi = (xe << 16) | ye; npregs->go.xymove = (xoffs << 16) | yoffs; } static int newport_dummy(struct vc_data *c) { return 0; } #define DUMMY (void *) newport_dummy const struct consw newport_con = { .owner = THIS_MODULE, .con_startup = newport_startup, .con_init = newport_init, .con_deinit = newport_deinit, .con_clear = newport_clear, .con_putc = newport_putc, .con_putcs = newport_putcs, .con_cursor = newport_cursor, .con_scroll = newport_scroll, .con_bmove = newport_bmove, .con_switch = newport_switch, .con_blank = newport_blank, .con_font_set = newport_font_set, .con_font_default = newport_font_default, .con_set_palette = newport_set_palette, .con_scrolldelta = newport_scrolldelta, .con_set_origin = DUMMY, .con_save_screen = DUMMY }; static int newport_probe(struct gio_device *dev, const struct gio_device_id *id) { unsigned long newport_addr; int err; if (!dev->resource.start) return -EINVAL; if (npregs) return -EBUSY; /* we only support one Newport as console */ newport_addr = dev->resource.start + 0xF0000; if (!request_mem_region(newport_addr, 0x10000, "Newport")) return -ENODEV; npregs = (struct newport_regs *)/* ioremap cannot fail */ ioremap(newport_addr, sizeof(struct newport_regs)); console_lock(); err = do_take_over_console(&newport_con, 0, MAX_NR_CONSOLES - 1, 1); console_unlock(); return err; } static void newport_remove(struct gio_device *dev) { give_up_console(&newport_con); iounmap((void *)npregs); } static 
struct gio_device_id newport_ids[] = { { .id = 0x7e }, { .id = 0xff } }; MODULE_ALIAS("gio:7e"); static struct gio_driver newport_driver = { .name = "newport", .id_table = newport_ids, .probe = newport_probe, .remove = newport_remove, }; int __init newport_console_init(void) { return gio_register_driver(&newport_driver); } void __exit newport_console_exit(void) { gio_unregister_driver(&newport_driver); } module_init(newport_console_init); module_exit(newport_console_exit); MODULE_LICENSE("GPL");
gpl-2.0
ShadowElite22/Xperia-Z2-Z3
lib/genalloc.c
1427
11970
/* * Basic general purpose allocator for managing special purpose * memory, for example, memory that is not managed by the regular * kmalloc/kfree interface. Uses for this includes on-device special * memory, uncached memory etc. * * It is safe to use the allocator in NMI handlers and other special * unblockable contexts that could otherwise deadlock on locks. This * is implemented by using atomic operations and retries on any * conflicts. The disadvantage is that there may be livelocks in * extreme cases. For better scalability, one allocator can be used * for each CPU. * * The lockless operation only works if there is enough memory * available. If new memory is added to the pool a lock has to be * still taken. So any user relying on locklessness has to ensure * that sufficient memory is preallocated. * * The basic atomic operation of this allocator is cmpxchg on long. * On architectures that don't have NMI-safe cmpxchg implementation, * the allocator can NOT be used in NMI handler. So code uses the * allocator in NMI handler should depend on * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG. * * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org> * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. 
*/ #include <linux/slab.h> #include <linux/export.h> #include <linux/bitmap.h> #include <linux/rculist.h> #include <linux/interrupt.h> #include <linux/genalloc.h> #include <linux/vmalloc.h> static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set) { unsigned long val, nval; nval = *addr; do { val = nval; if (val & mask_to_set) return -EBUSY; cpu_relax(); } while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val); return 0; } static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear) { unsigned long val, nval; nval = *addr; do { val = nval; if ((val & mask_to_clear) != mask_to_clear) return -EBUSY; cpu_relax(); } while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val); return 0; } /* * bitmap_set_ll - set the specified number of bits at the specified position * @map: pointer to a bitmap * @start: a bit position in @map * @nr: number of bits to set * * Set @nr bits start from @start in @map lock-lessly. Several users * can set/clear the same bitmap simultaneously without lock. If two * users set the same bit, one user will return remain bits, otherwise * return 0. */ static int bitmap_set_ll(unsigned long *map, int start, int nr) { unsigned long *p = map + BIT_WORD(start); const int size = start + nr; int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG); unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start); while (nr - bits_to_set >= 0) { if (set_bits_ll(p, mask_to_set)) return nr; nr -= bits_to_set; bits_to_set = BITS_PER_LONG; mask_to_set = ~0UL; p++; } if (nr) { mask_to_set &= BITMAP_LAST_WORD_MASK(size); if (set_bits_ll(p, mask_to_set)) return nr; } return 0; } /* * bitmap_clear_ll - clear the specified number of bits at the specified position * @map: pointer to a bitmap * @start: a bit position in @map * @nr: number of bits to set * * Clear @nr bits start from @start in @map lock-lessly. Several users * can set/clear the same bitmap simultaneously without lock. 
If two * users clear the same bit, one user will return remain bits, * otherwise return 0. */ static int bitmap_clear_ll(unsigned long *map, int start, int nr) { unsigned long *p = map + BIT_WORD(start); const int size = start + nr; int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG); unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start); while (nr - bits_to_clear >= 0) { if (clear_bits_ll(p, mask_to_clear)) return nr; nr -= bits_to_clear; bits_to_clear = BITS_PER_LONG; mask_to_clear = ~0UL; p++; } if (nr) { mask_to_clear &= BITMAP_LAST_WORD_MASK(size); if (clear_bits_ll(p, mask_to_clear)) return nr; } return 0; } /** * gen_pool_create - create a new special memory pool * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents * @nid: node id of the node the pool structure should be allocated on, or -1 * * Create a new special memory pool that can be used to manage special purpose * memory not managed by the regular kmalloc/kfree interface. */ struct gen_pool *gen_pool_create(int min_alloc_order, int nid) { struct gen_pool *pool; pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid); if (pool != NULL) { spin_lock_init(&pool->lock); INIT_LIST_HEAD(&pool->chunks); pool->min_alloc_order = min_alloc_order; } return pool; } EXPORT_SYMBOL(gen_pool_create); /** * gen_pool_add_virt - add a new chunk of special memory to the pool * @pool: pool to add new memory chunk to * @virt: virtual starting address of memory chunk to add to pool * @phys: physical starting address of memory chunk to add to pool * @size: size in bytes of the memory chunk to add to pool * @nid: node id of the node the chunk structure and bitmap should be * allocated on, or -1 * * Add a new chunk of special memory to the specified pool. * * Returns 0 on success or a -ve errno on failure. 
*/ int gen_pool_add_virt(struct gen_pool *pool, u64 virt, phys_addr_t phys, size_t size, int nid) { struct gen_pool_chunk *chunk; int nbits = size >> pool->min_alloc_order; int nbytes = sizeof(struct gen_pool_chunk) + BITS_TO_LONGS(nbits) * sizeof(long); if (nbytes <= PAGE_SIZE) chunk = kmalloc_node(nbytes, __GFP_ZERO, nid); else chunk = vmalloc(nbytes); if (unlikely(chunk == NULL)) return -ENOMEM; if (nbytes > PAGE_SIZE) memset(chunk, 0, nbytes); chunk->phys_addr = phys; chunk->start_addr = virt; chunk->end_addr = virt + size; atomic_set(&chunk->avail, size); spin_lock(&pool->lock); list_add_rcu(&chunk->next_chunk, &pool->chunks); spin_unlock(&pool->lock); return 0; } EXPORT_SYMBOL(gen_pool_add_virt); /** * gen_pool_virt_to_phys - return the physical address of memory * @pool: pool to allocate from * @addr: starting address of memory * * Returns the physical address on success, or -1 on error. */ phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, u64 addr) { struct gen_pool_chunk *chunk; phys_addr_t paddr = -1; rcu_read_lock(); list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { if (addr >= chunk->start_addr && addr < chunk->end_addr) { paddr = chunk->phys_addr + (addr - chunk->start_addr); break; } } rcu_read_unlock(); return paddr; } EXPORT_SYMBOL(gen_pool_virt_to_phys); /** * gen_pool_destroy - destroy a special memory pool * @pool: pool to destroy * * Destroy the specified special memory pool. Verifies that there are no * outstanding allocations. 
*/ void gen_pool_destroy(struct gen_pool *pool) { struct list_head *_chunk, *_next_chunk; struct gen_pool_chunk *chunk; int order = pool->min_alloc_order; int bit, end_bit; list_for_each_safe(_chunk, _next_chunk, &pool->chunks) { int nbytes; chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); list_del(&chunk->next_chunk); end_bit = (chunk->end_addr - chunk->start_addr) >> order; nbytes = sizeof(struct gen_pool_chunk) + BITS_TO_LONGS(end_bit) * sizeof(long); bit = find_next_bit(chunk->bits, end_bit, 0); BUG_ON(bit < end_bit); if (nbytes <= PAGE_SIZE) kfree(chunk); else vfree(chunk); } kfree(pool); return; } EXPORT_SYMBOL(gen_pool_destroy); /** * gen_pool_alloc_aligned - allocate special memory from the pool * @pool: pool to allocate from * @size: number of bytes to allocate from the pool * @alignment_order: Order the allocated space should be * aligned to (eg. 20 means allocated space * must be aligned to 1MiB). * * Allocate the requested number of bytes from the specified pool. * Uses a first-fit algorithm. Can not be used in NMI handler on * architectures without NMI-safe cmpxchg implementation. 
*/ u64 gen_pool_alloc_aligned(struct gen_pool *pool, size_t size, unsigned alignment_order) { struct gen_pool_chunk *chunk; u64 addr = 0, align_mask = 0; int order = pool->min_alloc_order; int nbits, start_bit = 0, remain; #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG BUG_ON(in_nmi()); #endif if (size == 0) return 0; if (alignment_order > order) align_mask = (1 << (alignment_order - order)) - 1; nbits = (size + (1UL << order) - 1) >> order; rcu_read_lock(); list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { unsigned long chunk_size; if (size > atomic_read(&chunk->avail)) continue; chunk_size = (chunk->end_addr - chunk->start_addr) >> order; retry: start_bit = bitmap_find_next_zero_area_off(chunk->bits, chunk_size, 0, nbits, align_mask, chunk->start_addr >> order); if (start_bit >= chunk_size) continue; remain = bitmap_set_ll(chunk->bits, start_bit, nbits); if (remain) { remain = bitmap_clear_ll(chunk->bits, start_bit, nbits - remain); BUG_ON(remain); goto retry; } addr = chunk->start_addr + ((u64)start_bit << order); size = nbits << pool->min_alloc_order; atomic_sub(size, &chunk->avail); break; } rcu_read_unlock(); return addr; } EXPORT_SYMBOL(gen_pool_alloc_aligned); /** * gen_pool_free - free allocated special memory back to the pool * @pool: pool to free to * @addr: starting address of memory to free back to pool * @size: size in bytes of memory to free * * Free previously allocated special memory back to the specified * pool. Can not be used in NMI handler on architectures without * NMI-safe cmpxchg implementation. 
*/ void gen_pool_free(struct gen_pool *pool, u64 addr, size_t size) { struct gen_pool_chunk *chunk; int order = pool->min_alloc_order; int start_bit, nbits, remain; #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG BUG_ON(in_nmi()); #endif nbits = (size + (1UL << order) - 1) >> order; rcu_read_lock(); list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { if (addr >= chunk->start_addr && addr < chunk->end_addr) { BUG_ON(addr + size > chunk->end_addr); start_bit = (addr - chunk->start_addr) >> order; remain = bitmap_clear_ll(chunk->bits, start_bit, nbits); BUG_ON(remain); size = nbits << order; atomic_add(size, &chunk->avail); rcu_read_unlock(); return; } } rcu_read_unlock(); BUG(); } EXPORT_SYMBOL(gen_pool_free); /** * gen_pool_for_each_chunk - call func for every chunk of generic memory pool * @pool: the generic memory pool * @func: func to call * @data: additional data used by @func * * Call @func for every chunk of generic memory pool. The @func is * called with rcu_read_lock held. */ void gen_pool_for_each_chunk(struct gen_pool *pool, void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data), void *data) { struct gen_pool_chunk *chunk; rcu_read_lock(); list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) func(pool, chunk, data); rcu_read_unlock(); } EXPORT_SYMBOL(gen_pool_for_each_chunk); /** * gen_pool_avail - get available free space of the pool * @pool: pool to get available free space * * Return available free space of the specified pool. */ size_t gen_pool_avail(struct gen_pool *pool) { struct gen_pool_chunk *chunk; size_t avail = 0; rcu_read_lock(); list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) avail += atomic_read(&chunk->avail); rcu_read_unlock(); return avail; } EXPORT_SYMBOL_GPL(gen_pool_avail); /** * gen_pool_size - get size in bytes of memory managed by the pool * @pool: pool to get size * * Return size in bytes of memory managed by the pool. 
*/ size_t gen_pool_size(struct gen_pool *pool) { struct gen_pool_chunk *chunk; size_t size = 0; rcu_read_lock(); list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) size += chunk->end_addr - chunk->start_addr; rcu_read_unlock(); return size; } EXPORT_SYMBOL_GPL(gen_pool_size);
gpl-2.0
Dev-Ghost/android_kernel_taoshan
kernel/events/hw_breakpoint.c
2195
16080
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) 2007 Alan Stern * Copyright (C) IBM Corporation, 2009 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com> * * Thanks to Ingo Molnar for his many suggestions. * * Authors: Alan Stern <stern@rowland.harvard.edu> * K.Prasad <prasad@linux.vnet.ibm.com> * Frederic Weisbecker <fweisbec@gmail.com> */ /* * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility, * using the CPU's debug registers. * This file contains the arch-independent routines. 
*/ #include <linux/irqflags.h> #include <linux/kallsyms.h> #include <linux/notifier.h> #include <linux/kprobes.h> #include <linux/kdebug.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/percpu.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/cpu.h> #include <linux/smp.h> #include <linux/hw_breakpoint.h> /* * Constraints data */ /* Number of pinned cpu breakpoints in a cpu */ static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]); /* Number of pinned task breakpoints in a cpu */ static DEFINE_PER_CPU(unsigned int *, nr_task_bp_pinned[TYPE_MAX]); /* Number of non-pinned cpu/task breakpoints in a cpu */ static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]); static int nr_slots[TYPE_MAX]; /* Keep track of the breakpoints attached to tasks */ static LIST_HEAD(bp_task_head); static int constraints_initialized; /* Gather the number of total pinned and un-pinned bp in a cpuset */ struct bp_busy_slots { unsigned int pinned; unsigned int flexible; }; /* Serialize accesses to the above constraints */ static DEFINE_MUTEX(nr_bp_mutex); __weak int hw_breakpoint_weight(struct perf_event *bp) { return 1; } static inline enum bp_type_idx find_slot_idx(struct perf_event *bp) { if (bp->attr.bp_type & HW_BREAKPOINT_RW) return TYPE_DATA; return TYPE_INST; } /* * Report the maximum number of pinned breakpoints a task * have in this cpu */ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type) { int i; unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu); for (i = nr_slots[type] - 1; i >= 0; i--) { if (tsk_pinned[i] > 0) return i + 1; } return 0; } /* * Count the number of breakpoints of the same type and same task. * The given event must be not on the list. 
*/ static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type) { struct task_struct *tsk = bp->hw.bp_target; struct perf_event *iter; int count = 0; list_for_each_entry(iter, &bp_task_head, hw.bp_list) { if (iter->hw.bp_target == tsk && find_slot_idx(iter) == type) count += hw_breakpoint_weight(iter); } return count; } /* * Report the number of pinned/un-pinned breakpoints we have in * a given cpu (cpu > -1) or in all of them (cpu = -1). */ static void fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp, enum bp_type_idx type) { int cpu = bp->cpu; struct task_struct *tsk = bp->hw.bp_target; if (cpu >= 0) { slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu); if (!tsk) slots->pinned += max_task_bp_pinned(cpu, type); else slots->pinned += task_bp_pinned(bp, type); slots->flexible = per_cpu(nr_bp_flexible[type], cpu); return; } for_each_online_cpu(cpu) { unsigned int nr; nr = per_cpu(nr_cpu_bp_pinned[type], cpu); if (!tsk) nr += max_task_bp_pinned(cpu, type); else nr += task_bp_pinned(bp, type); if (nr > slots->pinned) slots->pinned = nr; nr = per_cpu(nr_bp_flexible[type], cpu); if (nr > slots->flexible) slots->flexible = nr; } } /* * For now, continue to consider flexible as pinned, until we can * ensure no flexible event can ever be scheduled before a pinned event * in a same cpu. 
*/ static void fetch_this_slot(struct bp_busy_slots *slots, int weight) { slots->pinned += weight; } /* * Add a pinned breakpoint for the given task in our constraint table */ static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable, enum bp_type_idx type, int weight) { unsigned int *tsk_pinned; int old_count = 0; int old_idx = 0; int idx = 0; old_count = task_bp_pinned(bp, type); old_idx = old_count - 1; idx = old_idx + weight; /* tsk_pinned[n] is the number of tasks having n breakpoints */ tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu); if (enable) { tsk_pinned[idx]++; if (old_count > 0) tsk_pinned[old_idx]--; } else { tsk_pinned[idx]--; if (old_count > 0) tsk_pinned[old_idx]++; } } /* * Add/remove the given breakpoint in our constraint table */ static void toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type, int weight) { int cpu = bp->cpu; struct task_struct *tsk = bp->hw.bp_target; /* Pinned counter cpu profiling */ if (!tsk) { if (enable) per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight; else per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight; return; } /* Pinned counter task profiling */ if (!enable) list_del(&bp->hw.bp_list); if (cpu >= 0) { toggle_bp_task_slot(bp, cpu, enable, type, weight); } else { for_each_online_cpu(cpu) toggle_bp_task_slot(bp, cpu, enable, type, weight); } if (enable) list_add_tail(&bp->hw.bp_list, &bp_task_head); } /* * Function to perform processor-specific cleanup during unregistration */ __weak void arch_unregister_hw_breakpoint(struct perf_event *bp) { /* * A weak stub function here for those archs that don't define * it inside arch/.../kernel/hw_breakpoint.c */ } /* * Contraints to check before allowing this new breakpoint counter: * * == Non-pinned counter == (Considered as pinned for now) * * - If attached to a single cpu, check: * * (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu) * + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM * * -> If there are already 
non-pinned counters in this cpu, it means * there is already a free slot for them. * Otherwise, we check that the maximum number of per task * breakpoints (for this cpu) plus the number of per cpu breakpoint * (for this cpu) doesn't cover every registers. * * - If attached to every cpus, check: * * (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *)) * + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM * * -> This is roughly the same, except we check the number of per cpu * bp for every cpu and we keep the max one. Same for the per tasks * breakpoints. * * * == Pinned counter == * * - If attached to a single cpu, check: * * ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu) * + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM * * -> Same checks as before. But now the nr_bp_flexible, if any, must keep * one register at least (or they will never be fed). * * - If attached to every cpus, check: * * ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *)) * + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM */ static int __reserve_bp_slot(struct perf_event *bp) { struct bp_busy_slots slots = {0}; enum bp_type_idx type; int weight; /* We couldn't initialize breakpoint constraints on boot */ if (!constraints_initialized) return -ENOMEM; /* Basic checks */ if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY || bp->attr.bp_type == HW_BREAKPOINT_INVALID) return -EINVAL; type = find_slot_idx(bp); weight = hw_breakpoint_weight(bp); fetch_bp_busy_slots(&slots, bp, type); /* * Simulate the addition of this breakpoint to the constraints * and see the result. 
*/ fetch_this_slot(&slots, weight); /* Flexible counters need to keep at least one slot */ if (slots.pinned + (!!slots.flexible) > nr_slots[type]) return -ENOSPC; toggle_bp_slot(bp, true, type, weight); return 0; } int reserve_bp_slot(struct perf_event *bp) { int ret; mutex_lock(&nr_bp_mutex); ret = __reserve_bp_slot(bp); mutex_unlock(&nr_bp_mutex); return ret; } static void __release_bp_slot(struct perf_event *bp) { enum bp_type_idx type; int weight; type = find_slot_idx(bp); weight = hw_breakpoint_weight(bp); toggle_bp_slot(bp, false, type, weight); } void release_bp_slot(struct perf_event *bp) { mutex_lock(&nr_bp_mutex); arch_unregister_hw_breakpoint(bp); __release_bp_slot(bp); mutex_unlock(&nr_bp_mutex); } /* * Allow the kernel debugger to reserve breakpoint slots without * taking a lock using the dbg_* variant of for the reserve and * release breakpoint slots. */ int dbg_reserve_bp_slot(struct perf_event *bp) { if (mutex_is_locked(&nr_bp_mutex)) return -1; return __reserve_bp_slot(bp); } int dbg_release_bp_slot(struct perf_event *bp) { if (mutex_is_locked(&nr_bp_mutex)) return -1; __release_bp_slot(bp); return 0; } static int validate_hw_breakpoint(struct perf_event *bp) { int ret; ret = arch_validate_hwbkpt_settings(bp); if (ret) return ret; if (arch_check_bp_in_kernelspace(bp)) { if (bp->attr.exclude_kernel) return -EINVAL; /* * Don't let unprivileged users set a breakpoint in the trap * path to avoid trap recursion attacks. 
*/ if (!capable(CAP_SYS_ADMIN)) return -EPERM; } return 0; } int register_perf_hw_breakpoint(struct perf_event *bp) { int ret; ret = reserve_bp_slot(bp); if (ret) return ret; ret = validate_hw_breakpoint(bp); /* if arch_validate_hwbkpt_settings() fails then release bp slot */ if (ret) release_bp_slot(bp); return ret; } /** * register_user_hw_breakpoint - register a hardware breakpoint for user space * @attr: breakpoint attributes * @triggered: callback to trigger when we hit the breakpoint * @tsk: pointer to 'task_struct' of the process to which the address belongs */ struct perf_event * register_user_hw_breakpoint(struct perf_event_attr *attr, perf_overflow_handler_t triggered, void *context, struct task_struct *tsk) { return perf_event_create_kernel_counter(attr, -1, tsk, triggered, context); } EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); /** * modify_user_hw_breakpoint - modify a user-space hardware breakpoint * @bp: the breakpoint structure to modify * @attr: new breakpoint attributes * @triggered: callback to trigger when we hit the breakpoint * @tsk: pointer to 'task_struct' of the process to which the address belongs */ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr) { u64 old_addr = bp->attr.bp_addr; u64 old_len = bp->attr.bp_len; int old_type = bp->attr.bp_type; int err = 0; perf_event_disable(bp); bp->attr.bp_addr = attr->bp_addr; bp->attr.bp_type = attr->bp_type; bp->attr.bp_len = attr->bp_len; if (attr->disabled) goto end; err = validate_hw_breakpoint(bp); if (!err) perf_event_enable(bp); if (err) { bp->attr.bp_addr = old_addr; bp->attr.bp_type = old_type; bp->attr.bp_len = old_len; if (!bp->attr.disabled) perf_event_enable(bp); return err; } end: bp->attr.disabled = attr->disabled; return 0; } EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint); /** * unregister_hw_breakpoint - unregister a user-space hardware breakpoint * @bp: the breakpoint structure to unregister */ void unregister_hw_breakpoint(struct perf_event *bp) 
{ if (!bp) return; perf_event_release_kernel(bp); } EXPORT_SYMBOL_GPL(unregister_hw_breakpoint); /** * register_wide_hw_breakpoint - register a wide breakpoint in the kernel * @attr: breakpoint attributes * @triggered: callback to trigger when we hit the breakpoint * * @return a set of per_cpu pointers to perf events */ struct perf_event * __percpu * register_wide_hw_breakpoint(struct perf_event_attr *attr, perf_overflow_handler_t triggered, void *context) { struct perf_event * __percpu *cpu_events, **pevent, *bp; long err; int cpu; cpu_events = alloc_percpu(typeof(*cpu_events)); if (!cpu_events) return (void __percpu __force *)ERR_PTR(-ENOMEM); get_online_cpus(); for_each_online_cpu(cpu) { pevent = per_cpu_ptr(cpu_events, cpu); bp = perf_event_create_kernel_counter(attr, cpu, NULL, triggered, context); *pevent = bp; if (IS_ERR(bp)) { err = PTR_ERR(bp); goto fail; } } put_online_cpus(); return cpu_events; fail: for_each_online_cpu(cpu) { pevent = per_cpu_ptr(cpu_events, cpu); if (IS_ERR(*pevent)) break; unregister_hw_breakpoint(*pevent); } put_online_cpus(); free_percpu(cpu_events); return (void __percpu __force *)ERR_PTR(err); } EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint); /** * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel * @cpu_events: the per cpu set of events to unregister */ void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events) { int cpu; struct perf_event **pevent; for_each_possible_cpu(cpu) { pevent = per_cpu_ptr(cpu_events, cpu); unregister_hw_breakpoint(*pevent); } free_percpu(cpu_events); } EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint); static struct notifier_block hw_breakpoint_exceptions_nb = { .notifier_call = hw_breakpoint_exceptions_notify, /* we need to be notified first */ .priority = 0x7fffffff }; static void bp_perf_event_destroy(struct perf_event *event) { release_bp_slot(event); } static int hw_breakpoint_event_init(struct perf_event *bp) { int err; if (bp->attr.type != 
PERF_TYPE_BREAKPOINT) return -ENOENT; /* * no branch sampling for breakpoint events */ if (has_branch_stack(bp)) return -EOPNOTSUPP; err = register_perf_hw_breakpoint(bp); if (err) return err; bp->destroy = bp_perf_event_destroy; return 0; } static int hw_breakpoint_add(struct perf_event *bp, int flags) { if (!(flags & PERF_EF_START)) bp->hw.state = PERF_HES_STOPPED; return arch_install_hw_breakpoint(bp); } static void hw_breakpoint_del(struct perf_event *bp, int flags) { arch_uninstall_hw_breakpoint(bp); } static void hw_breakpoint_start(struct perf_event *bp, int flags) { bp->hw.state = 0; } static void hw_breakpoint_stop(struct perf_event *bp, int flags) { bp->hw.state = PERF_HES_STOPPED; } static int hw_breakpoint_event_idx(struct perf_event *bp) { return 0; } static struct pmu perf_breakpoint = { .task_ctx_nr = perf_sw_context, /* could eventually get its own */ .event_init = hw_breakpoint_event_init, .add = hw_breakpoint_add, .del = hw_breakpoint_del, .start = hw_breakpoint_start, .stop = hw_breakpoint_stop, .read = hw_breakpoint_pmu_read, .event_idx = hw_breakpoint_event_idx, }; int __init init_hw_breakpoint(void) { unsigned int **task_bp_pinned; int cpu, err_cpu; int i; for (i = 0; i < TYPE_MAX; i++) nr_slots[i] = hw_breakpoint_slots(i); for_each_possible_cpu(cpu) { for (i = 0; i < TYPE_MAX; i++) { task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu); *task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i], GFP_KERNEL); if (!*task_bp_pinned) goto err_alloc; } } constraints_initialized = 1; perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT); return register_die_notifier(&hw_breakpoint_exceptions_nb); err_alloc: for_each_possible_cpu(err_cpu) { for (i = 0; i < TYPE_MAX; i++) kfree(per_cpu(nr_task_bp_pinned[i], cpu)); if (err_cpu == cpu) break; } return -ENOMEM; }
gpl-2.0
k2wl/grand4.2.2
arch/sparc/mm/btfixup.c
2195
10217
/* btfixup.c: Boot time code fixup and relocator, so that
 * we can get rid of most indirect calls to achieve single
 * image sun4c and srmmu kernel.
 *
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/btfixup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/system.h>
#include <asm/cacheflush.h>

#define BTFIXUP_OPTIMIZE_NOP
#define BTFIXUP_OPTIMIZE_OTHER

extern char *srmmu_name;

static char version[] __initdata = "Boot time fixup v1.6. 4/Mar/98 Jakub Jelinek (jj@ultra.linux.cz). Patching kernel for ";
static char str_sun4c[] __initdata = "sun4c\n";
static char str_srmmu[] __initdata = "srmmu[%s]/";
static char str_iommu[] __initdata = "iommu\n";
static char str_iounit[] __initdata = "io-unit\n";

/* Guard so the "Patching kernel for ..." banner is printed only once. */
static int visited __initdata = 0;
extern unsigned int ___btfixup_start[], ___btfixup_end[], __init_begin[], __init_end[], __init_text_end[];
extern unsigned int _stext[], _end[], __start___ksymtab[], __stop___ksymtab[];

/* Diagnostic strings for prom_printf() before halting on a bad fixup. */
static char wrong_f[] __initdata = "Trying to set f fixup %p to invalid function %08x\n";
static char wrong_b[] __initdata = "Trying to set b fixup %p to invalid function %08x\n";
static char wrong_s[] __initdata = "Trying to set s fixup %p to invalid value %08x\n";
static char wrong_h[] __initdata = "Trying to set h fixup %p to invalid value %08x\n";
static char wrong_a[] __initdata = "Trying to set a fixup %p to invalid value %08x\n";
static char wrong[] __initdata = "Wrong address for %c fixup %p\n";
static char insn_f[] __initdata = "Fixup f %p refers to weird instructions at %p[%08x,%08x]\n";
static char insn_b[] __initdata = "Fixup b %p doesn't refer to a SETHI at %p[%08x]\n";
static char insn_s[] __initdata = "Fixup s %p doesn't refer to an OR at %p[%08x]\n";
static char insn_h[] __initdata = "Fixup h %p doesn't refer to a SETHI at %p[%08x]\n";
static char insn_a[] __initdata = "Fixup a %p doesn't refer to a SETHI nor OR at %p[%08x]\n";
static char insn_i[] __initdata = "Fixup i %p doesn't refer to a valid instruction at %p[%08x]\n";
static char fca_und[] __initdata = "flush_cache_all undefined in btfixup()\n";
static char wrong_setaddr[] __initdata = "Garbled CALL/INT patch at %p[%08x,%08x,%08x]=%08x\n";

#ifdef BTFIXUP_OPTIMIZE_OTHER
/*
 * Store @value at the patch site @addr.  When a previous pass mangled
 * the site (fmangled: instruction noped out or moved into the delay
 * slot), the saved-instruction slot @q1 must be updated consistently;
 * anything else means the site was corrupted, so halt.
 */
static void __init set_addr(unsigned int *addr, unsigned int q1, int fmangled, unsigned int value)
{
	if (!fmangled)
		*addr = value;
	else {
		unsigned int *q = (unsigned int *)q1;
		if (*addr == 0x01000000) {	/* Noped */
			*q = value;
		} else if (addr[-1] == *q) {	/* Moved */
			addr[-1] = value;
			*q = value;
		} else {
			prom_printf(wrong_setaddr, addr-1, addr[-1], *addr, *q, value);
			prom_halt();
		}
	}
}
#else
static inline void set_addr(unsigned int *addr, unsigned int q1, int fmangled, unsigned int value)
{
	*addr = value;
}
#endif

/*
 * Walk the ___btfixup_start..___btfixup_end table and patch each
 * referenced instruction in place.  Each record starts with a type
 * character ('f' call, 'b' blackbox, 's' simm13, 'h' sethi, 'a' half,
 * 'i' int), a value p[1], and a list of patch-site addresses.
 * All numeric masks below are SPARC V8 instruction encodings.
 */
void __init btfixup(void)
{
	unsigned int *p, *q;
	int type, count;
	unsigned insn;
	unsigned *addr;
	int fmangled = 0;
	void (*flush_cacheall)(void);

	if (!visited) {
		visited++;
		printk(version);
		if (ARCH_SUN4C)
			printk(str_sun4c);
		else {
			printk(str_srmmu, srmmu_name);
			if (sparc_cpu_model == sun4d)
				printk(str_iounit);
			else
				printk(str_iommu);
		}
	}
	for (p = ___btfixup_start; p < ___btfixup_end; ) {
		count = p[2];
		q = p + 3;
		/* Validate the record's target value before patching. */
		switch (type = *(unsigned char *)p) {
		case 'f':
			count = p[3];
			q = p + 4;
			if (((p[0] & 1) || p[1]) &&
			    ((p[1] & 3) || (unsigned *)(p[1]) < _stext || (unsigned *)(p[1]) >= _end)) {
				prom_printf(wrong_f, p, p[1]);
				prom_halt();
			}
			break;
		case 'b':
			if (p[1] < (unsigned long)__init_begin ||
			    p[1] >= (unsigned long)__init_text_end || (p[1] & 3)) {
				prom_printf(wrong_b, p, p[1]);
				prom_halt();
			}
			break;
		case 's':
			/* must fit in a signed 13-bit immediate */
			if (p[1] + 0x1000 >= 0x2000) {
				prom_printf(wrong_s, p, p[1]);
				prom_halt();
			}
			break;
		case 'h':
			/* must be a valid %hi() value (low 10 bits clear) */
			if (p[1] & 0x3ff) {
				prom_printf(wrong_h, p, p[1]);
				prom_halt();
			}
			break;
		case 'a':
			if (p[1] + 0x1000 >= 0x2000 && (p[1] & 0x3ff)) {
				prom_printf(wrong_a, p, p[1]);
				prom_halt();
			}
			break;
		}
		if (p[0] & 1) {
			p[0] &= ~1;
			while (count) {
				fmangled = 0;
				addr = (unsigned *)*q;
				if (addr < _stext || addr >= _end) {
					prom_printf(wrong, type, p);
					prom_halt();
				}
				insn = *addr;
#ifdef BTFIXUP_OPTIMIZE_OTHER
				if (type != 'f' && q[1]) {
					insn = *(unsigned int *)q[1];
					if (!insn || insn == 1)
						insn = *addr;
					else
						fmangled = 1;
				}
#endif
				switch (type) {
				case 'f':	/* CALL */
					if (addr >= __start___ksymtab && addr < __stop___ksymtab) {
						*addr = p[1];
						break;
					} else if (!q[1]) {
						if ((insn & 0xc1c00000) == 0x01000000) { /* SETHI */
							*addr = (insn & 0xffc00000) | (p[1] >> 10);
							break;
						} else if ((insn & 0xc1f82000) == 0x80102000) { /* OR X, %LO(i), Y */
							*addr = (insn & 0xffffe000) | (p[1] & 0x3ff);
							break;
						} else if ((insn & 0xc0000000) != 0x40000000) { /* !CALL */
				bad_f:
							prom_printf(insn_f, p, addr, insn, addr[1]);
							prom_halt();
						}
					} else if (q[1] != 1)
						addr[1] = q[1];
					if (p[2] == BTFIXUPCALL_NORM) {
				norm_f:
						*addr = 0x40000000 | ((p[1] - (unsigned)addr) >> 2);
						q[1] = 0;
						break;
					}
#ifndef BTFIXUP_OPTIMIZE_NOP
					goto norm_f;
#else
					/* Inspect the delay-slot instruction before optimizing. */
					if (!(addr[1] & 0x80000000)) {
						if ((addr[1] & 0xc1c00000) != 0x01000000) /* !SETHI */
							goto bad_f; /* CALL, Bicc, FBfcc, CBccc are weird in delay slot, aren't they? */
					} else {
						if ((addr[1] & 0x01800000) == 0x01800000) {
							if ((addr[1] & 0x01f80000) == 0x01e80000) {
								/* RESTORE */
								goto norm_f; /* It is dangerous to patch that */
							}
							goto bad_f;
						}
						if ((addr[1] & 0xffffe003) == 0x9e03e000) {
							/* ADD %O7, XX, %o7 */
							int displac = (addr[1] << 19);

							displac = (displac >> 21) + 2;
							*addr = (0x10800000) + (displac & 0x3fffff);
							q[1] = addr[1];
							addr[1] = p[2];
							break;
						}
						if ((addr[1] & 0x201f) == 0x200f || (addr[1] & 0x7c000) == 0x3c000)
							goto norm_f; /* Someone is playing bad tricks with us: rs1 or rs2 is o7 */
						if ((addr[1] & 0x3e000000) == 0x1e000000)
							goto norm_f; /* rd is %o7. We'd better take care. */
					}
					if (p[2] == BTFIXUPCALL_NOP) {
						*addr = 0x01000000;
						q[1] = 1;
						break;
					}
#ifndef BTFIXUP_OPTIMIZE_OTHER
					goto norm_f;
#else
					if (addr[1] == 0x01000000) {	/* NOP in the delay slot */
						q[1] = addr[1];
						*addr = p[2];
						break;
					}
					if ((addr[1] & 0xc0000000) != 0xc0000000) {
						/* Not a memory operation */
						if ((addr[1] & 0x30000000) == 0x10000000) {
							/* Ok, non-memory op with rd %oX */
							if ((addr[1] & 0x3e000000) == 0x1c000000)
								goto bad_f; /* Aiee. Someone is playing strange %sp tricks */
							if ((addr[1] & 0x3e000000) > 0x12000000 ||
							    ((addr[1] & 0x3e000000) == 0x12000000 &&
							     p[2] != BTFIXUPCALL_STO1O0 && p[2] != BTFIXUPCALL_SWAPO0O1) ||
							    ((p[2] & 0xffffe000) == BTFIXUPCALL_RETINT(0))) {
								/* Nobody uses the result. We can nop it out. */
								*addr = p[2];
								q[1] = addr[1];
								addr[1] = 0x01000000;
								break;
							}
							if ((addr[1] & 0xf1ffffe0) == 0x90100000) {
								/* MOV %reg, %Ox */
								if ((addr[1] & 0x3e000000) == 0x10000000 &&
								    (p[2] & 0x7c000) == 0x20000) {
									/* Ok, it is call xx; mov reg, %o0 and call optimizes
									   to doing something on %o0. Patch the patch. */
									*addr = (p[2] & ~0x7c000) | ((addr[1] & 0x1f) << 14);
									q[1] = addr[1];
									addr[1] = 0x01000000;
									break;
								}
								if ((addr[1] & 0x3e000000) == 0x12000000 &&
								    p[2] == BTFIXUPCALL_STO1O0) {
									*addr = (p[2] & ~0x3e000000) | ((addr[1] & 0x1f) << 25);
									q[1] = addr[1];
									addr[1] = 0x01000000;
									break;
								}
							}
						}
					}
					/* Default: move the delay-slot insn up, put the patch value after it. */
					*addr = addr[1];
					q[1] = addr[1];
					addr[1] = p[2];
					break;
#endif /* BTFIXUP_OPTIMIZE_OTHER */
#endif /* BTFIXUP_OPTIMIZE_NOP */
				case 'b':	/* BLACKBOX */
					/* Has to be sethi i, xx */
					if ((insn & 0xc1c00000) != 0x01000000) {
						prom_printf(insn_b, p, addr, insn);
						prom_halt();
					} else {
						void (*do_fixup)(unsigned *);

						do_fixup = (void (*)(unsigned *))p[1];
						do_fixup(addr);
					}
					break;
				case 's':	/* SIMM13 */
					/* Has to be or %g0, i, xx */
					if ((insn & 0xc1ffe000) != 0x80102000) {
						prom_printf(insn_s, p, addr, insn);
						prom_halt();
					}
					set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x1fff));
					break;
				case 'h':	/* SETHI */
					/* Has to be sethi i, xx */
					if ((insn & 0xc1c00000) != 0x01000000) {
						prom_printf(insn_h, p, addr, insn);
						prom_halt();
					}
					set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
					break;
				case 'a':	/* HALF */
					/* Has to be sethi i, xx or or %g0, i, xx */
					if ((insn & 0xc1c00000) != 0x01000000 &&
					    (insn & 0xc1ffe000) != 0x80102000) {
						prom_printf(insn_a, p, addr, insn);
						prom_halt();
					}
					if (p[1] & 0x3ff)
						set_addr(addr, q[1], fmangled,
							 (insn & 0x3e000000) | 0x80102000 | (p[1] & 0x1fff));
					else
						set_addr(addr, q[1], fmangled,
							 (insn & 0x3e000000) | 0x01000000 | (p[1] >> 10));
					break;
				case 'i':	/* INT */
					if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
						set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
					else if ((insn & 0x80002000) == 0x80002000) /* %LO */
						set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
					else {
						prom_printf(insn_i, p, addr, insn);
						prom_halt();
					}
					break;
				}
				count -= 2;
				q += 2;
			}
		} else
			p = q + count;
	}
	/* Patched code must be flushed from the caches before it can run. */
#ifdef CONFIG_SMP
	flush_cacheall = (void (*)(void))BTFIXUPVAL_CALL(local_flush_cache_all);
#else
	flush_cacheall = (void (*)(void))BTFIXUPVAL_CALL(flush_cache_all);
#endif
	if (!flush_cacheall) {
		prom_printf(fca_und);
		prom_halt();
	}
	(*flush_cacheall)();
}
gpl-2.0
varigit/VAR-SOM-AM33-SDK7-Kernel
drivers/gpu/drm/mga/mga_irq.c
2451
4959
/* mga_irq.c -- IRQ handling for the Matrox MGA -*- linux-c -*-
 * (header below retained from the Radeon driver this file was derived from)
 */
/*
 * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell <keith@tungstengraphics.com>
 *    Eric Anholt <anholt@FreeBSD.org>
 */

#include <drm/drmP.h>
#include <drm/mga_drm.h>
#include "mga_drv.h"

/*
 * Return the vblank count maintained by the interrupt handler.
 * The MGA exposes only one crtc; any other crtc index reads as 0.
 */
u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
{
	const drm_mga_private_t *const dev_priv =
		(drm_mga_private_t *) dev->dev_private;

	if (crtc != 0)
		return 0;

	return atomic_read(&dev_priv->vbl_received);
}

/*
 * Top-half interrupt handler: acknowledges and dispatches the VLINE
 * (vblank) and SOFTRAP (fence) interrupt sources.
 */
irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
	int status;
	int handled = 0;

	status = MGA_READ(MGA_STATUS);

	/* VBLANK interrupt */
	if (status & MGA_VLINEPEN) {
		MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
		atomic_inc(&dev_priv->vbl_received);
		drm_handle_vblank(dev, 0);
		handled = 1;
	}

	/* SOFTRAP interrupt */
	if (status & MGA_SOFTRAPEN) {
		const u32 prim_start = MGA_READ(MGA_PRIMADDRESS);
		const u32 prim_end = MGA_READ(MGA_PRIMEND);

		MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);

		/* In addition to clearing the interrupt-pending bit, we
		 * have to write to MGA_PRIMEND to re-start the DMA operation.
		 */
		if ((prim_start & ~0x03) != (prim_end & ~0x03))
			MGA_WRITE(MGA_PRIMEND, prim_end);

		atomic_inc(&dev_priv->last_fence_retired);
		DRM_WAKEUP(&dev_priv->fence_queue);
		handled = 1;
	}

	if (handled)
		return IRQ_HANDLED;
	return IRQ_NONE;
}

/* Enable vblank (and softrap) interrupt delivery for crtc 0. */
int mga_enable_vblank(struct drm_device *dev, int crtc)
{
	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;

	if (crtc != 0) {
		DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
			  crtc);
		return 0;
	}

	MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
	return 0;
}

/* Intentionally a no-op apart from the sanity check — see comment below. */
void mga_disable_vblank(struct drm_device *dev, int crtc)
{
	if (crtc != 0) {
		DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
			  crtc);
	}

	/* Do *NOT* disable the vertical refresh interrupt.  MGA doesn't have
	 * a nice hardware counter that tracks the number of refreshes when
	 * the interrupt is disabled, and the kernel doesn't know the refresh
	 * rate to calculate an estimate.
	 */
	/* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */
}

/*
 * Sleep (up to 3 seconds) until the retired-fence counter reaches
 * *sequence; writes back the counter value actually observed.
 */
int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
{
	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
	unsigned int cur_fence;
	int ret = 0;

	/* Assume that the user has missed the current sequence number
	 * by about a day rather than she wants to wait for years
	 * using fences.
	 */
	DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
		    (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
		      - *sequence) <= (1 << 23)));

	*sequence = cur_fence;

	return ret;
}

/* Mask and acknowledge everything before the IRQ line is hooked up. */
void mga_driver_irq_preinstall(struct drm_device *dev)
{
	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;

	/* Disable *all* interrupts */
	MGA_WRITE(MGA_IEN, 0);
	/* Clear bits if they're already high */
	MGA_WRITE(MGA_ICLEAR, ~0);
}

int mga_driver_irq_postinstall(struct drm_device *dev)
{
	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;

	DRM_INIT_WAITQUEUE(&dev_priv->fence_queue);

	/* Turn on soft trap interrupt.  Vertical blank interrupts are enabled
	 * in mga_enable_vblank.
	 */
	MGA_WRITE(MGA_IEN, MGA_SOFTRAPEN);
	return 0;
}

void mga_driver_irq_uninstall(struct drm_device *dev)
{
	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
	if (!dev_priv)
		return;

	/* Disable *all* interrupts */
	MGA_WRITE(MGA_IEN, 0);

	dev->irq_enabled = 0;
}
gpl-2.0
Seinlin/codeaurora_kernel_msm
arch/powerpc/kernel/power4-pmu.c
2451
17098
/*
 * Performance counter support for POWER4 (GP) and POWER4+ (GQ) processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/string.h>
#include <asm/reg.h>
#include <asm/cputable.h>

/*
 * Bits in event code for POWER4
 */
#define PM_PMC_SH	12	/* PMC number (1-based) for direct events */
#define PM_PMC_MSK	0xf
#define PM_UNIT_SH	8	/* TTMMUX number and setting - unit select */
#define PM_UNIT_MSK	0xf
#define PM_LOWER_SH	6
#define PM_LOWER_MSK	1
#define PM_LOWER_MSKS	0x40
#define PM_BYTE_SH	4	/* Byte number of event bus to use */
#define PM_BYTE_MSK	3
#define PM_PMCSEL_MSK	7

/*
 * Unit code values
 */
#define PM_FPU		1
#define PM_ISU1		2
#define PM_IFU		3
#define PM_IDU0		4
#define PM_ISU1_ALT	6
#define PM_ISU2		7
#define PM_IFU_ALT	8
#define PM_LSU0		9
#define PM_LSU1		0xc
#define PM_GPS		0xf

/*
 * Bits in MMCR0 for POWER4
 */
#define MMCR0_PMC1SEL_SH	8
#define MMCR0_PMC2SEL_SH	1
#define MMCR_PMCSEL_MSK		0x1f

/*
 * Bits in MMCR1 for POWER4
 */
#define MMCR1_TTM0SEL_SH	62
#define MMCR1_TTC0SEL_SH	61
#define MMCR1_TTM1SEL_SH	59
#define MMCR1_TTC1SEL_SH	58
#define MMCR1_TTM2SEL_SH	56
#define MMCR1_TTC2SEL_SH	55
#define MMCR1_TTM3SEL_SH	53
#define MMCR1_TTC3SEL_SH	52
#define MMCR1_TTMSEL_MSK	3
#define MMCR1_TD_CP_DBG0SEL_SH	50
#define MMCR1_TD_CP_DBG1SEL_SH	48
#define MMCR1_TD_CP_DBG2SEL_SH	46
#define MMCR1_TD_CP_DBG3SEL_SH	44
#define MMCR1_DEBUG0SEL_SH	43
#define MMCR1_DEBUG1SEL_SH	42
#define MMCR1_DEBUG2SEL_SH	41
#define MMCR1_DEBUG3SEL_SH	40
#define MMCR1_PMC1_ADDER_SEL_SH	39
#define MMCR1_PMC2_ADDER_SEL_SH	38
#define MMCR1_PMC6_ADDER_SEL_SH	37
#define MMCR1_PMC5_ADDER_SEL_SH	36
#define MMCR1_PMC8_ADDER_SEL_SH	35
#define MMCR1_PMC7_ADDER_SEL_SH	34
#define MMCR1_PMC3_ADDER_SEL_SH	33
#define MMCR1_PMC4_ADDER_SEL_SH	32
#define MMCR1_PMC3SEL_SH	27
#define MMCR1_PMC4SEL_SH	22
#define MMCR1_PMC5SEL_SH	17
#define MMCR1_PMC6SEL_SH	12
#define MMCR1_PMC7SEL_SH	7
#define MMCR1_PMC8SEL_SH	2	/* note bit 0 is in MMCRA for GP */

/* MMCR1 adder-select bit position for each PMC (index = pmc - 1). */
static short mmcr1_adder_bits[8] = {
	MMCR1_PMC1_ADDER_SEL_SH,
	MMCR1_PMC2_ADDER_SEL_SH,
	MMCR1_PMC3_ADDER_SEL_SH,
	MMCR1_PMC4_ADDER_SEL_SH,
	MMCR1_PMC5_ADDER_SEL_SH,
	MMCR1_PMC6_ADDER_SEL_SH,
	MMCR1_PMC7_ADDER_SEL_SH,
	MMCR1_PMC8_ADDER_SEL_SH
};

/*
 * Bits in MMCRA
 */
#define MMCRA_PMC8SEL0_SH	17	/* PMC8SEL bit 0 for GP */

/*
 * Layout of constraint bits:
 * 6666555555555544444444443333333333222222222211111111110000000000
 * 3210987654321098765432109876543210987654321098765432109876543210
 *        |[  >[  >[   >|||[  >[  ><><><><><><><><><><><><><><><><>
 *        | UC1 UC2 UC3 ||| PS1 PS2 B0 B1 B2 B3 P1P2P3P4P5P6P7P8
 *        \SMPL         ||\TTC3SEL
 *                      |\TTC_IFU_SEL
 *                      \TTM2SEL0
 *
 * SMPL - SAMPLE_ENABLE constraint
 *     56: SAMPLE_ENABLE value 0x0100_0000_0000_0000
 *
 * UC1 - unit constraint 1: can't have all three of FPU/ISU1/IDU0|ISU2
 *     55: UC1 error 0x0080_0000_0000_0000
 *     54: FPU events needed 0x0040_0000_0000_0000
 *     53: ISU1 events needed 0x0020_0000_0000_0000
 *     52: IDU0|ISU2 events needed 0x0010_0000_0000_0000
 *
 * UC2 - unit constraint 2: can't have all three of FPU/IFU/LSU0
 *     51: UC2 error 0x0008_0000_0000_0000
 *     50: FPU events needed 0x0004_0000_0000_0000
 *     49: IFU events needed 0x0002_0000_0000_0000
 *     48: LSU0 events needed 0x0001_0000_0000_0000
 *
 * UC3 - unit constraint 3: can't have all four of LSU0/IFU/IDU0|ISU2/ISU1
 *     47: UC3 error 0x8000_0000_0000
 *     46: LSU0 events needed 0x4000_0000_0000
 *     45: IFU events needed 0x2000_0000_0000
 *     44: IDU0|ISU2 events needed 0x1000_0000_0000
 *     43: ISU1 events needed 0x0800_0000_0000
 *
 * TTM2SEL0
 *     42: 0 = IDU0 events needed
 *	   1 = ISU2 events needed 0x0400_0000_0000
 *
 * TTC_IFU_SEL
 *     41: 0 = IFU.U events needed
 *	   1 = IFU.L events needed 0x0200_0000_0000
 *
 * TTC3SEL
 *     40: 0 = LSU1.U events needed
 *	   1 = LSU1.L events needed 0x0100_0000_0000
 *
 * PS1
 *     39: PS1 error 0x0080_0000_0000
 *     36-38: count of events needing PMC1/2/5/6 0x0070_0000_0000
 *
 * PS2
 *     35: PS2 error 0x0008_0000_0000
 *     32-34: count of events needing PMC3/4/7/8 0x0007_0000_0000
 *
 * B0
 *     28-31: Byte 0 event source 0xf000_0000
 *	      1 = FPU
 *	      2 = ISU1
 *	      3 = IFU
 *	      4 = IDU0
 *	      7 = ISU2
 *	      9 = LSU0
 *	      c = LSU1
 *	      f = GPS
 *
 * B1, B2, B3
 *     24-27, 20-23, 16-19: Byte 1, 2, 3 event sources
 *
 * P8
 *     15: P8 error 0x8000
 *     14-15: Count of events needing PMC8
 *
 * P1..P7
 *     0-13: Count of events needing PMC1..PMC7
 *
 * Note: this doesn't allow events using IFU.U to be combined with events
 * using IFU.L, though that is feasible (using TTM0 and TTM2).  However
 * there are no listed events for IFU.L (they are debug events not
 * verified for performance monitoring) so this shouldn't cause a
 * problem.
 */

/* Per-unit constraint mask/value contribution and TTMx lower-select bit. */
static struct unitinfo {
	unsigned long	value, mask;
	int		unit;
	int		lowerbit;
} p4_unitinfo[16] = {
	[PM_FPU]  = { 0x44000000000000ul, 0x88000000000000ul, PM_FPU, 0 },
	[PM_ISU1] = { 0x20080000000000ul, 0x88000000000000ul, PM_ISU1, 0 },
	[PM_ISU1_ALT] =
		    { 0x20080000000000ul, 0x88000000000000ul, PM_ISU1, 0 },
	[PM_IFU]  = { 0x02200000000000ul, 0x08820000000000ul, PM_IFU, 41 },
	[PM_IFU_ALT] =
		    { 0x02200000000000ul, 0x08820000000000ul, PM_IFU, 41 },
	[PM_IDU0] = { 0x10100000000000ul, 0x80840000000000ul, PM_IDU0, 1 },
	[PM_ISU2] = { 0x10140000000000ul, 0x80840000000000ul, PM_ISU2, 0 },
	[PM_LSU0] = { 0x01400000000000ul, 0x08800000000000ul, PM_LSU0, 0 },
	[PM_LSU1] = { 0x00000000000000ul, 0x00010000000000ul, PM_LSU1, 40 },
	[PM_GPS]  = { 0x00000000000000ul, 0x00000000000000ul, PM_GPS, 0 }
};

/* Bitmaps (indexed by PMCSEL) of direct events that mark instructions. */
static unsigned char direct_marked_event[8] = {
	(1<<2) | (1<<3),	/* PMC1: PM_MRK_GRP_DISP, PM_MRK_ST_CMPL */
	(1<<3) | (1<<5),	/* PMC2: PM_THRESH_TIMEO, PM_MRK_BRU_FIN */
	(1<<3),			/* PMC3: PM_MRK_ST_CMPL_INT */
	(1<<4) | (1<<5),	/* PMC4: PM_MRK_GRP_CMPL, PM_MRK_CRU_FIN */
	(1<<4) | (1<<5),	/* PMC5: PM_MRK_GRP_TIMEO */
	(1<<3) | (1<<4) | (1<<5),
		/* PMC6: PM_MRK_ST_GPS, PM_MRK_FXU_FIN, PM_MRK_GRP_ISSUED */
	(1<<4) | (1<<5),	/* PMC7: PM_MRK_FPU_FIN, PM_MRK_INST_FIN */
	(1<<4),			/* PMC8: PM_MRK_LSU_FIN */
};

/*
 * Returns 1 if event counts things relating to marked instructions
 * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
 */
static int p4_marked_instr_event(u64 event)
{
	int pmc, psel, unit, byte, bit;
	unsigned int mask;

	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
	psel = event & PM_PMCSEL_MSK;
	if (pmc) {
		if (direct_marked_event[pmc - 1] & (1 << psel))
			return 1;
		if (psel == 0)		/* add events */
			bit = (pmc <= 4)? pmc - 1: 8 - pmc;
		else if (psel == 6)	/* decode events */
			bit = 4;
		else
			return 0;
	} else
		bit = psel;

	byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
	unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
	mask = 0;
	switch (unit) {
	case PM_LSU1:
		if (event & PM_LOWER_MSKS)
			mask = 1 << 28;		/* byte 7 bit 4 */
		else
			mask = 6 << 24;		/* byte 3 bits 1 and 2 */
		break;
	case PM_LSU0:
		/* byte 3, bit 3; byte 2 bits 0,2,3,4,5; byte 1 */
		mask = 0x083dff00;
	}
	return (mask >> (byte * 8 + bit)) & 1;
}

/*
 * Compute the constraint mask/value pair (per the bit layout above)
 * for one event code.  Returns -1 if the event code is invalid.
 */
static int p4_get_constraint(u64 event, unsigned long *maskp,
			     unsigned long *valp)
{
	int pmc, byte, unit, lower, sh;
	unsigned long mask = 0, value = 0;
	int grp = -1;

	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
	if (pmc) {
		if (pmc > 8)
			return -1;
		sh = (pmc - 1) * 2;
		mask |= 2 << sh;
		value |= 1 << sh;
		grp = ((pmc - 1) >> 1) & 1;
	}
	unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
	byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
	if (unit) {
		lower = (event >> PM_LOWER_SH) & PM_LOWER_MSK;

		/*
		 * Bus events on bytes 0 and 2 can be counted
		 * on PMC1/2/5/6; bytes 1 and 3 on PMC3/4/7/8.
		 */
		if (!pmc)
			grp = byte & 1;

		if (!p4_unitinfo[unit].unit)
			return -1;
		mask  |= p4_unitinfo[unit].mask;
		value |= p4_unitinfo[unit].value;
		sh = p4_unitinfo[unit].lowerbit;
		if (sh > 1)
			value |= (unsigned long)lower << sh;
		else if (lower != sh)
			return -1;
		unit = p4_unitinfo[unit].unit;

		/* Set byte lane select field */
		mask  |= 0xfULL << (28 - 4 * byte);
		value |= (unsigned long)unit << (28 - 4 * byte);
	}
	if (grp == 0) {
		/* increment PMC1/2/5/6 field */
		mask  |= 0x8000000000ull;
		value |= 0x1000000000ull;
	} else {
		/* increment PMC3/4/7/8 field */
		mask  |= 0x800000000ull;
		value |= 0x100000000ull;
	}

	/* Marked instruction events need sample_enable set */
	if (p4_marked_instr_event(event)) {
		mask  |= 1ull << 56;
		value |= 1ull << 56;
	}

	/* PMCSEL=6 decode events on byte 2 need sample_enable clear */
	if (pmc && (event & PM_PMCSEL_MSK) == 6 && byte == 2)
		mask |= 1ull << 56;

	*maskp = mask;
	*valp = value;
	return 0;
}

/* Event codes that all count PM_INST_CMPL on different PMCs. */
static unsigned int ppc_inst_cmpl[] = {
	0x1001, 0x4001, 0x6001, 0x7001, 0x8001
};

/*
 * Fill alt[] with equivalent event codes for @event (alt[0] is the
 * event itself); returns the number of alternatives.
 */
static int p4_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
	int i, j, na;

	alt[0] = event;
	na = 1;

	/* 2 possibilities for PM_GRP_DISP_REJECT */
	if (event == 0x8003 || event == 0x0224) {
		alt[1] = event ^ (0x8003 ^ 0x0224);
		return 2;
	}

	/* 2 possibilities for PM_ST_MISS_L1 */
	if (event == 0x0c13 || event == 0x0c23) {
		alt[1] = event ^ (0x0c13 ^ 0x0c23);
		return 2;
	}

	/* several possibilities for PM_INST_CMPL */
	for (i = 0; i < ARRAY_SIZE(ppc_inst_cmpl); ++i) {
		if (event == ppc_inst_cmpl[i]) {
			for (j = 0; j < ARRAY_SIZE(ppc_inst_cmpl); ++j)
				if (j != i)
					alt[na++] = ppc_inst_cmpl[j];
			break;
		}
	}

	return na;
}

/*
 * Map n_ev already-constraint-checked events onto PMCs and compute the
 * MMCR0/MMCR1/MMCRA values.  hwc[i] receives the 0-based PMC assigned
 * to event[i].  Returns -1 if the set cannot be scheduled.
 */
static int p4_compute_mmcr(u64 event[], int n_ev,
			   unsigned int hwc[], unsigned long mmcr[])
{
	unsigned long mmcr0 = 0, mmcr1 = 0, mmcra = 0;
	unsigned int pmc, unit, byte, psel, lower;
	unsigned int ttm, grp;
	unsigned int pmc_inuse = 0;
	unsigned int pmc_grp_use[2];
	unsigned char busbyte[4];
	unsigned char unituse[16];
	unsigned int unitlower = 0;
	int i;

	if (n_ev > 8)
		return -1;

	/* First pass to count resource use */
	pmc_grp_use[0] = pmc_grp_use[1] = 0;
	memset(busbyte, 0, sizeof(busbyte));
	memset(unituse, 0, sizeof(unituse));
	for (i = 0; i < n_ev; ++i) {
		pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
		if (pmc) {
			if (pmc_inuse & (1 << (pmc - 1)))
				return -1;
			pmc_inuse |= 1 << (pmc - 1);
			/* count 1/2/5/6 vs 3/4/7/8 use */
			++pmc_grp_use[((pmc - 1) >> 1) & 1];
		}
		unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
		byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
		lower = (event[i] >> PM_LOWER_SH) & PM_LOWER_MSK;
		if (unit) {
			if (!pmc)
				++pmc_grp_use[byte & 1];
			if (unit == 6 || unit == 8)
				/* map alt ISU1/IFU codes: 6->2, 8->3 */
				unit = (unit >> 1) - 1;
			if (busbyte[byte] && busbyte[byte] != unit)
				return -1;
			busbyte[byte] = unit;
			lower <<= unit;
			if (unituse[unit] && lower != (unitlower & lower))
				return -1;
			unituse[unit] = 1;
			unitlower |= lower;
		}
	}
	if (pmc_grp_use[0] > 4 || pmc_grp_use[1] > 4)
		return -1;

	/*
	 * Assign resources and set multiplexer selects.
	 *
	 * Units 1,2,3 are on TTM0, 4,6,7 on TTM1, 8,10 on TTM2.
	 * Each TTMx can only select one unit, but since
	 * units 2 and 6 are both ISU1, and 3 and 8 are both IFU,
	 * we have some choices.
	 */
	if (unituse[2] & (unituse[1] | (unituse[3] & unituse[9]))) {
		unituse[6] = 1;		/* Move 2 to 6 */
		unituse[2] = 0;
	}
	if (unituse[3] & (unituse[1] | unituse[2])) {
		unituse[8] = 1;		/* Move 3 to 8 */
		unituse[3] = 0;
		unitlower = (unitlower & ~8) | ((unitlower & 8) << 5);
	}
	/* Check only one unit per TTMx */
	if (unituse[1] + unituse[2] + unituse[3] > 1 ||
	    unituse[4] + unituse[6] + unituse[7] > 1 ||
	    unituse[8] + unituse[9] > 1 ||
	    (unituse[5] | unituse[10] | unituse[11] |
	     unituse[13] | unituse[14]))
		return -1;

	/* Set TTMxSEL fields.  Note, units 1-3 => TTM0SEL codes 0-2 */
	mmcr1 |= (unsigned long)(unituse[3] * 2 + unituse[2])
		<< MMCR1_TTM0SEL_SH;
	mmcr1 |= (unsigned long)(unituse[7] * 3 + unituse[6] * 2)
		<< MMCR1_TTM1SEL_SH;
	mmcr1 |= (unsigned long)unituse[9] << MMCR1_TTM2SEL_SH;

	/* Set TTCxSEL fields. */
	if (unitlower & 0xe)
		mmcr1 |= 1ull << MMCR1_TTC0SEL_SH;
	if (unitlower & 0xf0)
		mmcr1 |= 1ull << MMCR1_TTC1SEL_SH;
	if (unitlower & 0xf00)
		mmcr1 |= 1ull << MMCR1_TTC2SEL_SH;
	if (unitlower & 0x7000)
		mmcr1 |= 1ull << MMCR1_TTC3SEL_SH;

	/* Set byte lane select fields. */
	for (byte = 0; byte < 4; ++byte) {
		unit = busbyte[byte];
		if (!unit)
			continue;
		if (unit == 0xf) {
			/* special case for GPS */
			mmcr1 |= 1ull << (MMCR1_DEBUG0SEL_SH - byte);
		} else {
			if (!unituse[unit])
				ttm = unit - 1;		/* 2->1, 3->2 */
			else
				ttm = unit >> 2;
			mmcr1 |= (unsigned long)ttm
				<< (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
		}
	}

	/* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
	for (i = 0; i < n_ev; ++i) {
		pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
		unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
		byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
		psel = event[i] & PM_PMCSEL_MSK;
		if (!pmc) {
			/* Bus event or 00xxx direct event (off or cycles) */
			if (unit)
				psel |= 0x10 | ((byte & 2) << 2);
			for (pmc = 0; pmc < 8; ++pmc) {
				if (pmc_inuse & (1 << pmc))
					continue;
				grp = (pmc >> 1) & 1;
				if (unit) {
					if (grp == (byte & 1))
						break;
				} else if (pmc_grp_use[grp] < 4) {
					++pmc_grp_use[grp];
					break;
				}
			}
			pmc_inuse |= 1 << pmc;
		} else {
			/* Direct event */
			--pmc;
			if (psel == 0 && (byte & 2))
				/* add events on higher-numbered bus */
				mmcr1 |= 1ull << mmcr1_adder_bits[pmc];
			else if (psel == 6 && byte == 3)
				/* seem to need to set sample_enable here */
				mmcra |= MMCRA_SAMPLE_ENABLE;
			psel |= 8;
		}
		if (pmc <= 1)
			mmcr0 |= psel << (MMCR0_PMC1SEL_SH - 7 * pmc);
		else
			mmcr1 |= psel << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2));
		if (pmc == 7)	/* PMC8 */
			mmcra |= (psel & 1) << MMCRA_PMC8SEL0_SH;
		hwc[i] = pmc;
		if (p4_marked_instr_event(event[i]))
			mmcra |= MMCRA_SAMPLE_ENABLE;
	}

	if (pmc_inuse & 1)
		mmcr0 |= MMCR0_PMC1CE;
	if (pmc_inuse & 0xfe)
		mmcr0 |= MMCR0_PMCjCE;

	mmcra |= 0x2000;	/* mark only one IOP per PPC instruction */

	/* Return MMCRx values */
	mmcr[0] = mmcr0;
	mmcr[1] = mmcr1;
	mmcr[2] = mmcra;
	return 0;
}

static void p4_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
	/*
	 * Setting the PMCxSEL field to 0 disables PMC x.
	 * (Note that pmc is 0-based here, not 1-based.)
	 */
	if (pmc <= 1) {
		mmcr[0] &= ~(0x1fUL << (MMCR0_PMC1SEL_SH - 7 * pmc));
	} else {
		mmcr[1] &= ~(0x1fUL << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2)));
		if (pmc == 7)
			mmcr[2] &= ~(1UL << MMCRA_PMC8SEL0_SH);
	}
}

/* Generic perf events -> POWER4 raw event codes. */
static int p4_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= 7,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x1001,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x8c10, /* PM_LD_REF_L1 */
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x3c10, /* PM_LD_MISS_L1 */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x330,  /* PM_BR_ISSUED */
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x331,  /* PM_BR_MPRED_CR */
};

#define C(x)	PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 */
static int power4_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[C(L1D)] = {		/* 	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0x8c10,		0x3c10	},
		[C(OP_WRITE)] = {	0x7c10,		0xc13	},
		[C(OP_PREFETCH)] = {	0xc35,		0	},
	},
	[C(L1I)] = {		/* 	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0,		0	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	0,		0	},
	},
	[C(LL)] = {		/* 	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0,		0	},
		[C(OP_WRITE)] = {	0,		0	},
		[C(OP_PREFETCH)] = {	0xc34,		0	},
	},
	[C(DTLB)] = {		/* 	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0,		0x904	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	-1,		-1	},
	},
	[C(ITLB)] = {		/* 	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0,		0x900	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	-1,		-1	},
	},
	[C(BPU)] = {		/* 	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0x330,		0x331	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	-1,		-1	},
	},
};

static struct power_pmu power4_pmu = {
	.name			= "POWER4/4+",
	.n_counter		= 8,
	.max_alternatives	= 5,
	.add_fields		= 0x0000001100005555ul,
	.test_adder		= 0x0011083300000000ul,
	.compute_mmcr		= p4_compute_mmcr,
	.get_constraint		= p4_get_constraint,
	.get_alternatives	= p4_get_alternatives,
	.disable_pmc		= p4_disable_pmc,
	.n_generic		= ARRAY_SIZE(p4_generic_events),
	.generic_events		= p4_generic_events,
	.cache_events		= &power4_cache_events,
};

/* Register this PMU if we are actually running on a POWER4/4+. */
static int init_power4_pmu(void)
{
	if (!cur_cpu_spec->oprofile_cpu_type ||
	    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power4"))
		return -ENODEV;

	return register_power_pmu(&power4_pmu);
}

early_initcall(init_power4_pmu);
gpl-2.0
CyanogenMod/android_kernel_bn_acclaim
drivers/infiniband/hw/nes/nes_utils.c
3219
31684
/* * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/if_vlan.h> #include <linux/slab.h> #include <linux/crc32.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/init.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/byteorder.h> #include "nes.h" static u16 nes_read16_eeprom(void __iomem *addr, u16 offset); u32 mh_detected; u32 mh_pauses_sent; /** * nes_read_eeprom_values - */ int nes_read_eeprom_values(struct nes_device *nesdev, struct nes_adapter *nesadapter) { u32 mac_addr_low; u16 mac_addr_high; u16 eeprom_data; u16 eeprom_offset; u16 next_section_address; u16 sw_section_ver; u8 major_ver = 0; u8 minor_ver = 0; /* TODO: deal with EEPROM endian issues */ if (nesadapter->firmware_eeprom_offset == 0) { /* Read the EEPROM Parameters */ eeprom_data = nes_read16_eeprom(nesdev->regs, 0); nes_debug(NES_DBG_HW, "EEPROM Offset 0 = 0x%04X\n", eeprom_data); eeprom_offset = 2 + (((eeprom_data & 0x007f) << 3) << ((eeprom_data & 0x0080) >> 7)); nes_debug(NES_DBG_HW, "Firmware Offset = 0x%04X\n", eeprom_offset); nesadapter->firmware_eeprom_offset = eeprom_offset; eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 4); if (eeprom_data != 0x5746) { nes_debug(NES_DBG_HW, "Not a valid Firmware Image = 0x%04X\n", eeprom_data); return -1; } eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2); nes_debug(NES_DBG_HW, "EEPROM Offset %u = 0x%04X\n", eeprom_offset + 2, eeprom_data); eeprom_offset += ((eeprom_data & 0x00ff) << 3) << ((eeprom_data & 0x0100) >> 8); nes_debug(NES_DBG_HW, "Software Offset = 0x%04X\n", eeprom_offset); nesadapter->software_eeprom_offset = eeprom_offset; eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 4); if (eeprom_data != 0x5753) { printk("Not a valid Software Image = 0x%04X\n", eeprom_data); return -1; } sw_section_ver = 
nes_read16_eeprom(nesdev->regs, nesadapter->software_eeprom_offset + 6); nes_debug(NES_DBG_HW, "Software section version number = 0x%04X\n", sw_section_ver); eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2); nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n", eeprom_offset + 2, eeprom_data); next_section_address = eeprom_offset + (((eeprom_data & 0x00ff) << 3) << ((eeprom_data & 0x0100) >> 8)); eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4); if (eeprom_data != 0x414d) { nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x414d but was 0x%04X\n", eeprom_data); goto no_fw_rev; } eeprom_offset = next_section_address; eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2); nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n", eeprom_offset + 2, eeprom_data); next_section_address = eeprom_offset + (((eeprom_data & 0x00ff) << 3) << ((eeprom_data & 0x0100) >> 8)); eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4); if (eeprom_data != 0x4f52) { nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x4f52 but was 0x%04X\n", eeprom_data); goto no_fw_rev; } eeprom_offset = next_section_address; eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2); nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n", eeprom_offset + 2, eeprom_data); next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3); eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4); if (eeprom_data != 0x5746) { nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x5746 but was 0x%04X\n", eeprom_data); goto no_fw_rev; } eeprom_offset = next_section_address; eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2); nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n", eeprom_offset + 2, eeprom_data); next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3); eeprom_data = nes_read16_eeprom(nesdev->regs, 
next_section_address + 4); if (eeprom_data != 0x5753) { nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x5753 but was 0x%04X\n", eeprom_data); goto no_fw_rev; } eeprom_offset = next_section_address; eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2); nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n", eeprom_offset + 2, eeprom_data); next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3); eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4); if (eeprom_data != 0x414d) { nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x414d but was 0x%04X\n", eeprom_data); goto no_fw_rev; } eeprom_offset = next_section_address; eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2); nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n", eeprom_offset + 2, eeprom_data); next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3); eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4); if (eeprom_data != 0x464e) { nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x464e but was 0x%04X\n", eeprom_data); goto no_fw_rev; } eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 8); printk(PFX "Firmware version %u.%u\n", (u8)(eeprom_data>>8), (u8)eeprom_data); major_ver = (u8)(eeprom_data >> 8); minor_ver = (u8)(eeprom_data); if (nes_drv_opt & NES_DRV_OPT_DISABLE_VIRT_WQ) { nes_debug(NES_DBG_HW, "Virtual WQs have been disabled\n"); } else if (((major_ver == 2) && (minor_ver > 21)) || ((major_ver > 2) && (major_ver != 255))) { nesadapter->virtwq = 1; } if (((major_ver == 3) && (minor_ver >= 16)) || (major_ver > 3)) nesadapter->send_term_ok = 1; nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) + (u32)((u8)eeprom_data); eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 10); printk(PFX "EEPROM version %u.%u\n", (u8)(eeprom_data>>8), (u8)eeprom_data); nesadapter->eeprom_version = (((u32)(u8)(eeprom_data>>8)) << 
16) + (u32)((u8)eeprom_data); no_fw_rev: /* eeprom is valid */ eeprom_offset = nesadapter->software_eeprom_offset; eeprom_offset += 8; nesadapter->netdev_max = (u8)nes_read16_eeprom(nesdev->regs, eeprom_offset); eeprom_offset += 2; mac_addr_high = nes_read16_eeprom(nesdev->regs, eeprom_offset); eeprom_offset += 2; mac_addr_low = (u32)nes_read16_eeprom(nesdev->regs, eeprom_offset); eeprom_offset += 2; mac_addr_low <<= 16; mac_addr_low += (u32)nes_read16_eeprom(nesdev->regs, eeprom_offset); nes_debug(NES_DBG_HW, "Base MAC Address = 0x%04X%08X\n", mac_addr_high, mac_addr_low); nes_debug(NES_DBG_HW, "MAC Address count = %u\n", nesadapter->netdev_max); nesadapter->mac_addr_low = mac_addr_low; nesadapter->mac_addr_high = mac_addr_high; /* Read the Phy Type array */ eeprom_offset += 10; eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); nesadapter->phy_type[0] = (u8)(eeprom_data >> 8); nesadapter->phy_type[1] = (u8)eeprom_data; /* Read the port array */ eeprom_offset += 2; eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); nesadapter->phy_type[2] = (u8)(eeprom_data >> 8); nesadapter->phy_type[3] = (u8)eeprom_data; /* port_count is set by soft reset reg */ nes_debug(NES_DBG_HW, "port_count = %u, port 0 -> %u, port 1 -> %u," " port 2 -> %u, port 3 -> %u\n", nesadapter->port_count, nesadapter->phy_type[0], nesadapter->phy_type[1], nesadapter->phy_type[2], nesadapter->phy_type[3]); /* Read PD config array */ eeprom_offset += 10; eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); nesadapter->pd_config_size[0] = eeprom_data; eeprom_offset += 2; eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); nesadapter->pd_config_base[0] = eeprom_data; nes_debug(NES_DBG_HW, "PD0 config, size=0x%04x, base=0x%04x\n", nesadapter->pd_config_size[0], nesadapter->pd_config_base[0]); eeprom_offset += 2; eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); nesadapter->pd_config_size[1] = eeprom_data; eeprom_offset += 2; eeprom_data = 
nes_read16_eeprom(nesdev->regs, eeprom_offset); nesadapter->pd_config_base[1] = eeprom_data; nes_debug(NES_DBG_HW, "PD1 config, size=0x%04x, base=0x%04x\n", nesadapter->pd_config_size[1], nesadapter->pd_config_base[1]); eeprom_offset += 2; eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); nesadapter->pd_config_size[2] = eeprom_data; eeprom_offset += 2; eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); nesadapter->pd_config_base[2] = eeprom_data; nes_debug(NES_DBG_HW, "PD2 config, size=0x%04x, base=0x%04x\n", nesadapter->pd_config_size[2], nesadapter->pd_config_base[2]); eeprom_offset += 2; eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); nesadapter->pd_config_size[3] = eeprom_data; eeprom_offset += 2; eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); nesadapter->pd_config_base[3] = eeprom_data; nes_debug(NES_DBG_HW, "PD3 config, size=0x%04x, base=0x%04x\n", nesadapter->pd_config_size[3], nesadapter->pd_config_base[3]); /* Read Rx Pool Size */ eeprom_offset += 22; /* 46 */ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); eeprom_offset += 2; nesadapter->rx_pool_size = (((u32)eeprom_data) << 16) + nes_read16_eeprom(nesdev->regs, eeprom_offset); nes_debug(NES_DBG_HW, "rx_pool_size = 0x%08X\n", nesadapter->rx_pool_size); eeprom_offset += 2; eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); eeprom_offset += 2; nesadapter->tx_pool_size = (((u32)eeprom_data) << 16) + nes_read16_eeprom(nesdev->regs, eeprom_offset); nes_debug(NES_DBG_HW, "tx_pool_size = 0x%08X\n", nesadapter->tx_pool_size); eeprom_offset += 2; eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); eeprom_offset += 2; nesadapter->rx_threshold = (((u32)eeprom_data) << 16) + nes_read16_eeprom(nesdev->regs, eeprom_offset); nes_debug(NES_DBG_HW, "rx_threshold = 0x%08X\n", nesadapter->rx_threshold); eeprom_offset += 2; eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); eeprom_offset += 2; 
nesadapter->tcp_timer_core_clk_divisor = (((u32)eeprom_data) << 16) + nes_read16_eeprom(nesdev->regs, eeprom_offset); nes_debug(NES_DBG_HW, "tcp_timer_core_clk_divisor = 0x%08X\n", nesadapter->tcp_timer_core_clk_divisor); eeprom_offset += 2; eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); eeprom_offset += 2; nesadapter->iwarp_config = (((u32)eeprom_data) << 16) + nes_read16_eeprom(nesdev->regs, eeprom_offset); nes_debug(NES_DBG_HW, "iwarp_config = 0x%08X\n", nesadapter->iwarp_config); eeprom_offset += 2; eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); eeprom_offset += 2; nesadapter->cm_config = (((u32)eeprom_data) << 16) + nes_read16_eeprom(nesdev->regs, eeprom_offset); nes_debug(NES_DBG_HW, "cm_config = 0x%08X\n", nesadapter->cm_config); eeprom_offset += 2; eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); eeprom_offset += 2; nesadapter->sws_timer_config = (((u32)eeprom_data) << 16) + nes_read16_eeprom(nesdev->regs, eeprom_offset); nes_debug(NES_DBG_HW, "sws_timer_config = 0x%08X\n", nesadapter->sws_timer_config); eeprom_offset += 2; eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); eeprom_offset += 2; nesadapter->tcp_config1 = (((u32)eeprom_data) << 16) + nes_read16_eeprom(nesdev->regs, eeprom_offset); nes_debug(NES_DBG_HW, "tcp_config1 = 0x%08X\n", nesadapter->tcp_config1); eeprom_offset += 2; eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); eeprom_offset += 2; nesadapter->wqm_wat = (((u32)eeprom_data) << 16) + nes_read16_eeprom(nesdev->regs, eeprom_offset); nes_debug(NES_DBG_HW, "wqm_wat = 0x%08X\n", nesadapter->wqm_wat); eeprom_offset += 2; eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); eeprom_offset += 2; nesadapter->core_clock = (((u32)eeprom_data) << 16) + nes_read16_eeprom(nesdev->regs, eeprom_offset); nes_debug(NES_DBG_HW, "core_clock = 0x%08X\n", nesadapter->core_clock); if ((sw_section_ver) && (nesadapter->hw_rev != NE020_REV)) { eeprom_offset += 2; eeprom_data = 
nes_read16_eeprom(nesdev->regs, eeprom_offset); nesadapter->phy_index[0] = (eeprom_data & 0xff00)>>8; nesadapter->phy_index[1] = eeprom_data & 0x00ff; eeprom_offset += 2; eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); nesadapter->phy_index[2] = (eeprom_data & 0xff00)>>8; nesadapter->phy_index[3] = eeprom_data & 0x00ff; } else { nesadapter->phy_index[0] = 4; nesadapter->phy_index[1] = 5; nesadapter->phy_index[2] = 6; nesadapter->phy_index[3] = 7; } nes_debug(NES_DBG_HW, "Phy address map = 0 > %u, 1 > %u, 2 > %u, 3 > %u\n", nesadapter->phy_index[0],nesadapter->phy_index[1], nesadapter->phy_index[2],nesadapter->phy_index[3]); } return 0; } /** * nes_read16_eeprom */ static u16 nes_read16_eeprom(void __iomem *addr, u16 offset) { writel(NES_EEPROM_READ_REQUEST + (offset >> 1), (void __iomem *)addr + NES_EEPROM_COMMAND); do { } while (readl((void __iomem *)addr + NES_EEPROM_COMMAND) & NES_EEPROM_READ_REQUEST); return readw((void __iomem *)addr + NES_EEPROM_DATA); } /** * nes_write_1G_phy_reg */ void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 data) { u32 u32temp; u32 counter; nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL, 0x50020000 | data | ((u32)phy_reg << 18) | ((u32)phy_addr << 23)); for (counter = 0; counter < 100 ; counter++) { udelay(30); u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS); if (u32temp & 1) { /* nes_debug(NES_DBG_PHY, "Phy interrupt status = 0x%X.\n", u32temp); */ nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1); break; } } if (!(u32temp & 1)) nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n", u32temp); } /** * nes_read_1G_phy_reg * This routine only issues the read, the data must be read * separately. 
*/ void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 *data) { u32 u32temp; u32 counter; /* nes_debug(NES_DBG_PHY, "phy addr = %d, mac_index = %d\n", phy_addr, nesdev->mac_index); */ nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL, 0x60020000 | ((u32)phy_reg << 18) | ((u32)phy_addr << 23)); for (counter = 0; counter < 100 ; counter++) { udelay(30); u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS); if (u32temp & 1) { /* nes_debug(NES_DBG_PHY, "Phy interrupt status = 0x%X.\n", u32temp); */ nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1); break; } } if (!(u32temp & 1)) { nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n", u32temp); *data = 0xffff; } else { *data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); } } /** * nes_write_10G_phy_reg */ void nes_write_10G_phy_reg(struct nes_device *nesdev, u16 phy_addr, u8 dev_addr, u16 phy_reg, u16 data) { u32 port_addr; u32 u32temp; u32 counter; port_addr = phy_addr; /* set address */ nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL, 0x00020000 | (u32)phy_reg | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23)); for (counter = 0; counter < 100 ; counter++) { udelay(30); u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS); if (u32temp & 1) { nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1); break; } } if (!(u32temp & 1)) nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n", u32temp); /* set data */ nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL, 0x10020000 | (u32)data | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23)); for (counter = 0; counter < 100 ; counter++) { udelay(30); u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS); if (u32temp & 1) { nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1); break; } } if (!(u32temp & 1)) nes_debug(NES_DBG_PHY, "Phy is not responding. 
interrupt status = 0x%X.\n", u32temp); } /** * nes_read_10G_phy_reg * This routine only issues the read, the data must be read * separately. */ void nes_read_10G_phy_reg(struct nes_device *nesdev, u8 phy_addr, u8 dev_addr, u16 phy_reg) { u32 port_addr; u32 u32temp; u32 counter; port_addr = phy_addr; /* set address */ nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL, 0x00020000 | (u32)phy_reg | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23)); for (counter = 0; counter < 100 ; counter++) { udelay(30); u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS); if (u32temp & 1) { nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1); break; } } if (!(u32temp & 1)) nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n", u32temp); /* issue read */ nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL, 0x30020000 | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23)); for (counter = 0; counter < 100 ; counter++) { udelay(30); u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS); if (u32temp & 1) { nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1); break; } } if (!(u32temp & 1)) nes_debug(NES_DBG_PHY, "Phy is not responding. 
interrupt status = 0x%X.\n", u32temp); } /** * nes_get_cqp_request */ struct nes_cqp_request *nes_get_cqp_request(struct nes_device *nesdev) { unsigned long flags; struct nes_cqp_request *cqp_request = NULL; if (!list_empty(&nesdev->cqp_avail_reqs)) { spin_lock_irqsave(&nesdev->cqp.lock, flags); if (!list_empty(&nesdev->cqp_avail_reqs)) { cqp_request = list_entry(nesdev->cqp_avail_reqs.next, struct nes_cqp_request, list); list_del_init(&cqp_request->list); } spin_unlock_irqrestore(&nesdev->cqp.lock, flags); } if (cqp_request == NULL) { cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_ATOMIC); if (cqp_request) { cqp_request->dynamic = 1; INIT_LIST_HEAD(&cqp_request->list); } } if (cqp_request) { init_waitqueue_head(&cqp_request->waitq); cqp_request->waiting = 0; cqp_request->request_done = 0; cqp_request->callback = 0; init_waitqueue_head(&cqp_request->waitq); nes_debug(NES_DBG_CQP, "Got cqp request %p from the available list \n", cqp_request); } else printk(KERN_ERR PFX "%s: Could not allocated a CQP request.\n", __func__); return cqp_request; } void nes_free_cqp_request(struct nes_device *nesdev, struct nes_cqp_request *cqp_request) { unsigned long flags; nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X) freed.\n", cqp_request, le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_OPCODE_IDX]) & 0x3f); if (cqp_request->dynamic) { kfree(cqp_request); } else { spin_lock_irqsave(&nesdev->cqp.lock, flags); list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); spin_unlock_irqrestore(&nesdev->cqp.lock, flags); } } void nes_put_cqp_request(struct nes_device *nesdev, struct nes_cqp_request *cqp_request) { if (atomic_dec_and_test(&cqp_request->refcount)) nes_free_cqp_request(nesdev, cqp_request); } /** * nes_post_cqp_request */ void nes_post_cqp_request(struct nes_device *nesdev, struct nes_cqp_request *cqp_request) { struct nes_hw_cqp_wqe *cqp_wqe; unsigned long flags; u32 cqp_head; u64 u64temp; spin_lock_irqsave(&nesdev->cqp.lock, flags); if 
(((((nesdev->cqp.sq_tail+(nesdev->cqp.sq_size*2))-nesdev->cqp.sq_head) & (nesdev->cqp.sq_size - 1)) != 1) && (list_empty(&nesdev->cqp_pending_reqs))) { cqp_head = nesdev->cqp.sq_head++; nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1; cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; memcpy(cqp_wqe, &cqp_request->cqp_wqe, sizeof(*cqp_wqe)); barrier(); u64temp = (unsigned long)cqp_request; set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_COMP_SCRATCH_LOW_IDX, u64temp); nes_debug(NES_DBG_CQP, "CQP request (opcode 0x%02X), line 1 = 0x%08X put on CQPs SQ," " request = %p, cqp_head = %u, cqp_tail = %u, cqp_size = %u," " waiting = %d, refcount = %d.\n", le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f, le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX]), cqp_request, nesdev->cqp.sq_head, nesdev->cqp.sq_tail, nesdev->cqp.sq_size, cqp_request->waiting, atomic_read(&cqp_request->refcount)); barrier(); /* Ring doorbell (1 WQEs) */ nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x01800000 | nesdev->cqp.qp_id); barrier(); } else { nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X), line 1 = 0x%08X" " put on the pending queue.\n", cqp_request, le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f, le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_ID_IDX])); list_add_tail(&cqp_request->list, &nesdev->cqp_pending_reqs); } spin_unlock_irqrestore(&nesdev->cqp.lock, flags); return; } /** * nes_arp_table */ int nes_arp_table(struct nes_device *nesdev, u32 ip_addr, u8 *mac_addr, u32 action) { struct nes_adapter *nesadapter = nesdev->nesadapter; int arp_index; int err = 0; __be32 tmp_addr; for (arp_index = 0; (u32) arp_index < nesadapter->arp_table_size; arp_index++) { if (nesadapter->arp_table[arp_index].ip_addr == ip_addr) break; } if (action == NES_ARP_ADD) { if (arp_index != nesadapter->arp_table_size) { return -1; } arp_index = 0; err = nes_alloc_resource(nesadapter, nesadapter->allocated_arps, nesadapter->arp_table_size, (u32 *)&arp_index, 
&nesadapter->next_arp_index); if (err) { nes_debug(NES_DBG_NETDEV, "nes_alloc_resource returned error = %u\n", err); return err; } nes_debug(NES_DBG_NETDEV, "ADD, arp_index=%d\n", arp_index); nesadapter->arp_table[arp_index].ip_addr = ip_addr; memcpy(nesadapter->arp_table[arp_index].mac_addr, mac_addr, ETH_ALEN); return arp_index; } /* DELETE or RESOLVE */ if (arp_index == nesadapter->arp_table_size) { tmp_addr = cpu_to_be32(ip_addr); nes_debug(NES_DBG_NETDEV, "MAC for %pI4 not in ARP table - cannot %s\n", &tmp_addr, action == NES_ARP_RESOLVE ? "resolve" : "delete"); return -1; } if (action == NES_ARP_RESOLVE) { nes_debug(NES_DBG_NETDEV, "RESOLVE, arp_index=%d\n", arp_index); return arp_index; } if (action == NES_ARP_DELETE) { nes_debug(NES_DBG_NETDEV, "DELETE, arp_index=%d\n", arp_index); nesadapter->arp_table[arp_index].ip_addr = 0; memset(nesadapter->arp_table[arp_index].mac_addr, 0x00, ETH_ALEN); nes_free_resource(nesadapter, nesadapter->allocated_arps, arp_index); return arp_index; } return -1; } /** * nes_mh_fix */ void nes_mh_fix(unsigned long parm) { unsigned long flags; struct nes_device *nesdev = (struct nes_device *)parm; struct nes_adapter *nesadapter = nesdev->nesadapter; struct nes_vnic *nesvnic; u32 used_chunks_tx; u32 temp_used_chunks_tx; u32 temp_last_used_chunks_tx; u32 used_chunks_mask; u32 mac_tx_frames_low; u32 mac_tx_frames_high; u32 mac_tx_pauses; u32 serdes_status; u32 reset_value; u32 tx_control; u32 tx_config; u32 tx_pause_quanta; u32 rx_control; u32 rx_config; u32 mac_exact_match; u32 mpp_debug; u32 i=0; u32 chunks_tx_progress = 0; spin_lock_irqsave(&nesadapter->phy_lock, flags); if ((nesadapter->mac_sw_state[0] != NES_MAC_SW_IDLE) || (nesadapter->mac_link_down[0])) { spin_unlock_irqrestore(&nesadapter->phy_lock, flags); goto no_mh_work; } nesadapter->mac_sw_state[0] = NES_MAC_SW_MH; spin_unlock_irqrestore(&nesadapter->phy_lock, flags); do { mac_tx_frames_low = nes_read_indexed(nesdev, NES_IDX_MAC_TX_FRAMES_LOW); mac_tx_frames_high = 
nes_read_indexed(nesdev, NES_IDX_MAC_TX_FRAMES_HIGH); mac_tx_pauses = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_FRAMES); used_chunks_tx = nes_read_indexed(nesdev, NES_IDX_USED_CHUNKS_TX); nesdev->mac_pause_frames_sent += mac_tx_pauses; used_chunks_mask = 0; temp_used_chunks_tx = used_chunks_tx; temp_last_used_chunks_tx = nesdev->last_used_chunks_tx; if (nesdev->netdev[0]) { nesvnic = netdev_priv(nesdev->netdev[0]); } else { break; } for (i=0; i<4; i++) { used_chunks_mask <<= 8; if (nesvnic->qp_nic_index[i] != 0xff) { used_chunks_mask |= 0xff; if ((temp_used_chunks_tx&0xff)<(temp_last_used_chunks_tx&0xff)) { chunks_tx_progress = 1; } } temp_used_chunks_tx >>= 8; temp_last_used_chunks_tx >>= 8; } if ((mac_tx_frames_low) || (mac_tx_frames_high) || (!(used_chunks_tx&used_chunks_mask)) || (!(nesdev->last_used_chunks_tx&used_chunks_mask)) || (chunks_tx_progress) ) { nesdev->last_used_chunks_tx = used_chunks_tx; break; } nesdev->last_used_chunks_tx = used_chunks_tx; barrier(); nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONTROL, 0x00000005); mh_pauses_sent++; mac_tx_pauses = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_FRAMES); if (mac_tx_pauses) { nesdev->mac_pause_frames_sent += mac_tx_pauses; break; } tx_control = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONTROL); tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG); tx_pause_quanta = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_QUANTA); rx_control = nes_read_indexed(nesdev, NES_IDX_MAC_RX_CONTROL); rx_config = nes_read_indexed(nesdev, NES_IDX_MAC_RX_CONFIG); mac_exact_match = nes_read_indexed(nesdev, NES_IDX_MAC_EXACT_MATCH_BOTTOM); mpp_debug = nes_read_indexed(nesdev, NES_IDX_MPP_DEBUG); /* one last ditch effort to avoid a false positive */ mac_tx_pauses = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_FRAMES); if (mac_tx_pauses) { nesdev->last_mac_tx_pauses = nesdev->mac_pause_frames_sent; nes_debug(NES_DBG_HW, "failsafe caught slow outbound pause\n"); break; } mh_detected++; nes_write_indexed(nesdev, 
NES_IDX_MAC_TX_CONTROL, 0x00000000); nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, 0x00000000); reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET); nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value | 0x0000001d); while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040) && (i++ < 5000)) { /* mdelay(1); */ } nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x00000008); serdes_status = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0); nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x000bdef7); nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_DRIVE0, 0x9ce73000); nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_MODE0, 0x0ff00000); nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_SIGDET0, 0x00000000); nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_BYPASS0, 0x00000000); nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_LOOPBACK_CONTROL0, 0x00000000); if (nesadapter->OneG_Mode) { nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0182222); } else { nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0042222); } serdes_status = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_STATUS0); nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000ff); nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONTROL, tx_control); nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config); nes_write_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_QUANTA, tx_pause_quanta); nes_write_indexed(nesdev, NES_IDX_MAC_RX_CONTROL, rx_control); nes_write_indexed(nesdev, NES_IDX_MAC_RX_CONFIG, rx_config); nes_write_indexed(nesdev, NES_IDX_MAC_EXACT_MATCH_BOTTOM, mac_exact_match); nes_write_indexed(nesdev, NES_IDX_MPP_DEBUG, mpp_debug); } while (0); nesadapter->mac_sw_state[0] = NES_MAC_SW_IDLE; no_mh_work: nesdev->nesadapter->mh_timer.expires = jiffies + (HZ/5); add_timer(&nesdev->nesadapter->mh_timer); } /** * nes_clc */ void nes_clc(unsigned long parm) { unsigned long flags; struct nes_device *nesdev = (struct 
nes_device *)parm; struct nes_adapter *nesadapter = nesdev->nesadapter; spin_lock_irqsave(&nesadapter->phy_lock, flags); nesadapter->link_interrupt_count[0] = 0; nesadapter->link_interrupt_count[1] = 0; nesadapter->link_interrupt_count[2] = 0; nesadapter->link_interrupt_count[3] = 0; spin_unlock_irqrestore(&nesadapter->phy_lock, flags); nesadapter->lc_timer.expires = jiffies + 3600 * HZ; /* 1 hour */ add_timer(&nesadapter->lc_timer); } /** * nes_dump_mem */ void nes_dump_mem(unsigned int dump_debug_level, void *addr, int length) { char xlate[] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; char *ptr; char hex_buf[80]; char ascii_buf[20]; int num_char; int num_ascii; int num_hex; if (!(nes_debug_level & dump_debug_level)) { return; } ptr = addr; if (length > 0x100) { nes_debug(dump_debug_level, "Length truncated from %x to %x\n", length, 0x100); length = 0x100; } nes_debug(dump_debug_level, "Address=0x%p, length=0x%x (%d)\n", ptr, length, length); memset(ascii_buf, 0, 20); memset(hex_buf, 0, 80); num_ascii = 0; num_hex = 0; for (num_char = 0; num_char < length; num_char++) { if (num_ascii == 8) { ascii_buf[num_ascii++] = ' '; hex_buf[num_hex++] = '-'; hex_buf[num_hex++] = ' '; } if (*ptr < 0x20 || *ptr > 0x7e) ascii_buf[num_ascii++] = '.'; else ascii_buf[num_ascii++] = *ptr; hex_buf[num_hex++] = xlate[((*ptr & 0xf0) >> 4)]; hex_buf[num_hex++] = xlate[*ptr & 0x0f]; hex_buf[num_hex++] = ' '; ptr++; if (num_ascii >= 17) { /* output line and reset */ nes_debug(dump_debug_level, " %s | %s\n", hex_buf, ascii_buf); memset(ascii_buf, 0, 20); memset(hex_buf, 0, 80); num_ascii = 0; num_hex = 0; } } /* output the rest */ if (num_ascii) { while (num_ascii < 17) { if (num_ascii == 8) { hex_buf[num_hex++] = ' '; hex_buf[num_hex++] = ' '; } hex_buf[num_hex++] = ' '; hex_buf[num_hex++] = ' '; hex_buf[num_hex++] = ' '; num_ascii++; } nes_debug(dump_debug_level, " %s | %s\n", hex_buf, ascii_buf); } }
gpl-2.0
omnirom/android_kernel_htc_enrc2b
drivers/video/g364fb.c
3987
6899
/* $Id: g364fb.c,v 1.3 1998/08/28 22:43:00 tsbogend Exp $ * * linux/drivers/video/g364fb.c -- Mips Magnum frame buffer device * * (C) 1998 Thomas Bogendoerfer * * This driver is based on tgafb.c * * Copyright (C) 1997 Geert Uytterhoeven * Copyright (C) 1995 Jay Estabrook * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/console.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/fb.h> #include <linux/init.h> #include <asm/io.h> #include <asm/jazz.h> /* * Various defines for the G364 */ #define G364_MEM_BASE 0xe4400000 #define G364_PORT_BASE 0xe4000000 #define ID_REG 0xe4000000 /* Read only */ #define BOOT_REG 0xe4080000 #define TIMING_REG 0xe4080108 /* to 0x080170 - DON'T TOUCH! */ #define DISPLAY_REG 0xe4080118 #define VDISPLAY_REG 0xe4080150 #define MASK_REG 0xe4080200 #define CTLA_REG 0xe4080300 #define CURS_TOGGLE 0x800000 #define BIT_PER_PIX 0x700000 /* bits 22 to 20 of Control A */ #define DELAY_SAMPLE 0x080000 #define PORT_INTER 0x040000 #define PIX_PIPE_DEL 0x030000 /* bits 17 and 16 of Control A */ #define PIX_PIPE_DEL2 0x008000 /* same as above - don't ask me why */ #define TR_CYCLE_TOG 0x004000 #define VRAM_ADR_INC 0x003000 /* bits 13 and 12 of Control A */ #define BLANK_OFF 0x000800 #define FORCE_BLANK 0x000400 #define BLK_FUN_SWTCH 0x000200 #define BLANK_IO 0x000100 #define BLANK_LEVEL 0x000080 #define A_VID_FORM 0x000040 #define D_SYNC_FORM 0x000020 #define FRAME_FLY_PAT 0x000010 #define OP_MODE 0x000008 #define INTL_STAND 0x000004 #define SCRN_FORM 0x000002 #define ENABLE_VTG 0x000001 #define TOP_REG 0xe4080400 #define CURS_PAL_REG 0xe4080508 /* to 0x080518 */ #define CHKSUM_REG 0xe4080600 /* to 0x080610 - unused */ #define CURS_POS_REG 
0xe4080638 #define CLR_PAL_REG 0xe4080800 /* to 0x080ff8 */ #define CURS_PAT_REG 0xe4081000 /* to 0x081ff8 */ #define MON_ID_REG 0xe4100000 /* unused */ #define RESET_REG 0xe4180000 /* Write only */ static struct fb_info fb_info; static struct fb_fix_screeninfo fb_fix __initdata = { .id = "G364 8plane", .smem_start = 0x40000000, /* physical address */ .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_PSEUDOCOLOR, .ypanstep = 1, .accel = FB_ACCEL_NONE, }; static struct fb_var_screeninfo fb_var __initdata = { .bits_per_pixel = 8, .red = { 0, 8, 0 }, .green = { 0, 8, 0 }, .blue = { 0, 8, 0 }, .activate = FB_ACTIVATE_NOW, .height = -1, .width = -1, .pixclock = 39722, .left_margin = 40, .right_margin = 24, .upper_margin = 32, .lower_margin = 11, .hsync_len = 96, .vsync_len = 2, .vmode = FB_VMODE_NONINTERLACED, }; /* * Interface used by the world */ int g364fb_init(void); static int g364fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info); static int g364fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info); static int g364fb_cursor(struct fb_info *info, struct fb_cursor *cursor); static int g364fb_blank(int blank, struct fb_info *info); static struct fb_ops g364fb_ops = { .owner = THIS_MODULE, .fb_setcolreg = g364fb_setcolreg, .fb_pan_display = g364fb_pan_display, .fb_blank = g364fb_blank, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, .fb_cursor = g364fb_cursor, }; int g364fb_cursor(struct fb_info *info, struct fb_cursor *cursor) { switch (cursor->enable) { case CM_ERASE: *(unsigned int *) CTLA_REG |= CURS_TOGGLE; break; case CM_MOVE: case CM_DRAW: *(unsigned int *) CTLA_REG &= ~CURS_TOGGLE; *(unsigned int *) CURS_POS_REG = ((x * fontwidth(p)) << 12) | ((y * fontheight(p)) - info->var.yoffset); break; } return 0; } /* * Pan or Wrap the Display * * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag */ static int g364fb_pan_display(struct 
fb_var_screeninfo *var, struct fb_info *info) { if (var->xoffset || var->yoffset + var->yres > var->yres_virtual) return -EINVAL; *(unsigned int *) TOP_REG = var->yoffset * var->xres; return 0; } /* * Blank the display. */ static int g364fb_blank(int blank, struct fb_info *info) { if (blank) *(unsigned int *) CTLA_REG |= FORCE_BLANK; else *(unsigned int *) CTLA_REG &= ~FORCE_BLANK; return 0; } /* * Set a single color register. Return != 0 for invalid regno. */ static int g364fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info) { volatile unsigned int *ptr = (volatile unsigned int *) CLR_PAL_REG; if (regno > 255) return 1; red >>= 8; green >>= 8; blue >>= 8; ptr[regno << 1] = (red << 16) | (green << 8) | blue; return 0; } /* * Initialisation */ int __init g364fb_init(void) { volatile unsigned int *pal_ptr = (volatile unsigned int *) CLR_PAL_REG; volatile unsigned int *curs_pal_ptr = (volatile unsigned int *) CURS_PAL_REG; int mem, i, j; if (fb_get_options("g364fb", NULL)) return -ENODEV; /* TBD: G364 detection */ /* get the resolution set by ARC console */ *(volatile unsigned int *) CTLA_REG &= ~ENABLE_VTG; fb_var.xres = (*((volatile unsigned int *) DISPLAY_REG) & 0x00ffffff) * 4; fb_var.yres = (*((volatile unsigned int *) VDISPLAY_REG) & 0x00ffffff) / 2; *(volatile unsigned int *) CTLA_REG |= ENABLE_VTG; /* setup cursor */ curs_pal_ptr[0] |= 0x00ffffff; curs_pal_ptr[2] |= 0x00ffffff; curs_pal_ptr[4] |= 0x00ffffff; /* * first set the whole cursor to transparent */ for (i = 0; i < 512; i++) *(unsigned short *) (CURS_PAT_REG + i * 8) = 0; /* * switch the last two lines to cursor palette 3 * we assume here, that FONTSIZE_X is 8 */ *(unsigned short *) (CURS_PAT_REG + 14 * 64) = 0xffff; *(unsigned short *) (CURS_PAT_REG + 15 * 64) = 0xffff; fb_var.xres_virtual = fbvar.xres; fb_fix.line_length = (xres / 8) * fb_var.bits_per_pixel; fb_fix.smem_start = 0x40000000; /* physical address */ /* get size of video memory; this is 
special for the JAZZ hardware */ mem = (r4030_read_reg32(JAZZ_R4030_CONFIG) >> 8) & 3; fb_fix.smem_len = (1 << (mem * 2)) * 512 * 1024; fb_var.yres_virtual = fb_fix.smem_len / fb_var.xres; fb_info.fbops = &g364fb_ops; fb_info.screen_base = (char *) G364_MEM_BASE; /* virtual kernel address */ fb_info.var = fb_var; fb_info.fix = fb_fix; fb_info.flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN; fb_alloc_cmap(&fb_info.cmap, 255, 0); if (register_framebuffer(&fb_info) < 0) return -EINVAL; return 0; } module_init(g364fb_init); MODULE_LICENSE("GPL");
gpl-2.0
sakindia123/android_kernel_samsung_j700F
drivers/s390/char/ctrlchar.c
3987
1608
/* * Unified handling of special chars. * * Copyright IBM Corp. 2001 * Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com> * */ #include <linux/stddef.h> #include <asm/errno.h> #include <linux/sysrq.h> #include <linux/ctype.h> #include "ctrlchar.h" #ifdef CONFIG_MAGIC_SYSRQ static int ctrlchar_sysrq_key; static void ctrlchar_handle_sysrq(struct work_struct *work) { handle_sysrq(ctrlchar_sysrq_key); } static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq); #endif /** * Check for special chars at start of input. * * @param buf Console input buffer. * @param len Length of valid data in buffer. * @param tty The tty struct for this console. * @return CTRLCHAR_NONE, if nothing matched, * CTRLCHAR_SYSRQ, if sysrq was encountered * otherwise char to be inserted logically or'ed * with CTRLCHAR_CTRL */ unsigned int ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty) { if ((len < 2) || (len > 3)) return CTRLCHAR_NONE; /* hat is 0xb1 in codepage 037 (US etc.) and thus */ /* converted to 0x5e in ascii ('^') */ if ((buf[0] != '^') && (buf[0] != '\252')) return CTRLCHAR_NONE; #ifdef CONFIG_MAGIC_SYSRQ /* racy */ if (len == 3 && buf[1] == '-') { ctrlchar_sysrq_key = buf[2]; schedule_work(&ctrlchar_work); return CTRLCHAR_SYSRQ; } #endif if (len != 2) return CTRLCHAR_NONE; switch (tolower(buf[1])) { case 'c': return INTR_CHAR(tty) | CTRLCHAR_CTRL; case 'd': return EOF_CHAR(tty) | CTRLCHAR_CTRL; case 'z': return SUSP_CHAR(tty) | CTRLCHAR_CTRL; } return CTRLCHAR_NONE; }
gpl-2.0
flwh/kernel_mtk6577
arch/score/kernel/setup.c
3987
4067
/*
 * arch/score/kernel/setup.c
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>

#include <asm-generic/sections.h>
#include <asm/setup.h>

struct screen_info screen_info;
unsigned long kernelsp;

static char command_line[COMMAND_LINE_SIZE];

static struct resource code_resource = { .name = "Kernel code",};
static struct resource data_resource = { .name = "Kernel data",};

/*
 * Set up the boot-time memory allocator, register low memory with the
 * page allocator, and reserve the initrd image (if any).
 *
 * NOTE(review): 'size' is computed from initrd_start/initrd_end before
 * the bootmem allocator is initialized; presumably the boot loader has
 * already filled those globals in -- confirm against the platform's
 * boot protocol.
 */
static void __init bootmem_init(void)
{
	unsigned long start_pfn, bootmap_size;
	unsigned long size = initrd_end - initrd_start;

	/* kernel image ends at _end; usable memory starts right after */
	start_pfn = PFN_UP(__pa(&_end));

	min_low_pfn = PFN_UP(MEMORY_START);
	max_low_pfn = PFN_UP(MEMORY_START + MEMORY_SIZE);
	max_mapnr = max_low_pfn - min_low_pfn;

	/* Initialize the boot-time allocator with low memory only. */
	bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
					 min_low_pfn, max_low_pfn);
	add_active_range(0, min_low_pfn, max_low_pfn);

	free_bootmem(PFN_PHYS(start_pfn),
		     (max_low_pfn - start_pfn) << PAGE_SHIFT);
	memory_present(0, start_pfn, max_low_pfn);

	/* Reserve space for the bootmem bitmap. */
	reserve_bootmem(PFN_PHYS(start_pfn), bootmap_size,
			BOOTMEM_DEFAULT);

	/* size == 0 means the boot loader passed no initrd */
	if (size == 0) {
		printk(KERN_INFO "Initrd not found or empty");
		goto disable;
	}

	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		printk(KERN_ERR "Initrd extends beyond end of memory");
		goto disable;
	}

	/* Reserve space for the initrd bitmap. */
	reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
		initrd_start, size);
	return;
disable:
	printk(KERN_CONT " - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}

/*
 * Describe the kernel's RAM, code and data regions in /proc/iomem.
 * The "System RAM" resource is bootmem-allocated and never freed.
 */
static void __init resource_init(void)
{
	struct resource *res;

	code_resource.start = __pa(&_text);
	code_resource.end = __pa(&_etext) - 1;
	data_resource.start = __pa(&_etext);
	data_resource.end = __pa(&_edata) - 1;

	res = alloc_bootmem(sizeof(struct resource));
	res->name = "System RAM";
	res->start = MEMORY_START;
	res->end = MEMORY_START + MEMORY_SIZE - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	request_resource(&iomem_resource, res);

	/* nest code/data inside the System RAM resource */
	request_resource(res, &code_resource);
	request_resource(res, &data_resource);
}

/*
 * Architecture entry point called from init/main.c: wires up the
 * command line, caches, TLB, memory and resources, in that order.
 */
void __init setup_arch(char **cmdline_p)
{
	/* no address-space randomization on this architecture */
	randomize_va_space = 0;
	*cmdline_p = command_line;

	cpu_cache_init();
	tlb_init();
	bootmem_init();
	paging_init();
	resource_init();
}

/* /proc/cpuinfo backend: single-CPU, prints only the processor index. */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	unsigned long n = (unsigned long) v - 1;

	seq_printf(m, "processor\t\t: %ld\n", n);
	seq_printf(m, "\n");

	return 0;
}

/* seq_file iterator: exactly one entry (one CPU). */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	unsigned long i = *pos;

	return i < 1 ? (void *) (i + 1) : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};

/* Nothing to register: no per-CPU or NUMA topology on this platform. */
static int __init topology_init(void)
{
	return 0;
}

subsys_initcall(topology_init);
gpl-2.0
garwynn/D710BST_FI22_Kernel
drivers/scsi/aic94xx/aic94xx_tmf.c
4243
20152
/* * Aic94xx Task Management Functions * * Copyright (C) 2005 Adaptec, Inc. All rights reserved. * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> * * This file is licensed under GPLv2. * * This file is part of the aic94xx driver. * * The aic94xx driver is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; version 2 of the * License. * * The aic94xx driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with the aic94xx driver; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/spinlock.h> #include <linux/gfp.h> #include "aic94xx.h" #include "aic94xx_sas.h" #include "aic94xx_hwi.h" /* ---------- Internal enqueue ---------- */ static int asd_enqueue_internal(struct asd_ascb *ascb, void (*tasklet_complete)(struct asd_ascb *, struct done_list_struct *), void (*timed_out)(unsigned long)) { int res; ascb->tasklet_complete = tasklet_complete; ascb->uldd_timer = 1; ascb->timer.data = (unsigned long) ascb; ascb->timer.function = timed_out; ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT; add_timer(&ascb->timer); res = asd_post_ascb_list(ascb->ha, ascb, 1); if (unlikely(res)) del_timer(&ascb->timer); return res; } /* ---------- CLEAR NEXUS ---------- */ struct tasklet_completion_status { int dl_opcode; int tmf_state; u8 tag_valid:1; __be16 tag; }; #define DECLARE_TCS(tcs) \ struct tasklet_completion_status tcs = { \ .dl_opcode = 0, \ .tmf_state = 0, \ .tag_valid = 0, \ .tag = 0, \ } static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb, struct done_list_struct *dl) { struct tasklet_completion_status *tcs = 
ascb->uldd_task; ASD_DPRINTK("%s: here\n", __func__); if (!del_timer(&ascb->timer)) { ASD_DPRINTK("%s: couldn't delete timer\n", __func__); return; } ASD_DPRINTK("%s: opcode: 0x%x\n", __func__, dl->opcode); tcs->dl_opcode = dl->opcode; complete(ascb->completion); asd_ascb_free(ascb); } static void asd_clear_nexus_timedout(unsigned long data) { struct asd_ascb *ascb = (void *)data; struct tasklet_completion_status *tcs = ascb->uldd_task; ASD_DPRINTK("%s: here\n", __func__); tcs->dl_opcode = TMF_RESP_FUNC_FAILED; complete(ascb->completion); } #define CLEAR_NEXUS_PRE \ struct asd_ascb *ascb; \ struct scb *scb; \ int res; \ DECLARE_COMPLETION_ONSTACK(completion); \ DECLARE_TCS(tcs); \ \ ASD_DPRINTK("%s: PRE\n", __func__); \ res = 1; \ ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \ if (!ascb) \ return -ENOMEM; \ \ ascb->completion = &completion; \ ascb->uldd_task = &tcs; \ scb = ascb->scb; \ scb->header.opcode = CLEAR_NEXUS #define CLEAR_NEXUS_POST \ ASD_DPRINTK("%s: POST\n", __func__); \ res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \ asd_clear_nexus_timedout); \ if (res) \ goto out_err; \ ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __func__); \ wait_for_completion(&completion); \ res = tcs.dl_opcode; \ if (res == TC_NO_ERROR) \ res = TMF_RESP_FUNC_COMPLETE; \ return res; \ out_err: \ asd_ascb_free(ascb); \ return res int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha) { struct asd_ha_struct *asd_ha = sas_ha->lldd_ha; CLEAR_NEXUS_PRE; scb->clear_nexus.nexus = NEXUS_ADAPTER; CLEAR_NEXUS_POST; } int asd_clear_nexus_port(struct asd_sas_port *port) { struct asd_ha_struct *asd_ha = port->ha->lldd_ha; CLEAR_NEXUS_PRE; scb->clear_nexus.nexus = NEXUS_PORT; scb->clear_nexus.conn_mask = port->phy_mask; CLEAR_NEXUS_POST; } enum clear_nexus_phase { NEXUS_PHASE_PRE, NEXUS_PHASE_POST, NEXUS_PHASE_RESUME, }; static int asd_clear_nexus_I_T(struct domain_device *dev, enum clear_nexus_phase phase) { struct asd_ha_struct *asd_ha = 
dev->port->ha->lldd_ha; CLEAR_NEXUS_PRE; scb->clear_nexus.nexus = NEXUS_I_T; switch (phase) { case NEXUS_PHASE_PRE: scb->clear_nexus.flags = EXEC_Q | SUSPEND_TX; break; case NEXUS_PHASE_POST: scb->clear_nexus.flags = SEND_Q | NOTINQ; break; case NEXUS_PHASE_RESUME: scb->clear_nexus.flags = RESUME_TX; } scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) dev->lldd_dev); CLEAR_NEXUS_POST; } int asd_I_T_nexus_reset(struct domain_device *dev) { int res, tmp_res, i; struct sas_phy *phy = sas_find_local_phy(dev); /* Standard mandates link reset for ATA (type 0) and * hard reset for SSP (type 1) */ int reset_type = (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1; asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE); /* send a hard reset */ ASD_DPRINTK("sending %s reset to %s\n", reset_type ? "hard" : "soft", dev_name(&phy->dev)); res = sas_phy_reset(phy, reset_type); if (res == TMF_RESP_FUNC_COMPLETE) { /* wait for the maximum settle time */ msleep(500); /* clear all outstanding commands (keep nexus suspended) */ asd_clear_nexus_I_T(dev, NEXUS_PHASE_POST); } for (i = 0 ; i < 3; i++) { tmp_res = asd_clear_nexus_I_T(dev, NEXUS_PHASE_RESUME); if (tmp_res == TC_RESUME) return res; msleep(500); } /* This is a bit of a problem: the sequencer is still suspended * and is refusing to resume. 
Hope it will resume on a bigger hammer * or the disk is lost */ dev_printk(KERN_ERR, &phy->dev, "Failed to resume nexus after reset 0x%x\n", tmp_res); return TMF_RESP_FUNC_FAILED; } static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun) { struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; CLEAR_NEXUS_PRE; scb->clear_nexus.nexus = NEXUS_I_T_L; scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ; memcpy(scb->clear_nexus.ssp_task.lun, lun, 8); scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) dev->lldd_dev); CLEAR_NEXUS_POST; } static int asd_clear_nexus_tag(struct sas_task *task) { struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; struct asd_ascb *tascb = task->lldd_task; CLEAR_NEXUS_PRE; scb->clear_nexus.nexus = NEXUS_TAG; memcpy(scb->clear_nexus.ssp_task.lun, task->ssp_task.LUN, 8); scb->clear_nexus.ssp_task.tag = tascb->tag; if (task->dev->tproto) scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) task->dev->lldd_dev); CLEAR_NEXUS_POST; } static int asd_clear_nexus_index(struct sas_task *task) { struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; struct asd_ascb *tascb = task->lldd_task; CLEAR_NEXUS_PRE; scb->clear_nexus.nexus = NEXUS_TRANS_CX; if (task->dev->tproto) scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) task->dev->lldd_dev); scb->clear_nexus.index = cpu_to_le16(tascb->tc_index); CLEAR_NEXUS_POST; } /* ---------- TMFs ---------- */ static void asd_tmf_timedout(unsigned long data) { struct asd_ascb *ascb = (void *) data; struct tasklet_completion_status *tcs = ascb->uldd_task; ASD_DPRINTK("tmf timed out\n"); tcs->tmf_state = TMF_RESP_FUNC_FAILED; complete(ascb->completion); } static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb, struct done_list_struct *dl) { struct asd_ha_struct *asd_ha = ascb->ha; unsigned long flags; struct tc_resp_sb_struct { __le16 index_escb; u8 len_lsb; u8 flags; } __attribute__ ((packed)) *resp_sb = (void *) dl->status_block; int edb_id = 
((resp_sb->flags & 0x70) >> 4)-1; struct asd_ascb *escb; struct asd_dma_tok *edb; struct ssp_frame_hdr *fh; struct ssp_response_iu *ru; int res = TMF_RESP_FUNC_FAILED; ASD_DPRINTK("tmf resp tasklet\n"); spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags); escb = asd_tc_index_find(&asd_ha->seq, (int)le16_to_cpu(resp_sb->index_escb)); spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags); if (!escb) { ASD_DPRINTK("Uh-oh! No escb for this dl?!\n"); return res; } edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index]; ascb->tag = *(__be16 *)(edb->vaddr+4); fh = edb->vaddr + 16; ru = edb->vaddr + 16 + sizeof(*fh); res = ru->status; if (ru->datapres == 1) /* Response data present */ res = ru->resp_data[3]; #if 0 ascb->tag = fh->tag; #endif ascb->tag_valid = 1; asd_invalidate_edb(escb, edb_id); return res; } static void asd_tmf_tasklet_complete(struct asd_ascb *ascb, struct done_list_struct *dl) { struct tasklet_completion_status *tcs; if (!del_timer(&ascb->timer)) return; tcs = ascb->uldd_task; ASD_DPRINTK("tmf tasklet complete\n"); tcs->dl_opcode = dl->opcode; if (dl->opcode == TC_SSP_RESP) { tcs->tmf_state = asd_get_tmf_resp_tasklet(ascb, dl); tcs->tag_valid = ascb->tag_valid; tcs->tag = ascb->tag; } complete(ascb->completion); asd_ascb_free(ascb); } static int asd_clear_nexus(struct sas_task *task) { int res = TMF_RESP_FUNC_FAILED; int leftover; struct asd_ascb *tascb = task->lldd_task; DECLARE_COMPLETION_ONSTACK(completion); unsigned long flags; tascb->completion = &completion; ASD_DPRINTK("task not done, clearing nexus\n"); if (tascb->tag_valid) res = asd_clear_nexus_tag(task); else res = asd_clear_nexus_index(task); leftover = wait_for_completion_timeout(&completion, AIC94XX_SCB_TIMEOUT); tascb->completion = NULL; ASD_DPRINTK("came back from clear nexus\n"); spin_lock_irqsave(&task->task_state_lock, flags); if (leftover < 1) res = TMF_RESP_FUNC_FAILED; if (task->task_state_flags & SAS_TASK_STATE_DONE) res = TMF_RESP_FUNC_COMPLETE; 
spin_unlock_irqrestore(&task->task_state_lock, flags); return res; } /** * asd_abort_task -- ABORT TASK TMF * @task: the task to be aborted * * Before calling ABORT TASK the task state flags should be ORed with * SAS_TASK_STATE_ABORTED (unless SAS_TASK_STATE_DONE is set) under * the task_state_lock IRQ spinlock, then ABORT TASK *must* be called. * * Implements the ABORT TASK TMF, I_T_L_Q nexus. * Returns: SAS TMF responses (see sas_task.h), * -ENOMEM, * -SAS_QUEUE_FULL. * * When ABORT TASK returns, the caller of ABORT TASK checks first the * task->task_state_flags, and then the return value of ABORT TASK. * * If the task has task state bit SAS_TASK_STATE_DONE set, then the * task was completed successfully prior to it being aborted. The * caller of ABORT TASK has responsibility to call task->task_done() * xor free the task, depending on their framework. The return code * is TMF_RESP_FUNC_FAILED in this case. * * Else the SAS_TASK_STATE_DONE bit is not set, * If the return code is TMF_RESP_FUNC_COMPLETE, then * the task was aborted successfully. The caller of * ABORT TASK has responsibility to call task->task_done() * to finish the task, xor free the task depending on their * framework. * else * the ABORT TASK returned some kind of error. The task * was _not_ cancelled. Nothing can be assumed. * The caller of ABORT TASK may wish to retry. 
*/ int asd_abort_task(struct sas_task *task) { struct asd_ascb *tascb = task->lldd_task; struct asd_ha_struct *asd_ha = tascb->ha; int res = 1; unsigned long flags; struct asd_ascb *ascb = NULL; struct scb *scb; int leftover; DECLARE_TCS(tcs); DECLARE_COMPLETION_ONSTACK(completion); DECLARE_COMPLETION_ONSTACK(tascb_completion); tascb->completion = &tascb_completion; spin_lock_irqsave(&task->task_state_lock, flags); if (task->task_state_flags & SAS_TASK_STATE_DONE) { spin_unlock_irqrestore(&task->task_state_lock, flags); res = TMF_RESP_FUNC_COMPLETE; ASD_DPRINTK("%s: task 0x%p done\n", __func__, task); goto out_done; } spin_unlock_irqrestore(&task->task_state_lock, flags); ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); if (!ascb) return -ENOMEM; ascb->uldd_task = &tcs; ascb->completion = &completion; scb = ascb->scb; scb->header.opcode = SCB_ABORT_TASK; switch (task->task_proto) { case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: scb->abort_task.proto_conn_rate = (1 << 5); /* STP */ break; case SAS_PROTOCOL_SSP: scb->abort_task.proto_conn_rate = (1 << 4); /* SSP */ scb->abort_task.proto_conn_rate |= task->dev->linkrate; break; case SAS_PROTOCOL_SMP: break; default: break; } if (task->task_proto == SAS_PROTOCOL_SSP) { scb->abort_task.ssp_frame.frame_type = SSP_TASK; memcpy(scb->abort_task.ssp_frame.hashed_dest_addr, task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); memcpy(scb->abort_task.ssp_frame.hashed_src_addr, task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); scb->abort_task.ssp_frame.tptt = cpu_to_be16(0xFFFF); memcpy(scb->abort_task.ssp_task.lun, task->ssp_task.LUN, 8); scb->abort_task.ssp_task.tmf = TMF_ABORT_TASK; scb->abort_task.ssp_task.tag = cpu_to_be16(0xFFFF); } scb->abort_task.sister_scb = cpu_to_le16(0xFFFF); scb->abort_task.conn_handle = cpu_to_le16( (u16)(unsigned long)task->dev->lldd_dev); scb->abort_task.retry_count = 1; scb->abort_task.index = cpu_to_le16((u16)tascb->tc_index); scb->abort_task.itnl_to = 
cpu_to_le16(ITNL_TIMEOUT_CONST); res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete, asd_tmf_timedout); if (res) goto out_free; wait_for_completion(&completion); ASD_DPRINTK("tmf came back\n"); tascb->tag = tcs.tag; tascb->tag_valid = tcs.tag_valid; spin_lock_irqsave(&task->task_state_lock, flags); if (task->task_state_flags & SAS_TASK_STATE_DONE) { spin_unlock_irqrestore(&task->task_state_lock, flags); res = TMF_RESP_FUNC_COMPLETE; ASD_DPRINTK("%s: task 0x%p done\n", __func__, task); goto out_done; } spin_unlock_irqrestore(&task->task_state_lock, flags); if (tcs.dl_opcode == TC_SSP_RESP) { /* The task to be aborted has been sent to the device. * We got a Response IU for the ABORT TASK TMF. */ if (tcs.tmf_state == TMF_RESP_FUNC_COMPLETE) res = asd_clear_nexus(task); else res = tcs.tmf_state; } else if (tcs.dl_opcode == TC_NO_ERROR && tcs.tmf_state == TMF_RESP_FUNC_FAILED) { /* timeout */ res = TMF_RESP_FUNC_FAILED; } else { /* In the following we assume that the managing layer * will _never_ make a mistake, when issuing ABORT * TASK. */ switch (tcs.dl_opcode) { default: res = asd_clear_nexus(task); /* fallthrough */ case TC_NO_ERROR: break; /* The task hasn't been sent to the device xor * we never got a (sane) Response IU for the * ABORT TASK TMF. 
*/ case TF_NAK_RECV: res = TMF_RESP_INVALID_FRAME; break; case TF_TMF_TASK_DONE: /* done but not reported yet */ res = TMF_RESP_FUNC_FAILED; leftover = wait_for_completion_timeout(&tascb_completion, AIC94XX_SCB_TIMEOUT); spin_lock_irqsave(&task->task_state_lock, flags); if (leftover < 1) res = TMF_RESP_FUNC_FAILED; if (task->task_state_flags & SAS_TASK_STATE_DONE) res = TMF_RESP_FUNC_COMPLETE; spin_unlock_irqrestore(&task->task_state_lock, flags); break; case TF_TMF_NO_TAG: case TF_TMF_TAG_FREE: /* the tag is in the free list */ case TF_TMF_NO_CONN_HANDLE: /* no such device */ res = TMF_RESP_FUNC_COMPLETE; break; case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */ res = TMF_RESP_FUNC_ESUPP; break; } } out_done: tascb->completion = NULL; if (res == TMF_RESP_FUNC_COMPLETE) { task->lldd_task = NULL; mb(); asd_ascb_free(tascb); } ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res); return res; out_free: asd_ascb_free(ascb); ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res); return res; } /** * asd_initiate_ssp_tmf -- send a TMF to an I_T_L or I_T_L_Q nexus * @dev: pointer to struct domain_device of interest * @lun: pointer to u8[8] which is the LUN * @tmf: the TMF to be performed (see sas_task.h or the SAS spec) * @index: the transaction context of the task to be queried if QT TMF * * This function is used to send ABORT TASK SET, CLEAR ACA, * CLEAR TASK SET, LU RESET and QUERY TASK TMFs. * * No SCBs should be queued to the I_T_L nexus when this SCB is * pending. 
* * Returns: TMF response code (see sas_task.h or the SAS spec) */ static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun, int tmf, int index) { struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; struct asd_ascb *ascb; int res = 1; struct scb *scb; DECLARE_COMPLETION_ONSTACK(completion); DECLARE_TCS(tcs); if (!(dev->tproto & SAS_PROTOCOL_SSP)) return TMF_RESP_FUNC_ESUPP; ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); if (!ascb) return -ENOMEM; ascb->completion = &completion; ascb->uldd_task = &tcs; scb = ascb->scb; if (tmf == TMF_QUERY_TASK) scb->header.opcode = QUERY_SSP_TASK; else scb->header.opcode = INITIATE_SSP_TMF; scb->ssp_tmf.proto_conn_rate = (1 << 4); /* SSP */ scb->ssp_tmf.proto_conn_rate |= dev->linkrate; /* SSP frame header */ scb->ssp_tmf.ssp_frame.frame_type = SSP_TASK; memcpy(scb->ssp_tmf.ssp_frame.hashed_dest_addr, dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); memcpy(scb->ssp_tmf.ssp_frame.hashed_src_addr, dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); scb->ssp_tmf.ssp_frame.tptt = cpu_to_be16(0xFFFF); /* SSP Task IU */ memcpy(scb->ssp_tmf.ssp_task.lun, lun, 8); scb->ssp_tmf.ssp_task.tmf = tmf; scb->ssp_tmf.sister_scb = cpu_to_le16(0xFFFF); scb->ssp_tmf.conn_handle= cpu_to_le16((u16)(unsigned long) dev->lldd_dev); scb->ssp_tmf.retry_count = 1; scb->ssp_tmf.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST); if (tmf == TMF_QUERY_TASK) scb->ssp_tmf.index = cpu_to_le16(index); res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete, asd_tmf_timedout); if (res) goto out_err; wait_for_completion(&completion); switch (tcs.dl_opcode) { case TC_NO_ERROR: res = TMF_RESP_FUNC_COMPLETE; break; case TF_NAK_RECV: res = TMF_RESP_INVALID_FRAME; break; case TF_TMF_TASK_DONE: res = TMF_RESP_FUNC_FAILED; break; case TF_TMF_NO_TAG: case TF_TMF_TAG_FREE: /* the tag is in the free list */ case TF_TMF_NO_CONN_HANDLE: /* no such device */ res = TMF_RESP_FUNC_COMPLETE; break; case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */ res = 
TMF_RESP_FUNC_ESUPP; break; default: /* Allow TMF response codes to propagate upwards */ res = tcs.dl_opcode; break; } return res; out_err: asd_ascb_free(ascb); return res; } int asd_abort_task_set(struct domain_device *dev, u8 *lun) { int res = asd_initiate_ssp_tmf(dev, lun, TMF_ABORT_TASK_SET, 0); if (res == TMF_RESP_FUNC_COMPLETE) asd_clear_nexus_I_T_L(dev, lun); return res; } int asd_clear_aca(struct domain_device *dev, u8 *lun) { int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_ACA, 0); if (res == TMF_RESP_FUNC_COMPLETE) asd_clear_nexus_I_T_L(dev, lun); return res; } int asd_clear_task_set(struct domain_device *dev, u8 *lun) { int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_TASK_SET, 0); if (res == TMF_RESP_FUNC_COMPLETE) asd_clear_nexus_I_T_L(dev, lun); return res; } int asd_lu_reset(struct domain_device *dev, u8 *lun) { int res = asd_initiate_ssp_tmf(dev, lun, TMF_LU_RESET, 0); if (res == TMF_RESP_FUNC_COMPLETE) asd_clear_nexus_I_T_L(dev, lun); return res; } /** * asd_query_task -- send a QUERY TASK TMF to an I_T_L_Q nexus * task: pointer to sas_task struct of interest * * Returns: TMF_RESP_FUNC_COMPLETE if the task is not in the task set, * or TMF_RESP_FUNC_SUCC if the task is in the task set. * * Normally the management layer sets the task to aborted state, * and then calls query task and then abort task. */ int asd_query_task(struct sas_task *task) { struct asd_ascb *ascb = task->lldd_task; int index; if (ascb) { index = ascb->tc_index; return asd_initiate_ssp_tmf(task->dev, task->ssp_task.LUN, TMF_QUERY_TASK, index); } return TMF_RESP_FUNC_COMPLETE; }
gpl-2.0
Thunderoar/custom_kernel_goyave
arch/tile/lib/memcpy_user_64.c
6803
2945
/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Do memcpy(), but trap and return "n" when a load or store faults.
 *
 * Note: this idiom only works when memcpy() compiles to a leaf function.
 * Here leaf function not only means it does not have calls, but also
 * requires no stack operations (sp, stack frame pointer) and no
 * use of callee-saved registers, else "jrp lr" will be incorrect since
 * unwinding stack frame is bypassed. Since memcpy() is not complex so
 * these conditions are satisfied here, but we need to be careful when
 * modifying this file. This is not a clean solution but is the best
 * one so far.
 *
 * Also note that we are capturing "n" from the containing scope here.
 */

/*
 * _ST: store 'v' through 'p' with instruction 'inst'.  The fixup stub
 * at label 2 (placed in .coldtext.memcpy and wired up through the
 * __ex_table entry) moves the captured byte count 'n' into r0 and
 * returns straight out of the enclosing leaf function on a fault.
 */
#define _ST(p, inst, v)						\
	({							\
		asm("1: " #inst " %0, %1;"			\
		    ".pushsection .coldtext.memcpy,\"ax\";"	\
		    "2: { move r0, %2; jrp lr };"		\
		    ".section __ex_table,\"a\";"		\
		    ".quad 1b, 2b;"				\
		    ".popsection"				\
		    : "=m" (*(p)) : "r" (v), "r" (n));		\
	})

/*
 * _LD: load through 'p' with instruction 'inst'; same fault/fixup
 * scheme as _ST.  Evaluates to the loaded value.
 */
#define _LD(p, inst)						\
	({							\
		unsigned long __v;				\
		asm("1: " #inst " %0, %1;"			\
		    ".pushsection .coldtext.memcpy,\"ax\";"	\
		    "2: { move r0, %2; jrp lr };"		\
		    ".section __ex_table,\"a\";"		\
		    ".quad 1b, 2b;"				\
		    ".popsection"				\
		    : "=r" (__v) : "m" (*(p)), "r" (n));	\
		__v;						\
	})

/*
 * Instantiate memcpy_64.c as __copy_to_user_inatomic: guarded stores
 * (destination is user memory), plain loads.
 */
#define USERCOPY_FUNC __copy_to_user_inatomic
#define ST1(p, v) _ST((p), st1, (v))
#define ST2(p, v) _ST((p), st2, (v))
#define ST4(p, v) _ST((p), st4, (v))
#define ST8(p, v) _ST((p), st, (v))
#define LD1 LD
#define LD2 LD
#define LD4 LD
#define LD8 LD
#include "memcpy_64.c"

/*
 * Instantiate memcpy_64.c as __copy_from_user_inatomic: guarded loads
 * (source is user memory), plain stores.
 */
#define USERCOPY_FUNC __copy_from_user_inatomic
#define ST1 ST
#define ST2 ST
#define ST4 ST
#define ST8 ST
#define LD1(p) _LD((p), ld1u)
#define LD2(p) _LD((p), ld2u)
#define LD4(p) _LD((p), ld4u)
#define LD8(p) _LD((p), ld)
#include "memcpy_64.c"

/*
 * Instantiate memcpy_64.c as __copy_in_user_inatomic: both sides are
 * user memory, so both loads and stores are guarded.
 */
#define USERCOPY_FUNC __copy_in_user_inatomic
#define ST1(p, v) _ST((p), st1, (v))
#define ST2(p, v) _ST((p), st2, (v))
#define ST4(p, v) _ST((p), st4, (v))
#define ST8(p, v) _ST((p), st, (v))
#define LD1(p) _LD((p), ld1u)
#define LD2(p) _LD((p), ld2u)
#define LD4(p) _LD((p), ld4u)
#define LD8(p) _LD((p), ld)
#include "memcpy_64.c"

/*
 * Like __copy_from_user_inatomic(), but zero-fills whatever tail of
 * 'to' could not be copied, so the caller never observes uninitialized
 * kernel memory.  Returns the number of bytes NOT copied (0 on full
 * success).
 */
unsigned long __copy_from_user_zeroing(void *to, const void __user *from,
				       unsigned long n)
{
	unsigned long rc = __copy_from_user_inatomic(to, from, n);
	if (unlikely(rc))
		memset(to + n - rc, 0, rc);
	return rc;
}
gpl-2.0
Renzo-Olivares/vigor-ics-kernel
drivers/staging/go7007/wis-uda1342.c
8339
2767
/* * Copyright (C) 2005-2006 Micronas USA Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <media/tvaudio.h> #include <media/v4l2-common.h> #include "wis-i2c.h" static int write_reg(struct i2c_client *client, int reg, int value) { /* UDA1342 wants MSB first, but SMBus sends LSB first */ i2c_smbus_write_word_data(client, reg, swab16(value)); return 0; } static int wis_uda1342_command(struct i2c_client *client, unsigned int cmd, void *arg) { switch (cmd) { case VIDIOC_S_AUDIO: { int *inp = arg; switch (*inp) { case TVAUDIO_INPUT_TUNER: write_reg(client, 0x00, 0x1441); /* select input 2 */ break; case TVAUDIO_INPUT_EXTERN: write_reg(client, 0x00, 0x1241); /* select input 1 */ break; default: printk(KERN_ERR "wis-uda1342: input %d not supported\n", *inp); break; } break; } default: break; } return 0; } static int wis_uda1342_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct i2c_adapter *adapter = client->adapter; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) return -ENODEV; printk(KERN_DEBUG "wis-uda1342: initializing UDA1342 at address %d on %s\n", client->addr, adapter->name); write_reg(client, 0x00, 0x8000); /* reset registers */ write_reg(client, 0x00, 0x1241); /* select input 1 */ return 0; } static int 
wis_uda1342_remove(struct i2c_client *client) { return 0; } static const struct i2c_device_id wis_uda1342_id[] = { { "wis_uda1342", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, wis_uda1342_id); static struct i2c_driver wis_uda1342_driver = { .driver = { .name = "WIS UDA1342 I2C driver", }, .probe = wis_uda1342_probe, .remove = wis_uda1342_remove, .command = wis_uda1342_command, .id_table = wis_uda1342_id, }; static int __init wis_uda1342_init(void) { return i2c_add_driver(&wis_uda1342_driver); } static void __exit wis_uda1342_cleanup(void) { i2c_del_driver(&wis_uda1342_driver); } module_init(wis_uda1342_init); module_exit(wis_uda1342_cleanup); MODULE_LICENSE("GPL v2");
gpl-2.0
Split-Screen/android_kernel_semc_msm7x30
drivers/staging/media/go7007/wis-sony-tuner.c
8339
18645
/* * Copyright (C) 2005-2006 Micronas USA Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <linux/slab.h> #include <media/tuner.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include "wis-i2c.h" /* #define MPX_DEBUG */ /* AS(IF/MPX) pin: LOW HIGH/OPEN * IF/MPX address: 0x42/0x40 0x43/0x44 */ #define IF_I2C_ADDR 0x43 #define MPX_I2C_ADDR 0x44 static v4l2_std_id force_band; static char force_band_str[] = "-"; module_param_string(force_band, force_band_str, sizeof(force_band_str), 0644); static int force_mpx_mode = -1; module_param(force_mpx_mode, int, 0644); /* Store tuner info in the same format as tuner.c, so maybe we can put the * Sony tuner support in there. 
*/ struct sony_tunertype { char *name; unsigned char Vendor; /* unused here */ unsigned char Type; /* unused here */ unsigned short thresh1; /* band switch VHF_LO <=> VHF_HI */ unsigned short thresh2; /* band switch VHF_HI <=> UHF */ unsigned char VHF_L; unsigned char VHF_H; unsigned char UHF; unsigned char config; unsigned short IFPCoff; }; /* This array is indexed by (tuner_type - 200) */ static struct sony_tunertype sony_tuners[] = { { "Sony PAL+SECAM (BTF-PG472Z)", 0, 0, 16*144.25, 16*427.25, 0x01, 0x02, 0x04, 0xc6, 623}, { "Sony NTSC_JP (BTF-PK467Z)", 0, 0, 16*220.25, 16*467.25, 0x01, 0x02, 0x04, 0xc6, 940}, { "Sony NTSC (BTF-PB463Z)", 0, 0, 16*130.25, 16*364.25, 0x01, 0x02, 0x04, 0xc6, 732}, }; struct wis_sony_tuner { int type; v4l2_std_id std; unsigned int freq; int mpxmode; u32 audmode; }; /* Basically the same as default_set_tv_freq() in tuner.c */ static int set_freq(struct i2c_client *client, int freq) { struct wis_sony_tuner *t = i2c_get_clientdata(client); char *band_name; int n; int band_select; struct sony_tunertype *tun; u8 buffer[4]; tun = &sony_tuners[t->type - 200]; if (freq < tun->thresh1) { band_name = "VHF_L"; band_select = tun->VHF_L; } else if (freq < tun->thresh2) { band_name = "VHF_H"; band_select = tun->VHF_H; } else { band_name = "UHF"; band_select = tun->UHF; } printk(KERN_DEBUG "wis-sony-tuner: tuning to frequency %d.%04d (%s)\n", freq / 16, (freq % 16) * 625, band_name); n = freq + tun->IFPCoff; buffer[0] = n >> 8; buffer[1] = n & 0xff; buffer[2] = tun->config; buffer[3] = band_select; i2c_master_send(client, buffer, 4); return 0; } static int mpx_write(struct i2c_client *client, int dev, int addr, int val) { u8 buffer[5]; struct i2c_msg msg; buffer[0] = dev; buffer[1] = addr >> 8; buffer[2] = addr & 0xff; buffer[3] = val >> 8; buffer[4] = val & 0xff; msg.addr = MPX_I2C_ADDR; msg.flags = 0; msg.len = 5; msg.buf = buffer; i2c_transfer(client->adapter, &msg, 1); return 0; } /* * MPX register values for the BTF-PG472Z: * * FM_ NICAM_ 
SCART_ * MODUS SOURCE ACB PRESCAL PRESCAL PRESCAL SYSTEM VOLUME * 10/0030 12/0008 12/0013 12/000E 12/0010 12/0000 10/0020 12/0000 * --------------------------------------------------------------- * Auto 1003 0020 0100 2603 5000 XXXX 0001 7500 * * B/G * Mono 1003 0020 0100 2603 5000 XXXX 0003 7500 * A2 1003 0020 0100 2601 5000 XXXX 0003 7500 * NICAM 1003 0120 0100 2603 5000 XXXX 0008 7500 * * I * Mono 1003 0020 0100 2603 7900 XXXX 000A 7500 * NICAM 1003 0120 0100 2603 7900 XXXX 000A 7500 * * D/K * Mono 1003 0020 0100 2603 5000 XXXX 0004 7500 * A2-1 1003 0020 0100 2601 5000 XXXX 0004 7500 * A2-2 1003 0020 0100 2601 5000 XXXX 0005 7500 * A2-3 1003 0020 0100 2601 5000 XXXX 0007 7500 * NICAM 1003 0120 0100 2603 5000 XXXX 000B 7500 * * L/L' * Mono 0003 0200 0100 7C03 5000 2200 0009 7500 * NICAM 0003 0120 0100 7C03 5000 XXXX 0009 7500 * * M * Mono 1003 0200 0100 2B03 5000 2B00 0002 7500 * * For Asia, replace the 0x26XX in FM_PRESCALE with 0x14XX. * * Bilingual selection in A2/NICAM: * * High byte of SOURCE Left chan Right chan * 0x01 MAIN SUB * 0x03 MAIN MAIN * 0x04 SUB SUB * * Force mono in NICAM by setting the high byte of SOURCE to 0x02 (L/L') or * 0x00 (all other bands). 
Force mono in A2 with FMONO_A2: * * FMONO_A2 * 10/0022 * -------- * Forced mono ON 07F0 * Forced mono OFF 0190 */ static struct { enum { AUD_MONO, AUD_A2, AUD_NICAM, AUD_NICAM_L } audio_mode; u16 modus; u16 source; u16 acb; u16 fm_prescale; u16 nicam_prescale; u16 scart_prescale; u16 system; u16 volume; } mpx_audio_modes[] = { /* Auto */ { AUD_MONO, 0x1003, 0x0020, 0x0100, 0x2603, 0x5000, 0x0000, 0x0001, 0x7500 }, /* B/G Mono */ { AUD_MONO, 0x1003, 0x0020, 0x0100, 0x2603, 0x5000, 0x0000, 0x0003, 0x7500 }, /* B/G A2 */ { AUD_A2, 0x1003, 0x0020, 0x0100, 0x2601, 0x5000, 0x0000, 0x0003, 0x7500 }, /* B/G NICAM */ { AUD_NICAM, 0x1003, 0x0120, 0x0100, 0x2603, 0x5000, 0x0000, 0x0008, 0x7500 }, /* I Mono */ { AUD_MONO, 0x1003, 0x0020, 0x0100, 0x2603, 0x7900, 0x0000, 0x000A, 0x7500 }, /* I NICAM */ { AUD_NICAM, 0x1003, 0x0120, 0x0100, 0x2603, 0x7900, 0x0000, 0x000A, 0x7500 }, /* D/K Mono */ { AUD_MONO, 0x1003, 0x0020, 0x0100, 0x2603, 0x5000, 0x0000, 0x0004, 0x7500 }, /* D/K A2-1 */ { AUD_A2, 0x1003, 0x0020, 0x0100, 0x2601, 0x5000, 0x0000, 0x0004, 0x7500 }, /* D/K A2-2 */ { AUD_A2, 0x1003, 0x0020, 0x0100, 0x2601, 0x5000, 0x0000, 0x0005, 0x7500 }, /* D/K A2-3 */ { AUD_A2, 0x1003, 0x0020, 0x0100, 0x2601, 0x5000, 0x0000, 0x0007, 0x7500 }, /* D/K NICAM */ { AUD_NICAM, 0x1003, 0x0120, 0x0100, 0x2603, 0x5000, 0x0000, 0x000B, 0x7500 }, /* L/L' Mono */ { AUD_MONO, 0x0003, 0x0200, 0x0100, 0x7C03, 0x5000, 0x2200, 0x0009, 0x7500 }, /* L/L' NICAM */{ AUD_NICAM_L, 0x0003, 0x0120, 0x0100, 0x7C03, 0x5000, 0x0000, 0x0009, 0x7500 }, }; #define MPX_NUM_MODES ARRAY_SIZE(mpx_audio_modes) static int mpx_setup(struct i2c_client *client) { struct wis_sony_tuner *t = i2c_get_clientdata(client); u16 source = 0; u8 buffer[3]; struct i2c_msg msg; /* reset MPX */ buffer[0] = 0x00; buffer[1] = 0x80; buffer[2] = 0x00; msg.addr = MPX_I2C_ADDR; msg.flags = 0; msg.len = 3; msg.buf = buffer; i2c_transfer(client->adapter, &msg, 1); buffer[1] = 0x00; i2c_transfer(client->adapter, &msg, 1); if 
(mpx_audio_modes[t->mpxmode].audio_mode != AUD_MONO) { switch (t->audmode) { case V4L2_TUNER_MODE_MONO: switch (mpx_audio_modes[t->mpxmode].audio_mode) { case AUD_A2: source = mpx_audio_modes[t->mpxmode].source; break; case AUD_NICAM: source = 0x0000; break; case AUD_NICAM_L: source = 0x0200; break; default: break; } break; case V4L2_TUNER_MODE_STEREO: source = mpx_audio_modes[t->mpxmode].source; break; case V4L2_TUNER_MODE_LANG1: source = 0x0300; break; case V4L2_TUNER_MODE_LANG2: source = 0x0400; break; } source |= mpx_audio_modes[t->mpxmode].source & 0x00ff; } else source = mpx_audio_modes[t->mpxmode].source; mpx_write(client, 0x10, 0x0030, mpx_audio_modes[t->mpxmode].modus); mpx_write(client, 0x12, 0x0008, source); mpx_write(client, 0x12, 0x0013, mpx_audio_modes[t->mpxmode].acb); mpx_write(client, 0x12, 0x000e, mpx_audio_modes[t->mpxmode].fm_prescale); mpx_write(client, 0x12, 0x0010, mpx_audio_modes[t->mpxmode].nicam_prescale); mpx_write(client, 0x12, 0x000d, mpx_audio_modes[t->mpxmode].scart_prescale); mpx_write(client, 0x10, 0x0020, mpx_audio_modes[t->mpxmode].system); mpx_write(client, 0x12, 0x0000, mpx_audio_modes[t->mpxmode].volume); if (mpx_audio_modes[t->mpxmode].audio_mode == AUD_A2) mpx_write(client, 0x10, 0x0022, t->audmode == V4L2_TUNER_MODE_MONO ? 
0x07f0 : 0x0190); #ifdef MPX_DEBUG { u8 buf1[3], buf2[2]; struct i2c_msg msgs[2]; printk(KERN_DEBUG "wis-sony-tuner: MPX registers: %04x %04x " "%04x %04x %04x %04x %04x %04x\n", mpx_audio_modes[t->mpxmode].modus, source, mpx_audio_modes[t->mpxmode].acb, mpx_audio_modes[t->mpxmode].fm_prescale, mpx_audio_modes[t->mpxmode].nicam_prescale, mpx_audio_modes[t->mpxmode].scart_prescale, mpx_audio_modes[t->mpxmode].system, mpx_audio_modes[t->mpxmode].volume); buf1[0] = 0x11; buf1[1] = 0x00; buf1[2] = 0x7e; msgs[0].addr = MPX_I2C_ADDR; msgs[0].flags = 0; msgs[0].len = 3; msgs[0].buf = buf1; msgs[1].addr = MPX_I2C_ADDR; msgs[1].flags = I2C_M_RD; msgs[1].len = 2; msgs[1].buf = buf2; i2c_transfer(client->adapter, msgs, 2); printk(KERN_DEBUG "wis-sony-tuner: MPX system: %02x%02x\n", buf2[0], buf2[1]); buf1[0] = 0x11; buf1[1] = 0x02; buf1[2] = 0x00; i2c_transfer(client->adapter, msgs, 2); printk(KERN_DEBUG "wis-sony-tuner: MPX status: %02x%02x\n", buf2[0], buf2[1]); } #endif return 0; } /* * IF configuration values for the BTF-PG472Z: * * B/G: 0x94 0x70 0x49 * I: 0x14 0x70 0x4a * D/K: 0x14 0x70 0x4b * L: 0x04 0x70 0x4b * L': 0x44 0x70 0x53 * M: 0x50 0x30 0x4c */ static int set_if(struct i2c_client *client) { struct wis_sony_tuner *t = i2c_get_clientdata(client); u8 buffer[4]; struct i2c_msg msg; int default_mpx_mode = 0; /* configure IF */ buffer[0] = 0; if (t->std & V4L2_STD_PAL_BG) { buffer[1] = 0x94; buffer[2] = 0x70; buffer[3] = 0x49; default_mpx_mode = 1; } else if (t->std & V4L2_STD_PAL_I) { buffer[1] = 0x14; buffer[2] = 0x70; buffer[3] = 0x4a; default_mpx_mode = 4; } else if (t->std & V4L2_STD_PAL_DK) { buffer[1] = 0x14; buffer[2] = 0x70; buffer[3] = 0x4b; default_mpx_mode = 6; } else if (t->std & V4L2_STD_SECAM_L) { buffer[1] = 0x04; buffer[2] = 0x70; buffer[3] = 0x4b; default_mpx_mode = 11; } msg.addr = IF_I2C_ADDR; msg.flags = 0; msg.len = 4; msg.buf = buffer; i2c_transfer(client->adapter, &msg, 1); /* Select MPX mode if not forced by the user */ if (force_mpx_mode >= 
0 && force_mpx_mode < MPX_NUM_MODES) t->mpxmode = force_mpx_mode; else t->mpxmode = default_mpx_mode; printk(KERN_DEBUG "wis-sony-tuner: setting MPX to mode %d\n", t->mpxmode); mpx_setup(client); return 0; } static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg) { struct wis_sony_tuner *t = i2c_get_clientdata(client); switch (cmd) { #if 0 #ifdef TUNER_SET_TYPE_ADDR case TUNER_SET_TYPE_ADDR: { struct tuner_setup *tun_setup = arg; int *type = &tun_setup->type; #else case TUNER_SET_TYPE: { int *type = arg; #endif if (t->type >= 0) { if (t->type != *type) printk(KERN_ERR "wis-sony-tuner: type already " "set to %d, ignoring request for %d\n", t->type, *type); break; } t->type = *type; switch (t->type) { case TUNER_SONY_BTF_PG472Z: switch (force_band_str[0]) { case 'b': case 'B': case 'g': case 'G': printk(KERN_INFO "wis-sony-tuner: forcing " "tuner to PAL-B/G bands\n"); force_band = V4L2_STD_PAL_BG; break; case 'i': case 'I': printk(KERN_INFO "wis-sony-tuner: forcing " "tuner to PAL-I band\n"); force_band = V4L2_STD_PAL_I; break; case 'd': case 'D': case 'k': case 'K': printk(KERN_INFO "wis-sony-tuner: forcing " "tuner to PAL-D/K bands\n"); force_band = V4L2_STD_PAL_I; break; case 'l': case 'L': printk(KERN_INFO "wis-sony-tuner: forcing " "tuner to SECAM-L band\n"); force_band = V4L2_STD_SECAM_L; break; default: force_band = 0; break; } if (force_band) t->std = force_band; else t->std = V4L2_STD_PAL_BG; set_if(client); break; case TUNER_SONY_BTF_PK467Z: t->std = V4L2_STD_NTSC_M_JP; break; case TUNER_SONY_BTF_PB463Z: t->std = V4L2_STD_NTSC_M; break; default: printk(KERN_ERR "wis-sony-tuner: tuner type %d is not " "supported by this module\n", *type); break; } if (type >= 0) printk(KERN_INFO "wis-sony-tuner: type set to %d (%s)\n", t->type, sony_tuners[t->type - 200].name); break; } #endif case VIDIOC_G_FREQUENCY: { struct v4l2_frequency *f = arg; f->frequency = t->freq; break; } case VIDIOC_S_FREQUENCY: { struct v4l2_frequency *f = arg; t->freq 
= f->frequency; set_freq(client, t->freq); break; } case VIDIOC_ENUMSTD: { struct v4l2_standard *std = arg; switch (t->type) { case TUNER_SONY_BTF_PG472Z: switch (std->index) { case 0: v4l2_video_std_construct(std, V4L2_STD_PAL_BG, "PAL-B/G"); break; case 1: v4l2_video_std_construct(std, V4L2_STD_PAL_I, "PAL-I"); break; case 2: v4l2_video_std_construct(std, V4L2_STD_PAL_DK, "PAL-D/K"); break; case 3: v4l2_video_std_construct(std, V4L2_STD_SECAM_L, "SECAM-L"); break; default: std->id = 0; /* hack to indicate EINVAL */ break; } break; case TUNER_SONY_BTF_PK467Z: if (std->index != 0) { std->id = 0; /* hack to indicate EINVAL */ break; } v4l2_video_std_construct(std, V4L2_STD_NTSC_M_JP, "NTSC-J"); break; case TUNER_SONY_BTF_PB463Z: if (std->index != 0) { std->id = 0; /* hack to indicate EINVAL */ break; } v4l2_video_std_construct(std, V4L2_STD_NTSC_M, "NTSC"); break; } break; } case VIDIOC_G_STD: { v4l2_std_id *std = arg; *std = t->std; break; } case VIDIOC_S_STD: { v4l2_std_id *std = arg; v4l2_std_id old = t->std; switch (t->type) { case TUNER_SONY_BTF_PG472Z: if (force_band && (*std & force_band) != *std && *std != V4L2_STD_PAL && *std != V4L2_STD_SECAM) { printk(KERN_DEBUG "wis-sony-tuner: ignoring " "requested TV standard in " "favor of force_band value\n"); t->std = force_band; } else if (*std & V4L2_STD_PAL_BG) { /* default */ t->std = V4L2_STD_PAL_BG; } else if (*std & V4L2_STD_PAL_I) { t->std = V4L2_STD_PAL_I; } else if (*std & V4L2_STD_PAL_DK) { t->std = V4L2_STD_PAL_DK; } else if (*std & V4L2_STD_SECAM_L) { t->std = V4L2_STD_SECAM_L; } else { printk(KERN_ERR "wis-sony-tuner: TV standard " "not supported\n"); *std = 0; /* hack to indicate EINVAL */ break; } if (old != t->std) set_if(client); break; case TUNER_SONY_BTF_PK467Z: if (!(*std & V4L2_STD_NTSC_M_JP)) { printk(KERN_ERR "wis-sony-tuner: TV standard " "not supported\n"); *std = 0; /* hack to indicate EINVAL */ } break; case TUNER_SONY_BTF_PB463Z: if (!(*std & V4L2_STD_NTSC_M)) { printk(KERN_ERR 
"wis-sony-tuner: TV standard " "not supported\n"); *std = 0; /* hack to indicate EINVAL */ } break; } break; } case VIDIOC_QUERYSTD: { v4l2_std_id *std = arg; switch (t->type) { case TUNER_SONY_BTF_PG472Z: if (force_band) *std = force_band; else *std = V4L2_STD_PAL_BG | V4L2_STD_PAL_I | V4L2_STD_PAL_DK | V4L2_STD_SECAM_L; break; case TUNER_SONY_BTF_PK467Z: *std = V4L2_STD_NTSC_M_JP; break; case TUNER_SONY_BTF_PB463Z: *std = V4L2_STD_NTSC_M; break; } break; } case VIDIOC_G_TUNER: { struct v4l2_tuner *tun = arg; memset(tun, 0, sizeof(*tun)); strcpy(tun->name, "Television"); tun->type = V4L2_TUNER_ANALOG_TV; tun->rangelow = 0UL; /* does anything use these? */ tun->rangehigh = 0xffffffffUL; switch (t->type) { case TUNER_SONY_BTF_PG472Z: tun->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2; tun->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; break; case TUNER_SONY_BTF_PK467Z: case TUNER_SONY_BTF_PB463Z: tun->capability = V4L2_TUNER_CAP_STEREO; tun->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO; break; } tun->audmode = t->audmode; return 0; } case VIDIOC_S_TUNER: { struct v4l2_tuner *tun = arg; switch (t->type) { case TUNER_SONY_BTF_PG472Z: if (tun->audmode != t->audmode) { t->audmode = tun->audmode; mpx_setup(client); } break; case TUNER_SONY_BTF_PK467Z: case TUNER_SONY_BTF_PB463Z: break; } return 0; } default: break; } return 0; } static int wis_sony_tuner_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct i2c_adapter *adapter = client->adapter; struct wis_sony_tuner *t; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) return -ENODEV; t = kmalloc(sizeof(struct wis_sony_tuner), GFP_KERNEL); if (t == NULL) return -ENOMEM; t->type = -1; t->freq = 0; t->mpxmode = 0; t->audmode = V4L2_TUNER_MODE_STEREO; i2c_set_clientdata(client, t); printk(KERN_DEBUG "wis-sony-tuner: initializing tuner at address %d on %s\n", 
client->addr, adapter->name); return 0; } static int wis_sony_tuner_remove(struct i2c_client *client) { struct wis_sony_tuner *t = i2c_get_clientdata(client); kfree(t); return 0; } static const struct i2c_device_id wis_sony_tuner_id[] = { { "wis_sony_tuner", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, wis_sony_tuner_id); static struct i2c_driver wis_sony_tuner_driver = { .driver = { .name = "WIS Sony TV Tuner I2C driver", }, .probe = wis_sony_tuner_probe, .remove = wis_sony_tuner_remove, .command = tuner_command, .id_table = wis_sony_tuner_id, }; static int __init wis_sony_tuner_init(void) { return i2c_add_driver(&wis_sony_tuner_driver); } static void __exit wis_sony_tuner_cleanup(void) { i2c_del_driver(&wis_sony_tuner_driver); } module_init(wis_sony_tuner_init); module_exit(wis_sony_tuner_cleanup); MODULE_LICENSE("GPL v2");
gpl-2.0
andyvand/cygnewlib
newlib/libc/sys/linux/cmath/s_cacoshf.c
148
2795
/* Return arc hyperbolic cosine for float value.
   Copyright (C) 1997 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <complex.h>
#include <math.h>

#include "math_private.h"

/* Compute the complex arc hyperbolic cosine of X, handling NaN,
   infinite and zero parts explicitly before falling back to the
   identity cacosh(x) = clog(x + csqrt(x*x - 1)).  */
__complex__ float
__cacoshf (__complex__ float x)
{
  __complex__ float res;
  int rcls = fpclassify (__real__ x);
  int icls = fpclassify (__imag__ x);

  /* In glibc FP_NAN and FP_INFINITE are the smallest classification
     values, so this tests "either part is NaN or infinite".  */
  if (rcls <= FP_INFINITE || icls <= FP_INFINITE)
    {
      if (icls == FP_INFINITE)
	{
	  /* Infinite imaginary part: real result is +inf; the phase
	     depends on the class and sign of the real part.  */
	  __real__ res = HUGE_VALF;

	  if (rcls == FP_NAN)
	    __imag__ res = __nanf ("");
	  else
	    __imag__ res = __copysignf ((rcls == FP_INFINITE
					 ? (__real__ x < 0.0
					    ? M_PI - M_PI_4 : M_PI_4)
					 : M_PI_2), __imag__ x);
	}
      else if (rcls == FP_INFINITE)
	{
	  /* Infinite real part with finite (or NaN) imaginary part.  */
	  __real__ res = HUGE_VALF;

	  if (icls >= FP_ZERO)
	    /* Phase is pi for -inf, 0 for +inf, signed like imag(x).  */
	    __imag__ res = __copysignf (signbit (__real__ x) ? M_PI : 0.0,
					__imag__ x);
	  else
	    __imag__ res = __nanf ("");
	}
      else
	{
	  __real__ res = __nanf ("");
	  __imag__ res = __nanf ("");
	}
    }
  else if (rcls == FP_ZERO && icls == FP_ZERO)
    {
      /* cacosh(+-0 + +-0i) = +0 + +-(pi/2)i.  */
      __real__ res = 0.0;
      __imag__ res = __copysignf (M_PI_2, __imag__ x);
    }
  else
    {
#if 1
      /* General case: form y = x*x - 1 without cancellation in the
	 real part, then cacosh(x) = clog(x + csqrt(y)).  */
      __complex__ float y;

      __real__ y = (__real__ x - __imag__ x) * (__real__ x + __imag__ x) - 1.0;
      __imag__ y = 2.0 * __real__ x * __imag__ x;

      y = __csqrtf (y);

      __real__ y += __real__ x;
      __imag__ y += __imag__ x;

      res = __clogf (y);
#else
      /* Alternative expanded-real-arithmetic formulation (disabled).  */
      float re2 = __real__ x * __real__ x;
      float im2 = __imag__ x * __imag__ x;
      float sq = re2 - im2 - 1.0;
      float ro = __ieee754_sqrtf (sq * sq + 4 * re2 * im2);
      float a = __ieee754_sqrtf ((sq + ro) / 2.0);
      float b = __ieee754_sqrtf ((-sq + ro) / 2.0);

      __real__ res = 0.5 * __ieee754_logf (re2 + __real__ x * 2 * a
					   + im2 + __imag__ x * 2 * b + ro);
      __imag__ res = __ieee754_atan2f (__imag__ x + b, __real__ x + a);
#endif
    }

  return res;
}
weak_alias (__cacoshf, cacoshf)
gpl-2.0
t-crest/patmos-newlib
newlib/libc/sys/linux/linuxthreads/td_thr_clear_event.c
148
1919
/* Disable specific event for thread. Copyright (C) 1999 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper <drepper@cygnus.com>, 1999. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. */ #include <stddef.h> #include "thread_dbP.h" td_err_e td_thr_clear_event (th, event) const td_thrhandle_t *th; td_thr_events_t *event; { td_thr_events_t old_event; int i; LOG ("td_thr_clear_event"); /* Write the new value into the thread data structure. */ if (ps_pdread (th->th_ta_p->ph, ((char *) th->th_unique + offsetof (struct _pthread_descr_struct, p_eventbuf.eventmask)), &old_event, sizeof (td_thrhandle_t)) != PS_OK) return TD_ERR; /* XXX Other error value? */ /* Remove the set bits in. */ for (i = 0; i < TD_EVENTSIZE; ++i) old_event.event_bits[i] &= ~event->event_bits[i]; /* Write the new value into the thread data structure. */ if (ps_pdwrite (th->th_ta_p->ph, ((char *) th->th_unique + offsetof (struct _pthread_descr_struct, p_eventbuf.eventmask)), &old_event, sizeof (td_thrhandle_t)) != PS_OK) return TD_ERR; /* XXX Other error value? */ return TD_OK; }
gpl-2.0
kfazz/android_kernel_motorola_sholes
net/sunrpc/svcauth_unix.c
148
20725
#include <linux/types.h> #include <linux/sched.h> #include <linux/module.h> #include <linux/sunrpc/types.h> #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/svcsock.h> #include <linux/sunrpc/svcauth.h> #include <linux/sunrpc/gss_api.h> #include <linux/err.h> #include <linux/seq_file.h> #include <linux/hash.h> #include <linux/string.h> #include <net/sock.h> #include <net/ipv6.h> #include <linux/kernel.h> #define RPCDBG_FACILITY RPCDBG_AUTH /* * AUTHUNIX and AUTHNULL credentials are both handled here. * AUTHNULL is treated just like AUTHUNIX except that the uid/gid * are always nobody (-2). i.e. we do the same IP address checks for * AUTHNULL as for AUTHUNIX, and that is done here. */ struct unix_domain { struct auth_domain h; int addr_changes; /* other stuff later */ }; extern struct auth_ops svcauth_unix; struct auth_domain *unix_domain_find(char *name) { struct auth_domain *rv; struct unix_domain *new = NULL; rv = auth_domain_lookup(name, NULL); while(1) { if (rv) { if (new && rv != &new->h) auth_domain_put(&new->h); if (rv->flavour != &svcauth_unix) { auth_domain_put(rv); return NULL; } return rv; } new = kmalloc(sizeof(*new), GFP_KERNEL); if (new == NULL) return NULL; kref_init(&new->h.ref); new->h.name = kstrdup(name, GFP_KERNEL); if (new->h.name == NULL) { kfree(new); return NULL; } new->h.flavour = &svcauth_unix; new->addr_changes = 0; rv = auth_domain_lookup(name, &new->h); } } EXPORT_SYMBOL_GPL(unix_domain_find); static void svcauth_unix_domain_release(struct auth_domain *dom) { struct unix_domain *ud = container_of(dom, struct unix_domain, h); kfree(dom->name); kfree(ud); } /************************************************** * cache for IP address to unix_domain * as needed by AUTH_UNIX */ #define IP_HASHBITS 8 #define IP_HASHMAX (1<<IP_HASHBITS) #define IP_HASHMASK (IP_HASHMAX-1) struct ip_map { struct cache_head h; char m_class[8]; /* e.g. 
"nfsd" */ struct in6_addr m_addr; struct unix_domain *m_client; int m_add_change; }; static struct cache_head *ip_table[IP_HASHMAX]; static void ip_map_put(struct kref *kref) { struct cache_head *item = container_of(kref, struct cache_head, ref); struct ip_map *im = container_of(item, struct ip_map,h); if (test_bit(CACHE_VALID, &item->flags) && !test_bit(CACHE_NEGATIVE, &item->flags)) auth_domain_put(&im->m_client->h); kfree(im); } #if IP_HASHBITS == 8 /* hash_long on a 64 bit machine is currently REALLY BAD for * IP addresses in reverse-endian (i.e. on a little-endian machine). * So use a trivial but reliable hash instead */ static inline int hash_ip(__be32 ip) { int hash = (__force u32)ip ^ ((__force u32)ip>>16); return (hash ^ (hash>>8)) & 0xff; } #endif static inline int hash_ip6(struct in6_addr ip) { return (hash_ip(ip.s6_addr32[0]) ^ hash_ip(ip.s6_addr32[1]) ^ hash_ip(ip.s6_addr32[2]) ^ hash_ip(ip.s6_addr32[3])); } static int ip_map_match(struct cache_head *corig, struct cache_head *cnew) { struct ip_map *orig = container_of(corig, struct ip_map, h); struct ip_map *new = container_of(cnew, struct ip_map, h); return strcmp(orig->m_class, new->m_class) == 0 && ipv6_addr_equal(&orig->m_addr, &new->m_addr); } static void ip_map_init(struct cache_head *cnew, struct cache_head *citem) { struct ip_map *new = container_of(cnew, struct ip_map, h); struct ip_map *item = container_of(citem, struct ip_map, h); strcpy(new->m_class, item->m_class); ipv6_addr_copy(&new->m_addr, &item->m_addr); } static void update(struct cache_head *cnew, struct cache_head *citem) { struct ip_map *new = container_of(cnew, struct ip_map, h); struct ip_map *item = container_of(citem, struct ip_map, h); kref_get(&item->m_client->h.ref); new->m_client = item->m_client; new->m_add_change = item->m_add_change; } static struct cache_head *ip_map_alloc(void) { struct ip_map *i = kmalloc(sizeof(*i), GFP_KERNEL); if (i) return &i->h; else return NULL; } static void ip_map_request(struct cache_detail 
*cd, struct cache_head *h, char **bpp, int *blen) { char text_addr[40]; struct ip_map *im = container_of(h, struct ip_map, h); if (ipv6_addr_v4mapped(&(im->m_addr))) { snprintf(text_addr, 20, "%pI4", &im->m_addr.s6_addr32[3]); } else { snprintf(text_addr, 40, "%pI6", &im->m_addr); } qword_add(bpp, blen, im->m_class); qword_add(bpp, blen, text_addr); (*bpp)[-1] = '\n'; } static int ip_map_upcall(struct cache_detail *cd, struct cache_head *h) { return sunrpc_cache_pipe_upcall(cd, h, ip_map_request); } static struct ip_map *ip_map_lookup(char *class, struct in6_addr *addr); static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry); static int ip_map_parse(struct cache_detail *cd, char *mesg, int mlen) { /* class ipaddress [domainname] */ /* should be safe just to use the start of the input buffer * for scratch: */ char *buf = mesg; int len; int b1, b2, b3, b4, b5, b6, b7, b8; char c; char class[8]; struct in6_addr addr; int err; struct ip_map *ipmp; struct auth_domain *dom; time_t expiry; if (mesg[mlen-1] != '\n') return -EINVAL; mesg[mlen-1] = 0; /* class */ len = qword_get(&mesg, class, sizeof(class)); if (len <= 0) return -EINVAL; /* ip address */ len = qword_get(&mesg, buf, mlen); if (len <= 0) return -EINVAL; if (sscanf(buf, "%u.%u.%u.%u%c", &b1, &b2, &b3, &b4, &c) == 4) { addr.s6_addr32[0] = 0; addr.s6_addr32[1] = 0; addr.s6_addr32[2] = htonl(0xffff); addr.s6_addr32[3] = htonl((((((b1<<8)|b2)<<8)|b3)<<8)|b4); } else if (sscanf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x%c", &b1, &b2, &b3, &b4, &b5, &b6, &b7, &b8, &c) == 8) { addr.s6_addr16[0] = htons(b1); addr.s6_addr16[1] = htons(b2); addr.s6_addr16[2] = htons(b3); addr.s6_addr16[3] = htons(b4); addr.s6_addr16[4] = htons(b5); addr.s6_addr16[5] = htons(b6); addr.s6_addr16[6] = htons(b7); addr.s6_addr16[7] = htons(b8); } else return -EINVAL; expiry = get_expiry(&mesg); if (expiry ==0) return -EINVAL; /* domainname, or empty for NEGATIVE */ len = qword_get(&mesg, buf, mlen); if (len < 
0) return -EINVAL; if (len) { dom = unix_domain_find(buf); if (dom == NULL) return -ENOENT; } else dom = NULL; ipmp = ip_map_lookup(class, &addr); if (ipmp) { err = ip_map_update(ipmp, container_of(dom, struct unix_domain, h), expiry); } else err = -ENOMEM; if (dom) auth_domain_put(dom); cache_flush(); return err; } static int ip_map_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h) { struct ip_map *im; struct in6_addr addr; char *dom = "-no-domain-"; if (h == NULL) { seq_puts(m, "#class IP domain\n"); return 0; } im = container_of(h, struct ip_map, h); /* class addr domain */ ipv6_addr_copy(&addr, &im->m_addr); if (test_bit(CACHE_VALID, &h->flags) && !test_bit(CACHE_NEGATIVE, &h->flags)) dom = im->m_client->h.name; if (ipv6_addr_v4mapped(&addr)) { seq_printf(m, "%s %pI4 %s\n", im->m_class, &addr.s6_addr32[3], dom); } else { seq_printf(m, "%s %pI6 %s\n", im->m_class, &addr, dom); } return 0; } struct cache_detail ip_map_cache = { .owner = THIS_MODULE, .hash_size = IP_HASHMAX, .hash_table = ip_table, .name = "auth.unix.ip", .cache_put = ip_map_put, .cache_upcall = ip_map_upcall, .cache_parse = ip_map_parse, .cache_show = ip_map_show, .match = ip_map_match, .init = ip_map_init, .update = update, .alloc = ip_map_alloc, }; static struct ip_map *ip_map_lookup(char *class, struct in6_addr *addr) { struct ip_map ip; struct cache_head *ch; strcpy(ip.m_class, class); ipv6_addr_copy(&ip.m_addr, addr); ch = sunrpc_cache_lookup(&ip_map_cache, &ip.h, hash_str(class, IP_HASHBITS) ^ hash_ip6(*addr)); if (ch) return container_of(ch, struct ip_map, h); else return NULL; } static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry) { struct ip_map ip; struct cache_head *ch; ip.m_client = udom; ip.h.flags = 0; if (!udom) set_bit(CACHE_NEGATIVE, &ip.h.flags); else { ip.m_add_change = udom->addr_changes; /* if this is from the legacy set_client system call, * we need m_add_change to be one higher */ if (expiry == NEVER) ip.m_add_change++; 
} ip.h.expiry_time = expiry; ch = sunrpc_cache_update(&ip_map_cache, &ip.h, &ipm->h, hash_str(ipm->m_class, IP_HASHBITS) ^ hash_ip6(ipm->m_addr)); if (!ch) return -ENOMEM; cache_put(ch, &ip_map_cache); return 0; } int auth_unix_add_addr(struct in6_addr *addr, struct auth_domain *dom) { struct unix_domain *udom; struct ip_map *ipmp; if (dom->flavour != &svcauth_unix) return -EINVAL; udom = container_of(dom, struct unix_domain, h); ipmp = ip_map_lookup("nfsd", addr); if (ipmp) return ip_map_update(ipmp, udom, NEVER); else return -ENOMEM; } EXPORT_SYMBOL_GPL(auth_unix_add_addr); int auth_unix_forget_old(struct auth_domain *dom) { struct unix_domain *udom; if (dom->flavour != &svcauth_unix) return -EINVAL; udom = container_of(dom, struct unix_domain, h); udom->addr_changes++; return 0; } EXPORT_SYMBOL_GPL(auth_unix_forget_old); struct auth_domain *auth_unix_lookup(struct in6_addr *addr) { struct ip_map *ipm; struct auth_domain *rv; ipm = ip_map_lookup("nfsd", addr); if (!ipm) return NULL; if (cache_check(&ip_map_cache, &ipm->h, NULL)) return NULL; if ((ipm->m_client->addr_changes - ipm->m_add_change) >0) { if (test_and_set_bit(CACHE_NEGATIVE, &ipm->h.flags) == 0) auth_domain_put(&ipm->m_client->h); rv = NULL; } else { rv = &ipm->m_client->h; kref_get(&rv->ref); } cache_put(&ipm->h, &ip_map_cache); return rv; } EXPORT_SYMBOL_GPL(auth_unix_lookup); void svcauth_unix_purge(void) { cache_purge(&ip_map_cache); } EXPORT_SYMBOL_GPL(svcauth_unix_purge); static inline struct ip_map * ip_map_cached_get(struct svc_rqst *rqstp) { struct ip_map *ipm = NULL; struct svc_xprt *xprt = rqstp->rq_xprt; if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) { spin_lock(&xprt->xpt_lock); ipm = xprt->xpt_auth_cache; if (ipm != NULL) { if (!cache_valid(&ipm->h)) { /* * The entry has been invalidated since it was * remembered, e.g. by a second mount from the * same IP address. 
*/ xprt->xpt_auth_cache = NULL; spin_unlock(&xprt->xpt_lock); cache_put(&ipm->h, &ip_map_cache); return NULL; } cache_get(&ipm->h); } spin_unlock(&xprt->xpt_lock); } return ipm; } static inline void ip_map_cached_put(struct svc_rqst *rqstp, struct ip_map *ipm) { struct svc_xprt *xprt = rqstp->rq_xprt; if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) { spin_lock(&xprt->xpt_lock); if (xprt->xpt_auth_cache == NULL) { /* newly cached, keep the reference */ xprt->xpt_auth_cache = ipm; ipm = NULL; } spin_unlock(&xprt->xpt_lock); } if (ipm) cache_put(&ipm->h, &ip_map_cache); } void svcauth_unix_info_release(void *info) { struct ip_map *ipm = info; cache_put(&ipm->h, &ip_map_cache); } /**************************************************************************** * auth.unix.gid cache * simple cache to map a UID to a list of GIDs * because AUTH_UNIX aka AUTH_SYS has a max of 16 */ #define GID_HASHBITS 8 #define GID_HASHMAX (1<<GID_HASHBITS) #define GID_HASHMASK (GID_HASHMAX - 1) struct unix_gid { struct cache_head h; uid_t uid; struct group_info *gi; }; static struct cache_head *gid_table[GID_HASHMAX]; static void unix_gid_put(struct kref *kref) { struct cache_head *item = container_of(kref, struct cache_head, ref); struct unix_gid *ug = container_of(item, struct unix_gid, h); if (test_bit(CACHE_VALID, &item->flags) && !test_bit(CACHE_NEGATIVE, &item->flags)) put_group_info(ug->gi); kfree(ug); } static int unix_gid_match(struct cache_head *corig, struct cache_head *cnew) { struct unix_gid *orig = container_of(corig, struct unix_gid, h); struct unix_gid *new = container_of(cnew, struct unix_gid, h); return orig->uid == new->uid; } static void unix_gid_init(struct cache_head *cnew, struct cache_head *citem) { struct unix_gid *new = container_of(cnew, struct unix_gid, h); struct unix_gid *item = container_of(citem, struct unix_gid, h); new->uid = item->uid; } static void unix_gid_update(struct cache_head *cnew, struct cache_head *citem) { struct unix_gid *new = 
container_of(cnew, struct unix_gid, h); struct unix_gid *item = container_of(citem, struct unix_gid, h); get_group_info(item->gi); new->gi = item->gi; } static struct cache_head *unix_gid_alloc(void) { struct unix_gid *g = kmalloc(sizeof(*g), GFP_KERNEL); if (g) return &g->h; else return NULL; } static void unix_gid_request(struct cache_detail *cd, struct cache_head *h, char **bpp, int *blen) { char tuid[20]; struct unix_gid *ug = container_of(h, struct unix_gid, h); snprintf(tuid, 20, "%u", ug->uid); qword_add(bpp, blen, tuid); (*bpp)[-1] = '\n'; } static int unix_gid_upcall(struct cache_detail *cd, struct cache_head *h) { return sunrpc_cache_pipe_upcall(cd, h, unix_gid_request); } static struct unix_gid *unix_gid_lookup(uid_t uid); extern struct cache_detail unix_gid_cache; static int unix_gid_parse(struct cache_detail *cd, char *mesg, int mlen) { /* uid expiry Ngid gid0 gid1 ... gidN-1 */ int uid; int gids; int rv; int i; int err; time_t expiry; struct unix_gid ug, *ugp; if (mlen <= 0 || mesg[mlen-1] != '\n') return -EINVAL; mesg[mlen-1] = 0; rv = get_int(&mesg, &uid); if (rv) return -EINVAL; ug.uid = uid; expiry = get_expiry(&mesg); if (expiry == 0) return -EINVAL; rv = get_int(&mesg, &gids); if (rv || gids < 0 || gids > 8192) return -EINVAL; ug.gi = groups_alloc(gids); if (!ug.gi) return -ENOMEM; for (i = 0 ; i < gids ; i++) { int gid; rv = get_int(&mesg, &gid); err = -EINVAL; if (rv) goto out; GROUP_AT(ug.gi, i) = gid; } ugp = unix_gid_lookup(uid); if (ugp) { struct cache_head *ch; ug.h.flags = 0; ug.h.expiry_time = expiry; ch = sunrpc_cache_update(&unix_gid_cache, &ug.h, &ugp->h, hash_long(uid, GID_HASHBITS)); if (!ch) err = -ENOMEM; else { err = 0; cache_put(ch, &unix_gid_cache); } } else err = -ENOMEM; out: if (ug.gi) put_group_info(ug.gi); return err; } static int unix_gid_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h) { struct unix_gid *ug; int i; int glen; if (h == NULL) { seq_puts(m, "#uid cnt: gids...\n"); return 0; } ug = 
container_of(h, struct unix_gid, h); if (test_bit(CACHE_VALID, &h->flags) && !test_bit(CACHE_NEGATIVE, &h->flags)) glen = ug->gi->ngroups; else glen = 0; seq_printf(m, "%d %d:", ug->uid, glen); for (i = 0; i < glen; i++) seq_printf(m, " %d", GROUP_AT(ug->gi, i)); seq_printf(m, "\n"); return 0; } struct cache_detail unix_gid_cache = { .owner = THIS_MODULE, .hash_size = GID_HASHMAX, .hash_table = gid_table, .name = "auth.unix.gid", .cache_put = unix_gid_put, .cache_upcall = unix_gid_upcall, .cache_parse = unix_gid_parse, .cache_show = unix_gid_show, .match = unix_gid_match, .init = unix_gid_init, .update = unix_gid_update, .alloc = unix_gid_alloc, }; static struct unix_gid *unix_gid_lookup(uid_t uid) { struct unix_gid ug; struct cache_head *ch; ug.uid = uid; ch = sunrpc_cache_lookup(&unix_gid_cache, &ug.h, hash_long(uid, GID_HASHBITS)); if (ch) return container_of(ch, struct unix_gid, h); else return NULL; } static struct group_info *unix_gid_find(uid_t uid, struct svc_rqst *rqstp) { struct unix_gid *ug; struct group_info *gi; int ret; ug = unix_gid_lookup(uid); if (!ug) return ERR_PTR(-EAGAIN); ret = cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle); switch (ret) { case -ENOENT: return ERR_PTR(-ENOENT); case 0: gi = get_group_info(ug->gi); cache_put(&ug->h, &unix_gid_cache); return gi; default: return ERR_PTR(-EAGAIN); } } int svcauth_unix_set_client(struct svc_rqst *rqstp) { struct sockaddr_in *sin; struct sockaddr_in6 *sin6, sin6_storage; struct ip_map *ipm; struct group_info *gi; struct svc_cred *cred = &rqstp->rq_cred; switch (rqstp->rq_addr.ss_family) { case AF_INET: sin = svc_addr_in(rqstp); sin6 = &sin6_storage; ipv6_addr_set(&sin6->sin6_addr, 0, 0, htonl(0x0000FFFF), sin->sin_addr.s_addr); break; case AF_INET6: sin6 = svc_addr_in6(rqstp); break; default: BUG(); } rqstp->rq_client = NULL; if (rqstp->rq_proc == 0) return SVC_OK; ipm = ip_map_cached_get(rqstp); if (ipm == NULL) ipm = ip_map_lookup(rqstp->rq_server->sv_program->pg_class, 
&sin6->sin6_addr); if (ipm == NULL) return SVC_DENIED; switch (cache_check(&ip_map_cache, &ipm->h, &rqstp->rq_chandle)) { default: BUG(); case -EAGAIN: case -ETIMEDOUT: return SVC_DROP; case -ENOENT: return SVC_DENIED; case 0: rqstp->rq_client = &ipm->m_client->h; kref_get(&rqstp->rq_client->ref); ip_map_cached_put(rqstp, ipm); break; } gi = unix_gid_find(cred->cr_uid, rqstp); switch (PTR_ERR(gi)) { case -EAGAIN: return SVC_DROP; case -ENOENT: break; default: put_group_info(cred->cr_group_info); cred->cr_group_info = gi; } return SVC_OK; } EXPORT_SYMBOL_GPL(svcauth_unix_set_client); static int svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp) { struct kvec *argv = &rqstp->rq_arg.head[0]; struct kvec *resv = &rqstp->rq_res.head[0]; struct svc_cred *cred = &rqstp->rq_cred; cred->cr_group_info = NULL; rqstp->rq_client = NULL; if (argv->iov_len < 3*4) return SVC_GARBAGE; if (svc_getu32(argv) != 0) { dprintk("svc: bad null cred\n"); *authp = rpc_autherr_badcred; return SVC_DENIED; } if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) { dprintk("svc: bad null verf\n"); *authp = rpc_autherr_badverf; return SVC_DENIED; } /* Signal that mapping to nobody uid/gid is required */ cred->cr_uid = (uid_t) -1; cred->cr_gid = (gid_t) -1; cred->cr_group_info = groups_alloc(0); if (cred->cr_group_info == NULL) return SVC_DROP; /* kmalloc failure - client must retry */ /* Put NULL verifier */ svc_putnl(resv, RPC_AUTH_NULL); svc_putnl(resv, 0); rqstp->rq_flavor = RPC_AUTH_NULL; return SVC_OK; } static int svcauth_null_release(struct svc_rqst *rqstp) { if (rqstp->rq_client) auth_domain_put(rqstp->rq_client); rqstp->rq_client = NULL; if (rqstp->rq_cred.cr_group_info) put_group_info(rqstp->rq_cred.cr_group_info); rqstp->rq_cred.cr_group_info = NULL; return 0; /* don't drop */ } struct auth_ops svcauth_null = { .name = "null", .owner = THIS_MODULE, .flavour = RPC_AUTH_NULL, .accept = svcauth_null_accept, .release = svcauth_null_release, .set_client = 
svcauth_unix_set_client, }; static int svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp) { struct kvec *argv = &rqstp->rq_arg.head[0]; struct kvec *resv = &rqstp->rq_res.head[0]; struct svc_cred *cred = &rqstp->rq_cred; u32 slen, i; int len = argv->iov_len; cred->cr_group_info = NULL; rqstp->rq_client = NULL; if ((len -= 3*4) < 0) return SVC_GARBAGE; svc_getu32(argv); /* length */ svc_getu32(argv); /* time stamp */ slen = XDR_QUADLEN(svc_getnl(argv)); /* machname length */ if (slen > 64 || (len -= (slen + 3)*4) < 0) goto badcred; argv->iov_base = (void*)((__be32*)argv->iov_base + slen); /* skip machname */ argv->iov_len -= slen*4; cred->cr_uid = svc_getnl(argv); /* uid */ cred->cr_gid = svc_getnl(argv); /* gid */ slen = svc_getnl(argv); /* gids length */ if (slen > 16 || (len -= (slen + 2)*4) < 0) goto badcred; cred->cr_group_info = groups_alloc(slen); if (cred->cr_group_info == NULL) return SVC_DROP; for (i = 0; i < slen; i++) GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv); if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) { *authp = rpc_autherr_badverf; return SVC_DENIED; } /* Put NULL verifier */ svc_putnl(resv, RPC_AUTH_NULL); svc_putnl(resv, 0); rqstp->rq_flavor = RPC_AUTH_UNIX; return SVC_OK; badcred: *authp = rpc_autherr_badcred; return SVC_DENIED; } static int svcauth_unix_release(struct svc_rqst *rqstp) { /* Verifier (such as it is) is already in place. */ if (rqstp->rq_client) auth_domain_put(rqstp->rq_client); rqstp->rq_client = NULL; if (rqstp->rq_cred.cr_group_info) put_group_info(rqstp->rq_cred.cr_group_info); rqstp->rq_cred.cr_group_info = NULL; return 0; } struct auth_ops svcauth_unix = { .name = "unix", .owner = THIS_MODULE, .flavour = RPC_AUTH_UNIX, .accept = svcauth_unix_accept, .release = svcauth_unix_release, .domain_release = svcauth_unix_domain_release, .set_client = svcauth_unix_set_client, };
gpl-2.0
szyusong/linux-at91
lib/string_helpers.c
404
10277
/* * Helpers for formatting and printing strings * * Copyright 31 August 2008 James Bottomley * Copyright (C) 2013, Intel Corporation */ #include <linux/kernel.h> #include <linux/math64.h> #include <linux/export.h> #include <linux/ctype.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/string_helpers.h> /** * string_get_size - get the size in the specified units * @size: The size to be converted * @units: units to use (powers of 1000 or 1024) * @buf: buffer to format to * @len: length of buffer * * This function returns a string formatted to 3 significant figures * giving the size in the required units. Returns 0 on success or * error on failure. @buf is always zero terminated. * */ int string_get_size(u64 size, const enum string_size_units units, char *buf, int len) { static const char *const units_10[] = { "B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB", NULL }; static const char *const units_2[] = { "B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", NULL }; static const char *const *const units_str[] = { [STRING_UNITS_10] = units_10, [STRING_UNITS_2] = units_2, }; static const unsigned int divisor[] = { [STRING_UNITS_10] = 1000, [STRING_UNITS_2] = 1024, }; int i, j; u64 remainder = 0, sf_cap; char tmp[8]; tmp[0] = '\0'; i = 0; if (size >= divisor[units]) { while (size >= divisor[units] && units_str[units][i]) { remainder = do_div(size, divisor[units]); i++; } sf_cap = size; for (j = 0; sf_cap*10 < 1000; j++) sf_cap *= 10; if (j) { remainder *= 1000; do_div(remainder, divisor[units]); snprintf(tmp, sizeof(tmp), ".%03lld", (unsigned long long)remainder); tmp[j+1] = '\0'; } } snprintf(buf, len, "%lld%s %s", (unsigned long long)size, tmp, units_str[units][i]); return 0; } EXPORT_SYMBOL(string_get_size); static bool unescape_space(char **src, char **dst) { char *p = *dst, *q = *src; switch (*q) { case 'n': *p = '\n'; break; case 'r': *p = '\r'; break; case 't': *p = '\t'; break; case 'v': *p = '\v'; break; case 'f': *p = '\f'; break; 
default: return false; } *dst += 1; *src += 1; return true; } static bool unescape_octal(char **src, char **dst) { char *p = *dst, *q = *src; u8 num; if (isodigit(*q) == 0) return false; num = (*q++) & 7; while (num < 32 && isodigit(*q) && (q - *src < 3)) { num <<= 3; num += (*q++) & 7; } *p = num; *dst += 1; *src = q; return true; } static bool unescape_hex(char **src, char **dst) { char *p = *dst, *q = *src; int digit; u8 num; if (*q++ != 'x') return false; num = digit = hex_to_bin(*q++); if (digit < 0) return false; digit = hex_to_bin(*q); if (digit >= 0) { q++; num = (num << 4) | digit; } *p = num; *dst += 1; *src = q; return true; } static bool unescape_special(char **src, char **dst) { char *p = *dst, *q = *src; switch (*q) { case '\"': *p = '\"'; break; case '\\': *p = '\\'; break; case 'a': *p = '\a'; break; case 'e': *p = '\e'; break; default: return false; } *dst += 1; *src += 1; return true; } /** * string_unescape - unquote characters in the given string * @src: source buffer (escaped) * @dst: destination buffer (unescaped) * @size: size of the destination buffer (0 to unlimit) * @flags: combination of the flags (bitwise OR): * %UNESCAPE_SPACE: * '\f' - form feed * '\n' - new line * '\r' - carriage return * '\t' - horizontal tab * '\v' - vertical tab * %UNESCAPE_OCTAL: * '\NNN' - byte with octal value NNN (1 to 3 digits) * %UNESCAPE_HEX: * '\xHH' - byte with hexadecimal value HH (1 to 2 digits) * %UNESCAPE_SPECIAL: * '\"' - double quote * '\\' - backslash * '\a' - alert (BEL) * '\e' - escape * %UNESCAPE_ANY: * all previous together * * Description: * The function unquotes characters in the given string. * * Because the size of the output will be the same as or less than the size of * the input, the transformation may be performed in place. * * Caller must provide valid source and destination pointers. Be aware that * destination buffer will always be NULL-terminated. Source string must be * NULL-terminated as well. 
* * Return: * The amount of the characters processed to the destination buffer excluding * trailing '\0' is returned. */ int string_unescape(char *src, char *dst, size_t size, unsigned int flags) { char *out = dst; while (*src && --size) { if (src[0] == '\\' && src[1] != '\0' && size > 1) { src++; size--; if (flags & UNESCAPE_SPACE && unescape_space(&src, &out)) continue; if (flags & UNESCAPE_OCTAL && unescape_octal(&src, &out)) continue; if (flags & UNESCAPE_HEX && unescape_hex(&src, &out)) continue; if (flags & UNESCAPE_SPECIAL && unescape_special(&src, &out)) continue; *out++ = '\\'; } *out++ = *src++; } *out = '\0'; return out - dst; } EXPORT_SYMBOL(string_unescape); static int escape_passthrough(unsigned char c, char **dst, size_t *osz) { char *out = *dst; if (*osz < 1) return -ENOMEM; *out++ = c; *dst = out; *osz -= 1; return 1; } static int escape_space(unsigned char c, char **dst, size_t *osz) { char *out = *dst; unsigned char to; if (*osz < 2) return -ENOMEM; switch (c) { case '\n': to = 'n'; break; case '\r': to = 'r'; break; case '\t': to = 't'; break; case '\v': to = 'v'; break; case '\f': to = 'f'; break; default: return 0; } *out++ = '\\'; *out++ = to; *dst = out; *osz -= 2; return 1; } static int escape_special(unsigned char c, char **dst, size_t *osz) { char *out = *dst; unsigned char to; if (*osz < 2) return -ENOMEM; switch (c) { case '\\': to = '\\'; break; case '\a': to = 'a'; break; case '\e': to = 'e'; break; default: return 0; } *out++ = '\\'; *out++ = to; *dst = out; *osz -= 2; return 1; } static int escape_null(unsigned char c, char **dst, size_t *osz) { char *out = *dst; if (*osz < 2) return -ENOMEM; if (c) return 0; *out++ = '\\'; *out++ = '0'; *dst = out; *osz -= 2; return 1; } static int escape_octal(unsigned char c, char **dst, size_t *osz) { char *out = *dst; if (*osz < 4) return -ENOMEM; *out++ = '\\'; *out++ = ((c >> 6) & 0x07) + '0'; *out++ = ((c >> 3) & 0x07) + '0'; *out++ = ((c >> 0) & 0x07) + '0'; *dst = out; *osz -= 4; return 1; 
} static int escape_hex(unsigned char c, char **dst, size_t *osz) { char *out = *dst; if (*osz < 4) return -ENOMEM; *out++ = '\\'; *out++ = 'x'; *out++ = hex_asc_hi(c); *out++ = hex_asc_lo(c); *dst = out; *osz -= 4; return 1; } /** * string_escape_mem - quote characters in the given memory buffer * @src: source buffer (unescaped) * @isz: source buffer size * @dst: destination buffer (escaped) * @osz: destination buffer size * @flags: combination of the flags (bitwise OR): * %ESCAPE_SPACE: * '\f' - form feed * '\n' - new line * '\r' - carriage return * '\t' - horizontal tab * '\v' - vertical tab * %ESCAPE_SPECIAL: * '\\' - backslash * '\a' - alert (BEL) * '\e' - escape * %ESCAPE_NULL: * '\0' - null * %ESCAPE_OCTAL: * '\NNN' - byte with octal value NNN (3 digits) * %ESCAPE_ANY: * all previous together * %ESCAPE_NP: * escape only non-printable characters (checked by isprint) * %ESCAPE_ANY_NP: * all previous together * %ESCAPE_HEX: * '\xHH' - byte with hexadecimal value HH (2 digits) * @esc: NULL-terminated string of characters any of which, if found in * the source, has to be escaped * * Description: * The process of escaping byte buffer includes several parts. They are applied * in the following sequence. * 1. The character is matched to the printable class, if asked, and in * case of match it passes through to the output. * 2. The character is not matched to the one from @esc string and thus * must go as is to the output. * 3. The character is checked if it falls into the class given by @flags. * %ESCAPE_OCTAL and %ESCAPE_HEX are going last since they cover any * character. Note that they actually can't go together, otherwise * %ESCAPE_HEX will be ignored. * * Caller must provide valid source and destination pointers. Be aware that * destination buffer will not be NULL-terminated, thus caller have to append * it if needs. 
* * Return: * The amount of the characters processed to the destination buffer, or * %-ENOMEM if the size of buffer is not enough to put an escaped character is * returned. * * Even in the case of error @dst pointer will be updated to point to the byte * after the last processed character. */ int string_escape_mem(const char *src, size_t isz, char **dst, size_t osz, unsigned int flags, const char *esc) { char *out = *dst, *p = out; bool is_dict = esc && *esc; int ret = 0; while (isz--) { unsigned char c = *src++; /* * Apply rules in the following sequence: * - the character is printable, when @flags has * %ESCAPE_NP bit set * - the @esc string is supplied and does not contain a * character under question * - the character doesn't fall into a class of symbols * defined by given @flags * In these cases we just pass through a character to the * output buffer. */ if ((flags & ESCAPE_NP && isprint(c)) || (is_dict && !strchr(esc, c))) { /* do nothing */ } else { if (flags & ESCAPE_SPACE) { ret = escape_space(c, &p, &osz); if (ret < 0) break; if (ret > 0) continue; } if (flags & ESCAPE_SPECIAL) { ret = escape_special(c, &p, &osz); if (ret < 0) break; if (ret > 0) continue; } if (flags & ESCAPE_NULL) { ret = escape_null(c, &p, &osz); if (ret < 0) break; if (ret > 0) continue; } /* ESCAPE_OCTAL and ESCAPE_HEX always go last */ if (flags & ESCAPE_OCTAL) { ret = escape_octal(c, &p, &osz); if (ret < 0) break; continue; } if (flags & ESCAPE_HEX) { ret = escape_hex(c, &p, &osz); if (ret < 0) break; continue; } } ret = escape_passthrough(c, &p, &osz); if (ret < 0) break; } *dst = p; if (ret < 0) return ret; return p - out; } EXPORT_SYMBOL(string_escape_mem);
gpl-2.0
iwinoto/v4l-media_build-devel
media/crypto/lz4hc.c
916
2601
/* * Cryptographic API. * * Copyright (c) 2013 Chanho Min <chanho.min@lge.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/init.h> #include <linux/module.h> #include <linux/crypto.h> #include <linux/vmalloc.h> #include <linux/lz4.h> struct lz4hc_ctx { void *lz4hc_comp_mem; }; static int lz4hc_init(struct crypto_tfm *tfm) { struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); ctx->lz4hc_comp_mem = vmalloc(LZ4HC_MEM_COMPRESS); if (!ctx->lz4hc_comp_mem) return -ENOMEM; return 0; } static void lz4hc_exit(struct crypto_tfm *tfm) { struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); vfree(ctx->lz4hc_comp_mem); } static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); size_t tmp_len = *dlen; int err; err = lz4hc_compress(src, slen, dst, &tmp_len, ctx->lz4hc_comp_mem); if (err < 0) return -EINVAL; *dlen = tmp_len; return 0; } static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { int err; size_t tmp_len = *dlen; size_t __slen = slen; err = lz4_decompress(src, &__slen, dst, tmp_len); if (err < 0) return -EINVAL; *dlen = tmp_len; return err; } static struct crypto_alg alg_lz4hc = { .cra_name = "lz4hc", .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, .cra_ctxsize = sizeof(struct lz4hc_ctx), .cra_module = THIS_MODULE, 
.cra_list = LIST_HEAD_INIT(alg_lz4hc.cra_list), .cra_init = lz4hc_init, .cra_exit = lz4hc_exit, .cra_u = { .compress = { .coa_compress = lz4hc_compress_crypto, .coa_decompress = lz4hc_decompress_crypto } } }; static int __init lz4hc_mod_init(void) { return crypto_register_alg(&alg_lz4hc); } static void __exit lz4hc_mod_fini(void) { crypto_unregister_alg(&alg_lz4hc); } module_init(lz4hc_mod_init); module_exit(lz4hc_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("LZ4HC Compression Algorithm");
gpl-2.0
zombi-x/android_kernel_lge_hammerhead
fs/buffer.c
1172
88319
/* * linux/fs/buffer.c * * Copyright (C) 1991, 1992, 2002 Linus Torvalds */ /* * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95 * * Removed a lot of unnecessary code and simplified things now that * the buffer cache isn't our primary cache - Andrew Tridgell 12/96 * * Speed up hash, lru, and free list operations. Use gfp() for allocating * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM * * Added 32k buffer block sizes - these are required older ARM systems. - RMK * * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de> */ #include <linux/kernel.h> #include <linux/syscalls.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/percpu.h> #include <linux/slab.h> #include <linux/capability.h> #include <linux/blkdev.h> #include <linux/file.h> #include <linux/quotaops.h> #include <linux/highmem.h> #include <linux/export.h> #include <linux/writeback.h> #include <linux/hash.h> #include <linux/suspend.h> #include <linux/buffer_head.h> #include <linux/task_io_accounting_ops.h> #include <linux/bio.h> #include <linux/notifier.h> #include <linux/cpu.h> #include <linux/bitops.h> #include <linux/mpage.h> #include <linux/bit_spinlock.h> static int fsync_buffers_list(spinlock_t *lock, struct list_head *list); #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers) inline void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private) { bh->b_end_io = handler; bh->b_private = private; } EXPORT_SYMBOL(init_buffer); static int sleep_on_buffer(void *word) { io_schedule(); return 0; } void __lock_buffer(struct buffer_head *bh) { wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE); } EXPORT_SYMBOL(__lock_buffer); void unlock_buffer(struct buffer_head *bh) { clear_bit_unlock(BH_Lock, &bh->b_state); smp_mb__after_clear_bit(); wake_up_bit(&bh->b_state, BH_Lock); } EXPORT_SYMBOL(unlock_buffer); /* * Block until a buffer comes unlocked. 
This doesn't stop it * from becoming locked again - you have to lock it yourself * if you want to preserve its state. */ void __wait_on_buffer(struct buffer_head * bh) { wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE); } EXPORT_SYMBOL(__wait_on_buffer); static void __clear_page_buffers(struct page *page) { ClearPagePrivate(page); set_page_private(page, 0); page_cache_release(page); } static int quiet_error(struct buffer_head *bh) { if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit()) return 0; return 1; } static void buffer_io_error(struct buffer_head *bh) { char b[BDEVNAME_SIZE]; printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n", bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr); } /* * End-of-IO handler helper function which does not touch the bh after * unlocking it. * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but * a race there is benign: unlock_buffer() only use the bh's address for * hashing after unlocking the buffer, so it doesn't actually touch the bh * itself. */ static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate) { if (uptodate) { set_buffer_uptodate(bh); } else { /* This happens, due to failed READA attempts. */ clear_buffer_uptodate(bh); } unlock_buffer(bh); } /* * Default synchronous end-of-IO handler.. Just mark it up-to-date and * unlock the buffer. This is what ll_rw_block uses too. 
*/ void end_buffer_read_sync(struct buffer_head *bh, int uptodate) { __end_buffer_read_notouch(bh, uptodate); put_bh(bh); } EXPORT_SYMBOL(end_buffer_read_sync); void end_buffer_write_sync(struct buffer_head *bh, int uptodate) { char b[BDEVNAME_SIZE]; if (uptodate) { set_buffer_uptodate(bh); } else { if (!quiet_error(bh)) { buffer_io_error(bh); printk(KERN_WARNING "lost page write due to " "I/O error on %s\n", bdevname(bh->b_bdev, b)); } set_buffer_write_io_error(bh); clear_buffer_uptodate(bh); } unlock_buffer(bh); put_bh(bh); } EXPORT_SYMBOL(end_buffer_write_sync); /* * Various filesystems appear to want __find_get_block to be non-blocking. * But it's the page lock which protects the buffers. To get around this, * we get exclusion from try_to_free_buffers with the blockdev mapping's * private_lock. * * Hack idea: for the blockdev mapping, i_bufferlist_lock contention * may be quite high. This code could TryLock the page, and if that * succeeds, there is no need to take private_lock. (But if * private_lock is contended then so is mapping->tree_lock). */ static struct buffer_head * __find_get_block_slow(struct block_device *bdev, sector_t block) { struct inode *bd_inode = bdev->bd_inode; struct address_space *bd_mapping = bd_inode->i_mapping; struct buffer_head *ret = NULL; pgoff_t index; struct buffer_head *bh; struct buffer_head *head; struct page *page; int all_mapped = 1; index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits); page = find_get_page(bd_mapping, index); if (!page) goto out; spin_lock(&bd_mapping->private_lock); if (!page_has_buffers(page)) goto out_unlock; head = page_buffers(page); bh = head; do { if (!buffer_mapped(bh)) all_mapped = 0; else if (bh->b_blocknr == block) { ret = bh; get_bh(bh); goto out_unlock; } bh = bh->b_this_page; } while (bh != head); /* we might be here because some of the buffers on this page are * not mapped. This is due to various races between * file io on the block device and getblk. 
It gets dealt with * elsewhere, don't buffer_error if we had some unmapped buffers */ if (all_mapped) { char b[BDEVNAME_SIZE]; printk("__find_get_block_slow() failed. " "block=%llu, b_blocknr=%llu\n", (unsigned long long)block, (unsigned long long)bh->b_blocknr); printk("b_state=0x%08lx, b_size=%zu\n", bh->b_state, bh->b_size); printk("device %s blocksize: %d\n", bdevname(bdev, b), 1 << bd_inode->i_blkbits); } out_unlock: spin_unlock(&bd_mapping->private_lock); page_cache_release(page); out: return ret; } /* * Kick the writeback threads then try to free up some ZONE_NORMAL memory. */ static void free_more_memory(void) { struct zone *zone; int nid; wakeup_flusher_threads(1024, WB_REASON_FREE_MORE_MEM); yield(); for_each_online_node(nid) { (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS), gfp_zone(GFP_NOFS), NULL, &zone); if (zone) try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0, GFP_NOFS, NULL); } } /* * I/O completion handler for block_read_full_page() - pages * which come unlocked at the end of I/O. */ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) { unsigned long flags; struct buffer_head *first; struct buffer_head *tmp; struct page *page; int page_uptodate = 1; BUG_ON(!buffer_async_read(bh)); page = bh->b_page; if (uptodate) { set_buffer_uptodate(bh); } else { clear_buffer_uptodate(bh); if (!quiet_error(bh)) buffer_io_error(bh); SetPageError(page); } /* * Be _very_ careful from here on. Bad things can happen if * two buffer heads end IO at almost the same time and both * decide that the page is now completely done. 
*/ first = page_buffers(page); local_irq_save(flags); bit_spin_lock(BH_Uptodate_Lock, &first->b_state); clear_buffer_async_read(bh); unlock_buffer(bh); tmp = bh; do { if (!buffer_uptodate(tmp)) page_uptodate = 0; if (buffer_async_read(tmp)) { BUG_ON(!buffer_locked(tmp)); goto still_busy; } tmp = tmp->b_this_page; } while (tmp != bh); bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); local_irq_restore(flags); /* * If none of the buffers had errors and they are all * uptodate then we can set the page uptodate. */ if (page_uptodate && !PageError(page)) SetPageUptodate(page); unlock_page(page); return; still_busy: bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); local_irq_restore(flags); return; } /* * Completion handler for block_write_full_page() - pages which are unlocked * during I/O, and which have PageWriteback cleared upon I/O completion. */ void end_buffer_async_write(struct buffer_head *bh, int uptodate) { char b[BDEVNAME_SIZE]; unsigned long flags; struct buffer_head *first; struct buffer_head *tmp; struct page *page; BUG_ON(!buffer_async_write(bh)); page = bh->b_page; if (uptodate) { set_buffer_uptodate(bh); } else { if (!quiet_error(bh)) { buffer_io_error(bh); printk(KERN_WARNING "lost page write due to " "I/O error on %s\n", bdevname(bh->b_bdev, b)); } set_bit(AS_EIO, &page->mapping->flags); set_buffer_write_io_error(bh); clear_buffer_uptodate(bh); SetPageError(page); } first = page_buffers(page); local_irq_save(flags); bit_spin_lock(BH_Uptodate_Lock, &first->b_state); clear_buffer_async_write(bh); unlock_buffer(bh); tmp = bh->b_this_page; while (tmp != bh) { if (buffer_async_write(tmp)) { BUG_ON(!buffer_locked(tmp)); goto still_busy; } tmp = tmp->b_this_page; } bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); local_irq_restore(flags); end_page_writeback(page); return; still_busy: bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); local_irq_restore(flags); return; } EXPORT_SYMBOL(end_buffer_async_write); /* * If a page's buffers are under async 
readin (end_buffer_async_read * completion) then there is a possibility that another thread of * control could lock one of the buffers after it has completed * but while some of the other buffers have not completed. This * locked buffer would confuse end_buffer_async_read() into not unlocking * the page. So the absence of BH_Async_Read tells end_buffer_async_read() * that this buffer is not under async I/O. * * The page comes unlocked when it has no locked buffer_async buffers * left. * * PageLocked prevents anyone starting new async I/O reads any of * the buffers. * * PageWriteback is used to prevent simultaneous writeout of the same * page. * * PageLocked prevents anyone from starting writeback of a page which is * under read I/O (PageWriteback is only ever set against a locked page). */ static void mark_buffer_async_read(struct buffer_head *bh) { bh->b_end_io = end_buffer_async_read; set_buffer_async_read(bh); } static void mark_buffer_async_write_endio(struct buffer_head *bh, bh_end_io_t *handler) { bh->b_end_io = handler; set_buffer_async_write(bh); } void mark_buffer_async_write(struct buffer_head *bh) { mark_buffer_async_write_endio(bh, end_buffer_async_write); } EXPORT_SYMBOL(mark_buffer_async_write); /* * fs/buffer.c contains helper functions for buffer-backed address space's * fsync functions. A common requirement for buffer-based filesystems is * that certain data from the backing blockdev needs to be written out for * a successful fsync(). For example, ext2 indirect blocks need to be * written back and waited upon before fsync() returns. * * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(), * inode_has_buffers() and invalidate_inode_buffers() are provided for the * management of a list of dependent buffers at ->i_mapping->private_list. * * Locking is a little subtle: try_to_free_buffers() will remove buffers * from their controlling inode's queue when they are being freed. 
But * try_to_free_buffers() will be operating against the *blockdev* mapping * at the time, not against the S_ISREG file which depends on those buffers. * So the locking for private_list is via the private_lock in the address_space * which backs the buffers. Which is different from the address_space * against which the buffers are listed. So for a particular address_space, * mapping->private_lock does *not* protect mapping->private_list! In fact, * mapping->private_list will always be protected by the backing blockdev's * ->private_lock. * * Which introduces a requirement: all buffers on an address_space's * ->private_list must be from the same address_space: the blockdev's. * * address_spaces which do not place buffers at ->private_list via these * utility functions are free to use private_lock and private_list for * whatever they want. The only requirement is that list_empty(private_list) * be true at clear_inode() time. * * FIXME: clear_inode should not call invalidate_inode_buffers(). The * filesystems should do that. invalidate_inode_buffers() should just go * BUG_ON(!list_empty). * * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should * take an address_space, not an inode. And it should be called * mark_buffer_dirty_fsync() to clearly define why those buffers are being * queued up. * * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the * list if it is already on a list. Because if the buffer is on a list, * it *must* already be on the right one. If not, the filesystem is being * silly. This will save a ton of locking. But first we have to ensure * that buffers are taken *off* the old inode's list when they are freed * (presumably in truncate). That requires careful auditing of all * filesystems (do it inside bforget()). It could also be done by bringing * b_inode back. 
*/ /* * The buffer's backing address_space's private_lock must be held */ static void __remove_assoc_queue(struct buffer_head *bh) { list_del_init(&bh->b_assoc_buffers); WARN_ON(!bh->b_assoc_map); if (buffer_write_io_error(bh)) set_bit(AS_EIO, &bh->b_assoc_map->flags); bh->b_assoc_map = NULL; } int inode_has_buffers(struct inode *inode) { return !list_empty(&inode->i_data.private_list); } /* * osync is designed to support O_SYNC io. It waits synchronously for * all already-submitted IO to complete, but does not queue any new * writes to the disk. * * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as * you dirty the buffers, and then use osync_inode_buffers to wait for * completion. Any other dirty buffers which are not yet queued for * write will not be flushed to disk by the osync. */ static int osync_buffers_list(spinlock_t *lock, struct list_head *list) { struct buffer_head *bh; struct list_head *p; int err = 0; spin_lock(lock); repeat: list_for_each_prev(p, list) { bh = BH_ENTRY(p); if (buffer_locked(bh)) { get_bh(bh); spin_unlock(lock); wait_on_buffer(bh); if (!buffer_uptodate(bh)) err = -EIO; brelse(bh); spin_lock(lock); goto repeat; } } spin_unlock(lock); return err; } static void do_thaw_one(struct super_block *sb, void *unused) { char b[BDEVNAME_SIZE]; while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb)) printk(KERN_WARNING "Emergency Thaw on %s\n", bdevname(sb->s_bdev, b)); } static void do_thaw_all(struct work_struct *work) { iterate_supers(do_thaw_one, NULL); kfree(work); printk(KERN_WARNING "Emergency Thaw complete\n"); } /** * emergency_thaw_all -- forcibly thaw every frozen filesystem * * Used for emergency unfreeze of all filesystems via SysRq */ void emergency_thaw_all(void) { struct work_struct *work; work = kmalloc(sizeof(*work), GFP_ATOMIC); if (work) { INIT_WORK(work, do_thaw_all); schedule_work(work); } } /** * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers * @mapping: the mapping which wants 
those buffers written * * Starts I/O against the buffers at mapping->private_list, and waits upon * that I/O. * * Basically, this is a convenience function for fsync(). * @mapping is a file or directory which needs those buffers to be written for * a successful fsync(). */ int sync_mapping_buffers(struct address_space *mapping) { struct address_space *buffer_mapping = mapping->assoc_mapping; if (buffer_mapping == NULL || list_empty(&mapping->private_list)) return 0; return fsync_buffers_list(&buffer_mapping->private_lock, &mapping->private_list); } EXPORT_SYMBOL(sync_mapping_buffers); /* * Called when we've recently written block `bblock', and it is known that * `bblock' was for a buffer_boundary() buffer. This means that the block at * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's * dirty, schedule it for IO. So that indirects merge nicely with their data. */ void write_boundary_block(struct block_device *bdev, sector_t bblock, unsigned blocksize) { struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize); if (bh) { if (buffer_dirty(bh)) ll_rw_block(WRITE, 1, &bh); put_bh(bh); } } void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode) { struct address_space *mapping = inode->i_mapping; struct address_space *buffer_mapping = bh->b_page->mapping; mark_buffer_dirty(bh); if (!mapping->assoc_mapping) { mapping->assoc_mapping = buffer_mapping; } else { BUG_ON(mapping->assoc_mapping != buffer_mapping); } if (!bh->b_assoc_map) { spin_lock(&buffer_mapping->private_lock); list_move_tail(&bh->b_assoc_buffers, &mapping->private_list); bh->b_assoc_map = mapping; spin_unlock(&buffer_mapping->private_lock); } } EXPORT_SYMBOL(mark_buffer_dirty_inode); /* * Mark the page dirty, and set it dirty in the radix tree, and mark the inode * dirty. * * If warn is true, then emit a warning if the page is not uptodate and has * not been truncated. 
*/ static void __set_page_dirty(struct page *page, struct address_space *mapping, int warn) { spin_lock_irq(&mapping->tree_lock); if (page->mapping) { /* Race with truncate? */ WARN_ON_ONCE(warn && !PageUptodate(page)); account_page_dirtied(page, mapping); radix_tree_tag_set(&mapping->page_tree, page_index(page), PAGECACHE_TAG_DIRTY); } spin_unlock_irq(&mapping->tree_lock); __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); } /* * Add a page to the dirty page list. * * It is a sad fact of life that this function is called from several places * deeply under spinlocking. It may not sleep. * * If the page has buffers, the uptodate buffers are set dirty, to preserve * dirty-state coherency between the page and the buffers. It the page does * not have buffers then when they are later attached they will all be set * dirty. * * The buffers are dirtied before the page is dirtied. There's a small race * window in which a writepage caller may see the page cleanness but not the * buffer dirtiness. That's fine. If this code were to set the page dirty * before the buffers, a concurrent writepage caller could clear the page dirty * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean * page on the dirty page list. * * We use private_lock to lock against try_to_free_buffers while using the * page's buffer list. Also use this to protect against clean buffers being * added to the page after it was set dirty. * * FIXME: may need to call ->reservepage here as well. That's rather up to the * address_space though. 
*/ int __set_page_dirty_buffers(struct page *page) { int newly_dirty; struct address_space *mapping = page_mapping(page); if (unlikely(!mapping)) return !TestSetPageDirty(page); spin_lock(&mapping->private_lock); if (page_has_buffers(page)) { struct buffer_head *head = page_buffers(page); struct buffer_head *bh = head; do { set_buffer_dirty(bh); bh = bh->b_this_page; } while (bh != head); } newly_dirty = !TestSetPageDirty(page); spin_unlock(&mapping->private_lock); if (newly_dirty) __set_page_dirty(page, mapping, 1); return newly_dirty; } EXPORT_SYMBOL(__set_page_dirty_buffers); /* * Write out and wait upon a list of buffers. * * We have conflicting pressures: we want to make sure that all * initially dirty buffers get waited on, but that any subsequently * dirtied buffers don't. After all, we don't want fsync to last * forever if somebody is actively writing to the file. * * Do this in two main stages: first we copy dirty buffers to a * temporary inode list, queueing the writes as we go. Then we clean * up, waiting for those writes to complete. * * During this second stage, any subsequent updates to the file may end * up refiling the buffer on the original inode's dirty list again, so * there is a chance we will end up with a buffer queued for write but * not yet completed on that list. So, as a final cleanup we go through * the osync code to catch these locked, dirty buffers without requeuing * any newly dirty buffers for write. 
*/ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list) { struct buffer_head *bh; struct list_head tmp; struct address_space *mapping; int err = 0, err2; struct blk_plug plug; INIT_LIST_HEAD(&tmp); blk_start_plug(&plug); spin_lock(lock); while (!list_empty(list)) { bh = BH_ENTRY(list->next); mapping = bh->b_assoc_map; __remove_assoc_queue(bh); /* Avoid race with mark_buffer_dirty_inode() which does * a lockless check and we rely on seeing the dirty bit */ smp_mb(); if (buffer_dirty(bh) || buffer_locked(bh)) { list_add(&bh->b_assoc_buffers, &tmp); bh->b_assoc_map = mapping; if (buffer_dirty(bh)) { get_bh(bh); spin_unlock(lock); /* * Ensure any pending I/O completes so that * write_dirty_buffer() actually writes the * current contents - it is a noop if I/O is * still in flight on potentially older * contents. */ write_dirty_buffer(bh, WRITE_SYNC); /* * Kick off IO for the previous mapping. Note * that we will not run the very last mapping, * wait_on_buffer() will do that for us * through sync_buffer(). */ brelse(bh); spin_lock(lock); } } } spin_unlock(lock); blk_finish_plug(&plug); spin_lock(lock); while (!list_empty(&tmp)) { bh = BH_ENTRY(tmp.prev); get_bh(bh); mapping = bh->b_assoc_map; __remove_assoc_queue(bh); /* Avoid race with mark_buffer_dirty_inode() which does * a lockless check and we rely on seeing the dirty bit */ smp_mb(); if (buffer_dirty(bh)) { list_add(&bh->b_assoc_buffers, &mapping->private_list); bh->b_assoc_map = mapping; } spin_unlock(lock); wait_on_buffer(bh); if (!buffer_uptodate(bh)) err = -EIO; brelse(bh); spin_lock(lock); } spin_unlock(lock); err2 = osync_buffers_list(lock, list); if (err) return err; else return err2; } /* * Invalidate any and all dirty buffers on a given inode. We are * probably unmounting the fs, but that doesn't mean we have already * done a sync(). Just drop the buffers from the inode list. * * NOTE: we take the inode's blockdev's mapping's private_lock. 
Which * assumes that all the buffers are against the blockdev. Not true * for reiserfs. */ void invalidate_inode_buffers(struct inode *inode) { if (inode_has_buffers(inode)) { struct address_space *mapping = &inode->i_data; struct list_head *list = &mapping->private_list; struct address_space *buffer_mapping = mapping->assoc_mapping; spin_lock(&buffer_mapping->private_lock); while (!list_empty(list)) __remove_assoc_queue(BH_ENTRY(list->next)); spin_unlock(&buffer_mapping->private_lock); } } EXPORT_SYMBOL(invalidate_inode_buffers); /* * Remove any clean buffers from the inode's buffer list. This is called * when we're trying to free the inode itself. Those buffers can pin it. * * Returns true if all buffers were removed. */ int remove_inode_buffers(struct inode *inode) { int ret = 1; if (inode_has_buffers(inode)) { struct address_space *mapping = &inode->i_data; struct list_head *list = &mapping->private_list; struct address_space *buffer_mapping = mapping->assoc_mapping; spin_lock(&buffer_mapping->private_lock); while (!list_empty(list)) { struct buffer_head *bh = BH_ENTRY(list->next); if (buffer_dirty(bh)) { ret = 0; break; } __remove_assoc_queue(bh); } spin_unlock(&buffer_mapping->private_lock); } return ret; } /* * Create the appropriate buffers when given a page for data area and * the size of each buffer.. Use the bh->b_this_page linked list to * follow the buffers created. Return NULL if unable to create more * buffers. * * The retry flag is used to differentiate async IO (paging, swapping) * which may not fail from ordinary buffer allocations. 
*/ struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size, int retry) { struct buffer_head *bh, *head; long offset; try_again: head = NULL; offset = PAGE_SIZE; while ((offset -= size) >= 0) { bh = alloc_buffer_head(GFP_NOFS); if (!bh) goto no_grow; bh->b_bdev = NULL; bh->b_this_page = head; bh->b_blocknr = -1; head = bh; bh->b_state = 0; atomic_set(&bh->b_count, 0); bh->b_size = size; /* Link the buffer to its page */ set_bh_page(bh, page, offset); init_buffer(bh, NULL, NULL); } return head; /* * In case anything failed, we just free everything we got. */ no_grow: if (head) { do { bh = head; head = head->b_this_page; free_buffer_head(bh); } while (head); } /* * Return failure for non-async IO requests. Async IO requests * are not allowed to fail, so we have to wait until buffer heads * become available. But we don't want tasks sleeping with * partially complete buffers, so all were released above. */ if (!retry) return NULL; /* We're _really_ low on memory. Now we just * wait for old buffer heads to become free due to * finishing IO. Since this is an async request and * the reserve list is empty, we're sure there are * async buffer heads in use. */ free_more_memory(); goto try_again; } EXPORT_SYMBOL_GPL(alloc_page_buffers); static inline void link_dev_buffers(struct page *page, struct buffer_head *head) { struct buffer_head *bh, *tail; bh = head; do { tail = bh; bh = bh->b_this_page; } while (bh); tail->b_this_page = head; attach_page_buffers(page, head); } /* * Initialise the state of a blockdev page's buffers. 
*/ static void init_page_buffers(struct page *page, struct block_device *bdev, sector_t block, int size) { struct buffer_head *head = page_buffers(page); struct buffer_head *bh = head; int uptodate = PageUptodate(page); sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode)); do { if (!buffer_mapped(bh)) { init_buffer(bh, NULL, NULL); bh->b_bdev = bdev; bh->b_blocknr = block; if (uptodate) set_buffer_uptodate(bh); if (block < end_block) set_buffer_mapped(bh); } block++; bh = bh->b_this_page; } while (bh != head); } /* * Create the page-cache page that contains the requested block. * * This is user purely for blockdev mappings. */ static struct page * grow_dev_page(struct block_device *bdev, sector_t block, pgoff_t index, int size) { struct inode *inode = bdev->bd_inode; struct page *page; struct buffer_head *bh; page = find_or_create_page(inode->i_mapping, index, (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE); if (!page) return NULL; BUG_ON(!PageLocked(page)); if (page_has_buffers(page)) { bh = page_buffers(page); if (bh->b_size == size) { init_page_buffers(page, bdev, block, size); return page; } if (!try_to_free_buffers(page)) goto failed; } /* * Allocate some buffers for this page */ bh = alloc_page_buffers(page, size, 0); if (!bh) goto failed; /* * Link the page to the buffers and initialise them. Take the * lock to be atomic wrt __find_get_block(), which does not * run under the page lock. */ spin_lock(&inode->i_mapping->private_lock); link_dev_buffers(page, bh); init_page_buffers(page, bdev, block, size); spin_unlock(&inode->i_mapping->private_lock); return page; failed: unlock_page(page); page_cache_release(page); return NULL; } /* * Create buffers for the specified block device block's page. If * that page was dirty, the buffers are set dirty also. 
*/ static int grow_buffers(struct block_device *bdev, sector_t block, int size) { struct page *page; pgoff_t index; int sizebits; sizebits = -1; do { sizebits++; } while ((size << sizebits) < PAGE_SIZE); index = block >> sizebits; /* * Check for a block which wants to lie outside our maximum possible * pagecache index. (this comparison is done using sector_t types). */ if (unlikely(index != block >> sizebits)) { char b[BDEVNAME_SIZE]; printk(KERN_ERR "%s: requested out-of-range block %llu for " "device %s\n", __func__, (unsigned long long)block, bdevname(bdev, b)); return -EIO; } block = index << sizebits; /* Create a page with the proper size buffers.. */ page = grow_dev_page(bdev, block, index, size); if (!page) return 0; unlock_page(page); page_cache_release(page); return 1; } static struct buffer_head * __getblk_slow(struct block_device *bdev, sector_t block, int size) { /* Size must be multiple of hard sectorsize */ if (unlikely(size & (bdev_logical_block_size(bdev)-1) || (size < 512 || size > PAGE_SIZE))) { printk(KERN_ERR "getblk(): invalid block size %d requested\n", size); printk(KERN_ERR "logical block size: %d\n", bdev_logical_block_size(bdev)); dump_stack(); return NULL; } for (;;) { struct buffer_head * bh; int ret; bh = __find_get_block(bdev, block, size); if (bh) return bh; ret = grow_buffers(bdev, block, size); if (ret < 0) return NULL; if (ret == 0) free_more_memory(); } } /* * The relationship between dirty buffers and dirty pages: * * Whenever a page has any dirty buffers, the page's dirty bit is set, and * the page is tagged dirty in its radix tree. * * At all times, the dirtiness of the buffers represents the dirtiness of * subsections of the page. If the page has buffers, the page dirty bit is * merely a hint about the true dirty state. * * When a page is set dirty in its entirety, all its buffers are marked dirty * (if the page has buffers). * * When a buffer is marked dirty, its page is dirtied, but the page's other * buffers are not. 
* * Also. When blockdev buffers are explicitly read with bread(), they * individually become uptodate. But their backing page remains not * uptodate - even if all of its buffers are uptodate. A subsequent * block_read_full_page() against that page will discover all the uptodate * buffers, will set the page uptodate and will perform no I/O. */ /** * mark_buffer_dirty - mark a buffer_head as needing writeout * @bh: the buffer_head to mark dirty * * mark_buffer_dirty() will set the dirty bit against the buffer, then set its * backing page dirty, then tag the page as dirty in its address_space's radix * tree and then attach the address_space's inode to its superblock's dirty * inode list. * * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock, * mapping->tree_lock and mapping->host->i_lock. */ void mark_buffer_dirty(struct buffer_head *bh) { WARN_ON_ONCE(!buffer_uptodate(bh)); /* * Very *carefully* optimize the it-is-already-dirty case. * * Don't let the final "is it dirty" escape to before we * perhaps modified the buffer. */ if (buffer_dirty(bh)) { smp_mb(); if (buffer_dirty(bh)) return; } if (!test_set_buffer_dirty(bh)) { struct page *page = bh->b_page; if (!TestSetPageDirty(page)) { struct address_space *mapping = page_mapping(page); if (mapping) __set_page_dirty(page, mapping, 0); } } } EXPORT_SYMBOL(mark_buffer_dirty); /* * Decrement a buffer_head's reference count. If all buffers against a page * have zero reference count, are clean and unlocked, and if the page is clean * and unlocked then try_to_free_buffers() may strip the buffers from the page * in preparation for freeing it (sometimes, rarely, buffers are removed from * a page but it ends up not being freed, and buffers may later be reattached). 
*/ void __brelse(struct buffer_head * buf) { if (atomic_read(&buf->b_count)) { put_bh(buf); return; } WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n"); } EXPORT_SYMBOL(__brelse); /* * bforget() is like brelse(), except it discards any * potentially dirty data. */ void __bforget(struct buffer_head *bh) { clear_buffer_dirty(bh); if (bh->b_assoc_map) { struct address_space *buffer_mapping = bh->b_page->mapping; spin_lock(&buffer_mapping->private_lock); list_del_init(&bh->b_assoc_buffers); bh->b_assoc_map = NULL; spin_unlock(&buffer_mapping->private_lock); } __brelse(bh); } EXPORT_SYMBOL(__bforget); static struct buffer_head *__bread_slow(struct buffer_head *bh) { lock_buffer(bh); if (buffer_uptodate(bh)) { unlock_buffer(bh); return bh; } else { get_bh(bh); bh->b_end_io = end_buffer_read_sync; submit_bh(READ, bh); wait_on_buffer(bh); if (buffer_uptodate(bh)) return bh; } brelse(bh); return NULL; } /* * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block(). * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their * refcount elevated by one when they're in an LRU. A buffer can only appear * once in a particular CPU's LRU. A single buffer can be present in multiple * CPU's LRUs at the same time. * * This is a transparent caching front-end to sb_bread(), sb_getblk() and * sb_find_get_block(). * * The LRUs themselves only need locking against invalidate_bh_lrus. We use * a local interrupt disable for that. */ #define BH_LRU_SIZE 8 struct bh_lru { struct buffer_head *bhs[BH_LRU_SIZE]; }; static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }}; #ifdef CONFIG_SMP #define bh_lru_lock() local_irq_disable() #define bh_lru_unlock() local_irq_enable() #else #define bh_lru_lock() preempt_disable() #define bh_lru_unlock() preempt_enable() #endif static inline void check_irqs_on(void) { #ifdef irqs_disabled BUG_ON(irqs_disabled()); #endif } /* * The LRU management algorithm is dopey-but-simple. Sorry. 
*/ static void bh_lru_install(struct buffer_head *bh) { struct buffer_head *evictee = NULL; check_irqs_on(); bh_lru_lock(); if (__this_cpu_read(bh_lrus.bhs[0]) != bh) { struct buffer_head *bhs[BH_LRU_SIZE]; int in; int out = 0; get_bh(bh); bhs[out++] = bh; for (in = 0; in < BH_LRU_SIZE; in++) { struct buffer_head *bh2 = __this_cpu_read(bh_lrus.bhs[in]); if (bh2 == bh) { __brelse(bh2); } else { if (out >= BH_LRU_SIZE) { BUG_ON(evictee != NULL); evictee = bh2; } else { bhs[out++] = bh2; } } } while (out < BH_LRU_SIZE) bhs[out++] = NULL; memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs)); } bh_lru_unlock(); if (evictee) __brelse(evictee); } /* * Look up the bh in this cpu's LRU. If it's there, move it to the head. */ static struct buffer_head * lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size) { struct buffer_head *ret = NULL; unsigned int i; check_irqs_on(); bh_lru_lock(); for (i = 0; i < BH_LRU_SIZE; i++) { struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]); if (bh && bh->b_bdev == bdev && bh->b_blocknr == block && bh->b_size == size) { if (i) { while (i) { __this_cpu_write(bh_lrus.bhs[i], __this_cpu_read(bh_lrus.bhs[i - 1])); i--; } __this_cpu_write(bh_lrus.bhs[0], bh); } get_bh(bh); ret = bh; break; } } bh_lru_unlock(); return ret; } /* * Perform a pagecache lookup for the matching buffer. If it's there, refresh * it in the LRU and mark it as accessed. If it is not present then return * NULL */ struct buffer_head * __find_get_block(struct block_device *bdev, sector_t block, unsigned size) { struct buffer_head *bh = lookup_bh_lru(bdev, block, size); if (bh == NULL) { bh = __find_get_block_slow(bdev, block); if (bh) bh_lru_install(bh); } if (bh) touch_buffer(bh); return bh; } EXPORT_SYMBOL(__find_get_block); /* * __getblk will locate (and, if necessary, create) the buffer_head * which corresponds to the passed block_device, block and size. The * returned buffer has its reference count incremented. 
 *
 * __getblk() cannot fail - it just keeps trying.  If you pass it an
 * illegal block number, __getblk() will happily return a buffer_head
 * which represents the non-existent block.  Very weird.
 *
 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
 * attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	might_sleep();
	if (bh == NULL)
		bh = __getblk_slow(bdev, block, size);
	return bh;
}
EXPORT_SYMBOL(__getblk);

/*
 * Do async read-ahead on a buffer..
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (likely(bh)) {
		ll_rw_block(READA, 1, &bh);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);

/**
 *  __bread() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *
 *  Reads a specified block, and returns buffer head that contains it.
 *  It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread);

/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
	/* Drop every buffer cached in this CPU's LRU. */
	struct bh_lru *b = &get_cpu_var(bh_lrus);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	put_cpu_var(bh_lrus);
}

/* Does @cpu's LRU hold any buffer at all? */
static bool has_bh_in_lru(int cpu, void *dummy)
{
	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		if (b->bhs[i])
			return 1;
	}

	return 0;
}

/* Remove the buffer passed as @arg from this CPU's LRU, if present. */
static void __evict_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);
	struct buffer_head *bh = arg;
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		if (b->bhs[i] == bh) {
			brelse(b->bhs[i]);
			b->bhs[i] = NULL;
			goto out;
		}
	}
out:
	put_cpu_var(bh_lrus);
}

/* Is the buffer passed as @arg present in @cpu's LRU? */
static bool bh_exists_in_lru(int cpu, void *arg)
{
	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
	struct buffer_head *bh = arg;
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		if (b->bhs[i] == bh)
			return 1;
	}

	return 0;
}

void invalidate_bh_lrus(void)
{
	/* Only interrupt CPUs whose LRU actually caches something. */
	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

void evict_bh_lrus(struct buffer_head *bh)
{
	on_each_cpu_cond(bh_exists_in_lru, __evict_bh_lru, bh, 1, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(evict_bh_lrus);

void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset)
{
	bh->b_page = page;
	BUG_ON(offset >= PAGE_SIZE);
	if (PageHighMem(page))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);

/*
 * Called when truncating a buffer on a page completely.
 */
static void discard_buffer(struct buffer_head * bh)
{
	/* Strip all state so the buffer looks brand new and unmapped. */
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
	unlock_buffer(bh);
}

/**
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 *
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * block_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidatepage(struct page *page, unsigned long offset)
{
	struct buffer_head *head, *bh, *next;
	unsigned int curr_off = 0;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		goto out;

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * is this block fully invalidated?
		 */
		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * We release buffers only if the entire page is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
	if (offset == 0)
		try_to_release_page(page, 0);
out:
	return;
}
EXPORT_SYMBOL(block_invalidatepage);

/*
 * We attach and possibly dirty the buffers atomically wrt
 * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
 * is already excluded via the page lock.
 */
void create_empty_buffers(struct page *page,
			unsigned long blocksize, unsigned long b_state)
{
	struct buffer_head *bh, *head, *tail;

	/*
	 * alloc_page_buffers() hands back a NULL-terminated singly-linked
	 * list; convert it into the circular ring the page code expects.
	 */
	head = alloc_page_buffers(page, blocksize, 1);
	bh = head;
	do {
		bh->b_state |= b_state;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;

	spin_lock(&page->mapping->private_lock);
	if (PageUptodate(page) || PageDirty(page)) {
		/* Propagate the page's state down to the new buffers. */
		bh = head;
		do {
			if (PageDirty(page))
				set_buffer_dirty(bh);
			if (PageUptodate(page))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	attach_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);
}
EXPORT_SYMBOL(create_empty_buffers);

/*
 * We are taking a block for data and we don't want any output from any
 * buffer-cache aliases starting from return from that function and
 * until the moment when something will explicitly mark the buffer
 * dirty (hopefully that will not happen until we will free that block ;-)
 * We don't even need to mark it not-uptodate - nobody can expect
 * anything from a newly allocated buffer anyway. We used to use
 * unmap_buffer() for such invalidation, but that was wrong. We definitely
 * don't want to mark the alias unmapped, for example - it would confuse
 * anyone who might pick it with bread() afterwards...
 *
 * Also..  Note that bforget() doesn't lock the buffer.  So there can
 * be writeout I/O going on against recently-freed buffers.  We don't
 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
 * only if we really need to.  That happens here.
 */
void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
{
	struct buffer_head *old_bh;

	might_sleep();

	old_bh = __find_get_block_slow(bdev, block);
	if (old_bh) {
		clear_buffer_dirty(old_bh);
		wait_on_buffer(old_bh);
		clear_buffer_req(old_bh);
		__brelse(old_bh);
	}
}
EXPORT_SYMBOL(unmap_underlying_metadata);

/*
 * NOTE!
All mapped/uptodate combinations are valid:
 *
 *	Mapped	Uptodate	Meaning
 *
 *	No	No	"unknown" - must do get_block()
 *	No	Yes	"hole" - zero-filled
 *	Yes	No	"allocated" - allocated on disk, not read in
 *	Yes	Yes	"valid" - allocated and up-to-date in memory.
 *
 * "Dirty" is valid only with the last case (mapped+uptodate).
 */

/*
 * While block_write_full_page is writing back the dirty buffers under
 * the page lock, whoever dirtied the buffers may decide to clean them
 * again at any time.  We handle that by only looking at the buffer
 * state inside lock_buffer().
 *
 * If block_write_full_page() is called for regular writeback
 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
 * locked buffer.  This only can happen if someone has written the buffer
 * directly, with submit_bh().  At the address_space level PageWriteback
 * prevents this contention from occurring.
 *
 * If block_write_full_page() is called with wbc->sync_mode ==
 * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
 * causes the writes to be flagged as synchronous writes.
 */
static int __block_write_full_page(struct inode *inode, struct page *page,
			get_block_t *get_block, struct writeback_control *wbc,
			bh_end_io_t *handler)
{
	int err;
	sector_t block;
	sector_t last_block;
	struct buffer_head *bh, *head;
	const unsigned blocksize = 1 << inode->i_blkbits;
	int nr_underway = 0;
	int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
			WRITE_SYNC : WRITE);

	BUG_ON(!PageLocked(page));

	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, blocksize,
					(1 << BH_Dirty)|(1 << BH_Uptodate));
	}

	/*
	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
	 * here, and the (potentially unmapped) buffers may become dirty at
	 * any time.  If a buffer becomes dirty here after we've inspected it
	 * then we just miss that fact, and the page stays dirty.
	 *
	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
	 * handle that here by just cleaning them.
	 */

	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	head = page_buffers(page);
	bh = head;

	/*
	 * Get all the dirty buffers mapped to disk addresses and
	 * handle any aliases from the underlying blockdev's mapping.
	 */
	do {
		if (block > last_block) {
			/*
			 * mapped buffers outside i_size will occur, because
			 * this page can be outside i_size when there is a
			 * truncate in progress.
			 */
			/*
			 * The buffer was zeroed by block_write_full_page()
			 */
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
			   buffer_dirty(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				goto recover;
			clear_buffer_delay(bh);
			if (buffer_new(bh)) {
				/* blockdev mappings never come here */
				clear_buffer_new(bh);
				unmap_underlying_metadata(bh->b_bdev,
							bh->b_blocknr);
			}
		}
		bh = bh->b_this_page;
		block++;
	} while (bh != head);

	do {
		if (!buffer_mapped(bh))
			continue;
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the page.  Note that this can
		 * potentially cause a busy-wait loop from writeback threads
		 * and kswapd activity, but those code paths have their own
		 * higher-level throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			lock_buffer(bh);
		} else if (!trylock_buffer(bh)) {
			redirty_page_for_writepage(wbc, page);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write_endio(bh, handler);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The page and its buffers are protected by PageWriteback(), so we can
	 * drop the bh refcounts early.
	 */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(write_op, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);

	err = 0;
done:
	if (nr_underway == 0) {
		/*
		 * The page was marked dirty, but the buffers were
		 * clean.  Someone wrote them back by hand with
		 * ll_rw_block/submit_bh.  A rare case.
		 */
		end_page_writeback(page);

		/*
		 * The page and buffer_heads can be released at any time from
		 * here on.
		 */
	}
	return err;

recover:
	/*
	 * ENOSPC, or some other error.  We may already have added some
	 * blocks to the file, so we need to write these out to avoid
	 * exposing stale data.
	 * The page is currently locked and not marked for writeback
	 */
	bh = head;
	/* Recovery: lock and submit the mapped buffers */
	do {
		if (buffer_mapped(bh) && buffer_dirty(bh) &&
		    !buffer_delay(bh)) {
			lock_buffer(bh);
			mark_buffer_async_write_endio(bh, handler);
		} else {
			/*
			 * The buffer may have been set dirty during
			 * attachment to a dirty page.
			 */
			clear_buffer_dirty(bh);
		}
	} while ((bh = bh->b_this_page) != head);
	SetPageError(page);
	BUG_ON(PageWriteback(page));
	mapping_set_error(page->mapping, err);
	set_page_writeback(page);
	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			clear_buffer_dirty(bh);
			submit_bh(write_op, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);
	goto done;
}

/*
 * If a page has any new buffers, zero them out here, and mark them uptodate
 * and dirty so they'll be written out (in order to prevent uninitialised
 * block data from leaking). And clear the new bit.
 */
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
{
	unsigned int block_start, block_end;
	struct buffer_head *head, *bh;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		return;

	bh = head = page_buffers(page);
	block_start = 0;
	do {
		block_end = block_start + bh->b_size;

		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!PageUptodate(page)) {
					unsigned start, size;

					/* Zero only the part of this buffer
					 * that overlaps [from, to). */
					start = max(from, block_start);
					size = min(to, block_end) - start;

					zero_user(page, start, size);
					set_buffer_uptodate(bh);
				}

				clear_buffer_new(bh);
				mark_buffer_dirty(bh);
			}
		}

		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}
EXPORT_SYMBOL(page_zero_new_buffers);

/*
 * Prepare the buffers covering [pos, pos+len) for a write: map them via
 * get_block (allocating as needed), zero fresh blocks outside the write
 * range, and read in any partially-overwritten blocks.
 */
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
		get_block_t *get_block)
{
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned to = from + len;
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end;
	sector_t block;
	int err = 0;
	unsigned blocksize, bbits;
	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;

	BUG_ON(!PageLocked(page));
	BUG_ON(from > PAGE_CACHE_SIZE);
	BUG_ON(to > PAGE_CACHE_SIZE);
	BUG_ON(from > to);

	blocksize = 1 << inode->i_blkbits;
	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);

	bbits = inode->i_blkbits;
	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);

	for(bh = head, block_start = 0; bh != head || !block_start;
	    block++, block_start=block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			/* Block entirely outside the write range. */
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				break;
			if (buffer_new(bh)) {
				unmap_underlying_metadata(bh->b_bdev,
							bh->b_blocknr);
				if (PageUptodate(page)) {
					clear_buffer_new(bh);
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
					continue;
				}
				/* Freshly allocated block: zero the parts
				 * the write will not cover. */
				if (block_end > to || block_start < from)
					zero_user_segments(page,
						to, block_end,
						block_start, from);
				continue;
			}
		}
		if (PageUptodate(page)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);
			continue;
		}
		/* Partially-overwritten block must be read in first. */
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		     (block_start < from || block_end > to)) {
			ll_rw_block(READ, 1, &bh);
			*wait_bh++=bh;
		}
	}
	/*
	 * If we issued read requests - let them complete.
	 */
	while(wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))
			err = -EIO;
	}
	if (unlikely(err))
		page_zero_new_buffers(page, from, to);
	return err;
}
EXPORT_SYMBOL(__block_write_begin);

/*
 * Mark the buffers covering [from, to) uptodate and dirty after a write,
 * and set the page uptodate if every buffer on it now is.
 */
static int __block_commit_write(struct inode *inode, struct page *page,
		unsigned from, unsigned to)
{
	unsigned block_start, block_end;
	int partial = 0;
	unsigned blocksize;
	struct buffer_head *bh, *head;

	blocksize = 1 << inode->i_blkbits;

	for(bh = head = page_buffers(page), block_start = 0;
	    bh != head || !block_start;
	    block_start=block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (!buffer_uptodate(bh))
				partial = 1;
		} else {
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
		}
		clear_buffer_new(bh);
	}

	/*
	 * If this is a partial write which happened to make all buffers
	 * uptodate then we can optimize away a bogus readpage() for
	 * the next read(). Here we 'discover' whether the page went
	 * uptodate as a result of this (potentially partial) write.
	 */
	if (!partial)
		SetPageUptodate(page);
	return 0;
}

/*
 * block_write_begin takes care of the basic task of block allocation and
 * bringing partial write blocks uptodate first.
 *
 * The filesystem needs to handle block truncation upon failure.
 */
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		unsigned flags, struct page **pagep, get_block_t *get_block)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	struct page *page;
	int status;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin(page, pos, len, get_block);
	if (unlikely(status)) {
		/* On failure the page is unlocked and released; callers
		 * get *pagep == NULL. */
		unlock_page(page);
		page_cache_release(page);
		page = NULL;
	}

	*pagep = page;
	return status;
}
EXPORT_SYMBOL(block_write_begin);

int block_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start;

	start = pos & (PAGE_CACHE_SIZE - 1);

	if (unlikely(copied < len)) {
		/*
		 * The buffers that were written will now be uptodate, so we
		 * don't have to worry about a readpage reading them and
		 * overwriting a partial write. However if we have encountered
		 * a short write and only partially written into a buffer, it
		 * will not be marked uptodate, so a readpage might come in and
		 * destroy our partial write.
		 *
		 * Do the simplest thing, and just treat any short write to a
		 * non uptodate page as a zero-length write, and force the
		 * caller to redo the whole thing.
		 */
		if (!PageUptodate(page))
			copied = 0;

		page_zero_new_buffers(page, start+copied, start+len);
	}
	flush_dcache_page(page);

	/* This could be a short (even 0-length) commit */
	__block_commit_write(inode, page, start, start+copied);

	return copied;
}
EXPORT_SYMBOL(block_write_end);

int generic_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	int i_size_changed = 0;

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos+copied > inode->i_size) {
		i_size_write(inode, pos+copied);
		i_size_changed = 1;
	}

	unlock_page(page);
	page_cache_release(page);

	/*
	 * Don't mark the inode dirty under page lock. First, it unnecessarily
	 * makes the holding time of page lock longer. Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		mark_inode_dirty(inode);

	return copied;
}
EXPORT_SYMBOL(generic_write_end);

/*
 * block_is_partially_uptodate checks whether buffers within a page are
 * uptodate or not.
 *
 * Returns true if all buffers which correspond to a file portion
 * we want to read are uptodate.
 */
int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
					unsigned long from)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end, blocksize;
	unsigned to;
	struct buffer_head *bh, *head;
	int ret = 1;

	if (!page_has_buffers(page))
		return 0;

	blocksize = 1 << inode->i_blkbits;
	to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
	to = from + to;
	if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
		return 0;

	head = page_buffers(page);
	bh = head;
	block_start = 0;
	do {
		block_end = block_start + blocksize;
		if (block_end > from && block_start < to) {
			if (!buffer_uptodate(bh)) {
				ret = 0;
				break;
			}
			if (block_end >= to)
				break;
		}
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);

	return ret;
}
EXPORT_SYMBOL(block_is_partially_uptodate);

/*
 * Generic "read page" function for block devices that have the normal
 * get_block functionality. This is most of the block device filesystems.
 * Reads the page asynchronously --- the unlock_buffer() and
 * set/clear_buffer_uptodate() functions propagate buffer state into the
 * page struct once IO has completed.
 */
int block_read_full_page(struct page *page, get_block_t *get_block)
{
	struct inode *inode = page->mapping->host;
	sector_t iblock, lblock;
	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
	unsigned int blocksize;
	int nr, i;
	int fully_mapped = 1;

	BUG_ON(!PageLocked(page));
	blocksize = 1 << inode->i_blkbits;
	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);

	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
	bh = head;
	nr = 0;
	i = 0;

	/* Stage one: collect the buffers that need reading into arr[]. */
	do {
		if (buffer_uptodate(bh))
			continue;

		if (!buffer_mapped(bh)) {
			int err = 0;

			fully_mapped = 0;
			if (iblock < lblock) {
				WARN_ON(bh->b_size != blocksize);
				err = get_block(inode, iblock, bh, 0);
				if (err)
					SetPageError(page);
			}
			if (!buffer_mapped(bh)) {
				/* A hole: supply zeroes instead of I/O. */
				zero_user(page, i * blocksize, blocksize);
				if (!err)
					set_buffer_uptodate(bh);
				continue;
			}
			/*
			 * get_block() might have updated the buffer
			 * synchronously
			 */
			if (buffer_uptodate(bh))
				continue;
		}
		arr[nr++] = bh;
	} while (i++, iblock++, (bh = bh->b_this_page) != head);

	if (fully_mapped)
		SetPageMappedToDisk(page);

	if (!nr) {
		/*
		 * All buffers are uptodate - we can set the page uptodate
		 * as well. But not if get_block() returned an error.
		 */
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
		return 0;
	}

	/* Stage two: lock the buffers */
	for (i = 0; i < nr; i++) {
		bh = arr[i];
		lock_buffer(bh);
		mark_buffer_async_read(bh);
	}

	/*
	 * Stage 3: start the IO.  Check for uptodateness
	 * inside the buffer lock in case another process reading
	 * the underlying blockdev brought it uptodate (the sct fix).
	 */
	for (i = 0; i < nr; i++) {
		bh = arr[i];
		if (buffer_uptodate(bh))
			end_buffer_async_read(bh, 1);
		else
			submit_bh(READ, bh);
	}
	return 0;
}
EXPORT_SYMBOL(block_read_full_page);

/* utility function for filesystems that need to do work on expanding
 * truncates.  Uses filesystem pagecache writes to allow the filesystem to
 * deal with the hole.
 */
int generic_cont_expand_simple(struct inode *inode, loff_t size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	void *fsdata;
	int err;

	err = inode_newsize_ok(inode, size);
	if (err)
		goto out;

	/*
	 * A zero-length write at the new size drives block allocation and
	 * tail zeroing through the normal write_begin/write_end path.
	 */
	err = pagecache_write_begin(NULL, mapping, size, 0,
				AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
				&page, &fsdata);
	if (err)
		goto out;

	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
	BUG_ON(err > 0);

out:
	return err;
}
EXPORT_SYMBOL(generic_cont_expand_simple);

/*
 * Zero the pagecache between the current EOF (*bytes) and @pos, page by
 * page, so that a write at @pos does not expose stale data in between.
 * *bytes is rounded up to a block boundary as zeroing proceeds.
 */
static int cont_expand_zero(struct file *file, struct address_space *mapping,
			    loff_t pos, loff_t *bytes)
{
	struct inode *inode = mapping->host;
	unsigned blocksize = 1 << inode->i_blkbits;
	struct page *page;
	void *fsdata;
	pgoff_t index, curidx;
	loff_t curpos;
	unsigned zerofrom, offset, len;
	int err = 0;

	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & ~PAGE_CACHE_MASK;

	/* Zero out whole pages up to (but not including) @index. */
	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
		zerofrom = curpos & ~PAGE_CACHE_MASK;
		if (zerofrom & (blocksize-1)) {
			*bytes |= (blocksize-1);
			(*bytes)++;
		}
		len = PAGE_CACHE_SIZE - zerofrom;

		err = pagecache_write_begin(file, mapping, curpos, len,
						AOP_FLAG_UNINTERRUPTIBLE,
						&page, &fsdata);
		if (err)
			goto out;
		zero_user(page, zerofrom, len);
		err = pagecache_write_end(file, mapping, curpos, len, len,
						page, fsdata);
		if (err < 0)
			goto out;
		BUG_ON(err != len);
		err = 0;

		balance_dirty_pages_ratelimited(mapping);
	}

	/* page covers the boundary, find the boundary offset */
	if (index == curidx) {
		zerofrom = curpos & ~PAGE_CACHE_MASK;
		/* if we will expand the thing last block will be filled */
		if (offset <= zerofrom) {
			goto out;
		}
		if (zerofrom & (blocksize-1)) {
			*bytes |= (blocksize-1);
			(*bytes)++;
		}
		len = offset - zerofrom;

		err = pagecache_write_begin(file, mapping, curpos, len,
						AOP_FLAG_UNINTERRUPTIBLE,
						&page, &fsdata);
		if (err)
			goto out;
		zero_user(page, zerofrom, len);
		err = pagecache_write_end(file, mapping, curpos, len, len,
						page, fsdata);
		if (err < 0)
			goto out;
		BUG_ON(err != len);
		err = 0;
	}
out:
	return err;
}

/*
 * For moronic filesystems that do not allow holes in file.
 * We may have to extend the file.
 */
int cont_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata,
			get_block_t *get_block, loff_t *bytes)
{
	struct inode *inode = mapping->host;
	unsigned blocksize = 1 << inode->i_blkbits;
	unsigned zerofrom;
	int err;

	/* Fill the gap between old EOF and the write position first. */
	err = cont_expand_zero(file, mapping, pos, bytes);
	if (err)
		return err;

	zerofrom = *bytes & ~PAGE_CACHE_MASK;
	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
		*bytes |= (blocksize-1);
		(*bytes)++;
	}

	return block_write_begin(mapping, pos, len, flags, pagep, get_block);
}
EXPORT_SYMBOL(cont_write_begin);

int block_commit_write(struct page *page, unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	__block_commit_write(inode,page,from,to);
	return 0;
}
EXPORT_SYMBOL(block_commit_write);

/*
 * block_page_mkwrite() is not allowed to change the file size as it gets
 * called from a page fault handler when a page is first dirtied. Hence we must
 * be careful to check for EOF conditions here. We set the page up correctly
 * for a written page which means we get ENOSPC checking when writing into
 * holes and correct delalloc and unwritten extent mapping on filesystems that
 * support these features.
 *
 * We are not allowed to take the i_mutex here so we have to play games to
 * protect against truncate races as the page could now be beyond EOF.  Because
 * truncate writes the inode size before removing pages, once we have the
 * page lock we can determine safely if the page is beyond EOF. If it is not
 * beyond EOF, then the page is guaranteed safe against truncation until we
 * unlock the page.
 *
 * Direct callers of this function should call vfs_check_frozen() so that page
 * fault does not busyloop until the fs is thawed.
 */
int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
			 get_block_t get_block)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	unsigned long end;
	loff_t size;
	int ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
		end = size & ~PAGE_CACHE_MASK;
	else
		end = PAGE_CACHE_SIZE;

	ret = __block_write_begin(page, 0, end, get_block);
	if (!ret)
		ret = block_commit_write(page, 0, end);

	if (unlikely(ret < 0))
		goto out_unlock;
	/*
	 * Freezing in progress? We check after the page is marked dirty and
	 * with page lock held so if the test here fails, we are sure freezing
	 * code will wait during syncing until the page fault is done - at that
	 * point page will be dirty and unlocked so freezing code will write it
	 * and writeprotect it again.
	 */
	set_page_dirty(page);
	if (inode->i_sb->s_frozen != SB_UNFROZEN) {
		ret = -EAGAIN;
		goto out_unlock;
	}
	wait_on_page_writeback(page);
	/* Returns with the page still locked on success. */
	return 0;
out_unlock:
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL(__block_page_mkwrite);

int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
		   get_block_t get_block)
{
	int ret;
	struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;

	/*
	 * This check is racy but catches the common case. The check in
	 * __block_page_mkwrite() is reliable.
	 */
	vfs_check_frozen(sb, SB_FREEZE_WRITE);
	ret = __block_page_mkwrite(vma, vmf, get_block);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL(block_page_mkwrite);

/*
 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
 * immediately, while under the page lock.  So it needs a special end_io
 * handler which does not touch the bh after unlocking it.
 */
static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
}

/*
 * Attach the singly-linked list of buffers created by nobh_write_begin, to
 * the page (converting it to circular linked list and taking care of page
 * dirty races).
 */
static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh;

	BUG_ON(!PageLocked(page));

	spin_lock(&page->mapping->private_lock);
	bh = head;
	do {
		if (PageDirty(page))
			set_buffer_dirty(bh);
		/* Close the ring: the list arrives NULL-terminated. */
		if (!bh->b_this_page)
			bh->b_this_page = head;
		bh = bh->b_this_page;
	} while (bh != head);
	attach_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);
}

/*
 * On entry, the page is fully not uptodate.
 * On exit the page is fully uptodate in the areas outside (from,to)
 * The filesystem needs to handle block truncation upon failure.
 */
int nobh_write_begin(struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata,
			get_block_t *get_block)
{
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	struct buffer_head *head, *bh;
	struct page *page;
	pgoff_t index;
	unsigned from, to;
	unsigned block_in_page;
	unsigned block_start, block_end;
	sector_t block_in_file;
	int nr_reads = 0;
	int ret = 0;
	int is_mapped_to_disk = 1;

	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;
	*fsdata = NULL;

	if (page_has_buffers(page)) {
		/* Page already has buffers: fall back to the bh path. */
		ret = __block_write_begin(page, pos, len, get_block);
		if (unlikely(ret))
			goto out_release;
		return ret;
	}

	if (PageMappedToDisk(page))
		return 0;

	/*
	 * Allocate buffers so that we can keep track of state, and potentially
	 * attach them to the page if an error occurs. In the common case of
	 * no error, they will just be freed again without ever being attached
	 * to the page (which is all OK, because we're under the page lock).
	 *
	 * Be careful: the buffer linked list is a NULL terminated one, rather
	 * than the circular one we're used to.
	 */
	head = alloc_page_buffers(page, blocksize, 0);
	if (!head) {
		ret = -ENOMEM;
		goto out_release;
	}

	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);

	/*
	 * We loop across all blocks in the page, whether or not they are
	 * part of the affected region.  This is so we can discover if the
	 * page is fully mapped-to-disk.
	 */
	for (block_start = 0, block_in_page = 0, bh = head;
		  block_start < PAGE_CACHE_SIZE;
		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
		int create;

		block_end = block_start + blocksize;
		bh->b_state = 0;
		create = 1;
		/* Only allocate blocks that the write actually covers. */
		if (block_start >= to)
			create = 0;
		ret = get_block(inode, block_in_file + block_in_page,
					bh, create);
		if (ret)
			goto failed;
		if (!buffer_mapped(bh))
			is_mapped_to_disk = 0;
		if (buffer_new(bh))
			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
		if (PageUptodate(page)) {
			set_buffer_uptodate(bh);
			continue;
		}
		if (buffer_new(bh) || !buffer_mapped(bh)) {
			/* New block or hole: zero the parts outside the
			 * write range. */
			zero_user_segments(page, block_start, from,
							to, block_end);
			continue;
		}
		if (buffer_uptodate(bh))
			continue;	/* reiserfs does this */
		if (block_start < from || block_end > to) {
			lock_buffer(bh);
			bh->b_end_io = end_buffer_read_nobh;
			submit_bh(READ, bh);
			nr_reads++;
		}
	}

	if (nr_reads) {
		/*
		 * The page is locked, so these buffers are protected from
		 * any VM or truncate activity.  Hence we don't need to care
		 * for the buffer_head refcounts.
		 */
		for (bh = head; bh; bh = bh->b_this_page) {
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				ret = -EIO;
		}
		if (ret)
			goto failed;
	}

	if (is_mapped_to_disk)
		SetPageMappedToDisk(page);

	*fsdata = head; /* to be released by nobh_write_end */

	return 0;

failed:
	BUG_ON(!ret);
	/*
	 * Error recovery is a bit difficult. We need to zero out blocks that
	 * were newly allocated, and dirty them to ensure they get written out.
	 * Buffers need to be attached to the page at this point, otherwise
	 * the handling of potential IO errors during writeout would be hard
	 * (could try doing synchronous writeout, but what if that fails too?)
	 */
	attach_nobh_buffers(page, head);
	page_zero_new_buffers(page, from, to);

out_release:
	unlock_page(page);
	page_cache_release(page);
	*pagep = NULL;

	return ret;
}
EXPORT_SYMBOL(nobh_write_begin);

int nobh_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *head = fsdata;
	struct buffer_head *bh;
	BUG_ON(fsdata != NULL && page_has_buffers(page));

	if (unlikely(copied < len) && head)
		attach_nobh_buffers(page, head);
	if (page_has_buffers(page))
		return generic_write_end(file, mapping, pos, len,
					copied, page, fsdata);

	SetPageUptodate(page);
	set_page_dirty(page);
	if (pos+copied > inode->i_size) {
		i_size_write(inode, pos+copied);
		mark_inode_dirty(inode);
	}

	unlock_page(page);
	page_cache_release(page);

	/* Free the temporary buffer list created by nobh_write_begin(). */
	while (head) {
		bh = head;
		head = head->b_this_page;
		free_buffer_head(bh);
	}

	return copied;
}
EXPORT_SYMBOL(nobh_write_end);

/*
 * nobh_writepage() - based on block_full_write_page() except
 * that it tries to operate without attaching bufferheads to
 * the page.
 */
int nobh_writepage(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;
	int ret;

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		goto out;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index >= end_index+1 || !offset) {
		/*
		 * The page may have dirty, unmapped buffers. For example,
		 * they may have been added in ext3_writepage().  Make them
		 * freeable here, so the page does not leak.
		 */
#if 0
		/* Not really sure about this  - do we need this ? */
		if (page->mapping->a_ops->invalidatepage)
			page->mapping->a_ops->invalidatepage(page, offset);
#endif
		unlock_page(page);
		return 0; /* don't care */
	}

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
out:
	ret = mpage_writepage(page, get_block, wbc);
	if (ret == -EAGAIN)
		ret = __block_write_full_page(inode, page, get_block, wbc,
					      end_buffer_async_write);
	return ret;
}
EXPORT_SYMBOL(nobh_writepage);

int nobh_truncate_page(struct address_space *mapping,
			loff_t from, get_block_t *get_block)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	sector_t iblock;
	unsigned length, pos;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head map_bh;
	int err;

	blocksize = 1 << inode->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;
	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	page = grab_cache_page(mapping, index);
	err = -ENOMEM;
	if (!page)
		goto out;

	if (page_has_buffers(page)) {
has_buffers:
		/* Buffers exist: defer to the buffer-head based variant. */
		unlock_page(page);
		page_cache_release(page);
		return block_truncate_page(mapping, from, get_block);
	}

	/* Find the buffer that contains "offset" */
	pos = blocksize;
	while (offset >= pos) {
		iblock++;
		pos += blocksize;
	}

	map_bh.b_size = blocksize;
	map_bh.b_state = 0;
	err = get_block(inode, iblock, &map_bh, 0);
	if (err)
		goto unlock;
	/* unmapped?
It's a hole - nothing to do */ if (!buffer_mapped(&map_bh)) goto unlock; /* Ok, it's mapped. Make sure it's up-to-date */ if (!PageUptodate(page)) { err = mapping->a_ops->readpage(NULL, page); if (err) { page_cache_release(page); goto out; } lock_page(page); if (!PageUptodate(page)) { err = -EIO; goto unlock; } if (page_has_buffers(page)) goto has_buffers; } zero_user(page, offset, length); set_page_dirty(page); err = 0; unlock: unlock_page(page); page_cache_release(page); out: return err; } EXPORT_SYMBOL(nobh_truncate_page); int block_truncate_page(struct address_space *mapping, loff_t from, get_block_t *get_block) { pgoff_t index = from >> PAGE_CACHE_SHIFT; unsigned offset = from & (PAGE_CACHE_SIZE-1); unsigned blocksize; sector_t iblock; unsigned length, pos; struct inode *inode = mapping->host; struct page *page; struct buffer_head *bh; int err; blocksize = 1 << inode->i_blkbits; length = offset & (blocksize - 1); /* Block boundary? Nothing to do */ if (!length) return 0; length = blocksize - length; iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits); page = grab_cache_page(mapping, index); err = -ENOMEM; if (!page) goto out; if (!page_has_buffers(page)) create_empty_buffers(page, blocksize, 0); /* Find the buffer that contains "offset" */ bh = page_buffers(page); pos = blocksize; while (offset >= pos) { bh = bh->b_this_page; iblock++; pos += blocksize; } err = 0; if (!buffer_mapped(bh)) { WARN_ON(bh->b_size != blocksize); err = get_block(inode, iblock, bh, 0); if (err) goto unlock; /* unmapped? It's a hole - nothing to do */ if (!buffer_mapped(bh)) goto unlock; } /* Ok, it's mapped. Make sure it's up-to-date */ if (PageUptodate(page)) set_buffer_uptodate(bh); if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { err = -EIO; ll_rw_block(READ, 1, &bh); wait_on_buffer(bh); /* Uhhuh. Read error. Complain and punt. 
*/ if (!buffer_uptodate(bh)) goto unlock; } zero_user(page, offset, length); mark_buffer_dirty(bh); err = 0; unlock: unlock_page(page); page_cache_release(page); out: return err; } EXPORT_SYMBOL(block_truncate_page); /* * The generic ->writepage function for buffer-backed address_spaces * this form passes in the end_io handler used to finish the IO. */ int block_write_full_page_endio(struct page *page, get_block_t *get_block, struct writeback_control *wbc, bh_end_io_t *handler) { struct inode * const inode = page->mapping->host; loff_t i_size = i_size_read(inode); const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; unsigned offset; /* Is the page fully inside i_size? */ if (page->index < end_index) return __block_write_full_page(inode, page, get_block, wbc, handler); /* Is the page fully outside i_size? (truncate in progress) */ offset = i_size & (PAGE_CACHE_SIZE-1); if (page->index >= end_index+1 || !offset) { /* * The page may have dirty, unmapped buffers. For example, * they may have been added in ext3_writepage(). Make them * freeable here, so the page does not leak. */ do_invalidatepage(page, 0); unlock_page(page); return 0; /* don't care */ } /* * The page straddles i_size. It must be zeroed out on each and every * writepage invocation because it may be mmapped. "A file is mapped * in multiples of the page size. For a file that is not a multiple of * the page size, the remaining memory is zeroed when mapped, and * writes to that region are not written out to the file." 
*/ zero_user_segment(page, offset, PAGE_CACHE_SIZE); return __block_write_full_page(inode, page, get_block, wbc, handler); } EXPORT_SYMBOL(block_write_full_page_endio); /* * The generic ->writepage function for buffer-backed address_spaces */ int block_write_full_page(struct page *page, get_block_t *get_block, struct writeback_control *wbc) { return block_write_full_page_endio(page, get_block, wbc, end_buffer_async_write); } EXPORT_SYMBOL(block_write_full_page); sector_t generic_block_bmap(struct address_space *mapping, sector_t block, get_block_t *get_block) { struct buffer_head tmp; struct inode *inode = mapping->host; tmp.b_state = 0; tmp.b_blocknr = 0; tmp.b_size = 1 << inode->i_blkbits; get_block(inode, block, &tmp, 0); return tmp.b_blocknr; } EXPORT_SYMBOL(generic_block_bmap); static void end_bio_bh_io_sync(struct bio *bio, int err) { struct buffer_head *bh = bio->bi_private; if (err == -EOPNOTSUPP) { set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); } if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags))) set_bit(BH_Quiet, &bh->b_state); bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags)); bio_put(bio); } int submit_bh(int rw, struct buffer_head * bh) { struct bio *bio; int ret = 0; BUG_ON(!buffer_locked(bh)); BUG_ON(!buffer_mapped(bh)); BUG_ON(!bh->b_end_io); BUG_ON(buffer_delay(bh)); BUG_ON(buffer_unwritten(bh)); /* * Only clear out a write error when rewriting */ if (test_set_buffer_req(bh) && (rw & WRITE)) clear_buffer_write_io_error(bh); /* * from here on down, it's all bio -- do the initial mapping, * submit_bio -> generic_make_request may further map this bio around */ bio = bio_alloc(GFP_NOIO, 1); bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); bio->bi_bdev = bh->b_bdev; bio->bi_io_vec[0].bv_page = bh->b_page; bio->bi_io_vec[0].bv_len = bh->b_size; bio->bi_io_vec[0].bv_offset = bh_offset(bh); bio->bi_vcnt = 1; bio->bi_idx = 0; bio->bi_size = bh->b_size; bio->bi_end_io = end_bio_bh_io_sync; bio->bi_private = bh; bio_get(bio); submit_bio(rw, bio); if 
(bio_flagged(bio, BIO_EOPNOTSUPP)) ret = -EOPNOTSUPP; bio_put(bio); return ret; } EXPORT_SYMBOL(submit_bh); /** * ll_rw_block: low-level access to block devices (DEPRECATED) * @rw: whether to %READ or %WRITE or maybe %READA (readahead) * @nr: number of &struct buffer_heads in the array * @bhs: array of pointers to &struct buffer_head * * ll_rw_block() takes an array of pointers to &struct buffer_heads, and * requests an I/O operation on them, either a %READ or a %WRITE. The third * %READA option is described in the documentation for generic_make_request() * which ll_rw_block() calls. * * This function drops any buffer that it cannot get a lock on (with the * BH_Lock state bit), any buffer that appears to be clean when doing a write * request, and any buffer that appears to be up-to-date when doing read * request. Further it marks as clean buffers that are processed for * writing (the buffer cache won't assume that they are actually clean * until the buffer gets unlocked). * * ll_rw_block sets b_end_io to simple completion handler that marks * the buffer up-to-date (if approriate), unlocks the buffer and wakes * any waiters. * * All of the buffers must be for the same device, and must also be a * multiple of the current approved size for the device. 
*/ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[]) { int i; for (i = 0; i < nr; i++) { struct buffer_head *bh = bhs[i]; if (!trylock_buffer(bh)) continue; if (rw == WRITE) { if (test_clear_buffer_dirty(bh)) { bh->b_end_io = end_buffer_write_sync; get_bh(bh); submit_bh(WRITE, bh); continue; } } else { if (!buffer_uptodate(bh)) { bh->b_end_io = end_buffer_read_sync; get_bh(bh); submit_bh(rw, bh); continue; } } unlock_buffer(bh); } } EXPORT_SYMBOL(ll_rw_block); void write_dirty_buffer(struct buffer_head *bh, int rw) { lock_buffer(bh); if (!test_clear_buffer_dirty(bh)) { unlock_buffer(bh); return; } bh->b_end_io = end_buffer_write_sync; get_bh(bh); submit_bh(rw, bh); } EXPORT_SYMBOL(write_dirty_buffer); /* * For a data-integrity writeout, we need to wait upon any in-progress I/O * and then start new I/O and then wait upon it. The caller must have a ref on * the buffer_head. */ int __sync_dirty_buffer(struct buffer_head *bh, int rw) { int ret = 0; WARN_ON(atomic_read(&bh->b_count) < 1); lock_buffer(bh); if (test_clear_buffer_dirty(bh)) { get_bh(bh); bh->b_end_io = end_buffer_write_sync; ret = submit_bh(rw, bh); wait_on_buffer(bh); if (!ret && !buffer_uptodate(bh)) ret = -EIO; } else { unlock_buffer(bh); } return ret; } EXPORT_SYMBOL(__sync_dirty_buffer); int sync_dirty_buffer(struct buffer_head *bh) { return __sync_dirty_buffer(bh, WRITE_SYNC); } EXPORT_SYMBOL(sync_dirty_buffer); /* * try_to_free_buffers() checks if all the buffers on this particular page * are unused, and releases them if so. * * Exclusion against try_to_free_buffers may be obtained by either * locking the page or by holding its mapping's private_lock. * * If the page is dirty but all the buffers are clean then we need to * be sure to mark the page clean as well. This is because the page * may be against a block device, and a later reattachment of buffers * to a dirty page will set *all* buffers dirty. Which would corrupt * filesystem data on the same device. 
* * The same applies to regular filesystem pages: if all the buffers are * clean then we set the page clean and proceed. To do that, we require * total exclusion from __set_page_dirty_buffers(). That is obtained with * private_lock. * * try_to_free_buffers() is non-blocking. */ static inline int buffer_busy(struct buffer_head *bh) { return atomic_read(&bh->b_count) | (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); } static int drop_buffers(struct page *page, struct buffer_head **buffers_to_free) { struct buffer_head *head = page_buffers(page); struct buffer_head *bh; bh = head; do { if (buffer_write_io_error(bh) && page->mapping) set_bit(AS_EIO, &page->mapping->flags); if (buffer_busy(bh)) { /* * Check if the busy failure was due to an * outstanding LRU reference */ evict_bh_lrus(bh); if (buffer_busy(bh)) goto failed; } bh = bh->b_this_page; } while (bh != head); do { struct buffer_head *next = bh->b_this_page; if (bh->b_assoc_map) __remove_assoc_queue(bh); bh = next; } while (bh != head); *buffers_to_free = head; __clear_page_buffers(page); return 1; failed: return 0; } int try_to_free_buffers(struct page *page) { struct address_space * const mapping = page->mapping; struct buffer_head *buffers_to_free = NULL; int ret = 0; BUG_ON(!PageLocked(page)); if (PageWriteback(page)) return 0; if (mapping == NULL) { /* can this still happen? */ ret = drop_buffers(page, &buffers_to_free); goto out; } spin_lock(&mapping->private_lock); ret = drop_buffers(page, &buffers_to_free); /* * If the filesystem writes its buffers by hand (eg ext3) * then we can have clean buffers against a dirty page. We * clean the page here; otherwise the VM will never notice * that the filesystem did any IO at all. * * Also, during truncate, discard_buffer will have marked all * the page's buffers clean. We discover that here and clean * the page also. 
* * private_lock must be held over this entire operation in order * to synchronise against __set_page_dirty_buffers and prevent the * dirty bit from being lost. */ if (ret) cancel_dirty_page(page, PAGE_CACHE_SIZE); spin_unlock(&mapping->private_lock); out: if (buffers_to_free) { struct buffer_head *bh = buffers_to_free; do { struct buffer_head *next = bh->b_this_page; free_buffer_head(bh); bh = next; } while (bh != buffers_to_free); } return ret; } EXPORT_SYMBOL(try_to_free_buffers); /* * There are no bdflush tunables left. But distributions are * still running obsolete flush daemons, so we terminate them here. * * Use of bdflush() is deprecated and will be removed in a future kernel. * The `flush-X' kernel threads fully replace bdflush daemons and this call. */ SYSCALL_DEFINE2(bdflush, int, func, long, data) { static int msg_count; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (msg_count < 5) { msg_count++; printk(KERN_INFO "warning: process `%s' used the obsolete bdflush" " system call\n", current->comm); printk(KERN_INFO "Fix your initscripts?\n"); } if (func == 1) do_exit(0); return 0; } /* * Buffer-head allocation */ static struct kmem_cache *bh_cachep; /* * Once the number of bh's in the machine exceeds this level, we start * stripping them in writeback. 
*/ static int max_buffer_heads; int buffer_heads_over_limit; struct bh_accounting { int nr; /* Number of live bh's */ int ratelimit; /* Limit cacheline bouncing */ }; static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0}; static void recalc_bh_state(void) { int i; int tot = 0; if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096) return; __this_cpu_write(bh_accounting.ratelimit, 0); for_each_online_cpu(i) tot += per_cpu(bh_accounting, i).nr; buffer_heads_over_limit = (tot > max_buffer_heads); } struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) { struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); if (ret) { INIT_LIST_HEAD(&ret->b_assoc_buffers); preempt_disable(); __this_cpu_inc(bh_accounting.nr); recalc_bh_state(); preempt_enable(); } return ret; } EXPORT_SYMBOL(alloc_buffer_head); void free_buffer_head(struct buffer_head *bh) { BUG_ON(!list_empty(&bh->b_assoc_buffers)); kmem_cache_free(bh_cachep, bh); preempt_disable(); __this_cpu_dec(bh_accounting.nr); recalc_bh_state(); preempt_enable(); } EXPORT_SYMBOL(free_buffer_head); static void buffer_exit_cpu(int cpu) { int i; struct bh_lru *b = &per_cpu(bh_lrus, cpu); for (i = 0; i < BH_LRU_SIZE; i++) { brelse(b->bhs[i]); b->bhs[i] = NULL; } this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr); per_cpu(bh_accounting, cpu).nr = 0; } static int buffer_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) buffer_exit_cpu((unsigned long)hcpu); return NOTIFY_OK; } /** * bh_uptodate_or_lock - Test whether the buffer is uptodate * @bh: struct buffer_head * * Return true if the buffer is up-to-date and false, * with the buffer locked, if not. 
*/ int bh_uptodate_or_lock(struct buffer_head *bh) { if (!buffer_uptodate(bh)) { lock_buffer(bh); if (!buffer_uptodate(bh)) return 0; unlock_buffer(bh); } return 1; } EXPORT_SYMBOL(bh_uptodate_or_lock); /** * bh_submit_read - Submit a locked buffer for reading * @bh: struct buffer_head * * Returns zero on success and -EIO on error. */ int bh_submit_read(struct buffer_head *bh) { BUG_ON(!buffer_locked(bh)); if (buffer_uptodate(bh)) { unlock_buffer(bh); return 0; } get_bh(bh); bh->b_end_io = end_buffer_read_sync; submit_bh(READ, bh); wait_on_buffer(bh); if (buffer_uptodate(bh)) return 0; return -EIO; } EXPORT_SYMBOL(bh_submit_read); void __init buffer_init(void) { int nrpages; bh_cachep = kmem_cache_create("buffer_head", sizeof(struct buffer_head), 0, (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC| SLAB_MEM_SPREAD), NULL); /* * Limit the bh occupancy to 10% of ZONE_NORMAL */ nrpages = (nr_free_buffer_pages() * 10) / 100; max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head)); hotcpu_notifier(buffer_cpu_notify, 0); }
gpl-2.0
Snuzzo/ermahgerd_kernel_vigor
fs/omfs/inode.c
3220
13606
/* * Optimized MPEG FS - inode and super operations. * Copyright (C) 2006 Bob Copeland <me@bobcopeland.com> * Released under GPL v2. */ #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/vfs.h> #include <linux/parser.h> #include <linux/buffer_head.h> #include <linux/vmalloc.h> #include <linux/writeback.h> #include <linux/crc-itu-t.h> #include "omfs.h" MODULE_AUTHOR("Bob Copeland <me@bobcopeland.com>"); MODULE_DESCRIPTION("OMFS (ReplayTV/Karma) Filesystem for Linux"); MODULE_LICENSE("GPL"); struct buffer_head *omfs_bread(struct super_block *sb, sector_t block) { struct omfs_sb_info *sbi = OMFS_SB(sb); if (block >= sbi->s_num_blocks) return NULL; return sb_bread(sb, clus_to_blk(sbi, block)); } struct inode *omfs_new_inode(struct inode *dir, int mode) { struct inode *inode; u64 new_block; int err; int len; struct omfs_sb_info *sbi = OMFS_SB(dir->i_sb); inode = new_inode(dir->i_sb); if (!inode) return ERR_PTR(-ENOMEM); err = omfs_allocate_range(dir->i_sb, sbi->s_mirrors, sbi->s_mirrors, &new_block, &len); if (err) goto fail; inode->i_ino = new_block; inode_init_owner(inode, NULL, mode); inode->i_mapping->a_ops = &omfs_aops; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; switch (mode & S_IFMT) { case S_IFDIR: inode->i_op = &omfs_dir_inops; inode->i_fop = &omfs_dir_operations; inode->i_size = sbi->s_sys_blocksize; inc_nlink(inode); break; case S_IFREG: inode->i_op = &omfs_file_inops; inode->i_fop = &omfs_file_operations; inode->i_size = 0; break; } insert_inode_hash(inode); mark_inode_dirty(inode); return inode; fail: make_bad_inode(inode); iput(inode); return ERR_PTR(err); } /* * Update the header checksums for a dirty inode based on its contents. * Caller is expected to hold the buffer head underlying oi and mark it * dirty. 
*/ static void omfs_update_checksums(struct omfs_inode *oi) { int xor, i, ofs = 0, count; u16 crc = 0; unsigned char *ptr = (unsigned char *) oi; count = be32_to_cpu(oi->i_head.h_body_size); ofs = sizeof(struct omfs_header); crc = crc_itu_t(crc, ptr + ofs, count); oi->i_head.h_crc = cpu_to_be16(crc); xor = ptr[0]; for (i = 1; i < OMFS_XOR_COUNT; i++) xor ^= ptr[i]; oi->i_head.h_check_xor = xor; } static int __omfs_write_inode(struct inode *inode, int wait) { struct omfs_inode *oi; struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb); struct buffer_head *bh, *bh2; u64 ctime; int i; int ret = -EIO; int sync_failed = 0; /* get current inode since we may have written sibling ptrs etc. */ bh = omfs_bread(inode->i_sb, inode->i_ino); if (!bh) goto out; oi = (struct omfs_inode *) bh->b_data; oi->i_head.h_self = cpu_to_be64(inode->i_ino); if (S_ISDIR(inode->i_mode)) oi->i_type = OMFS_DIR; else if (S_ISREG(inode->i_mode)) oi->i_type = OMFS_FILE; else { printk(KERN_WARNING "omfs: unknown file type: %d\n", inode->i_mode); goto out_brelse; } oi->i_head.h_body_size = cpu_to_be32(sbi->s_sys_blocksize - sizeof(struct omfs_header)); oi->i_head.h_version = 1; oi->i_head.h_type = OMFS_INODE_NORMAL; oi->i_head.h_magic = OMFS_IMAGIC; oi->i_size = cpu_to_be64(inode->i_size); ctime = inode->i_ctime.tv_sec * 1000LL + ((inode->i_ctime.tv_nsec + 999)/1000); oi->i_ctime = cpu_to_be64(ctime); omfs_update_checksums(oi); mark_buffer_dirty(bh); if (wait) { sync_dirty_buffer(bh); if (buffer_req(bh) && !buffer_uptodate(bh)) sync_failed = 1; } /* if mirroring writes, copy to next fsblock */ for (i = 1; i < sbi->s_mirrors; i++) { bh2 = omfs_bread(inode->i_sb, inode->i_ino + i); if (!bh2) goto out_brelse; memcpy(bh2->b_data, bh->b_data, bh->b_size); mark_buffer_dirty(bh2); if (wait) { sync_dirty_buffer(bh2); if (buffer_req(bh2) && !buffer_uptodate(bh2)) sync_failed = 1; } brelse(bh2); } ret = (sync_failed) ? 
-EIO : 0; out_brelse: brelse(bh); out: return ret; } static int omfs_write_inode(struct inode *inode, struct writeback_control *wbc) { return __omfs_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL); } int omfs_sync_inode(struct inode *inode) { return __omfs_write_inode(inode, 1); } /* * called when an entry is deleted, need to clear the bits in the * bitmaps. */ static void omfs_evict_inode(struct inode *inode) { truncate_inode_pages(&inode->i_data, 0); end_writeback(inode); if (inode->i_nlink) return; if (S_ISREG(inode->i_mode)) { inode->i_size = 0; omfs_shrink_inode(inode); } omfs_clear_range(inode->i_sb, inode->i_ino, 2); } struct inode *omfs_iget(struct super_block *sb, ino_t ino) { struct omfs_sb_info *sbi = OMFS_SB(sb); struct omfs_inode *oi; struct buffer_head *bh; u64 ctime; unsigned long nsecs; struct inode *inode; inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; bh = omfs_bread(inode->i_sb, ino); if (!bh) goto iget_failed; oi = (struct omfs_inode *)bh->b_data; /* check self */ if (ino != be64_to_cpu(oi->i_head.h_self)) goto fail_bh; inode->i_uid = sbi->s_uid; inode->i_gid = sbi->s_gid; ctime = be64_to_cpu(oi->i_ctime); nsecs = do_div(ctime, 1000) * 1000L; inode->i_atime.tv_sec = ctime; inode->i_mtime.tv_sec = ctime; inode->i_ctime.tv_sec = ctime; inode->i_atime.tv_nsec = nsecs; inode->i_mtime.tv_nsec = nsecs; inode->i_ctime.tv_nsec = nsecs; inode->i_mapping->a_ops = &omfs_aops; switch (oi->i_type) { case OMFS_DIR: inode->i_mode = S_IFDIR | (S_IRWXUGO & ~sbi->s_dmask); inode->i_op = &omfs_dir_inops; inode->i_fop = &omfs_dir_operations; inode->i_size = sbi->s_sys_blocksize; inc_nlink(inode); break; case OMFS_FILE: inode->i_mode = S_IFREG | (S_IRWXUGO & ~sbi->s_fmask); inode->i_fop = &omfs_file_operations; inode->i_size = be64_to_cpu(oi->i_size); break; } brelse(bh); unlock_new_inode(inode); return inode; fail_bh: brelse(bh); iget_failed: iget_failed(inode); return ERR_PTR(-EIO); } static void 
omfs_put_super(struct super_block *sb) { struct omfs_sb_info *sbi = OMFS_SB(sb); kfree(sbi->s_imap); kfree(sbi); sb->s_fs_info = NULL; } static int omfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *s = dentry->d_sb; struct omfs_sb_info *sbi = OMFS_SB(s); u64 id = huge_encode_dev(s->s_bdev->bd_dev); buf->f_type = OMFS_MAGIC; buf->f_bsize = sbi->s_blocksize; buf->f_blocks = sbi->s_num_blocks; buf->f_files = sbi->s_num_blocks; buf->f_namelen = OMFS_NAMELEN; buf->f_fsid.val[0] = (u32)id; buf->f_fsid.val[1] = (u32)(id >> 32); buf->f_bfree = buf->f_bavail = buf->f_ffree = omfs_count_free(s); return 0; } static const struct super_operations omfs_sops = { .write_inode = omfs_write_inode, .evict_inode = omfs_evict_inode, .put_super = omfs_put_super, .statfs = omfs_statfs, .show_options = generic_show_options, }; /* * For Rio Karma, there is an on-disk free bitmap whose location is * stored in the root block. For ReplayTV, there is no such free bitmap * so we have to walk the tree. Both inodes and file data are allocated * from the same map. This array can be big (300k) so we allocate * in units of the blocksize. 
*/ static int omfs_get_imap(struct super_block *sb) { int bitmap_size; int array_size; int count; struct omfs_sb_info *sbi = OMFS_SB(sb); struct buffer_head *bh; unsigned long **ptr; sector_t block; bitmap_size = DIV_ROUND_UP(sbi->s_num_blocks, 8); array_size = DIV_ROUND_UP(bitmap_size, sb->s_blocksize); if (sbi->s_bitmap_ino == ~0ULL) goto out; sbi->s_imap_size = array_size; sbi->s_imap = kzalloc(array_size * sizeof(unsigned long *), GFP_KERNEL); if (!sbi->s_imap) goto nomem; block = clus_to_blk(sbi, sbi->s_bitmap_ino); if (block >= sbi->s_num_blocks) goto nomem; ptr = sbi->s_imap; for (count = bitmap_size; count > 0; count -= sb->s_blocksize) { bh = sb_bread(sb, block++); if (!bh) goto nomem_free; *ptr = kmalloc(sb->s_blocksize, GFP_KERNEL); if (!*ptr) { brelse(bh); goto nomem_free; } memcpy(*ptr, bh->b_data, sb->s_blocksize); if (count < sb->s_blocksize) memset((void *)*ptr + count, 0xff, sb->s_blocksize - count); brelse(bh); ptr++; } out: return 0; nomem_free: for (count = 0; count < array_size; count++) kfree(sbi->s_imap[count]); kfree(sbi->s_imap); nomem: sbi->s_imap = NULL; sbi->s_imap_size = 0; return -ENOMEM; } enum { Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask }; static const match_table_t tokens = { {Opt_uid, "uid=%u"}, {Opt_gid, "gid=%u"}, {Opt_umask, "umask=%o"}, {Opt_dmask, "dmask=%o"}, {Opt_fmask, "fmask=%o"}, }; static int parse_options(char *options, struct omfs_sb_info *sbi) { char *p; substring_t args[MAX_OPT_ARGS]; int option; if (!options) return 1; while ((p = strsep(&options, ",")) != NULL) { int token; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_uid: if (match_int(&args[0], &option)) return 0; sbi->s_uid = option; break; case Opt_gid: if (match_int(&args[0], &option)) return 0; sbi->s_gid = option; break; case Opt_umask: if (match_octal(&args[0], &option)) return 0; sbi->s_fmask = sbi->s_dmask = option; break; case Opt_dmask: if (match_octal(&args[0], &option)) return 0; sbi->s_dmask = option; 
break; case Opt_fmask: if (match_octal(&args[0], &option)) return 0; sbi->s_fmask = option; break; default: return 0; } } return 1; } static int omfs_fill_super(struct super_block *sb, void *data, int silent) { struct buffer_head *bh, *bh2; struct omfs_super_block *omfs_sb; struct omfs_root_block *omfs_rb; struct omfs_sb_info *sbi; struct inode *root; int ret = -EINVAL; save_mount_options(sb, (char *) data); sbi = kzalloc(sizeof(struct omfs_sb_info), GFP_KERNEL); if (!sbi) return -ENOMEM; sb->s_fs_info = sbi; sbi->s_uid = current_uid(); sbi->s_gid = current_gid(); sbi->s_dmask = sbi->s_fmask = current_umask(); if (!parse_options((char *) data, sbi)) goto end; sb->s_maxbytes = 0xffffffff; sb_set_blocksize(sb, 0x200); bh = sb_bread(sb, 0); if (!bh) goto end; omfs_sb = (struct omfs_super_block *)bh->b_data; if (omfs_sb->s_magic != cpu_to_be32(OMFS_MAGIC)) { if (!silent) printk(KERN_ERR "omfs: Invalid superblock (%x)\n", omfs_sb->s_magic); goto out_brelse_bh; } sb->s_magic = OMFS_MAGIC; sbi->s_num_blocks = be64_to_cpu(omfs_sb->s_num_blocks); sbi->s_blocksize = be32_to_cpu(omfs_sb->s_blocksize); sbi->s_mirrors = be32_to_cpu(omfs_sb->s_mirrors); sbi->s_root_ino = be64_to_cpu(omfs_sb->s_root_block); sbi->s_sys_blocksize = be32_to_cpu(omfs_sb->s_sys_blocksize); mutex_init(&sbi->s_bitmap_lock); if (sbi->s_sys_blocksize > PAGE_SIZE) { printk(KERN_ERR "omfs: sysblock size (%d) is out of range\n", sbi->s_sys_blocksize); goto out_brelse_bh; } if (sbi->s_blocksize < sbi->s_sys_blocksize || sbi->s_blocksize > OMFS_MAX_BLOCK_SIZE) { printk(KERN_ERR "omfs: block size (%d) is out of range\n", sbi->s_blocksize); goto out_brelse_bh; } /* * Use sys_blocksize as the fs block since it is smaller than a * page while the fs blocksize can be larger. */ sb_set_blocksize(sb, sbi->s_sys_blocksize); /* * ...and the difference goes into a shift. sys_blocksize is always * a power of two factor of blocksize. 
*/ sbi->s_block_shift = get_bitmask_order(sbi->s_blocksize) - get_bitmask_order(sbi->s_sys_blocksize); bh2 = omfs_bread(sb, be64_to_cpu(omfs_sb->s_root_block)); if (!bh2) goto out_brelse_bh; omfs_rb = (struct omfs_root_block *)bh2->b_data; sbi->s_bitmap_ino = be64_to_cpu(omfs_rb->r_bitmap); sbi->s_clustersize = be32_to_cpu(omfs_rb->r_clustersize); if (sbi->s_num_blocks != be64_to_cpu(omfs_rb->r_num_blocks)) { printk(KERN_ERR "omfs: block count discrepancy between " "super and root blocks (%llx, %llx)\n", (unsigned long long)sbi->s_num_blocks, (unsigned long long)be64_to_cpu(omfs_rb->r_num_blocks)); goto out_brelse_bh2; } if (sbi->s_bitmap_ino != ~0ULL && sbi->s_bitmap_ino > sbi->s_num_blocks) { printk(KERN_ERR "omfs: free space bitmap location is corrupt " "(%llx, total blocks %llx)\n", (unsigned long long) sbi->s_bitmap_ino, (unsigned long long) sbi->s_num_blocks); goto out_brelse_bh2; } if (sbi->s_clustersize < 1 || sbi->s_clustersize > OMFS_MAX_CLUSTER_SIZE) { printk(KERN_ERR "omfs: cluster size out of range (%d)", sbi->s_clustersize); goto out_brelse_bh2; } ret = omfs_get_imap(sb); if (ret) goto out_brelse_bh2; sb->s_op = &omfs_sops; root = omfs_iget(sb, be64_to_cpu(omfs_rb->r_root_dir)); if (IS_ERR(root)) { ret = PTR_ERR(root); goto out_brelse_bh2; } sb->s_root = d_alloc_root(root); if (!sb->s_root) { iput(root); goto out_brelse_bh2; } printk(KERN_DEBUG "omfs: Mounted volume %s\n", omfs_rb->r_name); ret = 0; out_brelse_bh2: brelse(bh2); out_brelse_bh: brelse(bh); end: if (ret) kfree(sbi); return ret; } static struct dentry *omfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, omfs_fill_super); } static struct file_system_type omfs_fs_type = { .owner = THIS_MODULE, .name = "omfs", .mount = omfs_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; static int __init init_omfs_fs(void) { return register_filesystem(&omfs_fs_type); } static void __exit 
exit_omfs_fs(void) { unregister_filesystem(&omfs_fs_type); } module_init(init_omfs_fs); module_exit(exit_omfs_fs);
gpl-2.0
pinpong/enigma-i9100
drivers/usb/wusbcore/security.c
3988
16445
/* * Wireless USB Host Controller * Security support: encryption enablement, etc * * Copyright (C) 2006 Intel Corporation * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * FIXME: docs */ #include <linux/types.h> #include <linux/slab.h> #include <linux/usb/ch9.h> #include <linux/random.h> #include "wusbhc.h" static void wusbhc_set_gtk_callback(struct urb *urb); static void wusbhc_gtk_rekey_done_work(struct work_struct *work); int wusbhc_sec_create(struct wusbhc *wusbhc) { wusbhc->gtk.descr.bLength = sizeof(wusbhc->gtk.descr) + sizeof(wusbhc->gtk.data); wusbhc->gtk.descr.bDescriptorType = USB_DT_KEY; wusbhc->gtk.descr.bReserved = 0; wusbhc->gtk_index = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK, WUSB_KEY_INDEX_ORIGINATOR_HOST); INIT_WORK(&wusbhc->gtk_rekey_done_work, wusbhc_gtk_rekey_done_work); return 0; } /* Called when the HC is destroyed */ void wusbhc_sec_destroy(struct wusbhc *wusbhc) { } /** * wusbhc_next_tkid - generate a new, currently unused, TKID * @wusbhc: the WUSB host controller * @wusb_dev: the device whose PTK the TKID is for * (or NULL for a TKID for a GTK) * * The generated TKID consist of two parts: the device's authenicated * address (or 0 or a GTK); and an incrementing number. 
This ensures * that TKIDs cannot be shared between devices and by the time the * incrementing number wraps around the older TKIDs will no longer be * in use (a maximum of two keys may be active at any one time). */ static u32 wusbhc_next_tkid(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) { u32 *tkid; u32 addr; if (wusb_dev == NULL) { tkid = &wusbhc->gtk_tkid; addr = 0; } else { tkid = &wusb_port_by_idx(wusbhc, wusb_dev->port_idx)->ptk_tkid; addr = wusb_dev->addr & 0x7f; } *tkid = (addr << 8) | ((*tkid + 1) & 0xff); return *tkid; } static void wusbhc_generate_gtk(struct wusbhc *wusbhc) { const size_t key_size = sizeof(wusbhc->gtk.data); u32 tkid; tkid = wusbhc_next_tkid(wusbhc, NULL); wusbhc->gtk.descr.tTKID[0] = (tkid >> 0) & 0xff; wusbhc->gtk.descr.tTKID[1] = (tkid >> 8) & 0xff; wusbhc->gtk.descr.tTKID[2] = (tkid >> 16) & 0xff; get_random_bytes(wusbhc->gtk.descr.bKeyData, key_size); } /** * wusbhc_sec_start - start the security management process * @wusbhc: the WUSB host controller * * Generate and set an initial GTK on the host controller. * * Called when the HC is started. */ int wusbhc_sec_start(struct wusbhc *wusbhc) { const size_t key_size = sizeof(wusbhc->gtk.data); int result; wusbhc_generate_gtk(wusbhc); result = wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size); if (result < 0) dev_err(wusbhc->dev, "cannot set GTK for the host: %d\n", result); return result; } /** * wusbhc_sec_stop - stop the security management process * @wusbhc: the WUSB host controller * * Wait for any pending GTK rekeys to stop. 
*/ void wusbhc_sec_stop(struct wusbhc *wusbhc) { cancel_work_sync(&wusbhc->gtk_rekey_done_work); } /** @returns encryption type name */ const char *wusb_et_name(u8 x) { switch (x) { case USB_ENC_TYPE_UNSECURE: return "unsecure"; case USB_ENC_TYPE_WIRED: return "wired"; case USB_ENC_TYPE_CCM_1: return "CCM-1"; case USB_ENC_TYPE_RSA_1: return "RSA-1"; default: return "unknown"; } } EXPORT_SYMBOL_GPL(wusb_et_name); /* * Set the device encryption method * * We tell the device which encryption method to use; we do this when * setting up the device's security. */ static int wusb_dev_set_encryption(struct usb_device *usb_dev, int value) { int result; struct device *dev = &usb_dev->dev; struct wusb_dev *wusb_dev = usb_dev->wusb_dev; if (value) { value = wusb_dev->ccm1_etd.bEncryptionValue; } else { /* FIXME: should be wusb_dev->etd[UNSECURE].bEncryptionValue */ value = 0; } /* Set device's */ result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), USB_REQ_SET_ENCRYPTION, USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, value, 0, NULL, 0, 1000 /* FIXME: arbitrary */); if (result < 0) dev_err(dev, "Can't set device's WUSB encryption to " "%s (value %d): %d\n", wusb_et_name(wusb_dev->ccm1_etd.bEncryptionType), wusb_dev->ccm1_etd.bEncryptionValue, result); return result; } /* * Set the GTK to be used by a device. * * The device must be authenticated. 
*/ static int wusb_dev_set_gtk(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) { struct usb_device *usb_dev = wusb_dev->usb_dev; return usb_control_msg( usb_dev, usb_sndctrlpipe(usb_dev, 0), USB_REQ_SET_DESCRIPTOR, USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, USB_DT_KEY << 8 | wusbhc->gtk_index, 0, &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength, 1000); } /* FIXME: prototype for adding security */ int wusb_dev_sec_add(struct wusbhc *wusbhc, struct usb_device *usb_dev, struct wusb_dev *wusb_dev) { int result, bytes, secd_size; struct device *dev = &usb_dev->dev; struct usb_security_descriptor *secd; const struct usb_encryption_descriptor *etd, *ccm1_etd = NULL; const void *itr, *top; char buf[64]; secd = kmalloc(sizeof(*secd), GFP_KERNEL); if (secd == NULL) { result = -ENOMEM; goto out; } result = usb_get_descriptor(usb_dev, USB_DT_SECURITY, 0, secd, sizeof(*secd)); if (result < sizeof(*secd)) { dev_err(dev, "Can't read security descriptor or " "not enough data: %d\n", result); goto out; } secd_size = le16_to_cpu(secd->wTotalLength); secd = krealloc(secd, secd_size, GFP_KERNEL); if (secd == NULL) { dev_err(dev, "Can't allocate space for security descriptors\n"); goto out; } result = usb_get_descriptor(usb_dev, USB_DT_SECURITY, 0, secd, secd_size); if (result < secd_size) { dev_err(dev, "Can't read security descriptor or " "not enough data: %d\n", result); goto out; } bytes = 0; itr = &secd[1]; top = (void *)secd + result; while (itr < top) { etd = itr; if (top - itr < sizeof(*etd)) { dev_err(dev, "BUG: bad device security descriptor; " "not enough data (%zu vs %zu bytes left)\n", top - itr, sizeof(*etd)); break; } if (etd->bLength < sizeof(*etd)) { dev_err(dev, "BUG: bad device encryption descriptor; " "descriptor is too short " "(%u vs %zu needed)\n", etd->bLength, sizeof(*etd)); break; } itr += etd->bLength; bytes += snprintf(buf + bytes, sizeof(buf) - bytes, "%s (0x%02x/%02x) ", wusb_et_name(etd->bEncryptionType), etd->bEncryptionValue, 
etd->bAuthKeyIndex); if (etd->bEncryptionType == USB_ENC_TYPE_CCM_1) ccm1_etd = etd; } /* This code only supports CCM1 as of now. */ /* FIXME: user has to choose which sec mode to use? * In theory we want CCM */ if (ccm1_etd == NULL) { dev_err(dev, "WUSB device doesn't support CCM1 encryption, " "can't use!\n"); result = -EINVAL; goto out; } wusb_dev->ccm1_etd = *ccm1_etd; dev_dbg(dev, "supported encryption: %s; using %s (0x%02x/%02x)\n", buf, wusb_et_name(ccm1_etd->bEncryptionType), ccm1_etd->bEncryptionValue, ccm1_etd->bAuthKeyIndex); result = 0; out: kfree(secd); return result; } void wusb_dev_sec_rm(struct wusb_dev *wusb_dev) { /* Nothing so far */ } /** * Update the address of an unauthenticated WUSB device * * Once we have successfully authenticated, we take it to addr0 state * and then to a normal address. * * Before the device's address (as known by it) was usb_dev->devnum | * 0x80 (unauthenticated address). With this we update it to usb_dev->devnum. */ int wusb_dev_update_address(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) { int result = -ENOMEM; struct usb_device *usb_dev = wusb_dev->usb_dev; struct device *dev = &usb_dev->dev; u8 new_address = wusb_dev->addr & 0x7F; /* Set address 0 */ result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), USB_REQ_SET_ADDRESS, 0, 0, 0, NULL, 0, 1000 /* FIXME: arbitrary */); if (result < 0) { dev_err(dev, "auth failed: can't set address 0: %d\n", result); goto error_addr0; } result = wusb_set_dev_addr(wusbhc, wusb_dev, 0); if (result < 0) goto error_addr0; usb_set_device_state(usb_dev, USB_STATE_DEFAULT); usb_ep0_reinit(usb_dev); /* Set new (authenticated) address. 
*/ result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), USB_REQ_SET_ADDRESS, 0, new_address, 0, NULL, 0, 1000 /* FIXME: arbitrary */); if (result < 0) { dev_err(dev, "auth failed: can't set address %u: %d\n", new_address, result); goto error_addr; } result = wusb_set_dev_addr(wusbhc, wusb_dev, new_address); if (result < 0) goto error_addr; usb_set_device_state(usb_dev, USB_STATE_ADDRESS); usb_ep0_reinit(usb_dev); usb_dev->authenticated = 1; error_addr: error_addr0: return result; } /* * * */ /* FIXME: split and cleanup */ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev, struct wusb_ckhdid *ck) { int result = -ENOMEM; struct usb_device *usb_dev = wusb_dev->usb_dev; struct device *dev = &usb_dev->dev; u32 tkid; __le32 tkid_le; struct usb_handshake *hs; struct aes_ccm_nonce ccm_n; u8 mic[8]; struct wusb_keydvt_in keydvt_in; struct wusb_keydvt_out keydvt_out; hs = kzalloc(3*sizeof(hs[0]), GFP_KERNEL); if (hs == NULL) { dev_err(dev, "can't allocate handshake data\n"); goto error_kzalloc; } /* We need to turn encryption before beginning the 4way * hshake (WUSB1.0[.3.2.2]) */ result = wusb_dev_set_encryption(usb_dev, 1); if (result < 0) goto error_dev_set_encryption; tkid = wusbhc_next_tkid(wusbhc, wusb_dev); tkid_le = cpu_to_le32(tkid); hs[0].bMessageNumber = 1; hs[0].bStatus = 0; memcpy(hs[0].tTKID, &tkid_le, sizeof(hs[0].tTKID)); hs[0].bReserved = 0; memcpy(hs[0].CDID, &wusb_dev->cdid, sizeof(hs[0].CDID)); get_random_bytes(&hs[0].nonce, sizeof(hs[0].nonce)); memset(hs[0].MIC, 0, sizeof(hs[0].MIC)); /* Per WUSB1.0[T7-22] */ result = usb_control_msg( usb_dev, usb_sndctrlpipe(usb_dev, 0), USB_REQ_SET_HANDSHAKE, USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, 1, 0, &hs[0], sizeof(hs[0]), 1000 /* FIXME: arbitrary */); if (result < 0) { dev_err(dev, "Handshake1: request failed: %d\n", result); goto error_hs1; } /* Handshake 2, from the device -- need to verify fields */ result = usb_control_msg( usb_dev, usb_rcvctrlpipe(usb_dev, 0), 
USB_REQ_GET_HANDSHAKE, USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE, 2, 0, &hs[1], sizeof(hs[1]), 1000 /* FIXME: arbitrary */); if (result < 0) { dev_err(dev, "Handshake2: request failed: %d\n", result); goto error_hs2; } result = -EINVAL; if (hs[1].bMessageNumber != 2) { dev_err(dev, "Handshake2 failed: bad message number %u\n", hs[1].bMessageNumber); goto error_hs2; } if (hs[1].bStatus != 0) { dev_err(dev, "Handshake2 failed: bad status %u\n", hs[1].bStatus); goto error_hs2; } if (memcmp(hs[0].tTKID, hs[1].tTKID, sizeof(hs[0].tTKID))) { dev_err(dev, "Handshake2 failed: TKID mismatch " "(#1 0x%02x%02x%02x vs #2 0x%02x%02x%02x)\n", hs[0].tTKID[0], hs[0].tTKID[1], hs[0].tTKID[2], hs[1].tTKID[0], hs[1].tTKID[1], hs[1].tTKID[2]); goto error_hs2; } if (memcmp(hs[0].CDID, hs[1].CDID, sizeof(hs[0].CDID))) { dev_err(dev, "Handshake2 failed: CDID mismatch\n"); goto error_hs2; } /* Setup the CCM nonce */ memset(&ccm_n.sfn, 0, sizeof(ccm_n.sfn)); /* Per WUSB1.0[6.5.2] */ memcpy(ccm_n.tkid, &tkid_le, sizeof(ccm_n.tkid)); ccm_n.src_addr = wusbhc->uwb_rc->uwb_dev.dev_addr; ccm_n.dest_addr.data[0] = wusb_dev->addr; ccm_n.dest_addr.data[1] = 0; /* Derive the KCK and PTK from CK, the CCM, H and D nonces */ memcpy(keydvt_in.hnonce, hs[0].nonce, sizeof(keydvt_in.hnonce)); memcpy(keydvt_in.dnonce, hs[1].nonce, sizeof(keydvt_in.dnonce)); result = wusb_key_derive(&keydvt_out, ck->data, &ccm_n, &keydvt_in); if (result < 0) { dev_err(dev, "Handshake2 failed: cannot derive keys: %d\n", result); goto error_hs2; } /* Compute MIC and verify it */ result = wusb_oob_mic(mic, keydvt_out.kck, &ccm_n, &hs[1]); if (result < 0) { dev_err(dev, "Handshake2 failed: cannot compute MIC: %d\n", result); goto error_hs2; } if (memcmp(hs[1].MIC, mic, sizeof(hs[1].MIC))) { dev_err(dev, "Handshake2 failed: MIC mismatch\n"); goto error_hs2; } /* Send Handshake3 */ hs[2].bMessageNumber = 3; hs[2].bStatus = 0; memcpy(hs[2].tTKID, &tkid_le, sizeof(hs[2].tTKID)); hs[2].bReserved = 0; memcpy(hs[2].CDID, 
&wusb_dev->cdid, sizeof(hs[2].CDID)); memcpy(hs[2].nonce, hs[0].nonce, sizeof(hs[2].nonce)); result = wusb_oob_mic(hs[2].MIC, keydvt_out.kck, &ccm_n, &hs[2]); if (result < 0) { dev_err(dev, "Handshake3 failed: cannot compute MIC: %d\n", result); goto error_hs2; } result = usb_control_msg( usb_dev, usb_sndctrlpipe(usb_dev, 0), USB_REQ_SET_HANDSHAKE, USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, 3, 0, &hs[2], sizeof(hs[2]), 1000 /* FIXME: arbitrary */); if (result < 0) { dev_err(dev, "Handshake3: request failed: %d\n", result); goto error_hs3; } result = wusbhc->set_ptk(wusbhc, wusb_dev->port_idx, tkid, keydvt_out.ptk, sizeof(keydvt_out.ptk)); if (result < 0) goto error_wusbhc_set_ptk; result = wusb_dev_set_gtk(wusbhc, wusb_dev); if (result < 0) { dev_err(dev, "Set GTK for device: request failed: %d\n", result); goto error_wusbhc_set_gtk; } /* Update the device's address from unauth to auth */ if (usb_dev->authenticated == 0) { result = wusb_dev_update_address(wusbhc, wusb_dev); if (result < 0) goto error_dev_update_address; } result = 0; dev_info(dev, "device authenticated\n"); error_dev_update_address: error_wusbhc_set_gtk: error_wusbhc_set_ptk: error_hs3: error_hs2: error_hs1: memset(hs, 0, 3*sizeof(hs[0])); memset(&keydvt_out, 0, sizeof(keydvt_out)); memset(&keydvt_in, 0, sizeof(keydvt_in)); memset(&ccm_n, 0, sizeof(ccm_n)); memset(mic, 0, sizeof(mic)); if (result < 0) wusb_dev_set_encryption(usb_dev, 0); error_dev_set_encryption: kfree(hs); error_kzalloc: return result; } /* * Once all connected and authenticated devices have received the new * GTK, switch the host to using it. 
*/ static void wusbhc_gtk_rekey_done_work(struct work_struct *work) { struct wusbhc *wusbhc = container_of(work, struct wusbhc, gtk_rekey_done_work); size_t key_size = sizeof(wusbhc->gtk.data); mutex_lock(&wusbhc->mutex); if (--wusbhc->pending_set_gtks == 0) wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size); mutex_unlock(&wusbhc->mutex); } static void wusbhc_set_gtk_callback(struct urb *urb) { struct wusbhc *wusbhc = urb->context; queue_work(wusbd, &wusbhc->gtk_rekey_done_work); } /** * wusbhc_gtk_rekey - generate and distribute a new GTK * @wusbhc: the WUSB host controller * * Generate a new GTK and distribute it to all connected and * authenticated devices. When all devices have the new GTK, the host * starts using it. * * This must be called after every device disconnect (see [WUSB] * section 6.2.11.2). */ void wusbhc_gtk_rekey(struct wusbhc *wusbhc) { static const size_t key_size = sizeof(wusbhc->gtk.data); int p; wusbhc_generate_gtk(wusbhc); for (p = 0; p < wusbhc->ports_max; p++) { struct wusb_dev *wusb_dev; wusb_dev = wusbhc->port[p].wusb_dev; if (!wusb_dev || !wusb_dev->usb_dev || !wusb_dev->usb_dev->authenticated) continue; usb_fill_control_urb(wusb_dev->set_gtk_urb, wusb_dev->usb_dev, usb_sndctrlpipe(wusb_dev->usb_dev, 0), (void *)wusb_dev->set_gtk_req, &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength, wusbhc_set_gtk_callback, wusbhc); if (usb_submit_urb(wusb_dev->set_gtk_urb, GFP_KERNEL) == 0) wusbhc->pending_set_gtks++; } if (wusbhc->pending_set_gtks == 0) wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size); }
gpl-2.0
navsdev/kernel-nk1-negalite-lt02ltespr
drivers/platform/x86/acer-wmi.c
4244
50094
/* * Acer WMI Laptop Extras * * Copyright (C) 2007-2009 Carlos Corbacho <carlos@strangeworlds.co.uk> * * Based on acer_acpi: * Copyright (C) 2005-2007 E.M. Smith * Copyright (C) 2007-2008 Carlos Corbacho <cathectic@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/dmi.h> #include <linux/fb.h> #include <linux/backlight.h> #include <linux/leds.h> #include <linux/platform_device.h> #include <linux/acpi.h> #include <linux/i8042.h> #include <linux/rfkill.h> #include <linux/workqueue.h> #include <linux/debugfs.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/input/sparse-keymap.h> #include <acpi/acpi_drivers.h> #include <acpi/video.h> MODULE_AUTHOR("Carlos Corbacho"); MODULE_DESCRIPTION("Acer Laptop WMI Extras Driver"); MODULE_LICENSE("GPL"); /* * Magic Number * Meaning is unknown - this number is required for writing to ACPI for AMW0 * (it's also used in acerhk when directly accessing the BIOS) */ #define ACER_AMW0_WRITE 0x9610 /* * Bit masks for the AMW0 interface */ #define ACER_AMW0_WIRELESS_MASK 0x35 #define ACER_AMW0_BLUETOOTH_MASK 0x34 #define ACER_AMW0_MAILLED_MASK 0x31 /* * Method IDs for WMID interface */ #define 
ACER_WMID_GET_WIRELESS_METHODID 1 #define ACER_WMID_GET_BLUETOOTH_METHODID 2 #define ACER_WMID_GET_BRIGHTNESS_METHODID 3 #define ACER_WMID_SET_WIRELESS_METHODID 4 #define ACER_WMID_SET_BLUETOOTH_METHODID 5 #define ACER_WMID_SET_BRIGHTNESS_METHODID 6 #define ACER_WMID_GET_THREEG_METHODID 10 #define ACER_WMID_SET_THREEG_METHODID 11 /* * Acer ACPI method GUIDs */ #define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB" #define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C" #define WMID_GUID1 "6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3" #define WMID_GUID2 "95764E09-FB56-4E83-B31A-37761F60994A" #define WMID_GUID3 "61EF69EA-865C-4BC3-A502-A0DEBA0CB531" /* * Acer ACPI event GUIDs */ #define ACERWMID_EVENT_GUID "676AA15E-6A47-4D9F-A2CC-1E6D18D14026" MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB"); MODULE_ALIAS("wmi:6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3"); MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026"); enum acer_wmi_event_ids { WMID_HOTKEY_EVENT = 0x1, }; static const struct key_entry acer_wmi_keymap[] = { {KE_KEY, 0x01, {KEY_WLAN} }, /* WiFi */ {KE_KEY, 0x03, {KEY_WLAN} }, /* WiFi */ {KE_KEY, 0x04, {KEY_WLAN} }, /* WiFi */ {KE_KEY, 0x12, {KEY_BLUETOOTH} }, /* BT */ {KE_KEY, 0x21, {KEY_PROG1} }, /* Backup */ {KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */ {KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */ {KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */ {KE_KEY, 0x29, {KEY_PROG3} }, /* P_Key for TM8372 */ {KE_IGNORE, 0x41, {KEY_MUTE} }, {KE_IGNORE, 0x42, {KEY_PREVIOUSSONG} }, {KE_IGNORE, 0x4d, {KEY_PREVIOUSSONG} }, {KE_IGNORE, 0x43, {KEY_NEXTSONG} }, {KE_IGNORE, 0x4e, {KEY_NEXTSONG} }, {KE_IGNORE, 0x44, {KEY_PLAYPAUSE} }, {KE_IGNORE, 0x4f, {KEY_PLAYPAUSE} }, {KE_IGNORE, 0x45, {KEY_STOP} }, {KE_IGNORE, 0x50, {KEY_STOP} }, {KE_IGNORE, 0x48, {KEY_VOLUMEUP} }, {KE_IGNORE, 0x49, {KEY_VOLUMEDOWN} }, {KE_IGNORE, 0x4a, {KEY_VOLUMEDOWN} }, {KE_IGNORE, 0x61, {KEY_SWITCHVIDEOMODE} }, {KE_IGNORE, 0x62, {KEY_BRIGHTNESSUP} }, {KE_IGNORE, 0x63, {KEY_BRIGHTNESSDOWN} }, {KE_KEY, 
0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */ {KE_IGNORE, 0x81, {KEY_SLEEP} }, {KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} }, /* Touch Pad On/Off */ {KE_IGNORE, 0x83, {KEY_TOUCHPAD_TOGGLE} }, {KE_END, 0} }; static struct input_dev *acer_wmi_input_dev; struct event_return_value { u8 function; u8 key_num; u16 device_state; u32 reserved; } __attribute__((packed)); /* * GUID3 Get Device Status device flags */ #define ACER_WMID3_GDS_WIRELESS (1<<0) /* WiFi */ #define ACER_WMID3_GDS_THREEG (1<<6) /* 3G */ #define ACER_WMID3_GDS_WIMAX (1<<7) /* WiMAX */ #define ACER_WMID3_GDS_BLUETOOTH (1<<11) /* BT */ struct lm_input_params { u8 function_num; /* Function Number */ u16 commun_devices; /* Communication type devices default status */ u16 devices; /* Other type devices default status */ u8 lm_status; /* Launch Manager Status */ u16 reserved; } __attribute__((packed)); struct lm_return_value { u8 error_code; /* Error Code */ u8 ec_return_value; /* EC Return Value */ u16 reserved; } __attribute__((packed)); struct wmid3_gds_set_input_param { /* Set Device Status input parameter */ u8 function_num; /* Function Number */ u8 hotkey_number; /* Hotkey Number */ u16 devices; /* Set Device */ u8 volume_value; /* Volume Value */ } __attribute__((packed)); struct wmid3_gds_get_input_param { /* Get Device Status input parameter */ u8 function_num; /* Function Number */ u8 hotkey_number; /* Hotkey Number */ u16 devices; /* Get Device */ } __attribute__((packed)); struct wmid3_gds_return_value { /* Get Device Status return value*/ u8 error_code; /* Error Code */ u8 ec_return_value; /* EC Return Value */ u16 devices; /* Current Device Status */ u32 reserved; } __attribute__((packed)); struct hotkey_function_type_aa { u8 type; u8 length; u16 handle; u16 commun_func_bitmap; u16 application_func_bitmap; u16 media_func_bitmap; u16 display_func_bitmap; u16 others_func_bitmap; u8 commun_fn_key_number; } __attribute__((packed)); /* * Interface capability flags */ #define ACER_CAP_MAILLED (1<<0) 
#define ACER_CAP_WIRELESS (1<<1) #define ACER_CAP_BLUETOOTH (1<<2) #define ACER_CAP_BRIGHTNESS (1<<3) #define ACER_CAP_THREEG (1<<4) #define ACER_CAP_ANY (0xFFFFFFFF) /* * Interface type flags */ enum interface_flags { ACER_AMW0, ACER_AMW0_V2, ACER_WMID, ACER_WMID_v2, }; #define ACER_DEFAULT_WIRELESS 0 #define ACER_DEFAULT_BLUETOOTH 0 #define ACER_DEFAULT_MAILLED 0 #define ACER_DEFAULT_THREEG 0 static int max_brightness = 0xF; static int mailled = -1; static int brightness = -1; static int threeg = -1; static int force_series; static bool ec_raw_mode; static bool has_type_aa; static u16 commun_func_bitmap; static u8 commun_fn_key_number; module_param(mailled, int, 0444); module_param(brightness, int, 0444); module_param(threeg, int, 0444); module_param(force_series, int, 0444); module_param(ec_raw_mode, bool, 0444); MODULE_PARM_DESC(mailled, "Set initial state of Mail LED"); MODULE_PARM_DESC(brightness, "Set initial LCD backlight brightness"); MODULE_PARM_DESC(threeg, "Set initial state of 3G hardware"); MODULE_PARM_DESC(force_series, "Force a different laptop series"); MODULE_PARM_DESC(ec_raw_mode, "Enable EC raw mode"); struct acer_data { int mailled; int threeg; int brightness; }; struct acer_debug { struct dentry *root; struct dentry *devices; u32 wmid_devices; }; static struct rfkill *wireless_rfkill; static struct rfkill *bluetooth_rfkill; static struct rfkill *threeg_rfkill; static bool rfkill_inited; /* Each low-level interface must define at least some of the following */ struct wmi_interface { /* The WMI device type */ u32 type; /* The capabilities this interface provides */ u32 capability; /* Private data for the current interface */ struct acer_data data; /* debugfs entries associated with this interface */ struct acer_debug debug; }; /* The static interface pointer, points to the currently detected interface */ static struct wmi_interface *interface; /* * Embedded Controller quirks * Some laptops require us to directly access the EC to either enable or 
query * features that are not available through WMI. */ struct quirk_entry { u8 wireless; u8 mailled; s8 brightness; u8 bluetooth; }; static struct quirk_entry *quirks; static void set_quirks(void) { if (!interface) return; if (quirks->mailled) interface->capability |= ACER_CAP_MAILLED; if (quirks->brightness) interface->capability |= ACER_CAP_BRIGHTNESS; } static int dmi_matched(const struct dmi_system_id *dmi) { quirks = dmi->driver_data; return 1; } static struct quirk_entry quirk_unknown = { }; static struct quirk_entry quirk_acer_aspire_1520 = { .brightness = -1, }; static struct quirk_entry quirk_acer_travelmate_2490 = { .mailled = 1, }; /* This AMW0 laptop has no bluetooth */ static struct quirk_entry quirk_medion_md_98300 = { .wireless = 1, }; static struct quirk_entry quirk_fujitsu_amilo_li_1718 = { .wireless = 2, }; static struct quirk_entry quirk_lenovo_ideapad_s205 = { .wireless = 3, }; /* The Aspire One has a dummy ACPI-WMI interface - disable it */ static struct dmi_system_id __devinitdata acer_blacklist[] = { { .ident = "Acer Aspire One (SSD)", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"), }, }, { .ident = "Acer Aspire One (HDD)", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"), }, }, {} }; static struct dmi_system_id acer_quirks[] = { { .callback = dmi_matched, .ident = "Acer Aspire 1360", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"), }, .driver_data = &quirk_acer_aspire_1520, }, { .callback = dmi_matched, .ident = "Acer Aspire 1520", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1520"), }, .driver_data = &quirk_acer_aspire_1520, }, { .callback = dmi_matched, .ident = "Acer Aspire 3100", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3100"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Acer Aspire 3610", 
.matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3610"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Acer Aspire 5100", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Acer Aspire 5610", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Acer Aspire 5630", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Acer Aspire 5650", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Acer Aspire 5680", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Acer Aspire 9110", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Acer TravelMate 2490", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Acer TravelMate 4200", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4200"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Fujitsu Siemens Amilo Li 1718", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Li 1718"), }, .driver_data = &quirk_fujitsu_amilo_li_1718, }, { .callback = dmi_matched, 
.ident = "Medion MD 98300", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "MEDION"), DMI_MATCH(DMI_PRODUCT_NAME, "WAM2030"), }, .driver_data = &quirk_medion_md_98300, }, { .callback = dmi_matched, .ident = "Lenovo Ideapad S205", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "10382LG"), }, .driver_data = &quirk_lenovo_ideapad_s205, }, { .callback = dmi_matched, .ident = "Lenovo Ideapad S205 (Brazos)", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "Brazos"), }, .driver_data = &quirk_lenovo_ideapad_s205, }, { .callback = dmi_matched, .ident = "Lenovo 3000 N200", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "0687A31"), }, .driver_data = &quirk_fujitsu_amilo_li_1718, }, {} }; static int video_set_backlight_video_vendor(const struct dmi_system_id *d) { interface->capability &= ~ACER_CAP_BRIGHTNESS; pr_info("Brightness must be controlled by generic video driver\n"); return 0; } static const struct dmi_system_id video_vendor_dmi_table[] = { { .callback = video_set_backlight_video_vendor, .ident = "Acer TravelMate 4750", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4750"), }, }, {} }; /* Find which quirks are needed for a particular vendor/ model pair */ static void find_quirks(void) { if (!force_series) { dmi_check_system(acer_quirks); } else if (force_series == 2490) { quirks = &quirk_acer_travelmate_2490; } if (quirks == NULL) quirks = &quirk_unknown; set_quirks(); } /* * General interface convenience methods */ static bool has_cap(u32 cap) { if ((interface->capability & cap) != 0) return 1; return 0; } /* * AMW0 (V1) interface */ struct wmab_args { u32 eax; u32 ebx; u32 ecx; u32 edx; }; struct wmab_ret { u32 eax; u32 ebx; u32 ecx; u32 edx; u32 eex; }; static acpi_status wmab_execute(struct wmab_args *regbuf, struct acpi_buffer *result) { struct acpi_buffer input; acpi_status status; input.length = sizeof(struct wmab_args); input.pointer 
= (u8 *)regbuf; status = wmi_evaluate_method(AMW0_GUID1, 1, 1, &input, result); return status; } static acpi_status AMW0_get_u32(u32 *value, u32 cap) { int err; u8 result; switch (cap) { case ACER_CAP_MAILLED: switch (quirks->mailled) { default: err = ec_read(0xA, &result); if (err) return AE_ERROR; *value = (result >> 7) & 0x1; return AE_OK; } break; case ACER_CAP_WIRELESS: switch (quirks->wireless) { case 1: err = ec_read(0x7B, &result); if (err) return AE_ERROR; *value = result & 0x1; return AE_OK; case 2: err = ec_read(0x71, &result); if (err) return AE_ERROR; *value = result & 0x1; return AE_OK; case 3: err = ec_read(0x78, &result); if (err) return AE_ERROR; *value = result & 0x1; return AE_OK; default: err = ec_read(0xA, &result); if (err) return AE_ERROR; *value = (result >> 2) & 0x1; return AE_OK; } break; case ACER_CAP_BLUETOOTH: switch (quirks->bluetooth) { default: err = ec_read(0xA, &result); if (err) return AE_ERROR; *value = (result >> 4) & 0x1; return AE_OK; } break; case ACER_CAP_BRIGHTNESS: switch (quirks->brightness) { default: err = ec_read(0x83, &result); if (err) return AE_ERROR; *value = result; return AE_OK; } break; default: return AE_ERROR; } return AE_OK; } static acpi_status AMW0_set_u32(u32 value, u32 cap) { struct wmab_args args; args.eax = ACER_AMW0_WRITE; args.ebx = value ? 
(1<<8) : 0; args.ecx = args.edx = 0; switch (cap) { case ACER_CAP_MAILLED: if (value > 1) return AE_BAD_PARAMETER; args.ebx |= ACER_AMW0_MAILLED_MASK; break; case ACER_CAP_WIRELESS: if (value > 1) return AE_BAD_PARAMETER; args.ebx |= ACER_AMW0_WIRELESS_MASK; break; case ACER_CAP_BLUETOOTH: if (value > 1) return AE_BAD_PARAMETER; args.ebx |= ACER_AMW0_BLUETOOTH_MASK; break; case ACER_CAP_BRIGHTNESS: if (value > max_brightness) return AE_BAD_PARAMETER; switch (quirks->brightness) { default: return ec_write(0x83, value); break; } default: return AE_ERROR; } /* Actually do the set */ return wmab_execute(&args, NULL); } static acpi_status AMW0_find_mailled(void) { struct wmab_args args; struct wmab_ret ret; acpi_status status = AE_OK; struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; args.eax = 0x86; args.ebx = args.ecx = args.edx = 0; status = wmab_execute(&args, &out); if (ACPI_FAILURE(status)) return status; obj = (union acpi_object *) out.pointer; if (obj && obj->type == ACPI_TYPE_BUFFER && obj->buffer.length == sizeof(struct wmab_ret)) { ret = *((struct wmab_ret *) obj->buffer.pointer); } else { kfree(out.pointer); return AE_ERROR; } if (ret.eex & 0x1) interface->capability |= ACER_CAP_MAILLED; kfree(out.pointer); return AE_OK; } static int AMW0_set_cap_acpi_check_device_found; static acpi_status AMW0_set_cap_acpi_check_device_cb(acpi_handle handle, u32 level, void *context, void **retval) { AMW0_set_cap_acpi_check_device_found = 1; return AE_OK; } static const struct acpi_device_id norfkill_ids[] = { { "VPC2004", 0}, { "IBM0068", 0}, { "LEN0068", 0}, { "SNY5001", 0}, /* sony-laptop in charge */ { "", 0}, }; static int AMW0_set_cap_acpi_check_device(void) { const struct acpi_device_id *id; for (id = norfkill_ids; id->id[0]; id++) acpi_get_devices(id->id, AMW0_set_cap_acpi_check_device_cb, NULL, NULL); return AMW0_set_cap_acpi_check_device_found; } static acpi_status AMW0_set_capabilities(void) { struct wmab_args args; struct wmab_ret 
ret; acpi_status status; struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; /* * On laptops with this strange GUID (non Acer), normal probing doesn't * work. */ if (wmi_has_guid(AMW0_GUID2)) { if ((quirks != &quirk_unknown) || !AMW0_set_cap_acpi_check_device()) interface->capability |= ACER_CAP_WIRELESS; return AE_OK; } args.eax = ACER_AMW0_WRITE; args.ecx = args.edx = 0; args.ebx = 0xa2 << 8; args.ebx |= ACER_AMW0_WIRELESS_MASK; status = wmab_execute(&args, &out); if (ACPI_FAILURE(status)) return status; obj = out.pointer; if (obj && obj->type == ACPI_TYPE_BUFFER && obj->buffer.length == sizeof(struct wmab_ret)) { ret = *((struct wmab_ret *) obj->buffer.pointer); } else { status = AE_ERROR; goto out; } if (ret.eax & 0x1) interface->capability |= ACER_CAP_WIRELESS; args.ebx = 2 << 8; args.ebx |= ACER_AMW0_BLUETOOTH_MASK; /* * It's ok to use existing buffer for next wmab_execute call. * But we need to kfree(out.pointer) if next wmab_execute fail. */ status = wmab_execute(&args, &out); if (ACPI_FAILURE(status)) goto out; obj = (union acpi_object *) out.pointer; if (obj && obj->type == ACPI_TYPE_BUFFER && obj->buffer.length == sizeof(struct wmab_ret)) { ret = *((struct wmab_ret *) obj->buffer.pointer); } else { status = AE_ERROR; goto out; } if (ret.eax & 0x1) interface->capability |= ACER_CAP_BLUETOOTH; /* * This appears to be safe to enable, since all Wistron based laptops * appear to use the same EC register for brightness, even if they * differ for wireless, etc */ if (quirks->brightness >= 0) interface->capability |= ACER_CAP_BRIGHTNESS; status = AE_OK; out: kfree(out.pointer); return status; } static struct wmi_interface AMW0_interface = { .type = ACER_AMW0, }; static struct wmi_interface AMW0_V2_interface = { .type = ACER_AMW0_V2, }; /* * New interface (The WMID interface) */ static acpi_status WMI_execute_u32(u32 method_id, u32 in, u32 *out) { struct acpi_buffer input = { (acpi_size) sizeof(u32), (void *)(&in) }; struct 
acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; u32 tmp; acpi_status status; status = wmi_evaluate_method(WMID_GUID1, 1, method_id, &input, &result); if (ACPI_FAILURE(status)) return status; obj = (union acpi_object *) result.pointer; if (obj && obj->type == ACPI_TYPE_BUFFER && (obj->buffer.length == sizeof(u32) || obj->buffer.length == sizeof(u64))) { tmp = *((u32 *) obj->buffer.pointer); } else if (obj->type == ACPI_TYPE_INTEGER) { tmp = (u32) obj->integer.value; } else { tmp = 0; } if (out) *out = tmp; kfree(result.pointer); return status; } static acpi_status WMID_get_u32(u32 *value, u32 cap) { acpi_status status; u8 tmp; u32 result, method_id = 0; switch (cap) { case ACER_CAP_WIRELESS: method_id = ACER_WMID_GET_WIRELESS_METHODID; break; case ACER_CAP_BLUETOOTH: method_id = ACER_WMID_GET_BLUETOOTH_METHODID; break; case ACER_CAP_BRIGHTNESS: method_id = ACER_WMID_GET_BRIGHTNESS_METHODID; break; case ACER_CAP_THREEG: method_id = ACER_WMID_GET_THREEG_METHODID; break; case ACER_CAP_MAILLED: if (quirks->mailled == 1) { ec_read(0x9f, &tmp); *value = tmp & 0x1; return 0; } default: return AE_ERROR; } status = WMI_execute_u32(method_id, 0, &result); if (ACPI_SUCCESS(status)) *value = (u8)result; return status; } static acpi_status WMID_set_u32(u32 value, u32 cap) { u32 method_id = 0; char param; switch (cap) { case ACER_CAP_BRIGHTNESS: if (value > max_brightness) return AE_BAD_PARAMETER; method_id = ACER_WMID_SET_BRIGHTNESS_METHODID; break; case ACER_CAP_WIRELESS: if (value > 1) return AE_BAD_PARAMETER; method_id = ACER_WMID_SET_WIRELESS_METHODID; break; case ACER_CAP_BLUETOOTH: if (value > 1) return AE_BAD_PARAMETER; method_id = ACER_WMID_SET_BLUETOOTH_METHODID; break; case ACER_CAP_THREEG: if (value > 1) return AE_BAD_PARAMETER; method_id = ACER_WMID_SET_THREEG_METHODID; break; case ACER_CAP_MAILLED: if (value > 1) return AE_BAD_PARAMETER; if (quirks->mailled == 1) { param = value ? 
0x92 : 0x93; i8042_lock_chip(); i8042_command(&param, 0x1059); i8042_unlock_chip(); return 0; } break; default: return AE_ERROR; } return WMI_execute_u32(method_id, (u32)value, NULL); } static acpi_status wmid3_get_device_status(u32 *value, u16 device) { struct wmid3_gds_return_value return_value; acpi_status status; union acpi_object *obj; struct wmid3_gds_get_input_param params = { .function_num = 0x1, .hotkey_number = commun_fn_key_number, .devices = device, }; struct acpi_buffer input = { sizeof(struct wmid3_gds_get_input_param), &params }; struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; status = wmi_evaluate_method(WMID_GUID3, 0, 0x2, &input, &output); if (ACPI_FAILURE(status)) return status; obj = output.pointer; if (!obj) return AE_ERROR; else if (obj->type != ACPI_TYPE_BUFFER) { kfree(obj); return AE_ERROR; } if (obj->buffer.length != 8) { pr_warn("Unknown buffer length %d\n", obj->buffer.length); kfree(obj); return AE_ERROR; } return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer); kfree(obj); if (return_value.error_code || return_value.ec_return_value) pr_warn("Get 0x%x Device Status failed: 0x%x - 0x%x\n", device, return_value.error_code, return_value.ec_return_value); else *value = !!(return_value.devices & device); return status; } static acpi_status wmid_v2_get_u32(u32 *value, u32 cap) { u16 device; switch (cap) { case ACER_CAP_WIRELESS: device = ACER_WMID3_GDS_WIRELESS; break; case ACER_CAP_BLUETOOTH: device = ACER_WMID3_GDS_BLUETOOTH; break; case ACER_CAP_THREEG: device = ACER_WMID3_GDS_THREEG; break; default: return AE_ERROR; } return wmid3_get_device_status(value, device); } static acpi_status wmid3_set_device_status(u32 value, u16 device) { struct wmid3_gds_return_value return_value; acpi_status status; union acpi_object *obj; u16 devices; struct wmid3_gds_get_input_param get_params = { .function_num = 0x1, .hotkey_number = commun_fn_key_number, .devices = commun_func_bitmap, }; struct acpi_buffer get_input = { 
sizeof(struct wmid3_gds_get_input_param), &get_params }; struct wmid3_gds_set_input_param set_params = { .function_num = 0x2, .hotkey_number = commun_fn_key_number, .devices = commun_func_bitmap, }; struct acpi_buffer set_input = { sizeof(struct wmid3_gds_set_input_param), &set_params }; struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_buffer output2 = { ACPI_ALLOCATE_BUFFER, NULL }; status = wmi_evaluate_method(WMID_GUID3, 0, 0x2, &get_input, &output); if (ACPI_FAILURE(status)) return status; obj = output.pointer; if (!obj) return AE_ERROR; else if (obj->type != ACPI_TYPE_BUFFER) { kfree(obj); return AE_ERROR; } if (obj->buffer.length != 8) { pr_warn("Unknown buffer length %d\n", obj->buffer.length); kfree(obj); return AE_ERROR; } return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer); kfree(obj); if (return_value.error_code || return_value.ec_return_value) { pr_warn("Get Current Device Status failed: 0x%x - 0x%x\n", return_value.error_code, return_value.ec_return_value); return status; } devices = return_value.devices; set_params.devices = (value) ? 
(devices | device) : (devices & ~device); status = wmi_evaluate_method(WMID_GUID3, 0, 0x1, &set_input, &output2); if (ACPI_FAILURE(status)) return status; obj = output2.pointer; if (!obj) return AE_ERROR; else if (obj->type != ACPI_TYPE_BUFFER) { kfree(obj); return AE_ERROR; } if (obj->buffer.length != 4) { pr_warn("Unknown buffer length %d\n", obj->buffer.length); kfree(obj); return AE_ERROR; } return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer); kfree(obj); if (return_value.error_code || return_value.ec_return_value) pr_warn("Set Device Status failed: 0x%x - 0x%x\n", return_value.error_code, return_value.ec_return_value); return status; } static acpi_status wmid_v2_set_u32(u32 value, u32 cap) { u16 device; switch (cap) { case ACER_CAP_WIRELESS: device = ACER_WMID3_GDS_WIRELESS; break; case ACER_CAP_BLUETOOTH: device = ACER_WMID3_GDS_BLUETOOTH; break; case ACER_CAP_THREEG: device = ACER_WMID3_GDS_THREEG; break; default: return AE_ERROR; } return wmid3_set_device_status(value, device); } static void type_aa_dmi_decode(const struct dmi_header *header, void *dummy) { struct hotkey_function_type_aa *type_aa; /* We are looking for OEM-specific Type AAh */ if (header->type != 0xAA) return; has_type_aa = true; type_aa = (struct hotkey_function_type_aa *) header; pr_info("Function bitmap for Communication Button: 0x%x\n", type_aa->commun_func_bitmap); commun_func_bitmap = type_aa->commun_func_bitmap; if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_WIRELESS) interface->capability |= ACER_CAP_WIRELESS; if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_THREEG) interface->capability |= ACER_CAP_THREEG; if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_BLUETOOTH) interface->capability |= ACER_CAP_BLUETOOTH; commun_fn_key_number = type_aa->commun_fn_key_number; } static acpi_status WMID_set_capabilities(void) { struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL}; union acpi_object *obj; acpi_status status; u32 devices; status = wmi_query_block(WMID_GUID2, 1, 
&out); if (ACPI_FAILURE(status)) return status; obj = (union acpi_object *) out.pointer; if (obj && obj->type == ACPI_TYPE_BUFFER && (obj->buffer.length == sizeof(u32) || obj->buffer.length == sizeof(u64))) { devices = *((u32 *) obj->buffer.pointer); } else if (obj->type == ACPI_TYPE_INTEGER) { devices = (u32) obj->integer.value; } else { kfree(out.pointer); return AE_ERROR; } pr_info("Function bitmap for Communication Device: 0x%x\n", devices); if (devices & 0x07) interface->capability |= ACER_CAP_WIRELESS; if (devices & 0x40) interface->capability |= ACER_CAP_THREEG; if (devices & 0x10) interface->capability |= ACER_CAP_BLUETOOTH; if (!(devices & 0x20)) max_brightness = 0x9; kfree(out.pointer); return status; } static struct wmi_interface wmid_interface = { .type = ACER_WMID, }; static struct wmi_interface wmid_v2_interface = { .type = ACER_WMID_v2, }; /* * Generic Device (interface-independent) */ static acpi_status get_u32(u32 *value, u32 cap) { acpi_status status = AE_ERROR; switch (interface->type) { case ACER_AMW0: status = AMW0_get_u32(value, cap); break; case ACER_AMW0_V2: if (cap == ACER_CAP_MAILLED) { status = AMW0_get_u32(value, cap); break; } case ACER_WMID: status = WMID_get_u32(value, cap); break; case ACER_WMID_v2: if (cap & (ACER_CAP_WIRELESS | ACER_CAP_BLUETOOTH | ACER_CAP_THREEG)) status = wmid_v2_get_u32(value, cap); else if (wmi_has_guid(WMID_GUID2)) status = WMID_get_u32(value, cap); break; } return status; } static acpi_status set_u32(u32 value, u32 cap) { acpi_status status; if (interface->capability & cap) { switch (interface->type) { case ACER_AMW0: return AMW0_set_u32(value, cap); case ACER_AMW0_V2: if (cap == ACER_CAP_MAILLED) return AMW0_set_u32(value, cap); /* * On some models, some WMID methods don't toggle * properly. For those cases, we want to run the AMW0 * method afterwards to be certain we've really toggled * the device state. 
*/ if (cap == ACER_CAP_WIRELESS || cap == ACER_CAP_BLUETOOTH) { status = WMID_set_u32(value, cap); if (ACPI_FAILURE(status)) return status; return AMW0_set_u32(value, cap); } case ACER_WMID: return WMID_set_u32(value, cap); case ACER_WMID_v2: if (cap & (ACER_CAP_WIRELESS | ACER_CAP_BLUETOOTH | ACER_CAP_THREEG)) return wmid_v2_set_u32(value, cap); else if (wmi_has_guid(WMID_GUID2)) return WMID_set_u32(value, cap); default: return AE_BAD_PARAMETER; } } return AE_BAD_PARAMETER; } static void __init acer_commandline_init(void) { /* * These will all fail silently if the value given is invalid, or the * capability isn't available on the given interface */ if (mailled >= 0) set_u32(mailled, ACER_CAP_MAILLED); if (!has_type_aa && threeg >= 0) set_u32(threeg, ACER_CAP_THREEG); if (brightness >= 0) set_u32(brightness, ACER_CAP_BRIGHTNESS); } /* * LED device (Mail LED only, no other LEDs known yet) */ static void mail_led_set(struct led_classdev *led_cdev, enum led_brightness value) { set_u32(value, ACER_CAP_MAILLED); } static struct led_classdev mail_led = { .name = "acer-wmi::mail", .brightness_set = mail_led_set, }; static int __devinit acer_led_init(struct device *dev) { return led_classdev_register(dev, &mail_led); } static void acer_led_exit(void) { set_u32(LED_OFF, ACER_CAP_MAILLED); led_classdev_unregister(&mail_led); } /* * Backlight device */ static struct backlight_device *acer_backlight_device; static int read_brightness(struct backlight_device *bd) { u32 value; get_u32(&value, ACER_CAP_BRIGHTNESS); return value; } static int update_bl_status(struct backlight_device *bd) { int intensity = bd->props.brightness; if (bd->props.power != FB_BLANK_UNBLANK) intensity = 0; if (bd->props.fb_blank != FB_BLANK_UNBLANK) intensity = 0; set_u32(intensity, ACER_CAP_BRIGHTNESS); return 0; } static const struct backlight_ops acer_bl_ops = { .get_brightness = read_brightness, .update_status = update_bl_status, }; static int __devinit acer_backlight_init(struct device *dev) { struct 
backlight_properties props; struct backlight_device *bd; memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_PLATFORM; props.max_brightness = max_brightness; bd = backlight_device_register("acer-wmi", dev, NULL, &acer_bl_ops, &props); if (IS_ERR(bd)) { pr_err("Could not register Acer backlight device\n"); acer_backlight_device = NULL; return PTR_ERR(bd); } acer_backlight_device = bd; bd->props.power = FB_BLANK_UNBLANK; bd->props.brightness = read_brightness(bd); backlight_update_status(bd); return 0; } static void acer_backlight_exit(void) { backlight_device_unregister(acer_backlight_device); } /* * Rfkill devices */ static void acer_rfkill_update(struct work_struct *ignored); static DECLARE_DELAYED_WORK(acer_rfkill_work, acer_rfkill_update); static void acer_rfkill_update(struct work_struct *ignored) { u32 state; acpi_status status; if (has_cap(ACER_CAP_WIRELESS)) { status = get_u32(&state, ACER_CAP_WIRELESS); if (ACPI_SUCCESS(status)) { if (quirks->wireless == 3) rfkill_set_hw_state(wireless_rfkill, !state); else rfkill_set_sw_state(wireless_rfkill, !state); } } if (has_cap(ACER_CAP_BLUETOOTH)) { status = get_u32(&state, ACER_CAP_BLUETOOTH); if (ACPI_SUCCESS(status)) rfkill_set_sw_state(bluetooth_rfkill, !state); } if (has_cap(ACER_CAP_THREEG) && wmi_has_guid(WMID_GUID3)) { status = get_u32(&state, ACER_WMID3_GDS_THREEG); if (ACPI_SUCCESS(status)) rfkill_set_sw_state(threeg_rfkill, !state); } schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ)); } static int acer_rfkill_set(void *data, bool blocked) { acpi_status status; u32 cap = (unsigned long)data; if (rfkill_inited) { status = set_u32(!blocked, cap); if (ACPI_FAILURE(status)) return -ENODEV; } return 0; } static const struct rfkill_ops acer_rfkill_ops = { .set_block = acer_rfkill_set, }; static struct rfkill *acer_rfkill_register(struct device *dev, enum rfkill_type type, char *name, u32 cap) { int err; struct rfkill *rfkill_dev; u32 state; acpi_status status; 
rfkill_dev = rfkill_alloc(name, dev, type, &acer_rfkill_ops, (void *)(unsigned long)cap); if (!rfkill_dev) return ERR_PTR(-ENOMEM); status = get_u32(&state, cap); err = rfkill_register(rfkill_dev); if (err) { rfkill_destroy(rfkill_dev); return ERR_PTR(err); } if (ACPI_SUCCESS(status)) rfkill_set_sw_state(rfkill_dev, !state); return rfkill_dev; } static int acer_rfkill_init(struct device *dev) { int err; if (has_cap(ACER_CAP_WIRELESS)) { wireless_rfkill = acer_rfkill_register(dev, RFKILL_TYPE_WLAN, "acer-wireless", ACER_CAP_WIRELESS); if (IS_ERR(wireless_rfkill)) { err = PTR_ERR(wireless_rfkill); goto error_wireless; } } if (has_cap(ACER_CAP_BLUETOOTH)) { bluetooth_rfkill = acer_rfkill_register(dev, RFKILL_TYPE_BLUETOOTH, "acer-bluetooth", ACER_CAP_BLUETOOTH); if (IS_ERR(bluetooth_rfkill)) { err = PTR_ERR(bluetooth_rfkill); goto error_bluetooth; } } if (has_cap(ACER_CAP_THREEG)) { threeg_rfkill = acer_rfkill_register(dev, RFKILL_TYPE_WWAN, "acer-threeg", ACER_CAP_THREEG); if (IS_ERR(threeg_rfkill)) { err = PTR_ERR(threeg_rfkill); goto error_threeg; } } rfkill_inited = true; if ((ec_raw_mode || !wmi_has_guid(ACERWMID_EVENT_GUID)) && has_cap(ACER_CAP_WIRELESS | ACER_CAP_BLUETOOTH | ACER_CAP_THREEG)) schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ)); return 0; error_threeg: if (has_cap(ACER_CAP_BLUETOOTH)) { rfkill_unregister(bluetooth_rfkill); rfkill_destroy(bluetooth_rfkill); } error_bluetooth: if (has_cap(ACER_CAP_WIRELESS)) { rfkill_unregister(wireless_rfkill); rfkill_destroy(wireless_rfkill); } error_wireless: return err; } static void acer_rfkill_exit(void) { if ((ec_raw_mode || !wmi_has_guid(ACERWMID_EVENT_GUID)) && has_cap(ACER_CAP_WIRELESS | ACER_CAP_BLUETOOTH | ACER_CAP_THREEG)) cancel_delayed_work_sync(&acer_rfkill_work); if (has_cap(ACER_CAP_WIRELESS)) { rfkill_unregister(wireless_rfkill); rfkill_destroy(wireless_rfkill); } if (has_cap(ACER_CAP_BLUETOOTH)) { rfkill_unregister(bluetooth_rfkill); rfkill_destroy(bluetooth_rfkill); } if 
(has_cap(ACER_CAP_THREEG)) { rfkill_unregister(threeg_rfkill); rfkill_destroy(threeg_rfkill); } return; } /* * sysfs interface */ static ssize_t show_bool_threeg(struct device *dev, struct device_attribute *attr, char *buf) { u32 result; \ acpi_status status; pr_info("This threeg sysfs will be removed in 2012 - used by: %s\n", current->comm); status = get_u32(&result, ACER_CAP_THREEG); if (ACPI_SUCCESS(status)) return sprintf(buf, "%u\n", result); return sprintf(buf, "Read error\n"); } static ssize_t set_bool_threeg(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { u32 tmp = simple_strtoul(buf, NULL, 10); acpi_status status = set_u32(tmp, ACER_CAP_THREEG); pr_info("This threeg sysfs will be removed in 2012 - used by: %s\n", current->comm); if (ACPI_FAILURE(status)) return -EINVAL; return count; } static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg, set_bool_threeg); static ssize_t show_interface(struct device *dev, struct device_attribute *attr, char *buf) { pr_info("This interface sysfs will be removed in 2012 - used by: %s\n", current->comm); switch (interface->type) { case ACER_AMW0: return sprintf(buf, "AMW0\n"); case ACER_AMW0_V2: return sprintf(buf, "AMW0 v2\n"); case ACER_WMID: return sprintf(buf, "WMID\n"); case ACER_WMID_v2: return sprintf(buf, "WMID v2\n"); default: return sprintf(buf, "Error!\n"); } } static DEVICE_ATTR(interface, S_IRUGO, show_interface, NULL); static void acer_wmi_notify(u32 value, void *context) { struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; struct event_return_value return_value; acpi_status status; u16 device_state; const struct key_entry *key; status = wmi_get_event_data(value, &response); if (status != AE_OK) { pr_warn("bad event status 0x%x\n", status); return; } obj = (union acpi_object *)response.pointer; if (!obj) return; if (obj->type != ACPI_TYPE_BUFFER) { pr_warn("Unknown response received %d\n", obj->type); kfree(obj); return; } if 
(obj->buffer.length != 8) { pr_warn("Unknown buffer length %d\n", obj->buffer.length); kfree(obj); return; } return_value = *((struct event_return_value *)obj->buffer.pointer); kfree(obj); switch (return_value.function) { case WMID_HOTKEY_EVENT: device_state = return_value.device_state; pr_debug("device state: 0x%x\n", device_state); key = sparse_keymap_entry_from_scancode(acer_wmi_input_dev, return_value.key_num); if (!key) { pr_warn("Unknown key number - 0x%x\n", return_value.key_num); } else { switch (key->keycode) { case KEY_WLAN: case KEY_BLUETOOTH: if (has_cap(ACER_CAP_WIRELESS)) rfkill_set_sw_state(wireless_rfkill, !(device_state & ACER_WMID3_GDS_WIRELESS)); if (has_cap(ACER_CAP_THREEG)) rfkill_set_sw_state(threeg_rfkill, !(device_state & ACER_WMID3_GDS_THREEG)); if (has_cap(ACER_CAP_BLUETOOTH)) rfkill_set_sw_state(bluetooth_rfkill, !(device_state & ACER_WMID3_GDS_BLUETOOTH)); break; } sparse_keymap_report_entry(acer_wmi_input_dev, key, 1, true); } break; default: pr_warn("Unknown function number - %d - %d\n", return_value.function, return_value.key_num); break; } } static acpi_status wmid3_set_lm_mode(struct lm_input_params *params, struct lm_return_value *return_value) { acpi_status status; union acpi_object *obj; struct acpi_buffer input = { sizeof(struct lm_input_params), params }; struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; status = wmi_evaluate_method(WMID_GUID3, 0, 0x1, &input, &output); if (ACPI_FAILURE(status)) return status; obj = output.pointer; if (!obj) return AE_ERROR; else if (obj->type != ACPI_TYPE_BUFFER) { kfree(obj); return AE_ERROR; } if (obj->buffer.length != 4) { pr_warn("Unknown buffer length %d\n", obj->buffer.length); kfree(obj); return AE_ERROR; } *return_value = *((struct lm_return_value *)obj->buffer.pointer); kfree(obj); return status; } static int acer_wmi_enable_ec_raw(void) { struct lm_return_value return_value; acpi_status status; struct lm_input_params params = { .function_num = 0x1, .commun_devices = 0xFFFF, 
.devices = 0xFFFF, .lm_status = 0x00, /* Launch Manager Deactive */ }; status = wmid3_set_lm_mode(&params, &return_value); if (return_value.error_code || return_value.ec_return_value) pr_warn("Enabling EC raw mode failed: 0x%x - 0x%x\n", return_value.error_code, return_value.ec_return_value); else pr_info("Enabled EC raw mode\n"); return status; } static int acer_wmi_enable_lm(void) { struct lm_return_value return_value; acpi_status status; struct lm_input_params params = { .function_num = 0x1, .commun_devices = 0xFFFF, .devices = 0xFFFF, .lm_status = 0x01, /* Launch Manager Active */ }; status = wmid3_set_lm_mode(&params, &return_value); if (return_value.error_code || return_value.ec_return_value) pr_warn("Enabling Launch Manager failed: 0x%x - 0x%x\n", return_value.error_code, return_value.ec_return_value); return status; } static int __init acer_wmi_input_setup(void) { acpi_status status; int err; acer_wmi_input_dev = input_allocate_device(); if (!acer_wmi_input_dev) return -ENOMEM; acer_wmi_input_dev->name = "Acer WMI hotkeys"; acer_wmi_input_dev->phys = "wmi/input0"; acer_wmi_input_dev->id.bustype = BUS_HOST; err = sparse_keymap_setup(acer_wmi_input_dev, acer_wmi_keymap, NULL); if (err) goto err_free_dev; status = wmi_install_notify_handler(ACERWMID_EVENT_GUID, acer_wmi_notify, NULL); if (ACPI_FAILURE(status)) { err = -EIO; goto err_free_keymap; } err = input_register_device(acer_wmi_input_dev); if (err) goto err_uninstall_notifier; return 0; err_uninstall_notifier: wmi_remove_notify_handler(ACERWMID_EVENT_GUID); err_free_keymap: sparse_keymap_free(acer_wmi_input_dev); err_free_dev: input_free_device(acer_wmi_input_dev); return err; } static void acer_wmi_input_destroy(void) { wmi_remove_notify_handler(ACERWMID_EVENT_GUID); sparse_keymap_free(acer_wmi_input_dev); input_unregister_device(acer_wmi_input_dev); } /* * debugfs functions */ static u32 get_wmid_devices(void) { struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL}; union acpi_object *obj; acpi_status 
status; u32 devices = 0; status = wmi_query_block(WMID_GUID2, 1, &out); if (ACPI_FAILURE(status)) return 0; obj = (union acpi_object *) out.pointer; if (obj && obj->type == ACPI_TYPE_BUFFER && (obj->buffer.length == sizeof(u32) || obj->buffer.length == sizeof(u64))) { devices = *((u32 *) obj->buffer.pointer); } else if (obj->type == ACPI_TYPE_INTEGER) { devices = (u32) obj->integer.value; } kfree(out.pointer); return devices; } /* * Platform device */ static int __devinit acer_platform_probe(struct platform_device *device) { int err; if (has_cap(ACER_CAP_MAILLED)) { err = acer_led_init(&device->dev); if (err) goto error_mailled; } if (has_cap(ACER_CAP_BRIGHTNESS)) { err = acer_backlight_init(&device->dev); if (err) goto error_brightness; } err = acer_rfkill_init(&device->dev); if (err) goto error_rfkill; return err; error_rfkill: if (has_cap(ACER_CAP_BRIGHTNESS)) acer_backlight_exit(); error_brightness: if (has_cap(ACER_CAP_MAILLED)) acer_led_exit(); error_mailled: return err; } static int acer_platform_remove(struct platform_device *device) { if (has_cap(ACER_CAP_MAILLED)) acer_led_exit(); if (has_cap(ACER_CAP_BRIGHTNESS)) acer_backlight_exit(); acer_rfkill_exit(); return 0; } static int acer_platform_suspend(struct platform_device *dev, pm_message_t state) { u32 value; struct acer_data *data = &interface->data; if (!data) return -ENOMEM; if (has_cap(ACER_CAP_MAILLED)) { get_u32(&value, ACER_CAP_MAILLED); set_u32(LED_OFF, ACER_CAP_MAILLED); data->mailled = value; } if (has_cap(ACER_CAP_BRIGHTNESS)) { get_u32(&value, ACER_CAP_BRIGHTNESS); data->brightness = value; } return 0; } static int acer_platform_resume(struct platform_device *device) { struct acer_data *data = &interface->data; if (!data) return -ENOMEM; if (has_cap(ACER_CAP_MAILLED)) set_u32(data->mailled, ACER_CAP_MAILLED); if (has_cap(ACER_CAP_BRIGHTNESS)) set_u32(data->brightness, ACER_CAP_BRIGHTNESS); return 0; } static void acer_platform_shutdown(struct platform_device *device) { struct acer_data *data 
= &interface->data; if (!data) return; if (has_cap(ACER_CAP_MAILLED)) set_u32(LED_OFF, ACER_CAP_MAILLED); } static struct platform_driver acer_platform_driver = { .driver = { .name = "acer-wmi", .owner = THIS_MODULE, }, .probe = acer_platform_probe, .remove = acer_platform_remove, .suspend = acer_platform_suspend, .resume = acer_platform_resume, .shutdown = acer_platform_shutdown, }; static struct platform_device *acer_platform_device; static int remove_sysfs(struct platform_device *device) { if (has_cap(ACER_CAP_THREEG)) device_remove_file(&device->dev, &dev_attr_threeg); device_remove_file(&device->dev, &dev_attr_interface); return 0; } static int create_sysfs(void) { int retval = -ENOMEM; if (has_cap(ACER_CAP_THREEG)) { retval = device_create_file(&acer_platform_device->dev, &dev_attr_threeg); if (retval) goto error_sysfs; } retval = device_create_file(&acer_platform_device->dev, &dev_attr_interface); if (retval) goto error_sysfs; return 0; error_sysfs: remove_sysfs(acer_platform_device); return retval; } static void remove_debugfs(void) { debugfs_remove(interface->debug.devices); debugfs_remove(interface->debug.root); } static int create_debugfs(void) { interface->debug.root = debugfs_create_dir("acer-wmi", NULL); if (!interface->debug.root) { pr_err("Failed to create debugfs directory"); return -ENOMEM; } interface->debug.devices = debugfs_create_u32("devices", S_IRUGO, interface->debug.root, &interface->debug.wmid_devices); if (!interface->debug.devices) goto error_debugfs; return 0; error_debugfs: remove_debugfs(); return -ENOMEM; } static int __init acer_wmi_init(void) { int err; pr_info("Acer Laptop ACPI-WMI Extras\n"); if (dmi_check_system(acer_blacklist)) { pr_info("Blacklisted hardware detected - not loading\n"); return -ENODEV; } find_quirks(); /* * Detect which ACPI-WMI interface we're using. 
*/ if (wmi_has_guid(AMW0_GUID1) && wmi_has_guid(WMID_GUID1)) interface = &AMW0_V2_interface; if (!wmi_has_guid(AMW0_GUID1) && wmi_has_guid(WMID_GUID1)) interface = &wmid_interface; if (wmi_has_guid(WMID_GUID3)) interface = &wmid_v2_interface; if (interface) dmi_walk(type_aa_dmi_decode, NULL); if (wmi_has_guid(WMID_GUID2) && interface) { if (!has_type_aa && ACPI_FAILURE(WMID_set_capabilities())) { pr_err("Unable to detect available WMID devices\n"); return -ENODEV; } /* WMID always provides brightness methods */ interface->capability |= ACER_CAP_BRIGHTNESS; } else if (!wmi_has_guid(WMID_GUID2) && interface && !has_type_aa) { pr_err("No WMID device detection method found\n"); return -ENODEV; } if (wmi_has_guid(AMW0_GUID1) && !wmi_has_guid(WMID_GUID1)) { interface = &AMW0_interface; if (ACPI_FAILURE(AMW0_set_capabilities())) { pr_err("Unable to detect available AMW0 devices\n"); return -ENODEV; } } if (wmi_has_guid(AMW0_GUID1)) AMW0_find_mailled(); if (!interface) { pr_err("No or unsupported WMI interface, unable to load\n"); return -ENODEV; } set_quirks(); if (acpi_video_backlight_support()) { if (dmi_check_system(video_vendor_dmi_table)) { acpi_video_unregister(); } else { interface->capability &= ~ACER_CAP_BRIGHTNESS; pr_info("Brightness must be controlled by " "acpi video driver\n"); } } if (wmi_has_guid(WMID_GUID3)) { if (ec_raw_mode) { if (ACPI_FAILURE(acer_wmi_enable_ec_raw())) { pr_err("Cannot enable EC raw mode\n"); return -ENODEV; } } else if (ACPI_FAILURE(acer_wmi_enable_lm())) { pr_err("Cannot enable Launch Manager mode\n"); return -ENODEV; } } else if (ec_raw_mode) { pr_info("No WMID EC raw mode enable method\n"); } if (wmi_has_guid(ACERWMID_EVENT_GUID)) { err = acer_wmi_input_setup(); if (err) return err; } err = platform_driver_register(&acer_platform_driver); if (err) { pr_err("Unable to register platform driver\n"); goto error_platform_register; } acer_platform_device = platform_device_alloc("acer-wmi", -1); if (!acer_platform_device) { err = -ENOMEM; 
goto error_device_alloc; } err = platform_device_add(acer_platform_device); if (err) goto error_device_add; err = create_sysfs(); if (err) goto error_create_sys; if (wmi_has_guid(WMID_GUID2)) { interface->debug.wmid_devices = get_wmid_devices(); err = create_debugfs(); if (err) goto error_create_debugfs; } /* Override any initial settings with values from the commandline */ acer_commandline_init(); return 0; error_create_debugfs: remove_sysfs(acer_platform_device); error_create_sys: platform_device_del(acer_platform_device); error_device_add: platform_device_put(acer_platform_device); error_device_alloc: platform_driver_unregister(&acer_platform_driver); error_platform_register: if (wmi_has_guid(ACERWMID_EVENT_GUID)) acer_wmi_input_destroy(); return err; } static void __exit acer_wmi_exit(void) { if (wmi_has_guid(ACERWMID_EVENT_GUID)) acer_wmi_input_destroy(); remove_sysfs(acer_platform_device); remove_debugfs(); platform_device_unregister(acer_platform_device); platform_driver_unregister(&acer_platform_driver); pr_info("Acer Laptop WMI Extras unloaded\n"); return; } module_init(acer_wmi_init); module_exit(acer_wmi_exit);
gpl-2.0
gpillusion/Samsung_Kernel_Source_SM-G710K
drivers/md/dm-snap.c
4244
54158
/* * dm-snapshot.c * * Copyright (C) 2001-2002 Sistina Software (UK) Limited. * * This file is released under the GPL. */ #include <linux/blkdev.h> #include <linux/device-mapper.h> #include <linux/delay.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/kdev_t.h> #include <linux/list.h> #include <linux/mempool.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/log2.h> #include <linux/dm-kcopyd.h> #include "dm-exception-store.h" #define DM_MSG_PREFIX "snapshots" static const char dm_snapshot_merge_target_name[] = "snapshot-merge"; #define dm_target_is_snapshot_merge(ti) \ ((ti)->type->name == dm_snapshot_merge_target_name) /* * The size of the mempool used to track chunks in use. */ #define MIN_IOS 256 #define DM_TRACKED_CHUNK_HASH_SIZE 16 #define DM_TRACKED_CHUNK_HASH(x) ((unsigned long)(x) & \ (DM_TRACKED_CHUNK_HASH_SIZE - 1)) struct dm_exception_table { uint32_t hash_mask; unsigned hash_shift; struct list_head *table; }; struct dm_snapshot { struct rw_semaphore lock; struct dm_dev *origin; struct dm_dev *cow; struct dm_target *ti; /* List of snapshots per Origin */ struct list_head list; /* * You can't use a snapshot if this is 0 (e.g. if full). * A snapshot-merge target never clears this. */ int valid; /* Origin writes don't trigger exceptions until this is set */ int active; atomic_t pending_exceptions_count; mempool_t *pending_pool; struct dm_exception_table pending; struct dm_exception_table complete; /* * pe_lock protects all pending_exception operations and access * as well as the snapshot_bios list. */ spinlock_t pe_lock; /* Chunks with outstanding reads */ spinlock_t tracked_chunk_lock; mempool_t *tracked_chunk_pool; struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE]; /* The on disk metadata handler */ struct dm_exception_store *store; struct dm_kcopyd_client *kcopyd_client; /* Wait for events based on state_bits */ unsigned long state_bits; /* Range of chunks currently being merged. 
*/ chunk_t first_merging_chunk; int num_merging_chunks; /* * The merge operation failed if this flag is set. * Failure modes are handled as follows: * - I/O error reading the header * => don't load the target; abort. * - Header does not have "valid" flag set * => use the origin; forget about the snapshot. * - I/O error when reading exceptions * => don't load the target; abort. * (We can't use the intermediate origin state.) * - I/O error while merging * => stop merging; set merge_failed; process I/O normally. */ int merge_failed; /* * Incoming bios that overlap with chunks being merged must wait * for them to be committed. */ struct bio_list bios_queued_during_merge; }; /* * state_bits: * RUNNING_MERGE - Merge operation is in progress. * SHUTDOWN_MERGE - Set to signal that merge needs to be stopped; * cleared afterwards. */ #define RUNNING_MERGE 0 #define SHUTDOWN_MERGE 1 struct dm_dev *dm_snap_origin(struct dm_snapshot *s) { return s->origin; } EXPORT_SYMBOL(dm_snap_origin); struct dm_dev *dm_snap_cow(struct dm_snapshot *s) { return s->cow; } EXPORT_SYMBOL(dm_snap_cow); static sector_t chunk_to_sector(struct dm_exception_store *store, chunk_t chunk) { return chunk << store->chunk_shift; } static int bdev_equal(struct block_device *lhs, struct block_device *rhs) { /* * There is only ever one instance of a particular block * device so we can compare pointers safely. */ return lhs == rhs; } struct dm_snap_pending_exception { struct dm_exception e; /* * Origin buffers waiting for this to complete are held * in a bio list */ struct bio_list origin_bios; struct bio_list snapshot_bios; /* Pointer back to snapshot context */ struct dm_snapshot *snap; /* * 1 indicates the exception has already been sent to * kcopyd. */ int started; /* * For writing a complete chunk, bypassing the copy. 
*/ struct bio *full_bio; bio_end_io_t *full_bio_end_io; void *full_bio_private; }; /* * Hash table mapping origin volumes to lists of snapshots and * a lock to protect it */ static struct kmem_cache *exception_cache; static struct kmem_cache *pending_cache; struct dm_snap_tracked_chunk { struct hlist_node node; chunk_t chunk; }; static struct kmem_cache *tracked_chunk_cache; static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s, chunk_t chunk) { struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool, GFP_NOIO); unsigned long flags; c->chunk = chunk; spin_lock_irqsave(&s->tracked_chunk_lock, flags); hlist_add_head(&c->node, &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]); spin_unlock_irqrestore(&s->tracked_chunk_lock, flags); return c; } static void stop_tracking_chunk(struct dm_snapshot *s, struct dm_snap_tracked_chunk *c) { unsigned long flags; spin_lock_irqsave(&s->tracked_chunk_lock, flags); hlist_del(&c->node); spin_unlock_irqrestore(&s->tracked_chunk_lock, flags); mempool_free(c, s->tracked_chunk_pool); } static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk) { struct dm_snap_tracked_chunk *c; struct hlist_node *hn; int found = 0; spin_lock_irq(&s->tracked_chunk_lock); hlist_for_each_entry(c, hn, &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) { if (c->chunk == chunk) { found = 1; break; } } spin_unlock_irq(&s->tracked_chunk_lock); return found; } /* * This conflicting I/O is extremely improbable in the caller, * so msleep(1) is sufficient and there is no need for a wait queue. */ static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk) { while (__chunk_is_tracked(s, chunk)) msleep(1); } /* * One of these per registered origin, held in the snapshot_origins hash */ struct origin { /* The origin device */ struct block_device *bdev; struct list_head hash_list; /* List of snapshots for this origin */ struct list_head snapshots; }; /* * Size of the hash table for origin volumes. 
If we make this * the size of the minors list then it should be nearly perfect */ #define ORIGIN_HASH_SIZE 256 #define ORIGIN_MASK 0xFF static struct list_head *_origins; static struct rw_semaphore _origins_lock; static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done); static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock); static uint64_t _pending_exceptions_done_count; static int init_origin_hash(void) { int i; _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), GFP_KERNEL); if (!_origins) { DMERR("unable to allocate memory"); return -ENOMEM; } for (i = 0; i < ORIGIN_HASH_SIZE; i++) INIT_LIST_HEAD(_origins + i); init_rwsem(&_origins_lock); return 0; } static void exit_origin_hash(void) { kfree(_origins); } static unsigned origin_hash(struct block_device *bdev) { return bdev->bd_dev & ORIGIN_MASK; } static struct origin *__lookup_origin(struct block_device *origin) { struct list_head *ol; struct origin *o; ol = &_origins[origin_hash(origin)]; list_for_each_entry (o, ol, hash_list) if (bdev_equal(o->bdev, origin)) return o; return NULL; } static void __insert_origin(struct origin *o) { struct list_head *sl = &_origins[origin_hash(o->bdev)]; list_add_tail(&o->hash_list, sl); } /* * _origins_lock must be held when calling this function. * Returns number of snapshots registered using the supplied cow device, plus: * snap_src - a snapshot suitable for use as a source of exception handover * snap_dest - a snapshot capable of receiving exception handover. * snap_merge - an existing snapshot-merge target linked to the same origin. * There can be at most one snapshot-merge target. The parameter is optional. * * Possible return values and states of snap_src and snap_dest. 
* 0: NULL, NULL - first new snapshot
 * 1: snap_src, NULL - normal snapshot
 * 2: snap_src, snap_dest - waiting for handover
 * 2: snap_src, NULL - handed over, waiting for old to be deleted
 * 1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
					struct dm_snapshot **snap_src,
					struct dm_snapshot **snap_dest,
					struct dm_snapshot **snap_merge)
{
	struct dm_snapshot *s;
	struct origin *o;
	int count = 0;
	int active;

	o = __lookup_origin(snap->origin->bdev);
	if (!o)
		goto out;

	list_for_each_entry(s, &o->snapshots, list) {
		/* Note any snapshot-merge target sharing this origin. */
		if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
			*snap_merge = s;
		/* Only snapshots on the same cow device count. */
		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
			continue;

		down_read(&s->lock);
		active = s->active;
		up_read(&s->lock);

		if (active) {
			if (snap_src)
				*snap_src = s;
		} else if (snap_dest)
			*snap_dest = s;

		count++;
	}

out:
	return count;
}

/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 * On failure, returns -EINVAL and sets snap->ti->error.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
	struct dm_snapshot *snap_merge = NULL;

	/* Does snapshot need exceptions handed over to it? */
	if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
					  &snap_merge) == 2) ||
	    snap_dest) {
		snap->ti->error = "Snapshot cow pairing for exception "
				  "table handover failed";
		return -EINVAL;
	}

	/*
	 * If no snap_src was found, snap cannot become a handover
	 * destination.
	 */
	if (!snap_src)
		return 0;

	/*
	 * Non-snapshot-merge handover?
	 */
	if (!dm_target_is_snapshot_merge(snap->ti))
		return 1;

	/*
	 * Do not allow more than one merging snapshot.
	 */
	if (snap_merge) {
		snap->ti->error = "A snapshot is already merging.";
		return -EINVAL;
	}

	/* Merging requires the store to support prepare/commit of merges. */
	if (!snap_src->store->type->prepare_merge ||
	    !snap_src->store->type->commit_merge) {
		snap->ti->error = "Snapshot exception store does not "
				  "support snapshot-merge.";
		return -EINVAL;
	}

	return 1;
}

/*
 * Insert @s into @o's snapshot list, keeping the list sorted by
 * chunk size.  Callers hold _origins_lock for write (see callers).
 */
static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
	struct dm_snapshot *l;

	/* Sort the list according to chunk size, largest-first smallest-last */
	list_for_each_entry(l, &o->snapshots, list)
		if (l->store->chunk_size < s->store->chunk_size)
			break;
	list_add_tail(&s->list, &l->list);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o = NULL;
	struct block_device *bdev = snap->origin->bdev;
	int r = 0;

	/* Preallocate before taking the lock in case a new origin is needed. */
	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);

	r = __validate_exception_handover(snap);
	if (r < 0) {
		kfree(new_o);
		goto out;
	}

	o = __lookup_origin(bdev);
	if (o)
		kfree(new_o);	/* existing origin: preallocation unused */
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	__insert_snapshot(o, snap);

out:
	up_write(&_origins_lock);
	return r;
}

/*
 * Move snapshot to correct place in list according to chunk size.
*/
static void reregister_snapshot(struct dm_snapshot *s)
{
	struct block_device *bdev = s->origin->bdev;

	down_write(&_origins_lock);

	list_del(&s->list);
	__insert_snapshot(__lookup_origin(bdev), s);

	up_write(&_origins_lock);
}

/*
 * Remove @s from its origin's list; free the origin itself if this
 * was its last snapshot.
 */
static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (o && list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int dm_exception_table_init(struct dm_exception_table *et,
				   uint32_t size, unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;	/* size must be a power of two */
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

/* Free every exception in @et (into slab cache @mem), then the table. */
static void dm_exception_table_exit(struct dm_exception_table *et,
				    struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

/* Bucket index for @chunk: low hash_shift bits dropped, then masked. */
static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}

static void dm_remove_exception(struct dm_exception *e)
{
	list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
						chunk_t chunk)
{
	struct list_head *slot;
	struct dm_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		/* An entry covers old_chunk .. old_chunk + consecutive count. */
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

/*
 * Allocate a completed exception; retries with GFP_ATOMIC as a last
 * resort if the GFP_NOIO allocation fails.  May return NULL.
 */
static struct dm_exception *alloc_completed_exception(void)
{
	struct dm_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_completed_exception(struct dm_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

/* Allocate a pending exception from @s's mempool and account for it. */
static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
	/* Order the mempool_free before the count decrement (dtr relies on it). */
	smp_mb__before_atomic_dec();
	atomic_dec(&s->pending_exceptions_count);
}

/*
 * Insert @new_e into table @eh, coalescing it into an adjacent
 * consecutive-chunk group where possible (in which case @new_e is freed).
 */
static void dm_insert_exception(struct dm_exception_table *eh,
				struct dm_exception *new_e)
{
	struct list_head *l;
	struct dm_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_completed_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_completed_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_snapshot *s = context;
	struct dm_exception *e;

	e = alloc_completed_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	dm_insert_exception(&s->complete, e);

	return 0;
}

/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static sector_t __minimum_chunk_size(struct origin *o)
{
	struct dm_snapshot *snap;
	unsigned chunk_size = 0;

	if (o)
		list_for_each_entry(snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);

	return chunk_size;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	if (hash_size < 64)
		hash_size = 64;
	hash_size = rounddown_pow_of_two(hash_size);
	if (dm_exception_table_init(&s->complete, hash_size,
				    DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (dm_exception_table_init(&s->pending, hash_size, 0)) {
		dm_exception_table_exit(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}

/* Clear RUNNING_MERGE and wake anyone waiting on it (see stop_merge()). */
static void merge_shutdown(struct dm_snapshot *s)
{
	clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
	smp_mb__after_clear_bit();
	wake_up_bit(&s->state_bits, RUNNING_MERGE);
}

/*
 * Reset the merging window and return the bios that were queued while
 * it was active.  Caller holds s->lock (see callers).
 */
static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;

	return bio_list_get(&s->bios_queued_during_merge);
}

/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
					   chunk_t old_chunk)
{
	struct dm_exception *e;

	e = dm_lookup_exception(&s->complete, old_chunk);
	if (!e) {
		DMERR("Corruption detected: exception for block %llu is "
		      "on disk but not in memory",
		      (unsigned long long)old_chunk);
		return -EINVAL;
	}

	/*
	 * If this is the only chunk using this exception, remove exception.
	 */
	if (!dm_consecutive_chunk_count(e)) {
		dm_remove_exception(e);
		free_completed_exception(e);
		return 0;
	}

	/*
	 * The chunk may be either at the beginning or the end of a
	 * group of consecutive chunks - never in the middle. We are
	 * removing chunks in the opposite order to that in which they
	 * were added, so this should always be true.
	 * Decrement the consecutive chunk counter and adjust the
	 * starting point if necessary.
	 */
	if (old_chunk == e->old_chunk) {
		e->old_chunk++;
		e->new_chunk++;
	} else if (old_chunk != e->old_chunk +
		   dm_consecutive_chunk_count(e)) {
		DMERR("Attempt to merge block %llu from the "
		      "middle of a chunk range [%llu - %llu]",
		      (unsigned long long)old_chunk,
		      (unsigned long long)e->old_chunk,
		      (unsigned long long)
		      e->old_chunk + dm_consecutive_chunk_count(e));
		return -EINVAL;
	}

	dm_consecutive_chunk_count_dec(e);

	return 0;
}

static void flush_bios(struct bio *bio);

/*
 * Remove all the exceptions for the chunks just merged (in reverse
 * order) and then resubmit any bios queued during the merge.
 */
static int remove_single_exception_chunk(struct dm_snapshot *s)
{
	struct bio *b = NULL;
	int r;
	chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

	down_write(&s->lock);

	/*
	 * Process chunks (and associated exceptions) in reverse order
	 * so that dm_consecutive_chunk_count_dec() accounting works.
	 */
	do {
		r = __remove_single_exception_chunk(s, old_chunk);
		if (r)
			goto out;
	} while (old_chunk-- > s->first_merging_chunk);

	b = __release_queued_bios_after_merge(s);

out:
	up_write(&s->lock);
	if (b)
		flush_bios(b);

	return r;
}

static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned chunk_size);

static void merge_callback(int read_err, unsigned long write_err,
			   void *context);

/* Snapshot of the global pending-exceptions-done counter, under its lock. */
static uint64_t read_pending_exceptions_done_count(void)
{
	uint64_t pending_exceptions_done;

	spin_lock(&_pending_exceptions_done_spinlock);
	pending_exceptions_done = _pending_exceptions_done_count;
	spin_unlock(&_pending_exceptions_done_spinlock);

	return pending_exceptions_done;
}

/* Bump the counter and wake the merge retry loop waiting on it. */
static void increment_pending_exceptions_done_count(void)
{
	spin_lock(&_pending_exceptions_done_spinlock);
	_pending_exceptions_done_count++;
	spin_unlock(&_pending_exceptions_done_spinlock);

	wake_up_all(&_pending_exceptions_done);
}

/*
 * Start merging the next run of linear chunks back into the origin.
 * Runs with RUNNING_MERGE set; on any failure or shutdown request it
 * falls through to merge_shutdown().  Completion continues in
 * merge_callback() via kcopyd.
 */
static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
	int i, linear_chunks;
	chunk_t old_chunk, new_chunk;
	struct dm_io_region src, dest;
	sector_t io_size;
	uint64_t previous_count;

	BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
	if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
		goto shut;

	/*
	 * valid flag never changes during merge, so no lock required.
	 */
	if (!s->valid) {
		DMERR("Snapshot is invalid: can't merge");
		goto shut;
	}

	linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
						      &new_chunk);
	if (linear_chunks <= 0) {
		if (linear_chunks < 0) {
			DMERR("Read error in exception store: "
			      "shutting down merge");
			down_write(&s->lock);
			s->merge_failed = 1;
			up_write(&s->lock);
		}
		goto shut;
	}

	/* Adjust old_chunk and new_chunk to reflect start of linear region */
	old_chunk = old_chunk + 1 - linear_chunks;
	new_chunk = new_chunk + 1 - linear_chunks;

	/*
	 * Use one (potentially large) I/O to copy all 'linear_chunks'
	 * from the exception store to the origin
	 */
	io_size = linear_chunks * s->store->chunk_size;

	dest.bdev = s->origin->bdev;
	dest.sector = chunk_to_sector(s->store, old_chunk);
	dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

	src.bdev = s->cow->bdev;
	src.sector = chunk_to_sector(s->store, new_chunk);
	src.count = dest.count;

	/*
	 * Reallocate any exceptions needed in other snapshots then
	 * wait for the pending exceptions to complete.
	 * Each time any pending exception (globally on the system)
	 * completes we are woken and repeat the process to find out
	 * if we can proceed. While this may not seem a particularly
	 * efficient algorithm, it is not expected to have any
	 * significant impact on performance.
	 */
	previous_count = read_pending_exceptions_done_count();
	while (origin_write_extent(s, dest.sector, io_size)) {
		wait_event(_pending_exceptions_done,
			   (read_pending_exceptions_done_count() !=
			    previous_count));
		/* Retry after the wait, until all exceptions are done. */
		previous_count = read_pending_exceptions_done_count();
	}

	down_write(&s->lock);
	s->first_merging_chunk = old_chunk;
	s->num_merging_chunks = linear_chunks;
	up_write(&s->lock);

	/* Wait until writes to all 'linear_chunks' drain */
	for (i = 0; i < linear_chunks; i++)
		__check_for_conflicting_io(s, old_chunk + i);

	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
	return;

shut:
	merge_shutdown(s);
}

static void error_bios(struct bio *bio);

/*
 * kcopyd completion for one merge copy: commit it in the exception
 * store, drop the merged exceptions, then kick off the next run.
 * Any failure marks the merge failed and errors the queued bios.
 */
static void merge_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snapshot *s = context;
	struct bio *b = NULL;

	if (read_err || write_err) {
		if (read_err)
			DMERR("Read error: shutting down merge.");
		else
			DMERR("Write error: shutting down merge.");
		goto shut;
	}

	if (s->store->type->commit_merge(s->store,
					 s->num_merging_chunks) < 0) {
		DMERR("Write error in exception store: shutting down merge");
		goto shut;
	}

	if (remove_single_exception_chunk(s) < 0)
		goto shut;

	snapshot_merge_next_chunks(s);

	return;

shut:
	down_write(&s->lock);
	s->merge_failed = 1;
	b = __release_queued_bios_after_merge(s);
	up_write(&s->lock);
	error_bios(b);

	merge_shutdown(s);
}

/* Begin merging unless a merge is already running. */
static void start_merge(struct dm_snapshot *s)
{
	if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
		snapshot_merge_next_chunks(s);
}

/* wait_on_bit() action: just reschedule. */
static int wait_schedule(void *ptr)
{
	schedule();

	return 0;
}

/*
 * Stop the merging process and wait until it finishes.
*/
static void stop_merge(struct dm_snapshot *s)
{
	set_bit(SHUTDOWN_MERGE, &s->state_bits);
	wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
		    TASK_UNINTERRUPTIBLE);
	clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char *origin_path, *cow_path;
	unsigned args_used, num_flush_requests = 1;
	fmode_t origin_mode = FMODE_READ;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad;
	}

	/* snapshot-merge writes to the origin, and flushes both devices. */
	if (dm_target_is_snapshot_merge(ti)) {
		num_flush_requests = 2;
		origin_mode = FMODE_WRITE;
	}

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s) {
		ti->error = "Cannot allocate private snapshot structure";
		r = -ENOMEM;
		goto bad;
	}

	origin_path = argv[0];
	argv++;
	argc--;

	r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad_origin;
	}

	cow_path = argv[0];
	argv++;
	argc--;

	r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
	if (r) {
		ti->error = "Cannot get COW device";
		goto bad_cow;
	}

	/* Remaining args (<p/n> <chunk-size>) are parsed by the store. */
	r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad_store;
	}

	argv += args_used;
	argc -= args_used;

	s->ti = ti;
	s->valid = 1;
	s->active = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	init_rwsem(&s->lock);
	INIT_LIST_HEAD(&s->list);
	spin_lock_init(&s->pe_lock);
	s->state_bits = 0;
	s->merge_failed = 0;
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;
	bio_list_init(&s->bios_queued_during_merge);

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad_hash_tables;
	}

	s->kcopyd_client = dm_kcopyd_client_create();
	if (IS_ERR(s->kcopyd_client)) {
		r = PTR_ERR(s->kcopyd_client);
		ti->error = "Could not create kcopyd client";
		goto bad_kcopyd;
	}

	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
		goto bad_pending_pool;
	}

	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
							 tracked_chunk_cache);
	if (!s->tracked_chunk_pool) {
		ti->error = "Could not allocate tracked_chunk mempool for "
			    "tracking reads";
		goto bad_tracked_chunk_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	ti->private = s;
	ti->num_flush_requests = num_flush_requests;

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	r = register_snapshot(s);
	if (r == -ENOMEM) {
		ti->error = "Snapshot origin struct allocation failed";
		goto bad_load_and_register;
	} else if (r < 0) {
		/* invalid handover, register_snapshot has set ti->error */
		goto bad_load_and_register;
	}

	/*
	 * Metadata must only be loaded into one table at once, so skip this
	 * if metadata will be handed over during resume.
	 * Chunk size will be set during the handover - set it to zero to
	 * ensure it's ignored.
	 */
	if (r > 0) {
		s->store->chunk_size = 0;
		return 0;
	}

	r = s->store->type->read_metadata(s->store, dm_add_exception,
					  (void *)s);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_read_metadata;
	} else if (r > 0) {
		/* Positive return: on-disk metadata says snapshot is full. */
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	if (!s->store->chunk_size) {
		ti->error = "Chunk size not set";
		goto bad_read_metadata;
	}
	ti->split_io = s->store->chunk_size;

	return 0;

/* Unwind in strict reverse order of acquisition. */
bad_read_metadata:
	unregister_snapshot(s);

bad_load_and_register:
	mempool_destroy(s->tracked_chunk_pool);

bad_tracked_chunk_pool:
	mempool_destroy(s->pending_pool);

bad_pending_pool:
	dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);

bad_hash_tables:
	dm_exception_store_destroy(s->store);

bad_store:
	dm_put_device(ti, s->cow);

bad_cow:
	dm_put_device(ti, s->origin);

bad_origin:
	kfree(s);

bad:
	return r;
}

/* Tear down kcopyd client and both exception tables for @s. */
static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);
}

/*
 * Hand the completed exception table and exception store over from
 * @snap_src to @snap_dest.  Callers hold both snapshots' locks
 * (see snapshot_resume()).
 */
static void __handover_exceptions(struct dm_snapshot *snap_src,
				  struct dm_snapshot *snap_dest)
{
	union {
		struct dm_exception_table table_swap;
		struct dm_exception_store *store_swap;
	} u;

	/*
	 * Swap all snapshot context information between the two instances.
	 */
	u.table_swap = snap_dest->complete;
	snap_dest->complete = snap_src->complete;
	snap_src->complete = u.table_swap;

	u.store_swap = snap_dest->store;
	snap_dest->store = snap_src->store;
	snap_src->store = u.store_swap;

	snap_dest->store->snap = snap_dest;
	snap_src->store->snap = snap_src;

	snap_dest->ti->split_io = snap_dest->store->chunk_size;
	snap_dest->valid = snap_src->valid;

	/*
	 * Set source invalid to ensure it receives no further I/O.
	 */
	snap_src->valid = 0;
}

/* Destructor: cancel any pending handover, stop merging, free everything. */
static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	/* Check whether exception handover must be cancelled */
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest && (s == snap_src)) {
		down_write(&snap_dest->lock);
		snap_dest->valid = 0;
		up_write(&snap_dest->lock);
		DMERR("Cancelling snapshot handover.");
	}
	up_read(&_origins_lock);

	if (dm_target_is_snapshot_merge(ti))
		stop_merge(s);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_destroy aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	mempool_destroy(s->tracked_chunk_pool);

	__free_exceptions(s);

	mempool_destroy(s->pending_pool);

	dm_exception_store_destroy(s->store);

	dm_put_device(ti, s->cow);

	dm_put_device(ti, s->origin);

	kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static int do_origin(struct dm_dev *origin, struct bio *bio);

/*
 * Flush a list of buffers.
 * NOTE(review): unlike flush_bios(), each bio is re-run through
 * do_origin() first and only submitted if it was remapped.
 */
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
{
	struct bio *n;
	int r;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		r = do_origin(s->origin, bio);
		if (r == DM_MAPIO_REMAPPED)
			generic_make_request(bio);
		bio = n;
	}
}

/*
 * Error a list of buffers.
*/
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

/*
 * Mark the snapshot invalid (idempotent), tell the exception store to
 * drop it, and raise a table event.  Callers hold s->lock for write
 * (see callers).
 */
static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store->type->drop_snapshot)
		s->store->type->drop_snapshot(s->store);

	s->valid = 0;

	dm_table_event(s->ti->table);
}

/*
 * Finish a pending exception: on success promote it to the completed
 * table; on failure invalidate the snapshot.  Either way, release the
 * bios that were queued on it (erroring them on failure) and retry
 * the origin bios.
 */
static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	struct bio *full_bio = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_completed_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_completed_exception(e);
		error = 1;
		goto out;
	}

	/* Check for conflicting reads */
	__check_for_conflicting_io(s, pe->e.old_chunk);

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	dm_insert_exception(&s->complete, e);

out:
	dm_remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = bio_list_get(&pe->origin_bios);
	full_bio = pe->full_bio;
	if (full_bio) {
		/* Restore the bio's original completion before reuse. */
		full_bio->bi_end_io = pe->full_bio_end_io;
		full_bio->bi_private = pe->full_bio_private;
	}
	free_pending_exception(pe);

	increment_pending_exceptions_done_count();

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error) {
		if (full_bio)
			bio_io_error(full_bio);
		error_bios(snapshot_bios);
	} else {
		if (full_bio)
			bio_endio(full_bio, 0);
		flush_bios(snapshot_bios);
	}

	retry_origin_bios(s, origin_bios);
}

static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished. kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);

	else
		/* Update the metadata if we are persistent */
		s->store->type->commit_exception(s->store, &pe->e,
						 commit_callback, pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
	/* Clamp to the device end for a partial final chunk. */
	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}

/*
 * End-io for a bio used as a whole-chunk copy: forward completion to
 * the kcopyd callback prepared in start_full_bio().
 */
static void full_bio_end_io(struct bio *bio, int error)
{
	void *callback_data = bio->bi_private;

	dm_kcopyd_do_callback(callback_data, 0, error ? 1 : 0);
}

/*
 * A write covering a whole chunk needs no copy from the origin: submit
 * the bio itself to the COW device, routing its completion through
 * kcopyd so the normal copy_callback() path runs.
 */
static void start_full_bio(struct dm_snap_pending_exception *pe,
			   struct bio *bio)
{
	struct dm_snapshot *s = pe->snap;
	void *callback_data;

	pe->full_bio = bio;
	pe->full_bio_end_io = bio->bi_end_io;
	pe->full_bio_private = bio->bi_private;

	callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
						   copy_callback, pe);

	bio->bi_end_io = full_bio_end_io;
	bio->bi_private = callback_data;

	generic_make_request(bio);
}

/* Look up an in-flight exception for @chunk, or NULL. */
static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

	if (!e)
		return NULL;

	return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
			 struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	struct dm_snap_pending_exception *pe2;

	pe2 = __lookup_pending_exception(s, chunk);
	if (pe2) {
		/* Someone else got there first; use theirs, free ours. */
		free_pending_exception(pe);
		return pe2;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->started = 0;
	pe->full_bio = NULL;

	if (s->store->type->prepare_exception(s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	dm_insert_exception(&s->pending, &pe->e);

	return pe;
}

/*
 * Redirect @bio to the COW location recorded in exception @e,
 * preserving the offset within the chunk.
 */
static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_sector = chunk_to_sector(s->store,
					 dm_chunk_number(e->new_chunk) +
					 (chunk - e->old_chunk)) +
					 (bio->bi_sector &
					  s->store->chunk_mask);
}

/*
 * Map function for the snapshot target: reads are remapped (and
 * tracked) or sent to the origin; writes trigger copy-out of the
 * chunk first unless it is already remapped.
 */
static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	if (bio->bi_rw & REQ_FLUSH) {
		bio->bi_bdev = s->cow->bdev;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
			/* Allocation may sleep, so drop the lock and recheck. */
			up_write(&s->lock);
			pe = alloc_pending_exception(s);
			down_write(&s->lock);

			if (!s->valid) {
				free_pending_exception(pe);
				r = -EIO;
				goto out_unlock;
			}

			e = dm_lookup_exception(&s->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				remap_exception(s, e, bio, chunk);
				goto out_unlock;
			}

			pe = __find_pending_exception(s, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(s, -ENOMEM);
				r = -EIO;
				goto out_unlock;
			}
		}

		remap_exception(s, &pe->e, bio, chunk);

		r = DM_MAPIO_SUBMITTED;

		/* A whole-chunk write can skip the copy: submit it directly. */
		if (!pe->started &&
		    bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) {
			pe->started = 1;
			up_write(&s->lock);
			start_full_bio(pe, bio);
			goto out;
		}

		bio_list_add(&pe->snapshot_bios, bio);

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio->bi_bdev = s->origin->bdev;
		map_context->ptr = track_chunk(s, chunk);
	}

out_unlock:
	up_write(&s->lock);
out:
	return r;
}

/*
 * A snapshot-merge target behaves like a combination of a snapshot
 * target and a snapshot-origin target.  It only generates new
 * exceptions in other snapshots and not in the one that is being
 * merged.
*
 * For each chunk, if there is an existing exception, it is used to
 * redirect I/O to the cow device. Otherwise I/O is sent to the origin,
 * which in turn might generate exceptions in other snapshots.
 * If merging is currently taking place on the chunk in question, the
 * I/O is deferred by adding it to s->bios_queued_during_merge.
 */
static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
			      union map_info *map_context)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;

	if (bio->bi_rw & REQ_FLUSH) {
		/* Flush request 0 goes to the origin, request 1 to the cow. */
		if (!map_context->target_request_nr)
			bio->bi_bdev = s->origin->bdev;
		else
			bio->bi_bdev = s->cow->bdev;
		map_context->ptr = NULL;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_sector);

	down_write(&s->lock);

	/* Full merging snapshots are redirected to the origin */
	if (!s->valid)
		goto redirect_to_origin;

	/* If the block is already remapped - use that */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		/* Queue writes overlapping with chunks being merged */
		if (bio_rw(bio) == WRITE &&
		    chunk >= s->first_merging_chunk &&
		    chunk < (s->first_merging_chunk +
			     s->num_merging_chunks)) {
			bio->bi_bdev = s->origin->bdev;
			bio_list_add(&s->bios_queued_during_merge, bio);
			r = DM_MAPIO_SUBMITTED;
			goto out_unlock;
		}

		remap_exception(s, e, bio, chunk);

		if (bio_rw(bio) == WRITE)
			map_context->ptr = track_chunk(s, chunk);
		goto out_unlock;
	}

redirect_to_origin:
	bio->bi_bdev = s->origin->bdev;

	if (bio_rw(bio) == WRITE) {
		up_write(&s->lock);
		return do_origin(s->origin, bio);
	}

out_unlock:
	up_write(&s->lock);

	return r;
}

/* End-io: stop tracking the chunk recorded at map time, if any. */
static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
			   int error, union map_info *map_context)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snap_tracked_chunk *c = map_context->ptr;

	if (c)
		stop_tracking_chunk(s, c);

	return 0;
}

static void snapshot_merge_presuspend(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	stop_merge(s);
}

/*
 * Refuse to resume when an exception handover is pending but cannot
 * yet proceed (source not suspended, or we ARE the source).
 */
static int snapshot_preresume(struct dm_target *ti)
{
	int r = 0;
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_read(&snap_src->lock);
		if (s == snap_src) {
			DMERR("Unable to resume snapshot source until "
			      "handover completes.");
			r = -EINVAL;
		} else if (!dm_suspended(snap_src->ti)) {
			DMERR("Unable to perform snapshot handover until "
			      "source is suspended.");
			r = -EINVAL;
		}
		up_read(&snap_src->lock);
	}
	up_read(&_origins_lock);

	return r;
}

/*
 * Resume: complete any pending exception handover (locks taken in
 * src-then-dest order with nesting annotation), re-sort this snapshot
 * by its now-known chunk size, and mark it active.
 */
static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_write(&snap_src->lock);
		down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
		__handover_exceptions(snap_src, snap_dest);
		up_write(&snap_dest->lock);
		up_write(&snap_src->lock);
	}
	up_read(&_origins_lock);

	/* Now we have correct chunk size, reregister */
	reregister_snapshot(s);

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

/* Smallest chunk size among @bdev's snapshots, 0 if it has none. */
static sector_t get_origin_minimum_chunksize(struct block_device *bdev)
{
	sector_t min_chunksize;

	down_read(&_origins_lock);
	min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
	up_read(&_origins_lock);

	return min_chunksize;
}

static void snapshot_merge_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	/*
	 * Handover exceptions from existing snapshot.
	 */
	snapshot_resume(ti);

	/*
	 * snapshot-merge acts as an origin, so set ti->split_io
	 */
	ti->split_io = get_origin_minimum_chunksize(s->origin->bdev);

	start_merge(s);
}

/*
 * Report target status: usage figures (or Invalid/Merge failed) for
 * STATUSTYPE_INFO; the constructor arguments for STATUSTYPE_TABLE.
 */
static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	unsigned sz = 0;
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:

		down_write(&snap->lock);

		if (!snap->valid)
			DMEMIT("Invalid");
		else if (snap->merge_failed)
			DMEMIT("Merge failed");
		else {
			if (snap->store->type->usage) {
				sector_t total_sectors, sectors_allocated,
					 metadata_sectors;
				snap->store->type->usage(snap->store,
							 &total_sectors,
							 &sectors_allocated,
							 &metadata_sectors);
				DMEMIT("%llu/%llu %llu",
				       (unsigned long long)sectors_allocated,
				       (unsigned long long)total_sectors,
				       (unsigned long long)metadata_sectors);
			}
			else
				DMEMIT("Unknown");
		}

		up_write(&snap->lock);

		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		DMEMIT("%s %s", snap->origin->name, snap->cow->name);
		snap->store->type->status(snap->store, type, result + sz,
					  maxlen - sz);
		break;
	}

	return 0;
}

/* Run @fn over both underlying devices (origin, then cow). */
static int snapshot_iterate_devices(struct dm_target *ti,
				    iterate_devices_callout_fn fn, void *data)
{
	struct dm_snapshot *snap = ti->private;
	int r;

	r = fn(ti, snap->origin, 0, ti->len, data);

	if (!r)
		r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);

	return r;
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/

/*
 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
 * supplied bio was ignored. The caller may submit it immediately.
 * (No remapping actually occurs as the origin is always a direct linear
 * map.)
 *
 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
 * and any supplied bio is added to a list to be submitted once all
 * the necessary exceptions exist.
*/ static int __origin_write(struct list_head *snapshots, sector_t sector, struct bio *bio) { int r = DM_MAPIO_REMAPPED; struct dm_snapshot *snap; struct dm_exception *e; struct dm_snap_pending_exception *pe; struct dm_snap_pending_exception *pe_to_start_now = NULL; struct dm_snap_pending_exception *pe_to_start_last = NULL; chunk_t chunk; /* Do all the snapshots on this origin */ list_for_each_entry (snap, snapshots, list) { /* * Don't make new exceptions in a merging snapshot * because it has effectively been deleted */ if (dm_target_is_snapshot_merge(snap->ti)) continue; down_write(&snap->lock); /* Only deal with valid and active snapshots */ if (!snap->valid || !snap->active) goto next_snapshot; /* Nothing to do if writing beyond end of snapshot */ if (sector >= dm_table_get_size(snap->ti->table)) goto next_snapshot; /* * Remember, different snapshots can have * different chunk sizes. */ chunk = sector_to_chunk(snap->store, sector); /* * Check exception table to see if block * is already remapped in this snapshot * and trigger an exception if not. */ e = dm_lookup_exception(&snap->complete, chunk); if (e) goto next_snapshot; pe = __lookup_pending_exception(snap, chunk); if (!pe) { up_write(&snap->lock); pe = alloc_pending_exception(snap); down_write(&snap->lock); if (!snap->valid) { free_pending_exception(pe); goto next_snapshot; } e = dm_lookup_exception(&snap->complete, chunk); if (e) { free_pending_exception(pe); goto next_snapshot; } pe = __find_pending_exception(snap, pe, chunk); if (!pe) { __invalidate_snapshot(snap, -ENOMEM); goto next_snapshot; } } r = DM_MAPIO_SUBMITTED; /* * If an origin bio was supplied, queue it to wait for the * completion of this exception, and start this one last, * at the end of the function. 
*/ if (bio) { bio_list_add(&pe->origin_bios, bio); bio = NULL; if (!pe->started) { pe->started = 1; pe_to_start_last = pe; } } if (!pe->started) { pe->started = 1; pe_to_start_now = pe; } next_snapshot: up_write(&snap->lock); if (pe_to_start_now) { start_copy(pe_to_start_now); pe_to_start_now = NULL; } } /* * Submit the exception against which the bio is queued last, * to give the other exceptions a head start. */ if (pe_to_start_last) start_copy(pe_to_start_last); return r; } /* * Called on a write from the origin driver. */ static int do_origin(struct dm_dev *origin, struct bio *bio) { struct origin *o; int r = DM_MAPIO_REMAPPED; down_read(&_origins_lock); o = __lookup_origin(origin->bdev); if (o) r = __origin_write(&o->snapshots, bio->bi_sector, bio); up_read(&_origins_lock); return r; } /* * Trigger exceptions in all non-merging snapshots. * * The chunk size of the merging snapshot may be larger than the chunk * size of some other snapshot so we may need to reallocate multiple * chunks in other snapshots. * * We scan all the overlapping exceptions in the other snapshots. * Returns 1 if anything was reallocated and must be waited for, * otherwise returns 0. * * size must be a multiple of merging_snap's chunk_size. */ static int origin_write_extent(struct dm_snapshot *merging_snap, sector_t sector, unsigned size) { int must_wait = 0; sector_t n; struct origin *o; /* * The origin's __minimum_chunk_size() got stored in split_io * by snapshot_merge_resume(). */ down_read(&_origins_lock); o = __lookup_origin(merging_snap->origin->bdev); for (n = 0; n < size; n += merging_snap->ti->split_io) if (__origin_write(&o->snapshots, sector + n, NULL) == DM_MAPIO_SUBMITTED) must_wait = 1; up_read(&_origins_lock); return must_wait; } /* * Origin: maps a linear range of a device, with hooks for snapshotting. */ /* * Construct an origin mapping: <dev_path> * The context for an origin is merely a 'struct dm_dev *' * pointing to the real device. 
*/ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv) { int r; struct dm_dev *dev; if (argc != 1) { ti->error = "origin: incorrect number of arguments"; return -EINVAL; } r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev); if (r) { ti->error = "Cannot get target device"; return r; } ti->private = dev; ti->num_flush_requests = 1; return 0; } static void origin_dtr(struct dm_target *ti) { struct dm_dev *dev = ti->private; dm_put_device(ti, dev); } static int origin_map(struct dm_target *ti, struct bio *bio, union map_info *map_context) { struct dm_dev *dev = ti->private; bio->bi_bdev = dev->bdev; if (bio->bi_rw & REQ_FLUSH) return DM_MAPIO_REMAPPED; /* Only tell snapshots if this is a write */ return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED; } /* * Set the target "split_io" field to the minimum of all the snapshots' * chunk sizes. */ static void origin_resume(struct dm_target *ti) { struct dm_dev *dev = ti->private; ti->split_io = get_origin_minimum_chunksize(dev->bdev); } static int origin_status(struct dm_target *ti, status_type_t type, char *result, unsigned int maxlen) { struct dm_dev *dev = ti->private; switch (type) { case STATUSTYPE_INFO: result[0] = '\0'; break; case STATUSTYPE_TABLE: snprintf(result, maxlen, "%s", dev->name); break; } return 0; } static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm, struct bio_vec *biovec, int max_size) { struct dm_dev *dev = ti->private; struct request_queue *q = bdev_get_queue(dev->bdev); if (!q->merge_bvec_fn) return max_size; bvm->bi_bdev = dev->bdev; bvm->bi_sector = bvm->bi_sector; return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); } static int origin_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { struct dm_dev *dev = ti->private; return fn(ti, dev, 0, ti->len, data); } static struct target_type origin_target = { .name = "snapshot-origin", .version = {1, 7, 1}, .module = THIS_MODULE, .ctr = 
origin_ctr, .dtr = origin_dtr, .map = origin_map, .resume = origin_resume, .status = origin_status, .merge = origin_merge, .iterate_devices = origin_iterate_devices, }; static struct target_type snapshot_target = { .name = "snapshot", .version = {1, 10, 0}, .module = THIS_MODULE, .ctr = snapshot_ctr, .dtr = snapshot_dtr, .map = snapshot_map, .end_io = snapshot_end_io, .preresume = snapshot_preresume, .resume = snapshot_resume, .status = snapshot_status, .iterate_devices = snapshot_iterate_devices, }; static struct target_type merge_target = { .name = dm_snapshot_merge_target_name, .version = {1, 1, 0}, .module = THIS_MODULE, .ctr = snapshot_ctr, .dtr = snapshot_dtr, .map = snapshot_merge_map, .end_io = snapshot_end_io, .presuspend = snapshot_merge_presuspend, .preresume = snapshot_preresume, .resume = snapshot_merge_resume, .status = snapshot_status, .iterate_devices = snapshot_iterate_devices, }; static int __init dm_snapshot_init(void) { int r; r = dm_exception_store_init(); if (r) { DMERR("Failed to initialize exception stores"); return r; } r = dm_register_target(&snapshot_target); if (r < 0) { DMERR("snapshot target register failed %d", r); goto bad_register_snapshot_target; } r = dm_register_target(&origin_target); if (r < 0) { DMERR("Origin target register failed %d", r); goto bad_register_origin_target; } r = dm_register_target(&merge_target); if (r < 0) { DMERR("Merge target register failed %d", r); goto bad_register_merge_target; } r = init_origin_hash(); if (r) { DMERR("init_origin_hash failed."); goto bad_origin_hash; } exception_cache = KMEM_CACHE(dm_exception, 0); if (!exception_cache) { DMERR("Couldn't create exception cache."); r = -ENOMEM; goto bad_exception_cache; } pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0); if (!pending_cache) { DMERR("Couldn't create pending cache."); r = -ENOMEM; goto bad_pending_cache; } tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0); if (!tracked_chunk_cache) { DMERR("Couldn't create cache to track 
chunks in use."); r = -ENOMEM; goto bad_tracked_chunk_cache; } return 0; bad_tracked_chunk_cache: kmem_cache_destroy(pending_cache); bad_pending_cache: kmem_cache_destroy(exception_cache); bad_exception_cache: exit_origin_hash(); bad_origin_hash: dm_unregister_target(&merge_target); bad_register_merge_target: dm_unregister_target(&origin_target); bad_register_origin_target: dm_unregister_target(&snapshot_target); bad_register_snapshot_target: dm_exception_store_exit(); return r; } static void __exit dm_snapshot_exit(void) { dm_unregister_target(&snapshot_target); dm_unregister_target(&origin_target); dm_unregister_target(&merge_target); exit_origin_hash(); kmem_cache_destroy(pending_cache); kmem_cache_destroy(exception_cache); kmem_cache_destroy(tracked_chunk_cache); dm_exception_store_exit(); } /* Module hooks */ module_init(dm_snapshot_init); module_exit(dm_snapshot_exit); MODULE_DESCRIPTION(DM_NAME " snapshot target"); MODULE_AUTHOR("Joe Thornber"); MODULE_LICENSE("GPL");
gpl-2.0
TEAM-RAZOR-DEVICES/kernel_cyanogen_tomato
scripts/dtc/data.c
5780
5265
/* * (C) Copyright David Gibson <dwg@au1.ibm.com>, IBM Corporation. 2005. * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA */ #include "dtc.h" void data_free(struct data d) { struct marker *m, *nm; m = d.markers; while (m) { nm = m->next; free(m->ref); free(m); m = nm; } if (d.val) free(d.val); } struct data data_grow_for(struct data d, int xlen) { struct data nd; int newsize; if (xlen == 0) return d; nd = d; newsize = xlen; while ((d.len + xlen) > newsize) newsize *= 2; nd.val = xrealloc(d.val, newsize); return nd; } struct data data_copy_mem(const char *mem, int len) { struct data d; d = data_grow_for(empty_data, len); d.len = len; memcpy(d.val, mem, len); return d; } struct data data_copy_escape_string(const char *s, int len) { int i = 0; struct data d; char *q; d = data_grow_for(empty_data, strlen(s)+1); q = d.val; while (i < len) { char c = s[i++]; if (c == '\\') c = get_escape_char(s, &i); q[d.len++] = c; } q[d.len++] = '\0'; return d; } struct data data_copy_file(FILE *f, size_t maxlen) { struct data d = empty_data; while (!feof(f) && (d.len < maxlen)) { size_t chunksize, ret; if (maxlen == -1) chunksize = 4096; else chunksize = maxlen - d.len; d = data_grow_for(d, chunksize); ret = fread(d.val + d.len, 1, chunksize, f); if (ferror(f)) die("Error reading file into data: %s", strerror(errno)); if (d.len + ret < d.len) 
die("Overflow reading file into data\n"); d.len += ret; } return d; } struct data data_append_data(struct data d, const void *p, int len) { d = data_grow_for(d, len); memcpy(d.val + d.len, p, len); d.len += len; return d; } struct data data_insert_at_marker(struct data d, struct marker *m, const void *p, int len) { d = data_grow_for(d, len); memmove(d.val + m->offset + len, d.val + m->offset, d.len - m->offset); memcpy(d.val + m->offset, p, len); d.len += len; /* Adjust all markers after the one we're inserting at */ m = m->next; for_each_marker(m) m->offset += len; return d; } static struct data data_append_markers(struct data d, struct marker *m) { struct marker **mp = &d.markers; /* Find the end of the markerlist */ while (*mp) mp = &((*mp)->next); *mp = m; return d; } struct data data_merge(struct data d1, struct data d2) { struct data d; struct marker *m2 = d2.markers; d = data_append_markers(data_append_data(d1, d2.val, d2.len), m2); /* Adjust for the length of d1 */ for_each_marker(m2) m2->offset += d1.len; d2.markers = NULL; /* So data_free() doesn't clobber them */ data_free(d2); return d; } struct data data_append_integer(struct data d, uint64_t value, int bits) { uint8_t value_8; uint16_t value_16; uint32_t value_32; uint64_t value_64; switch (bits) { case 8: value_8 = value; return data_append_data(d, &value_8, 1); case 16: value_16 = cpu_to_fdt16(value); return data_append_data(d, &value_16, 2); case 32: value_32 = cpu_to_fdt32(value); return data_append_data(d, &value_32, 4); case 64: value_64 = cpu_to_fdt64(value); return data_append_data(d, &value_64, 8); default: die("Invalid literal size (%d)\n", bits); } } struct data data_append_re(struct data d, const struct fdt_reserve_entry *re) { struct fdt_reserve_entry bere; bere.address = cpu_to_fdt64(re->address); bere.size = cpu_to_fdt64(re->size); return data_append_data(d, &bere, sizeof(bere)); } struct data data_append_cell(struct data d, cell_t word) { return data_append_integer(d, word, 
sizeof(word) * 8); } struct data data_append_addr(struct data d, uint64_t addr) { return data_append_integer(d, addr, sizeof(addr) * 8); } struct data data_append_byte(struct data d, uint8_t byte) { return data_append_data(d, &byte, 1); } struct data data_append_zeroes(struct data d, int len) { d = data_grow_for(d, len); memset(d.val + d.len, 0, len); d.len += len; return d; } struct data data_append_align(struct data d, int align) { int newlen = ALIGN(d.len, align); return data_append_zeroes(d, newlen - d.len); } struct data data_add_marker(struct data d, enum markertype type, char *ref) { struct marker *m; m = xmalloc(sizeof(*m)); m->offset = d.len; m->type = type; m->ref = ref; m->next = NULL; return data_append_markers(d, m); } int data_is_one_string(struct data d) { int i; int len = d.len; if (len == 0) return 0; for (i = 0; i < len-1; i++) if (d.val[i] == '\0') return 0; if (d.val[len-1] != '\0') return 0; return 1; }
gpl-2.0
faux123/kernel-msm
arch/x86/power/hibernate_64.c
8340
4177
/* * Hibernation support for x86-64 * * Distribute under GPLv2 * * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl> * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz> * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org> */ #include <linux/gfp.h> #include <linux/smp.h> #include <linux/suspend.h> #include <asm/proto.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/mtrr.h> #include <asm/suspend.h> /* References to section boundaries */ extern const void __nosave_begin, __nosave_end; /* Defined in hibernate_asm_64.S */ extern int restore_image(void); /* * Address to jump to in the last phase of restore in order to get to the image * kernel's text (this value is passed in the image header). */ unsigned long restore_jump_address; /* * Value of the cr3 register from before the hibernation (this value is passed * in the image header). */ unsigned long restore_cr3; pgd_t *temp_level4_pgt; void *relocated_restore_code; static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end) { long i, j; i = pud_index(address); pud = pud + i; for (; i < PTRS_PER_PUD; pud++, i++) { unsigned long paddr; pmd_t *pmd; paddr = address + i*PUD_SIZE; if (paddr >= end) break; pmd = (pmd_t *)get_safe_page(GFP_ATOMIC); if (!pmd) return -ENOMEM; set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) { unsigned long pe; if (paddr >= end) break; pe = __PAGE_KERNEL_LARGE_EXEC | paddr; pe &= __supported_pte_mask; set_pmd(pmd, __pmd(pe)); } } return 0; } static int set_up_temporary_mappings(void) { unsigned long start, end, next; int error; temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC); if (!temp_level4_pgt) return -ENOMEM; /* It is safe to reuse the original kernel mapping */ set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map), init_level4_pgt[pgd_index(__START_KERNEL_map)]); /* Set up the direct mapping from scratch */ start = (unsigned long)pfn_to_kaddr(0); end = (unsigned long)pfn_to_kaddr(max_pfn); 
for (; start < end; start = next) { pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC); if (!pud) return -ENOMEM; next = start + PGDIR_SIZE; if (next > end) next = end; if ((error = res_phys_pud_init(pud, __pa(start), __pa(next)))) return error; set_pgd(temp_level4_pgt + pgd_index(start), mk_kernel_pgd(__pa(pud))); } return 0; } int swsusp_arch_resume(void) { int error; /* We have got enough memory and from now on we cannot recover */ if ((error = set_up_temporary_mappings())) return error; relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC); if (!relocated_restore_code) return -ENOMEM; memcpy(relocated_restore_code, &core_restore_code, &restore_registers - &core_restore_code); restore_image(); return 0; } /* * pfn_is_nosave - check if given pfn is in the 'nosave' section */ int pfn_is_nosave(unsigned long pfn) { unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT; unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT; return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn); } struct restore_data_record { unsigned long jump_address; unsigned long cr3; unsigned long magic; }; #define RESTORE_MAGIC 0x0123456789ABCDEFUL /** * arch_hibernation_header_save - populate the architecture specific part * of a hibernation image header * @addr: address to save the data at */ int arch_hibernation_header_save(void *addr, unsigned int max_size) { struct restore_data_record *rdr = addr; if (max_size < sizeof(struct restore_data_record)) return -EOVERFLOW; rdr->jump_address = restore_jump_address; rdr->cr3 = restore_cr3; rdr->magic = RESTORE_MAGIC; return 0; } /** * arch_hibernation_header_restore - read the architecture specific data * from the hibernation image header * @addr: address to read the data from */ int arch_hibernation_header_restore(void *addr) { struct restore_data_record *rdr = addr; restore_jump_address = rdr->jump_address; restore_cr3 = rdr->cr3; return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL; }
gpl-2.0
pranav01/android_kernel_xiaomi_armani
arch/powerpc/boot/mpc8xx.c
13972
1692
/* * MPC8xx support functions * * Author: Scott Wood <scottwood@freescale.com> * * Copyright (c) 2007 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include "ops.h" #include "types.h" #include "fsl-soc.h" #include "mpc8xx.h" #include "stdio.h" #include "io.h" #define MPC8XX_PLPRCR (0x284/4) /* PLL and Reset Control Register */ /* Return system clock from crystal frequency */ u32 mpc885_get_clock(u32 crystal) { u32 *immr; u32 plprcr; int mfi, mfn, mfd, pdf, div; u32 ret; immr = fsl_get_immr(); if (!immr) { printf("mpc885_get_clock: Couldn't get IMMR base.\r\n"); return 0; } plprcr = in_be32(&immr[MPC8XX_PLPRCR]); mfi = (plprcr >> 16) & 15; if (mfi < 5) { printf("Warning: PLPRCR[MFI] value of %d out-of-bounds\r\n", mfi); mfi = 5; } pdf = (plprcr >> 1) & 0xf; div = (plprcr >> 20) & 3; mfd = (plprcr >> 22) & 0x1f; mfn = (plprcr >> 27) & 0x1f; ret = crystal * mfi; if (mfn != 0) ret += crystal * mfn / (mfd + 1); return ret / (pdf + 1); } /* Set common device tree fields based on the given clock frequencies. */ void mpc8xx_set_clocks(u32 sysclk) { void *node; dt_fixup_cpu_clocks(sysclk, sysclk / 16, sysclk); node = finddevice("/soc/cpm"); if (node) setprop(node, "clock-frequency", &sysclk, 4); node = finddevice("/soc/cpm/brg"); if (node) setprop(node, "clock-frequency", &sysclk, 4); } int mpc885_fixup_clocks(u32 crystal) { u32 sysclk = mpc885_get_clock(crystal); if (!sysclk) return 0; mpc8xx_set_clocks(sysclk); return 1; }
gpl-2.0
uberlaggydarwin/bugfree-wookie
fs/nfs/nfs3acl.c
149
9939
#include <linux/fs.h> #include <linux/gfp.h> #include <linux/nfs.h> #include <linux/nfs3.h> #include <linux/nfs_fs.h> #include <linux/posix_acl_xattr.h> #include <linux/nfsacl.h> #include "internal.h" #define NFSDBG_FACILITY NFSDBG_PROC ssize_t nfs3_listxattr(struct dentry *dentry, char *buffer, size_t size) { struct inode *inode = dentry->d_inode; struct posix_acl *acl; int pos=0, len=0; # define output(s) do { \ if (pos + sizeof(s) <= size) { \ memcpy(buffer + pos, s, sizeof(s)); \ pos += sizeof(s); \ } \ len += sizeof(s); \ } while(0) acl = nfs3_proc_getacl(inode, ACL_TYPE_ACCESS); if (IS_ERR(acl)) return PTR_ERR(acl); if (acl) { output("system.posix_acl_access"); posix_acl_release(acl); } if (S_ISDIR(inode->i_mode)) { acl = nfs3_proc_getacl(inode, ACL_TYPE_DEFAULT); if (IS_ERR(acl)) return PTR_ERR(acl); if (acl) { output("system.posix_acl_default"); posix_acl_release(acl); } } # undef output if (!buffer || len <= size) return len; return -ERANGE; } ssize_t nfs3_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size) { struct inode *inode = dentry->d_inode; struct posix_acl *acl; int type, error = 0; if (strcmp(name, POSIX_ACL_XATTR_ACCESS) == 0) type = ACL_TYPE_ACCESS; else if (strcmp(name, POSIX_ACL_XATTR_DEFAULT) == 0) type = ACL_TYPE_DEFAULT; else return -EOPNOTSUPP; acl = nfs3_proc_getacl(inode, type); if (IS_ERR(acl)) return PTR_ERR(acl); else if (acl) { if (type == ACL_TYPE_ACCESS && acl->a_count == 0) error = -ENODATA; else error = posix_acl_to_xattr(acl, buffer, size); posix_acl_release(acl); } else error = -ENODATA; return error; } int nfs3_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { struct inode *inode = dentry->d_inode; struct posix_acl *acl; int type, error; if (strcmp(name, POSIX_ACL_XATTR_ACCESS) == 0) type = ACL_TYPE_ACCESS; else if (strcmp(name, POSIX_ACL_XATTR_DEFAULT) == 0) type = ACL_TYPE_DEFAULT; else return -EOPNOTSUPP; acl = posix_acl_from_xattr(value, size); if 
(IS_ERR(acl)) return PTR_ERR(acl); error = nfs3_proc_setacl(inode, type, acl); posix_acl_release(acl); return error; } int nfs3_removexattr(struct dentry *dentry, const char *name) { struct inode *inode = dentry->d_inode; int type; if (strcmp(name, POSIX_ACL_XATTR_ACCESS) == 0) type = ACL_TYPE_ACCESS; else if (strcmp(name, POSIX_ACL_XATTR_DEFAULT) == 0) type = ACL_TYPE_DEFAULT; else return -EOPNOTSUPP; return nfs3_proc_setacl(inode, type, NULL); } static void __nfs3_forget_cached_acls(struct nfs_inode *nfsi) { if (!IS_ERR(nfsi->acl_access)) { posix_acl_release(nfsi->acl_access); nfsi->acl_access = ERR_PTR(-EAGAIN); } if (!IS_ERR(nfsi->acl_default)) { posix_acl_release(nfsi->acl_default); nfsi->acl_default = ERR_PTR(-EAGAIN); } } void nfs3_forget_cached_acls(struct inode *inode) { dprintk("NFS: nfs3_forget_cached_acls(%s/%ld)\n", inode->i_sb->s_id, inode->i_ino); spin_lock(&inode->i_lock); __nfs3_forget_cached_acls(NFS_I(inode)); spin_unlock(&inode->i_lock); } static struct posix_acl *nfs3_get_cached_acl(struct inode *inode, int type) { struct nfs_inode *nfsi = NFS_I(inode); struct posix_acl *acl = ERR_PTR(-EINVAL); spin_lock(&inode->i_lock); switch(type) { case ACL_TYPE_ACCESS: acl = nfsi->acl_access; break; case ACL_TYPE_DEFAULT: acl = nfsi->acl_default; break; default: goto out; } if (IS_ERR(acl)) acl = ERR_PTR(-EAGAIN); else acl = posix_acl_dup(acl); out: spin_unlock(&inode->i_lock); dprintk("NFS: nfs3_get_cached_acl(%s/%ld, %d) = %p\n", inode->i_sb->s_id, inode->i_ino, type, acl); return acl; } static void nfs3_cache_acls(struct inode *inode, struct posix_acl *acl, struct posix_acl *dfacl) { struct nfs_inode *nfsi = NFS_I(inode); dprintk("nfs3_cache_acls(%s/%ld, %p, %p)\n", inode->i_sb->s_id, inode->i_ino, acl, dfacl); spin_lock(&inode->i_lock); __nfs3_forget_cached_acls(NFS_I(inode)); if (!IS_ERR(acl)) nfsi->acl_access = posix_acl_dup(acl); if (!IS_ERR(dfacl)) nfsi->acl_default = posix_acl_dup(dfacl); spin_unlock(&inode->i_lock); } struct posix_acl 
*nfs3_proc_getacl(struct inode *inode, int type) { struct nfs_server *server = NFS_SERVER(inode); struct page *pages[NFSACL_MAXPAGES] = { }; struct nfs3_getaclargs args = { .fh = NFS_FH(inode), .pages = pages, }; struct nfs3_getaclres res = { NULL, }; struct rpc_message msg = { .rpc_argp = &args, .rpc_resp = &res, }; struct posix_acl *acl; int status, count; if (!nfs_server_capable(inode, NFS_CAP_ACLS)) return ERR_PTR(-EOPNOTSUPP); status = nfs_revalidate_inode(server, inode); if (status < 0) return ERR_PTR(status); acl = nfs3_get_cached_acl(inode, type); if (acl != ERR_PTR(-EAGAIN)) return acl; acl = NULL; if (type == ACL_TYPE_ACCESS) args.mask |= NFS_ACLCNT|NFS_ACL; if (S_ISDIR(inode->i_mode)) args.mask |= NFS_DFACLCNT|NFS_DFACL; if (args.mask == 0) return NULL; dprintk("NFS call getacl\n"); msg.rpc_proc = &server->client_acl->cl_procinfo[ACLPROC3_GETACL]; res.fattr = nfs_alloc_fattr(); if (res.fattr == NULL) return ERR_PTR(-ENOMEM); status = rpc_call_sync(server->client_acl, &msg, 0); dprintk("NFS reply getacl: %d\n", status); for (count = 0; count < NFSACL_MAXPAGES && args.pages[count]; count++) __free_page(args.pages[count]); switch (status) { case 0: status = nfs_refresh_inode(inode, res.fattr); break; case -EPFNOSUPPORT: case -EPROTONOSUPPORT: dprintk("NFS_V3_ACL extension not supported; disabling\n"); server->caps &= ~NFS_CAP_ACLS; case -ENOTSUPP: status = -EOPNOTSUPP; default: goto getout; } if ((args.mask & res.mask) != args.mask) { status = -EIO; goto getout; } if (res.acl_access != NULL) { if (posix_acl_equiv_mode(res.acl_access, NULL) == 0) { posix_acl_release(res.acl_access); res.acl_access = NULL; } } nfs3_cache_acls(inode, (res.mask & NFS_ACL) ? res.acl_access : ERR_PTR(-EINVAL), (res.mask & NFS_DFACL) ? 
res.acl_default : ERR_PTR(-EINVAL)); switch(type) { case ACL_TYPE_ACCESS: acl = res.acl_access; res.acl_access = NULL; break; case ACL_TYPE_DEFAULT: acl = res.acl_default; res.acl_default = NULL; } getout: posix_acl_release(res.acl_access); posix_acl_release(res.acl_default); nfs_free_fattr(res.fattr); if (status != 0) { posix_acl_release(acl); acl = ERR_PTR(status); } return acl; } static int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl, struct posix_acl *dfacl) { struct nfs_server *server = NFS_SERVER(inode); struct nfs_fattr *fattr; struct page *pages[NFSACL_MAXPAGES]; struct nfs3_setaclargs args = { .inode = inode, .mask = NFS_ACL, .acl_access = acl, .pages = pages, }; struct rpc_message msg = { .rpc_argp = &args, .rpc_resp = &fattr, }; int status; status = -EOPNOTSUPP; if (!nfs_server_capable(inode, NFS_CAP_ACLS)) goto out; status = -ENOSPC; if (acl != NULL && acl->a_count > NFS_ACL_MAX_ENTRIES) goto out; if (dfacl != NULL && dfacl->a_count > NFS_ACL_MAX_ENTRIES) goto out; if (S_ISDIR(inode->i_mode)) { args.mask |= NFS_DFACL; args.acl_default = dfacl; args.len = nfsacl_size(acl, dfacl); } else args.len = nfsacl_size(acl, NULL); if (args.len > NFS_ACL_INLINE_BUFSIZE) { unsigned int npages = 1 + ((args.len - 1) >> PAGE_SHIFT); status = -ENOMEM; do { args.pages[args.npages] = alloc_page(GFP_KERNEL); if (args.pages[args.npages] == NULL) goto out_freepages; args.npages++; } while (args.npages < npages); } dprintk("NFS call setacl\n"); status = -ENOMEM; fattr = nfs_alloc_fattr(); if (fattr == NULL) goto out_freepages; msg.rpc_proc = &server->client_acl->cl_procinfo[ACLPROC3_SETACL]; msg.rpc_resp = fattr; status = rpc_call_sync(server->client_acl, &msg, 0); nfs_access_zap_cache(inode); nfs_zap_acl_cache(inode); dprintk("NFS reply setacl: %d\n", status); switch (status) { case 0: status = nfs_refresh_inode(inode, fattr); nfs3_cache_acls(inode, acl, dfacl); break; case -EPFNOSUPPORT: case -EPROTONOSUPPORT: dprintk("NFS_V3_ACL SETACL RPC not supported" 
"(will not retry)\n"); server->caps &= ~NFS_CAP_ACLS; case -ENOTSUPP: status = -EOPNOTSUPP; } nfs_free_fattr(fattr); out_freepages: while (args.npages != 0) { args.npages--; __free_page(args.pages[args.npages]); } out: return status; } int nfs3_proc_setacl(struct inode *inode, int type, struct posix_acl *acl) { struct posix_acl *alloc = NULL, *dfacl = NULL; int status; if (S_ISDIR(inode->i_mode)) { switch(type) { case ACL_TYPE_ACCESS: alloc = dfacl = nfs3_proc_getacl(inode, ACL_TYPE_DEFAULT); if (IS_ERR(alloc)) goto fail; break; case ACL_TYPE_DEFAULT: dfacl = acl; alloc = acl = nfs3_proc_getacl(inode, ACL_TYPE_ACCESS); if (IS_ERR(alloc)) goto fail; break; default: return -EINVAL; } } else if (type != ACL_TYPE_ACCESS) return -EINVAL; if (acl == NULL) { alloc = acl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL); if (IS_ERR(alloc)) goto fail; } status = nfs3_proc_setacls(inode, acl, dfacl); posix_acl_release(alloc); return status; fail: return PTR_ERR(alloc); } int nfs3_proc_set_default_acl(struct inode *dir, struct inode *inode, umode_t mode) { struct posix_acl *dfacl, *acl; int error = 0; dfacl = nfs3_proc_getacl(dir, ACL_TYPE_DEFAULT); if (IS_ERR(dfacl)) { error = PTR_ERR(dfacl); return (error == -EOPNOTSUPP) ? 0 : error; } if (!dfacl) return 0; acl = posix_acl_dup(dfacl); error = posix_acl_create(&acl, GFP_KERNEL, &mode); if (error < 0) goto out_release_dfacl; error = nfs3_proc_setacls(inode, acl, S_ISDIR(inode->i_mode) ? dfacl : NULL); posix_acl_release(acl); out_release_dfacl: posix_acl_release(dfacl); return error; }
gpl-2.0
hzq1001/or1k-src
newlib/libc/machine/spu/read_ea.c
149
2033
/* (C) Copyright IBM Corp. 2008 All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of IBM nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Author: Ken Werner <ken.werner@de.ibm.com> */ #include "ea_internal.h" #include <ea.h> #include <spu_cache.h> #include "sys/linux_syscalls.h" extern void __cache_flush (void) __attribute__ ((weak)); COMPAT_EA_ALIAS (read_ea); ssize_ea_t read_ea (int fd, __ea void *buf, size_ea_t count) { struct spu_syscall_block s = { __NR_read, { fd, (size_ea_t) buf, count, 0, 0, 0} }; /* Flush cache only if the application really uses the software cache. */ if (__cache_flush) __cache_flush (); return __linux_syscall (&s); }
gpl-2.0
Dm47021/Holo-a200
drivers/net/bcm63xx_enet.c
149
49181
/* * Driver for BCM963xx builtin Ethernet mac * * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/module.h> #include <linux/clk.h> #include <linux/etherdevice.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/ethtool.h> #include <linux/crc32.h> #include <linux/err.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <linux/if_vlan.h> #include <bcm63xx_dev_enet.h> #include "bcm63xx_enet.h" static char bcm_enet_driver_name[] = "bcm63xx_enet"; static char bcm_enet_driver_version[] = "1.0"; static int copybreak __read_mostly = 128; module_param(copybreak, int, 0); MODULE_PARM_DESC(copybreak, "Receive copy threshold"); /* io memory shared between all devices */ static void __iomem *bcm_enet_shared_base; /* * io helpers to access mac registers */ static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off) { return bcm_readl(priv->base + off); } static inline void enet_writel(struct bcm_enet_priv *priv, u32 val, u32 off) { bcm_writel(val, priv->base + off); } /* * io helpers to access shared registers */ static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off) { return bcm_readl(bcm_enet_shared_base + off); } static inline void enet_dma_writel(struct bcm_enet_priv *priv, u32 val, u32 off) { 
bcm_writel(val, bcm_enet_shared_base + off); } /* * write given data into mii register and wait for transfer to end * with timeout (average measured transfer time is 25us) */ static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data) { int limit; /* make sure mii interrupt status is cleared */ enet_writel(priv, ENET_IR_MII, ENET_IR_REG); enet_writel(priv, data, ENET_MIIDATA_REG); wmb(); /* busy wait on mii interrupt bit, with timeout */ limit = 1000; do { if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII) break; udelay(1); } while (limit-- > 0); return (limit < 0) ? 1 : 0; } /* * MII internal read callback */ static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id, int regnum) { u32 tmp, val; tmp = regnum << ENET_MIIDATA_REG_SHIFT; tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT; tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT; tmp |= ENET_MIIDATA_OP_READ_MASK; if (do_mdio_op(priv, tmp)) return -1; val = enet_readl(priv, ENET_MIIDATA_REG); val &= 0xffff; return val; } /* * MII internal write callback */ static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id, int regnum, u16 value) { u32 tmp; tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT; tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT; tmp |= regnum << ENET_MIIDATA_REG_SHIFT; tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT; tmp |= ENET_MIIDATA_OP_WRITE_MASK; (void)do_mdio_op(priv, tmp); return 0; } /* * MII read callback from phylib */ static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id, int regnum) { return bcm_enet_mdio_read(bus->priv, mii_id, regnum); } /* * MII write callback from phylib */ static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id, int regnum, u16 value) { return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value); } /* * MII read callback from mii core */ static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id, int regnum) { return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum); } /* * MII write callback from mii core */ static void 
bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id, int regnum, int value) { bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value); } /* * refill rx queue */ static int bcm_enet_refill_rx(struct net_device *dev) { struct bcm_enet_priv *priv; priv = netdev_priv(dev); while (priv->rx_desc_count < priv->rx_ring_size) { struct bcm_enet_desc *desc; struct sk_buff *skb; dma_addr_t p; int desc_idx; u32 len_stat; desc_idx = priv->rx_dirty_desc; desc = &priv->rx_desc_cpu[desc_idx]; if (!priv->rx_skb[desc_idx]) { skb = netdev_alloc_skb(dev, priv->rx_skb_size); if (!skb) break; priv->rx_skb[desc_idx] = skb; p = dma_map_single(&priv->pdev->dev, skb->data, priv->rx_skb_size, DMA_FROM_DEVICE); desc->address = p; } len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT; len_stat |= DMADESC_OWNER_MASK; if (priv->rx_dirty_desc == priv->rx_ring_size - 1) { len_stat |= DMADESC_WRAP_MASK; priv->rx_dirty_desc = 0; } else { priv->rx_dirty_desc++; } wmb(); desc->len_stat = len_stat; priv->rx_desc_count++; /* tell dma engine we allocated one buffer */ enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan)); } /* If rx ring is still empty, set a timer to try allocating * again at a later time. 
*/ if (priv->rx_desc_count == 0 && netif_running(dev)) { dev_warn(&priv->pdev->dev, "unable to refill rx ring\n"); priv->rx_timeout.expires = jiffies + HZ; add_timer(&priv->rx_timeout); } return 0; } /* * timer callback to defer refill rx queue in case we're OOM */ static void bcm_enet_refill_rx_timer(unsigned long data) { struct net_device *dev; struct bcm_enet_priv *priv; dev = (struct net_device *)data; priv = netdev_priv(dev); spin_lock(&priv->rx_lock); bcm_enet_refill_rx((struct net_device *)data); spin_unlock(&priv->rx_lock); } /* * extract packet from rx queue */ static int bcm_enet_receive_queue(struct net_device *dev, int budget) { struct bcm_enet_priv *priv; struct device *kdev; int processed; priv = netdev_priv(dev); kdev = &priv->pdev->dev; processed = 0; /* don't scan ring further than number of refilled * descriptor */ if (budget > priv->rx_desc_count) budget = priv->rx_desc_count; do { struct bcm_enet_desc *desc; struct sk_buff *skb; int desc_idx; u32 len_stat; unsigned int len; desc_idx = priv->rx_curr_desc; desc = &priv->rx_desc_cpu[desc_idx]; /* make sure we actually read the descriptor status at * each loop */ rmb(); len_stat = desc->len_stat; /* break if dma ownership belongs to hw */ if (len_stat & DMADESC_OWNER_MASK) break; processed++; priv->rx_curr_desc++; if (priv->rx_curr_desc == priv->rx_ring_size) priv->rx_curr_desc = 0; priv->rx_desc_count--; /* if the packet does not have start of packet _and_ * end of packet flag set, then just recycle it */ if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) { dev->stats.rx_dropped++; continue; } /* recycle packet if it's marked as bad */ if (unlikely(len_stat & DMADESC_ERR_MASK)) { dev->stats.rx_errors++; if (len_stat & DMADESC_OVSIZE_MASK) dev->stats.rx_length_errors++; if (len_stat & DMADESC_CRC_MASK) dev->stats.rx_crc_errors++; if (len_stat & DMADESC_UNDER_MASK) dev->stats.rx_frame_errors++; if (len_stat & DMADESC_OV_MASK) dev->stats.rx_fifo_errors++; continue; } /* valid packet */ skb = 
priv->rx_skb[desc_idx]; len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT; /* don't include FCS */ len -= 4; if (len < copybreak) { struct sk_buff *nskb; nskb = netdev_alloc_skb_ip_align(dev, len); if (!nskb) { /* forget packet, just rearm desc */ dev->stats.rx_dropped++; continue; } dma_sync_single_for_cpu(kdev, desc->address, len, DMA_FROM_DEVICE); memcpy(nskb->data, skb->data, len); dma_sync_single_for_device(kdev, desc->address, len, DMA_FROM_DEVICE); skb = nskb; } else { dma_unmap_single(&priv->pdev->dev, desc->address, priv->rx_skb_size, DMA_FROM_DEVICE); priv->rx_skb[desc_idx] = NULL; } skb_put(skb, len); skb->protocol = eth_type_trans(skb, dev); dev->stats.rx_packets++; dev->stats.rx_bytes += len; netif_receive_skb(skb); } while (--budget > 0); if (processed || !priv->rx_desc_count) { bcm_enet_refill_rx(dev); /* kick rx dma */ enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, ENETDMA_CHANCFG_REG(priv->rx_chan)); } return processed; } /* * try to or force reclaim of transmitted buffers */ static int bcm_enet_tx_reclaim(struct net_device *dev, int force) { struct bcm_enet_priv *priv; int released; priv = netdev_priv(dev); released = 0; while (priv->tx_desc_count < priv->tx_ring_size) { struct bcm_enet_desc *desc; struct sk_buff *skb; /* We run in a bh and fight against start_xmit, which * is called with bh disabled */ spin_lock(&priv->tx_lock); desc = &priv->tx_desc_cpu[priv->tx_dirty_desc]; if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) { spin_unlock(&priv->tx_lock); break; } /* ensure other field of the descriptor were not read * before we checked ownership */ rmb(); skb = priv->tx_skb[priv->tx_dirty_desc]; priv->tx_skb[priv->tx_dirty_desc] = NULL; dma_unmap_single(&priv->pdev->dev, desc->address, skb->len, DMA_TO_DEVICE); priv->tx_dirty_desc++; if (priv->tx_dirty_desc == priv->tx_ring_size) priv->tx_dirty_desc = 0; priv->tx_desc_count++; spin_unlock(&priv->tx_lock); if (desc->len_stat & DMADESC_UNDER_MASK) dev->stats.tx_errors++; 
dev_kfree_skb(skb); released++; } if (netif_queue_stopped(dev) && released) netif_wake_queue(dev); return released; } /* * poll func, called by network core */ static int bcm_enet_poll(struct napi_struct *napi, int budget) { struct bcm_enet_priv *priv; struct net_device *dev; int tx_work_done, rx_work_done; priv = container_of(napi, struct bcm_enet_priv, napi); dev = priv->net_dev; /* ack interrupts */ enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, ENETDMA_IR_REG(priv->rx_chan)); enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, ENETDMA_IR_REG(priv->tx_chan)); /* reclaim sent skb */ tx_work_done = bcm_enet_tx_reclaim(dev, 0); spin_lock(&priv->rx_lock); rx_work_done = bcm_enet_receive_queue(dev, budget); spin_unlock(&priv->rx_lock); if (rx_work_done >= budget || tx_work_done > 0) { /* rx/tx queue is not yet empty/clean */ return rx_work_done; } /* no more packet in rx/tx queue, remove device from poll * queue */ napi_complete(napi); /* restore rx/tx interrupt */ enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, ENETDMA_IRMASK_REG(priv->rx_chan)); enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, ENETDMA_IRMASK_REG(priv->tx_chan)); return rx_work_done; } /* * mac interrupt handler */ static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id) { struct net_device *dev; struct bcm_enet_priv *priv; u32 stat; dev = dev_id; priv = netdev_priv(dev); stat = enet_readl(priv, ENET_IR_REG); if (!(stat & ENET_IR_MIB)) return IRQ_NONE; /* clear & mask interrupt */ enet_writel(priv, ENET_IR_MIB, ENET_IR_REG); enet_writel(priv, 0, ENET_IRMASK_REG); /* read mib registers in workqueue */ schedule_work(&priv->mib_update_task); return IRQ_HANDLED; } /* * rx/tx dma interrupt handler */ static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id) { struct net_device *dev; struct bcm_enet_priv *priv; dev = dev_id; priv = netdev_priv(dev); /* mask rx/tx interrupts */ enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan)); enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); 
napi_schedule(&priv->napi); return IRQ_HANDLED; } /* * tx request callback */ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bcm_enet_priv *priv; struct bcm_enet_desc *desc; u32 len_stat; int ret; priv = netdev_priv(dev); /* lock against tx reclaim */ spin_lock(&priv->tx_lock); /* make sure the tx hw queue is not full, should not happen * since we stop queue before it's the case */ if (unlikely(!priv->tx_desc_count)) { netif_stop_queue(dev); dev_err(&priv->pdev->dev, "xmit called with no tx desc " "available?\n"); ret = NETDEV_TX_BUSY; goto out_unlock; } /* point to the next available desc */ desc = &priv->tx_desc_cpu[priv->tx_curr_desc]; priv->tx_skb[priv->tx_curr_desc] = skb; /* fill descriptor */ desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK; len_stat |= DMADESC_ESOP_MASK | DMADESC_APPEND_CRC | DMADESC_OWNER_MASK; priv->tx_curr_desc++; if (priv->tx_curr_desc == priv->tx_ring_size) { priv->tx_curr_desc = 0; len_stat |= DMADESC_WRAP_MASK; } priv->tx_desc_count--; /* dma might be already polling, make sure we update desc * fields in correct order */ wmb(); desc->len_stat = len_stat; wmb(); /* kick tx dma */ enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, ENETDMA_CHANCFG_REG(priv->tx_chan)); /* stop queue if no more desc available */ if (!priv->tx_desc_count) netif_stop_queue(dev); dev->stats.tx_bytes += skb->len; dev->stats.tx_packets++; ret = NETDEV_TX_OK; out_unlock: spin_unlock(&priv->tx_lock); return ret; } /* * Change the interface's mac address. 
*/ static int bcm_enet_set_mac_address(struct net_device *dev, void *p) { struct bcm_enet_priv *priv; struct sockaddr *addr = p; u32 val; priv = netdev_priv(dev); memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); /* use perfect match register 0 to store my mac address */ val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) | (dev->dev_addr[4] << 8) | dev->dev_addr[5]; enet_writel(priv, val, ENET_PML_REG(0)); val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]); val |= ENET_PMH_DATAVALID_MASK; enet_writel(priv, val, ENET_PMH_REG(0)); return 0; } /* * Change rx mode (promiscuous/allmulti) and update multicast list */ static void bcm_enet_set_multicast_list(struct net_device *dev) { struct bcm_enet_priv *priv; struct netdev_hw_addr *ha; u32 val; int i; priv = netdev_priv(dev); val = enet_readl(priv, ENET_RXCFG_REG); if (dev->flags & IFF_PROMISC) val |= ENET_RXCFG_PROMISC_MASK; else val &= ~ENET_RXCFG_PROMISC_MASK; /* only 3 perfect match registers left, first one is used for * own mac address */ if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3) val |= ENET_RXCFG_ALLMCAST_MASK; else val &= ~ENET_RXCFG_ALLMCAST_MASK; /* no need to set perfect match registers if we catch all * multicast */ if (val & ENET_RXCFG_ALLMCAST_MASK) { enet_writel(priv, val, ENET_RXCFG_REG); return; } i = 0; netdev_for_each_mc_addr(ha, dev) { u8 *dmi_addr; u32 tmp; if (i == 3) break; /* update perfect match registers */ dmi_addr = ha->addr; tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) | (dmi_addr[4] << 8) | dmi_addr[5]; enet_writel(priv, tmp, ENET_PML_REG(i + 1)); tmp = (dmi_addr[0] << 8 | dmi_addr[1]); tmp |= ENET_PMH_DATAVALID_MASK; enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1)); } for (; i < 3; i++) { enet_writel(priv, 0, ENET_PML_REG(i + 1)); enet_writel(priv, 0, ENET_PMH_REG(i + 1)); } enet_writel(priv, val, ENET_RXCFG_REG); } /* * set mac duplex parameters */ static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex) { u32 val; val = enet_readl(priv, 
ENET_TXCTL_REG); if (fullduplex) val |= ENET_TXCTL_FD_MASK; else val &= ~ENET_TXCTL_FD_MASK; enet_writel(priv, val, ENET_TXCTL_REG); } /* * set mac flow control parameters */ static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en) { u32 val; /* rx flow control (pause frame handling) */ val = enet_readl(priv, ENET_RXCFG_REG); if (rx_en) val |= ENET_RXCFG_ENFLOW_MASK; else val &= ~ENET_RXCFG_ENFLOW_MASK; enet_writel(priv, val, ENET_RXCFG_REG); /* tx flow control (pause frame generation) */ val = enet_dma_readl(priv, ENETDMA_CFG_REG); if (tx_en) val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan); else val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan); enet_dma_writel(priv, val, ENETDMA_CFG_REG); } /* * link changed callback (from phylib) */ static void bcm_enet_adjust_phy_link(struct net_device *dev) { struct bcm_enet_priv *priv; struct phy_device *phydev; int status_changed; priv = netdev_priv(dev); phydev = priv->phydev; status_changed = 0; if (priv->old_link != phydev->link) { status_changed = 1; priv->old_link = phydev->link; } /* reflect duplex change in mac configuration */ if (phydev->link && phydev->duplex != priv->old_duplex) { bcm_enet_set_duplex(priv, (phydev->duplex == DUPLEX_FULL) ? 1 : 0); status_changed = 1; priv->old_duplex = phydev->duplex; } /* enable flow control if remote advertise it (trust phylib to * check that duplex is full */ if (phydev->link && phydev->pause != priv->old_pause) { int rx_pause_en, tx_pause_en; if (phydev->pause) { /* pause was advertised by lpa and us */ rx_pause_en = 1; tx_pause_en = 1; } else if (!priv->pause_auto) { /* pause setting overrided by user */ rx_pause_en = priv->pause_rx; tx_pause_en = priv->pause_tx; } else { rx_pause_en = 0; tx_pause_en = 0; } bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en); status_changed = 1; priv->old_pause = phydev->pause; } if (status_changed) { pr_info("%s: link %s", dev->name, phydev->link ? 
"UP" : "DOWN"); if (phydev->link) pr_cont(" - %d/%s - flow control %s", phydev->speed, DUPLEX_FULL == phydev->duplex ? "full" : "half", phydev->pause == 1 ? "rx&tx" : "off"); pr_cont("\n"); } } /* * link changed callback (if phylib is not used) */ static void bcm_enet_adjust_link(struct net_device *dev) { struct bcm_enet_priv *priv; priv = netdev_priv(dev); bcm_enet_set_duplex(priv, priv->force_duplex_full); bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx); netif_carrier_on(dev); pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n", dev->name, priv->force_speed_100 ? 100 : 10, priv->force_duplex_full ? "full" : "half", priv->pause_rx ? "rx" : "off", priv->pause_tx ? "tx" : "off"); } /* * open callback, allocate dma rings & buffers and start rx operation */ static int bcm_enet_open(struct net_device *dev) { struct bcm_enet_priv *priv; struct sockaddr addr; struct device *kdev; struct phy_device *phydev; int i, ret; unsigned int size; char phy_id[MII_BUS_ID_SIZE + 3]; void *p; u32 val; priv = netdev_priv(dev); kdev = &priv->pdev->dev; if (priv->has_phy) { /* connect to PHY */ snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, priv->mac_id ? 
"1" : "0", priv->phy_id); phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, 0, PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { dev_err(kdev, "could not attach to PHY\n"); return PTR_ERR(phydev); } /* mask with MAC supported features */ phydev->supported &= (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_Pause | SUPPORTED_MII); phydev->advertising = phydev->supported; if (priv->pause_auto && priv->pause_rx && priv->pause_tx) phydev->advertising |= SUPPORTED_Pause; else phydev->advertising &= ~SUPPORTED_Pause; dev_info(kdev, "attached PHY at address %d [%s]\n", phydev->addr, phydev->drv->name); priv->old_link = 0; priv->old_duplex = -1; priv->old_pause = -1; priv->phydev = phydev; } /* mask all interrupts and request them */ enet_writel(priv, 0, ENET_IRMASK_REG); enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan)); enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev); if (ret) goto out_phy_disconnect; ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, IRQF_SAMPLE_RANDOM | IRQF_DISABLED, dev->name, dev); if (ret) goto out_freeirq; ret = request_irq(priv->irq_tx, bcm_enet_isr_dma, IRQF_DISABLED, dev->name, dev); if (ret) goto out_freeirq_rx; /* initialize perfect match registers */ for (i = 0; i < 4; i++) { enet_writel(priv, 0, ENET_PML_REG(i)); enet_writel(priv, 0, ENET_PMH_REG(i)); } /* write device mac address */ memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN); bcm_enet_set_mac_address(dev, &addr); /* allocate rx dma ring */ size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); if (!p) { dev_err(kdev, "cannot allocate rx ring %u\n", size); ret = -ENOMEM; goto out_freeirq_tx; } memset(p, 0, size); priv->rx_desc_alloc_size = size; priv->rx_desc_cpu = p; /* allocate tx dma ring */ size = priv->tx_ring_size * sizeof(struct 
bcm_enet_desc); p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); if (!p) { dev_err(kdev, "cannot allocate tx ring\n"); ret = -ENOMEM; goto out_free_rx_ring; } memset(p, 0, size); priv->tx_desc_alloc_size = size; priv->tx_desc_cpu = p; priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size, GFP_KERNEL); if (!priv->tx_skb) { dev_err(kdev, "cannot allocate rx skb queue\n"); ret = -ENOMEM; goto out_free_tx_ring; } priv->tx_desc_count = priv->tx_ring_size; priv->tx_dirty_desc = 0; priv->tx_curr_desc = 0; spin_lock_init(&priv->tx_lock); /* init & fill rx ring with skbs */ priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size, GFP_KERNEL); if (!priv->rx_skb) { dev_err(kdev, "cannot allocate rx skb queue\n"); ret = -ENOMEM; goto out_free_tx_skb; } priv->rx_desc_count = 0; priv->rx_dirty_desc = 0; priv->rx_curr_desc = 0; /* initialize flow control buffer allocation */ enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, ENETDMA_BUFALLOC_REG(priv->rx_chan)); if (bcm_enet_refill_rx(dev)) { dev_err(kdev, "cannot allocate rx skb queue\n"); ret = -ENOMEM; goto out; } /* write rx & tx ring addresses */ enet_dma_writel(priv, priv->rx_desc_dma, ENETDMA_RSTART_REG(priv->rx_chan)); enet_dma_writel(priv, priv->tx_desc_dma, ENETDMA_RSTART_REG(priv->tx_chan)); /* clear remaining state ram for rx & tx channel */ enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan)); enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan)); enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan)); enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan)); enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan)); enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan)); /* set max rx/tx length */ enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG); enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG); /* set dma maximum burst len */ enet_dma_writel(priv, BCMENET_DMA_MAXBURST, ENETDMA_MAXBURST_REG(priv->rx_chan)); enet_dma_writel(priv, 
BCMENET_DMA_MAXBURST, ENETDMA_MAXBURST_REG(priv->tx_chan)); /* set correct transmit fifo watermark */ enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG); /* set flow control low/high threshold to 1/3 / 2/3 */ val = priv->rx_ring_size / 3; enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); val = (priv->rx_ring_size * 2) / 3; enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); /* all set, enable mac and interrupts, start dma engine and * kick rx dma channel */ wmb(); val = enet_readl(priv, ENET_CTL_REG); val |= ENET_CTL_ENABLE_MASK; enet_writel(priv, val, ENET_CTL_REG); enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, ENETDMA_CHANCFG_REG(priv->rx_chan)); /* watch "mib counters about to overflow" interrupt */ enet_writel(priv, ENET_IR_MIB, ENET_IR_REG); enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG); /* watch "packet transferred" interrupt in rx and tx */ enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, ENETDMA_IR_REG(priv->rx_chan)); enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, ENETDMA_IR_REG(priv->tx_chan)); /* make sure we enable napi before rx interrupt */ napi_enable(&priv->napi); enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, ENETDMA_IRMASK_REG(priv->rx_chan)); enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, ENETDMA_IRMASK_REG(priv->tx_chan)); if (priv->has_phy) phy_start(priv->phydev); else bcm_enet_adjust_link(dev); netif_start_queue(dev); return 0; out: for (i = 0; i < priv->rx_ring_size; i++) { struct bcm_enet_desc *desc; if (!priv->rx_skb[i]) continue; desc = &priv->rx_desc_cpu[i]; dma_unmap_single(kdev, desc->address, priv->rx_skb_size, DMA_FROM_DEVICE); kfree_skb(priv->rx_skb[i]); } kfree(priv->rx_skb); out_free_tx_skb: kfree(priv->tx_skb); out_free_tx_ring: dma_free_coherent(kdev, priv->tx_desc_alloc_size, priv->tx_desc_cpu, priv->tx_desc_dma); out_free_rx_ring: dma_free_coherent(kdev, priv->rx_desc_alloc_size, priv->rx_desc_cpu, priv->rx_desc_dma); out_freeirq_tx: 
free_irq(priv->irq_tx, dev); out_freeirq_rx: free_irq(priv->irq_rx, dev); out_freeirq: free_irq(dev->irq, dev); out_phy_disconnect: phy_disconnect(priv->phydev); return ret; } /* * disable mac */ static void bcm_enet_disable_mac(struct bcm_enet_priv *priv) { int limit; u32 val; val = enet_readl(priv, ENET_CTL_REG); val |= ENET_CTL_DISABLE_MASK; enet_writel(priv, val, ENET_CTL_REG); limit = 1000; do { u32 val; val = enet_readl(priv, ENET_CTL_REG); if (!(val & ENET_CTL_DISABLE_MASK)) break; udelay(1); } while (limit--); } /* * disable dma in given channel */ static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan) { int limit; enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan)); limit = 1000; do { u32 val; val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan)); if (!(val & ENETDMA_CHANCFG_EN_MASK)) break; udelay(1); } while (limit--); } /* * stop callback */ static int bcm_enet_stop(struct net_device *dev) { struct bcm_enet_priv *priv; struct device *kdev; int i; priv = netdev_priv(dev); kdev = &priv->pdev->dev; netif_stop_queue(dev); napi_disable(&priv->napi); if (priv->has_phy) phy_stop(priv->phydev); del_timer_sync(&priv->rx_timeout); /* mask all interrupts */ enet_writel(priv, 0, ENET_IRMASK_REG); enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan)); enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); /* make sure no mib update is scheduled */ cancel_work_sync(&priv->mib_update_task); /* disable dma & mac */ bcm_enet_disable_dma(priv, priv->tx_chan); bcm_enet_disable_dma(priv, priv->rx_chan); bcm_enet_disable_mac(priv); /* force reclaim of all tx buffers */ bcm_enet_tx_reclaim(dev, 1); /* free the rx skb ring */ for (i = 0; i < priv->rx_ring_size; i++) { struct bcm_enet_desc *desc; if (!priv->rx_skb[i]) continue; desc = &priv->rx_desc_cpu[i]; dma_unmap_single(kdev, desc->address, priv->rx_skb_size, DMA_FROM_DEVICE); kfree_skb(priv->rx_skb[i]); } /* free remaining allocated memory */ kfree(priv->rx_skb); kfree(priv->tx_skb); 
dma_free_coherent(kdev, priv->rx_desc_alloc_size, priv->rx_desc_cpu, priv->rx_desc_dma); dma_free_coherent(kdev, priv->tx_desc_alloc_size, priv->tx_desc_cpu, priv->tx_desc_dma); free_irq(priv->irq_tx, dev); free_irq(priv->irq_rx, dev); free_irq(dev->irq, dev); /* release phy */ if (priv->has_phy) { phy_disconnect(priv->phydev); priv->phydev = NULL; } return 0; } /* * ethtool callbacks */ struct bcm_enet_stats { char stat_string[ETH_GSTRING_LEN]; int sizeof_stat; int stat_offset; int mib_reg; }; #define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m), \ offsetof(struct bcm_enet_priv, m) #define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m), \ offsetof(struct net_device_stats, m) static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = { { "rx_packets", DEV_STAT(rx_packets), -1 }, { "tx_packets", DEV_STAT(tx_packets), -1 }, { "rx_bytes", DEV_STAT(rx_bytes), -1 }, { "tx_bytes", DEV_STAT(tx_bytes), -1 }, { "rx_errors", DEV_STAT(rx_errors), -1 }, { "tx_errors", DEV_STAT(tx_errors), -1 }, { "rx_dropped", DEV_STAT(rx_dropped), -1 }, { "tx_dropped", DEV_STAT(tx_dropped), -1 }, { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS}, { "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS }, { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST }, { "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT }, { "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 }, { "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 }, { "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 }, { "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 }, { "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 }, { "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX }, { "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB }, { "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR }, { "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG }, { "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP }, { 
"rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN }, { "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND }, { "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC }, { "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN }, { "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM }, { "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE }, { "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL }, { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS }, { "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS }, { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST }, { "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT }, { "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 }, { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 }, { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 }, { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 }, { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023}, { "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX }, { "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB }, { "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR }, { "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG }, { "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN }, { "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL }, { "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL }, { "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL }, { "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL }, { "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE }, { "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF }, { "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS }, { "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE }, }; #define BCM_ENET_STATS_LEN \ (sizeof(bcm_enet_gstrings_stats) / sizeof(struct bcm_enet_stats)) static const u32 unused_mib_regs[] = { ETH_MIB_TX_ALL_OCTETS, 
ETH_MIB_TX_ALL_PKTS, ETH_MIB_RX_ALL_OCTETS, ETH_MIB_RX_ALL_PKTS, }; static void bcm_enet_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { strncpy(drvinfo->driver, bcm_enet_driver_name, 32); strncpy(drvinfo->version, bcm_enet_driver_version, 32); strncpy(drvinfo->fw_version, "N/A", 32); strncpy(drvinfo->bus_info, "bcm63xx", 32); drvinfo->n_stats = BCM_ENET_STATS_LEN; } static int bcm_enet_get_sset_count(struct net_device *netdev, int string_set) { switch (string_set) { case ETH_SS_STATS: return BCM_ENET_STATS_LEN; default: return -EINVAL; } } static void bcm_enet_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < BCM_ENET_STATS_LEN; i++) { memcpy(data + i * ETH_GSTRING_LEN, bcm_enet_gstrings_stats[i].stat_string, ETH_GSTRING_LEN); } break; } } static void update_mib_counters(struct bcm_enet_priv *priv) { int i; for (i = 0; i < BCM_ENET_STATS_LEN; i++) { const struct bcm_enet_stats *s; u32 val; char *p; s = &bcm_enet_gstrings_stats[i]; if (s->mib_reg == -1) continue; val = enet_readl(priv, ENET_MIB_REG(s->mib_reg)); p = (char *)priv + s->stat_offset; if (s->sizeof_stat == sizeof(u64)) *(u64 *)p += val; else *(u32 *)p += val; } /* also empty unused mib counters to make sure mib counter * overflow interrupt is cleared */ for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++) (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i])); } static void bcm_enet_update_mib_counters_defer(struct work_struct *t) { struct bcm_enet_priv *priv; priv = container_of(t, struct bcm_enet_priv, mib_update_task); mutex_lock(&priv->mib_update_lock); update_mib_counters(priv); mutex_unlock(&priv->mib_update_lock); /* reenable mib interrupt */ if (netif_running(priv->net_dev)) enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG); } static void bcm_enet_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct bcm_enet_priv *priv; int i; priv = netdev_priv(netdev); 
mutex_lock(&priv->mib_update_lock); update_mib_counters(priv); for (i = 0; i < BCM_ENET_STATS_LEN; i++) { const struct bcm_enet_stats *s; char *p; s = &bcm_enet_gstrings_stats[i]; if (s->mib_reg == -1) p = (char *)&netdev->stats; else p = (char *)priv; p += s->stat_offset; data[i] = (s->sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } mutex_unlock(&priv->mib_update_lock); } static int bcm_enet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct bcm_enet_priv *priv; priv = netdev_priv(dev); cmd->maxrxpkt = 0; cmd->maxtxpkt = 0; if (priv->has_phy) { if (!priv->phydev) return -ENODEV; return phy_ethtool_gset(priv->phydev, cmd); } else { cmd->autoneg = 0; cmd->speed = (priv->force_speed_100) ? SPEED_100 : SPEED_10; cmd->duplex = (priv->force_duplex_full) ? DUPLEX_FULL : DUPLEX_HALF; cmd->supported = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; cmd->advertising = 0; cmd->port = PORT_MII; cmd->transceiver = XCVR_EXTERNAL; } return 0; } static int bcm_enet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct bcm_enet_priv *priv; priv = netdev_priv(dev); if (priv->has_phy) { if (!priv->phydev) return -ENODEV; return phy_ethtool_sset(priv->phydev, cmd); } else { if (cmd->autoneg || (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) || cmd->port != PORT_MII) return -EINVAL; priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0; priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 
1 : 0; if (netif_running(dev)) bcm_enet_adjust_link(dev); return 0; } } static void bcm_enet_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) { struct bcm_enet_priv *priv; priv = netdev_priv(dev); /* rx/tx ring is actually only limited by memory */ ering->rx_max_pending = 8192; ering->tx_max_pending = 8192; ering->rx_mini_max_pending = 0; ering->rx_jumbo_max_pending = 0; ering->rx_pending = priv->rx_ring_size; ering->tx_pending = priv->tx_ring_size; } static int bcm_enet_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) { struct bcm_enet_priv *priv; int was_running; priv = netdev_priv(dev); was_running = 0; if (netif_running(dev)) { bcm_enet_stop(dev); was_running = 1; } priv->rx_ring_size = ering->rx_pending; priv->tx_ring_size = ering->tx_pending; if (was_running) { int err; err = bcm_enet_open(dev); if (err) dev_close(dev); else bcm_enet_set_multicast_list(dev); } return 0; } static void bcm_enet_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *ecmd) { struct bcm_enet_priv *priv; priv = netdev_priv(dev); ecmd->autoneg = priv->pause_auto; ecmd->rx_pause = priv->pause_rx; ecmd->tx_pause = priv->pause_tx; } static int bcm_enet_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *ecmd) { struct bcm_enet_priv *priv; priv = netdev_priv(dev); if (priv->has_phy) { if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) { /* asymetric pause mode not supported, * actually possible but integrated PHY has RO * asym_pause bit */ return -EINVAL; } } else { /* no pause autoneg on direct mii connection */ if (ecmd->autoneg) return -EINVAL; } priv->pause_auto = ecmd->autoneg; priv->pause_rx = ecmd->rx_pause; priv->pause_tx = ecmd->tx_pause; return 0; } static struct ethtool_ops bcm_enet_ethtool_ops = { .get_strings = bcm_enet_get_strings, .get_sset_count = bcm_enet_get_sset_count, .get_ethtool_stats = bcm_enet_get_ethtool_stats, .get_settings = bcm_enet_get_settings, .set_settings = bcm_enet_set_settings, 
.get_drvinfo = bcm_enet_get_drvinfo, .get_link = ethtool_op_get_link, .get_ringparam = bcm_enet_get_ringparam, .set_ringparam = bcm_enet_set_ringparam, .get_pauseparam = bcm_enet_get_pauseparam, .set_pauseparam = bcm_enet_set_pauseparam, }; static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct bcm_enet_priv *priv; priv = netdev_priv(dev); if (priv->has_phy) { if (!priv->phydev) return -ENODEV; return phy_mii_ioctl(priv->phydev, rq, cmd); } else { struct mii_if_info mii; mii.dev = dev; mii.mdio_read = bcm_enet_mdio_read_mii; mii.mdio_write = bcm_enet_mdio_write_mii; mii.phy_id = 0; mii.phy_id_mask = 0x3f; mii.reg_num_mask = 0x1f; return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL); } } /* * calculate actual hardware mtu */ static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu) { int actual_mtu; actual_mtu = mtu; /* add ethernet header + vlan tag size */ actual_mtu += VLAN_ETH_HLEN; if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU) return -EINVAL; /* * setup maximum size before we get overflow mark in * descriptor, note that this will not prevent reception of * big frames, they will be split into multiple buffers * anyway */ priv->hw_mtu = actual_mtu; /* * align rx buffer size to dma burst len, account FCS since * it's appended */ priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN, BCMENET_DMA_MAXBURST * 4); return 0; } /* * adjust mtu, can't be called while device is running */ static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu) { int ret; if (netif_running(dev)) return -EBUSY; ret = compute_hw_mtu(netdev_priv(dev), new_mtu); if (ret) return ret; dev->mtu = new_mtu; return 0; } /* * preinit hardware to allow mii operation while device is down */ static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv) { u32 val; int limit; /* make sure mac is disabled */ bcm_enet_disable_mac(priv); /* soft reset mac */ val = ENET_CTL_SRESET_MASK; enet_writel(priv, val, ENET_CTL_REG); wmb(); limit = 1000; do { val = 
enet_readl(priv, ENET_CTL_REG); if (!(val & ENET_CTL_SRESET_MASK)) break; udelay(1); } while (limit--); /* select correct mii interface */ val = enet_readl(priv, ENET_CTL_REG); if (priv->use_external_mii) val |= ENET_CTL_EPHYSEL_MASK; else val &= ~ENET_CTL_EPHYSEL_MASK; enet_writel(priv, val, ENET_CTL_REG); /* turn on mdc clock */ enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) | ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG); /* set mib counters to self-clear when read */ val = enet_readl(priv, ENET_MIBCTL_REG); val |= ENET_MIBCTL_RDCLEAR_MASK; enet_writel(priv, val, ENET_MIBCTL_REG); } static const struct net_device_ops bcm_enet_ops = { .ndo_open = bcm_enet_open, .ndo_stop = bcm_enet_stop, .ndo_start_xmit = bcm_enet_start_xmit, .ndo_set_mac_address = bcm_enet_set_mac_address, .ndo_set_multicast_list = bcm_enet_set_multicast_list, .ndo_do_ioctl = bcm_enet_ioctl, .ndo_change_mtu = bcm_enet_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = bcm_enet_netpoll, #endif }; /* * allocate netdevice, request register memory and register device. */ static int __devinit bcm_enet_probe(struct platform_device *pdev) { struct bcm_enet_priv *priv; struct net_device *dev; struct bcm63xx_enet_platform_data *pd; struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx; struct mii_bus *bus; const char *clk_name; unsigned int iomem_size; int i, ret; /* stop if shared driver failed, assume driver->probe will be * called in the same order we register devices (correct ?) 
*/ if (!bcm_enet_shared_base) return -ENODEV; res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1); res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2); if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx) return -ENODEV; ret = 0; dev = alloc_etherdev(sizeof(*priv)); if (!dev) return -ENOMEM; priv = netdev_priv(dev); ret = compute_hw_mtu(priv, dev->mtu); if (ret) goto out; iomem_size = res_mem->end - res_mem->start + 1; if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) { ret = -EBUSY; goto out; } priv->base = ioremap(res_mem->start, iomem_size); if (priv->base == NULL) { ret = -ENOMEM; goto out_release_mem; } dev->irq = priv->irq = res_irq->start; priv->irq_rx = res_irq_rx->start; priv->irq_tx = res_irq_tx->start; priv->mac_id = pdev->id; /* get rx & tx dma channel id for this mac */ if (priv->mac_id == 0) { priv->rx_chan = 0; priv->tx_chan = 1; clk_name = "enet0"; } else { priv->rx_chan = 2; priv->tx_chan = 3; clk_name = "enet1"; } priv->mac_clk = clk_get(&pdev->dev, clk_name); if (IS_ERR(priv->mac_clk)) { ret = PTR_ERR(priv->mac_clk); goto out_unmap; } clk_enable(priv->mac_clk); /* initialize default and fetch platform data */ priv->rx_ring_size = BCMENET_DEF_RX_DESC; priv->tx_ring_size = BCMENET_DEF_TX_DESC; pd = pdev->dev.platform_data; if (pd) { memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); priv->has_phy = pd->has_phy; priv->phy_id = pd->phy_id; priv->has_phy_interrupt = pd->has_phy_interrupt; priv->phy_interrupt = pd->phy_interrupt; priv->use_external_mii = !pd->use_internal_phy; priv->pause_auto = pd->pause_auto; priv->pause_rx = pd->pause_rx; priv->pause_tx = pd->pause_tx; priv->force_duplex_full = pd->force_duplex_full; priv->force_speed_100 = pd->force_speed_100; } if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) { /* using internal PHY, enable clock */ priv->phy_clk = 
clk_get(&pdev->dev, "ephy"); if (IS_ERR(priv->phy_clk)) { ret = PTR_ERR(priv->phy_clk); priv->phy_clk = NULL; goto out_put_clk_mac; } clk_enable(priv->phy_clk); } /* do minimal hardware init to be able to probe mii bus */ bcm_enet_hw_preinit(priv); /* MII bus registration */ if (priv->has_phy) { priv->mii_bus = mdiobus_alloc(); if (!priv->mii_bus) { ret = -ENOMEM; goto out_uninit_hw; } bus = priv->mii_bus; bus->name = "bcm63xx_enet MII bus"; bus->parent = &pdev->dev; bus->priv = priv; bus->read = bcm_enet_mdio_read_phylib; bus->write = bcm_enet_mdio_write_phylib; sprintf(bus->id, "%d", priv->mac_id); /* only probe bus where we think the PHY is, because * the mdio read operation return 0 instead of 0xffff * if a slave is not present on hw */ bus->phy_mask = ~(1 << priv->phy_id); bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); if (!bus->irq) { ret = -ENOMEM; goto out_free_mdio; } if (priv->has_phy_interrupt) bus->irq[priv->phy_id] = priv->phy_interrupt; else bus->irq[priv->phy_id] = PHY_POLL; ret = mdiobus_register(bus); if (ret) { dev_err(&pdev->dev, "unable to register mdio bus\n"); goto out_free_mdio; } } else { /* run platform code to initialize PHY device */ if (pd->mii_config && pd->mii_config(dev, 1, bcm_enet_mdio_read_mii, bcm_enet_mdio_write_mii)) { dev_err(&pdev->dev, "unable to configure mdio bus\n"); goto out_uninit_hw; } } spin_lock_init(&priv->rx_lock); /* init rx timeout (used for oom) */ init_timer(&priv->rx_timeout); priv->rx_timeout.function = bcm_enet_refill_rx_timer; priv->rx_timeout.data = (unsigned long)dev; /* init the mib update lock&work */ mutex_init(&priv->mib_update_lock); INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer); /* zero mib counters */ for (i = 0; i < ENET_MIB_REG_COUNT; i++) enet_writel(priv, 0, ENET_MIB_REG(i)); /* register netdevice */ dev->netdev_ops = &bcm_enet_ops; netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16); SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops); SET_NETDEV_DEV(dev, 
&pdev->dev); ret = register_netdev(dev); if (ret) goto out_unregister_mdio; netif_carrier_off(dev); platform_set_drvdata(pdev, dev); priv->pdev = pdev; priv->net_dev = dev; return 0; out_unregister_mdio: if (priv->mii_bus) { mdiobus_unregister(priv->mii_bus); kfree(priv->mii_bus->irq); } out_free_mdio: if (priv->mii_bus) mdiobus_free(priv->mii_bus); out_uninit_hw: /* turn off mdc clock */ enet_writel(priv, 0, ENET_MIISC_REG); if (priv->phy_clk) { clk_disable(priv->phy_clk); clk_put(priv->phy_clk); } out_put_clk_mac: clk_disable(priv->mac_clk); clk_put(priv->mac_clk); out_unmap: iounmap(priv->base); out_release_mem: release_mem_region(res_mem->start, iomem_size); out: free_netdev(dev); return ret; } /* * exit func, stops hardware and unregisters netdevice */ static int __devexit bcm_enet_remove(struct platform_device *pdev) { struct bcm_enet_priv *priv; struct net_device *dev; struct resource *res; /* stop netdevice */ dev = platform_get_drvdata(pdev); priv = netdev_priv(dev); unregister_netdev(dev); /* turn off mdc clock */ enet_writel(priv, 0, ENET_MIISC_REG); if (priv->has_phy) { mdiobus_unregister(priv->mii_bus); kfree(priv->mii_bus->irq); mdiobus_free(priv->mii_bus); } else { struct bcm63xx_enet_platform_data *pd; pd = pdev->dev.platform_data; if (pd && pd->mii_config) pd->mii_config(dev, 0, bcm_enet_mdio_read_mii, bcm_enet_mdio_write_mii); } /* release device resources */ iounmap(priv->base); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, res->end - res->start + 1); /* disable hw block clocks */ if (priv->phy_clk) { clk_disable(priv->phy_clk); clk_put(priv->phy_clk); } clk_disable(priv->mac_clk); clk_put(priv->mac_clk); platform_set_drvdata(pdev, NULL); free_netdev(dev); return 0; } struct platform_driver bcm63xx_enet_driver = { .probe = bcm_enet_probe, .remove = __devexit_p(bcm_enet_remove), .driver = { .name = "bcm63xx_enet", .owner = THIS_MODULE, }, }; /* * reserve & remap memory space shared between all macs */ static 
int __devinit bcm_enet_shared_probe(struct platform_device *pdev) { struct resource *res; unsigned int iomem_size; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; iomem_size = res->end - res->start + 1; if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma")) return -EBUSY; bcm_enet_shared_base = ioremap(res->start, iomem_size); if (!bcm_enet_shared_base) { release_mem_region(res->start, iomem_size); return -ENOMEM; } return 0; } static int __devexit bcm_enet_shared_remove(struct platform_device *pdev) { struct resource *res; iounmap(bcm_enet_shared_base); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, res->end - res->start + 1); return 0; } /* * this "shared" driver is needed because both macs share a single * address space */ struct platform_driver bcm63xx_enet_shared_driver = { .probe = bcm_enet_shared_probe, .remove = __devexit_p(bcm_enet_shared_remove), .driver = { .name = "bcm63xx_enet_shared", .owner = THIS_MODULE, }, }; /* * entry point */ static int __init bcm_enet_init(void) { int ret; ret = platform_driver_register(&bcm63xx_enet_shared_driver); if (ret) return ret; ret = platform_driver_register(&bcm63xx_enet_driver); if (ret) platform_driver_unregister(&bcm63xx_enet_shared_driver); return ret; } static void __exit bcm_enet_exit(void) { platform_driver_unregister(&bcm63xx_enet_driver); platform_driver_unregister(&bcm63xx_enet_shared_driver); } module_init(bcm_enet_init); module_exit(bcm_enet_exit); MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver"); MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>"); MODULE_LICENSE("GPL");
gpl-2.0
PerthCharles/tcpcomment
linux-3.10/net/caif/caif_socket.c
405
27353
/* * Copyright (C) ST-Ericsson AB 2010 * Author: Sjur Brendeland * License terms: GNU General Public License (GPL) version 2 */ #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ #include <linux/fs.h> #include <linux/init.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/list.h> #include <linux/wait.h> #include <linux/poll.h> #include <linux/tcp.h> #include <linux/uaccess.h> #include <linux/debugfs.h> #include <linux/caif/caif_socket.h> #include <linux/pkt_sched.h> #include <net/sock.h> #include <net/tcp_states.h> #include <net/caif/caif_layer.h> #include <net/caif/caif_dev.h> #include <net/caif/cfpkt.h> MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(AF_CAIF); /* * CAIF state is re-using the TCP socket states. * caif_states stored in sk_state reflect the state as reported by * the CAIF stack, while sk_socket->state is the state of the socket. */ enum caif_states { CAIF_CONNECTED = TCP_ESTABLISHED, CAIF_CONNECTING = TCP_SYN_SENT, CAIF_DISCONNECTED = TCP_CLOSE }; #define TX_FLOW_ON_BIT 1 #define RX_FLOW_ON_BIT 2 struct caifsock { struct sock sk; /* must be first member */ struct cflayer layer; u32 flow_state; struct caif_connect_request conn_req; struct mutex readlock; struct dentry *debugfs_socket_dir; int headroom, tailroom, maxframe; }; static int rx_flow_is_on(struct caifsock *cf_sk) { return test_bit(RX_FLOW_ON_BIT, (void *) &cf_sk->flow_state); } static int tx_flow_is_on(struct caifsock *cf_sk) { return test_bit(TX_FLOW_ON_BIT, (void *) &cf_sk->flow_state); } static void set_rx_flow_off(struct caifsock *cf_sk) { clear_bit(RX_FLOW_ON_BIT, (void *) &cf_sk->flow_state); } static void set_rx_flow_on(struct caifsock *cf_sk) { set_bit(RX_FLOW_ON_BIT, (void *) &cf_sk->flow_state); } static void set_tx_flow_off(struct caifsock *cf_sk) { clear_bit(TX_FLOW_ON_BIT, (void *) &cf_sk->flow_state); } static void set_tx_flow_on(struct caifsock *cf_sk) { set_bit(TX_FLOW_ON_BIT, (void *) 
&cf_sk->flow_state); } static void caif_read_lock(struct sock *sk) { struct caifsock *cf_sk; cf_sk = container_of(sk, struct caifsock, sk); mutex_lock(&cf_sk->readlock); } static void caif_read_unlock(struct sock *sk) { struct caifsock *cf_sk; cf_sk = container_of(sk, struct caifsock, sk); mutex_unlock(&cf_sk->readlock); } static int sk_rcvbuf_lowwater(struct caifsock *cf_sk) { /* A quarter of full buffer is used a low water mark */ return cf_sk->sk.sk_rcvbuf / 4; } static void caif_flow_ctrl(struct sock *sk, int mode) { struct caifsock *cf_sk; cf_sk = container_of(sk, struct caifsock, sk); if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd) cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode); } /* * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are * not dropped, but CAIF is sending flow off instead. */ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { int err; int skb_len; unsigned long flags; struct sk_buff_head *list = &sk->sk_receive_queue; struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) { net_dbg_ratelimited("sending flow OFF (queue len = %d %d)\n", atomic_read(&cf_sk->sk.sk_rmem_alloc), sk_rcvbuf_lowwater(cf_sk)); set_rx_flow_off(cf_sk); caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); } err = sk_filter(sk, skb); if (err) return err; if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) { set_rx_flow_off(cf_sk); net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n"); caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); } skb->dev = NULL; skb_set_owner_r(skb, sk); /* Cache the SKB length before we tack it onto the receive * queue. Once it is added it no longer belongs to us and * may be freed by other threads of control pulling packets * from the queue. 
*/ skb_len = skb->len; spin_lock_irqsave(&list->lock, flags); if (!sock_flag(sk, SOCK_DEAD)) __skb_queue_tail(list, skb); spin_unlock_irqrestore(&list->lock, flags); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, skb_len); else kfree_skb(skb); return 0; } /* Packet Receive Callback function called from CAIF Stack */ static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt) { struct caifsock *cf_sk; struct sk_buff *skb; cf_sk = container_of(layr, struct caifsock, layer); skb = cfpkt_tonative(pkt); if (unlikely(cf_sk->sk.sk_state != CAIF_CONNECTED)) { kfree_skb(skb); return 0; } caif_queue_rcv_skb(&cf_sk->sk, skb); return 0; } static void cfsk_hold(struct cflayer *layr) { struct caifsock *cf_sk = container_of(layr, struct caifsock, layer); sock_hold(&cf_sk->sk); } static void cfsk_put(struct cflayer *layr) { struct caifsock *cf_sk = container_of(layr, struct caifsock, layer); sock_put(&cf_sk->sk); } /* Packet Control Callback function called from CAIF */ static void caif_ctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow, int phyid) { struct caifsock *cf_sk = container_of(layr, struct caifsock, layer); switch (flow) { case CAIF_CTRLCMD_FLOW_ON_IND: /* OK from modem to start sending again */ set_tx_flow_on(cf_sk); cf_sk->sk.sk_state_change(&cf_sk->sk); break; case CAIF_CTRLCMD_FLOW_OFF_IND: /* Modem asks us to shut up */ set_tx_flow_off(cf_sk); cf_sk->sk.sk_state_change(&cf_sk->sk); break; case CAIF_CTRLCMD_INIT_RSP: /* We're now connected */ caif_client_register_refcnt(&cf_sk->layer, cfsk_hold, cfsk_put); cf_sk->sk.sk_state = CAIF_CONNECTED; set_tx_flow_on(cf_sk); cf_sk->sk.sk_shutdown = 0; cf_sk->sk.sk_state_change(&cf_sk->sk); break; case CAIF_CTRLCMD_DEINIT_RSP: /* We're now disconnected */ cf_sk->sk.sk_state = CAIF_DISCONNECTED; cf_sk->sk.sk_state_change(&cf_sk->sk); break; case CAIF_CTRLCMD_INIT_FAIL_RSP: /* Connect request failed */ cf_sk->sk.sk_err = ECONNREFUSED; cf_sk->sk.sk_state = CAIF_DISCONNECTED; cf_sk->sk.sk_shutdown = SHUTDOWN_MASK; 
/* * Socket "standards" seems to require POLLOUT to * be set at connect failure. */ set_tx_flow_on(cf_sk); cf_sk->sk.sk_state_change(&cf_sk->sk); break; case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND: /* Modem has closed this connection, or device is down. */ cf_sk->sk.sk_shutdown = SHUTDOWN_MASK; cf_sk->sk.sk_err = ECONNRESET; set_rx_flow_on(cf_sk); cf_sk->sk.sk_error_report(&cf_sk->sk); break; default: pr_debug("Unexpected flow command %d\n", flow); } } static void caif_check_flow_release(struct sock *sk) { struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); if (rx_flow_is_on(cf_sk)) return; if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) { set_rx_flow_on(cf_sk); caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ); } } /* * Copied from unix_dgram_recvmsg, but removed credit checks, * changed locking, address handling and added MSG_TRUNC. */ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t len, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; int ret; int copylen; ret = -EOPNOTSUPP; if (m->msg_flags&MSG_OOB) goto read_error; m->msg_namelen = 0; skb = skb_recv_datagram(sk, flags, 0 , &ret); if (!skb) goto read_error; copylen = skb->len; if (len < copylen) { m->msg_flags |= MSG_TRUNC; copylen = len; } ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen); if (ret) goto out_free; ret = (flags & MSG_TRUNC) ? skb->len : copylen; out_free: skb_free_datagram(sk, skb); caif_check_flow_release(sk); return ret; read_error: return ret; } /* Copied from unix_stream_wait_data, identical except for lock call. 
*/ static long caif_stream_data_wait(struct sock *sk, long timeo) { DEFINE_WAIT(wait); lock_sock(sk); for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (!skb_queue_empty(&sk->sk_receive_queue) || sk->sk_err || sk->sk_state != CAIF_CONNECTED || sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN) || signal_pending(current) || !timeo) break; set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); } finish_wait(sk_sleep(sk), &wait); release_sock(sk); return timeo; } /* * Copied from unix_stream_recvmsg, but removed credit checks, * changed locking calls, changed address handling. */ static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; int copied = 0; int target; int err = 0; long timeo; err = -EOPNOTSUPP; if (flags&MSG_OOB) goto out; msg->msg_namelen = 0; /* * Lock the socket to prevent queue disordering * while sleeps in memcpy_tomsg */ err = -EAGAIN; if (sk->sk_state == CAIF_CONNECTING) goto out; caif_read_lock(sk); target = sock_rcvlowat(sk, flags&MSG_WAITALL, size); timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT); do { int chunk; struct sk_buff *skb; lock_sock(sk); skb = skb_dequeue(&sk->sk_receive_queue); caif_check_flow_release(sk); if (skb == NULL) { if (copied >= target) goto unlock; /* * POSIX 1003.1g mandates this order. 
*/ err = sock_error(sk); if (err) goto unlock; err = -ECONNRESET; if (sk->sk_shutdown & RCV_SHUTDOWN) goto unlock; err = -EPIPE; if (sk->sk_state != CAIF_CONNECTED) goto unlock; if (sock_flag(sk, SOCK_DEAD)) goto unlock; release_sock(sk); err = -EAGAIN; if (!timeo) break; caif_read_unlock(sk); timeo = caif_stream_data_wait(sk, timeo); if (signal_pending(current)) { err = sock_intr_errno(timeo); goto out; } caif_read_lock(sk); continue; unlock: release_sock(sk); break; } release_sock(sk); chunk = min_t(unsigned int, skb->len, size); if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) { skb_queue_head(&sk->sk_receive_queue, skb); if (copied == 0) copied = -EFAULT; break; } copied += chunk; size -= chunk; /* Mark read part of skb as used */ if (!(flags & MSG_PEEK)) { skb_pull(skb, chunk); /* put the skb back if we didn't use it up. */ if (skb->len) { skb_queue_head(&sk->sk_receive_queue, skb); break; } kfree_skb(skb); } else { /* * It is questionable, see note in unix_dgram_recvmsg. */ /* put message back and return */ skb_queue_head(&sk->sk_receive_queue, skb); break; } } while (size); caif_read_unlock(sk); out: return copied ? : err; } /* * Copied from sock.c:sock_wait_for_wmem, but change to wait for * CAIF flow-on and sock_writable. */ static long caif_wait_for_flow_on(struct caifsock *cf_sk, int wait_writeable, long timeo, int *err) { struct sock *sk = &cf_sk->sk; DEFINE_WAIT(wait); for (;;) { *err = 0; if (tx_flow_is_on(cf_sk) && (!wait_writeable || sock_writeable(&cf_sk->sk))) break; *err = -ETIMEDOUT; if (!timeo) break; *err = -ERESTARTSYS; if (signal_pending(current)) break; prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); *err = -ECONNRESET; if (sk->sk_shutdown & SHUTDOWN_MASK) break; *err = -sk->sk_err; if (sk->sk_err) break; *err = -EPIPE; if (cf_sk->sk.sk_state != CAIF_CONNECTED) break; timeo = schedule_timeout(timeo); } finish_wait(sk_sleep(sk), &wait); return timeo; } /* * Transmit a SKB. 
The device may temporarily request re-transmission * by returning EAGAIN. */ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk, int noblock, long timeo) { struct cfpkt *pkt; pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb); memset(skb->cb, 0, sizeof(struct caif_payload_info)); cfpkt_set_prio(pkt, cf_sk->sk.sk_priority); if (cf_sk->layer.dn == NULL) { kfree_skb(skb); return -EINVAL; } return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt); } /* Copied from af_unix:unix_dgram_sendmsg, and adapted to CAIF */ static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); int buffer_size; int ret = 0; struct sk_buff *skb = NULL; int noblock; long timeo; caif_assert(cf_sk); ret = sock_error(sk); if (ret) goto err; ret = -EOPNOTSUPP; if (msg->msg_flags&MSG_OOB) goto err; ret = -EOPNOTSUPP; if (msg->msg_namelen) goto err; ret = -EINVAL; if (unlikely(msg->msg_iov->iov_base == NULL)) goto err; noblock = msg->msg_flags & MSG_DONTWAIT; timeo = sock_sndtimeo(sk, noblock); timeo = caif_wait_for_flow_on(container_of(sk, struct caifsock, sk), 1, timeo, &ret); if (ret) goto err; ret = -EPIPE; if (cf_sk->sk.sk_state != CAIF_CONNECTED || sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN)) goto err; /* Error if trying to write more than maximum frame size. 
*/ ret = -EMSGSIZE; if (len > cf_sk->maxframe && cf_sk->sk.sk_protocol != CAIFPROTO_RFM) goto err; buffer_size = len + cf_sk->headroom + cf_sk->tailroom; ret = -ENOMEM; skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret); if (!skb || skb_tailroom(skb) < buffer_size) goto err; skb_reserve(skb, cf_sk->headroom); ret = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); if (ret) goto err; ret = transmit_skb(skb, cf_sk, noblock, timeo); if (ret < 0) /* skb is already freed */ return ret; return len; err: kfree_skb(skb); return ret; } /* * Copied from unix_stream_sendmsg and adapted to CAIF: * Changed removed permission handling and added waiting for flow on * and other minor adaptations. */ static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); int err, size; struct sk_buff *skb; int sent = 0; long timeo; err = -EOPNOTSUPP; if (unlikely(msg->msg_flags&MSG_OOB)) goto out_err; if (unlikely(msg->msg_namelen)) goto out_err; timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); timeo = caif_wait_for_flow_on(cf_sk, 1, timeo, &err); if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN)) goto pipe_err; while (sent < len) { size = len-sent; if (size > cf_sk->maxframe) size = cf_sk->maxframe; /* If size is more than half of sndbuf, chop up message */ if (size > ((sk->sk_sndbuf >> 1) - 64)) size = (sk->sk_sndbuf >> 1) - 64; if (size > SKB_MAX_ALLOC) size = SKB_MAX_ALLOC; skb = sock_alloc_send_skb(sk, size + cf_sk->headroom + cf_sk->tailroom, msg->msg_flags&MSG_DONTWAIT, &err); if (skb == NULL) goto out_err; skb_reserve(skb, cf_sk->headroom); /* * If you pass two values to the sock_alloc_send_skb * it tries to grab the large buffer with GFP_NOFS * (which can fail easily), and if it fails grab the * fallback size buffer which is under a page and will * succeed. 
[Alan] */ size = min_t(int, size, skb_tailroom(skb)); err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); if (err) { kfree_skb(skb); goto out_err; } err = transmit_skb(skb, cf_sk, msg->msg_flags&MSG_DONTWAIT, timeo); if (err < 0) /* skb is already freed */ goto pipe_err; sent += size; } return sent; pipe_err: if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL)) send_sig(SIGPIPE, current, 0); err = -EPIPE; out_err: return sent ? : err; } static int setsockopt(struct socket *sock, int lvl, int opt, char __user *ov, unsigned int ol) { struct sock *sk = sock->sk; struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); int linksel; if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED) return -ENOPROTOOPT; switch (opt) { case CAIFSO_LINK_SELECT: if (ol < sizeof(int)) return -EINVAL; if (lvl != SOL_CAIF) goto bad_sol; if (copy_from_user(&linksel, ov, sizeof(int))) return -EINVAL; lock_sock(&(cf_sk->sk)); cf_sk->conn_req.link_selector = linksel; release_sock(&cf_sk->sk); return 0; case CAIFSO_REQ_PARAM: if (lvl != SOL_CAIF) goto bad_sol; if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL) return -ENOPROTOOPT; lock_sock(&(cf_sk->sk)); if (ol > sizeof(cf_sk->conn_req.param.data) || copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) { release_sock(&cf_sk->sk); return -EINVAL; } cf_sk->conn_req.param.size = ol; release_sock(&cf_sk->sk); return 0; default: return -ENOPROTOOPT; } return 0; bad_sol: return -ENOPROTOOPT; } /* * caif_connect() - Connect a CAIF Socket * Copied and modified af_irda.c:irda_connect(). * * Note : by consulting "errno", the user space caller may learn the cause * of the failure. Most of them are visible in the function, others may come * from subroutines called and are listed here : * o -EAFNOSUPPORT: bad socket family or type. * o -ESOCKTNOSUPPORT: bad socket type or protocol * o -EINVAL: bad socket address, or CAIF link type * o -ECONNREFUSED: remote end refused the connection. 
* o -EINPROGRESS: connect request sent but timed out (or non-blocking) * o -EISCONN: already connected. * o -ETIMEDOUT: Connection timed out (send timeout) * o -ENODEV: No link layer to send request * o -ECONNRESET: Received Shutdown indication or lost link layer * o -ENOMEM: Out of memory * * State Strategy: * o sk_state: holds the CAIF_* protocol state, it's updated by * caif_ctrl_cb. * o sock->state: holds the SS_* socket state and is updated by connect and * disconnect. */ static int caif_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); long timeo; int err; int ifindex, headroom, tailroom; unsigned int mtu; struct net_device *dev; lock_sock(sk); err = -EAFNOSUPPORT; if (uaddr->sa_family != AF_CAIF) goto out; switch (sock->state) { case SS_UNCONNECTED: /* Normal case, a fresh connect */ caif_assert(sk->sk_state == CAIF_DISCONNECTED); break; case SS_CONNECTING: switch (sk->sk_state) { case CAIF_CONNECTED: sock->state = SS_CONNECTED; err = -EISCONN; goto out; case CAIF_DISCONNECTED: /* Reconnect allowed */ break; case CAIF_CONNECTING: err = -EALREADY; if (flags & O_NONBLOCK) goto out; goto wait_connect; } break; case SS_CONNECTED: caif_assert(sk->sk_state == CAIF_CONNECTED || sk->sk_state == CAIF_DISCONNECTED); if (sk->sk_shutdown & SHUTDOWN_MASK) { /* Allow re-connect after SHUTDOWN_IND */ caif_disconnect_client(sock_net(sk), &cf_sk->layer); caif_free_client(&cf_sk->layer); break; } /* No reconnect on a seqpacket socket */ err = -EISCONN; goto out; case SS_DISCONNECTING: case SS_FREE: caif_assert(1); /*Should never happen */ break; } sk->sk_state = CAIF_DISCONNECTED; sock->state = SS_UNCONNECTED; sk_stream_kill_queues(&cf_sk->sk); err = -EINVAL; if (addr_len != sizeof(struct sockaddr_caif)) goto out; memcpy(&cf_sk->conn_req.sockaddr, uaddr, sizeof(struct sockaddr_caif)); /* Move to connecting socket, start sending Connect Requests */ 
sock->state = SS_CONNECTING; sk->sk_state = CAIF_CONNECTING; /* Check priority value comming from socket */ /* if priority value is out of range it will be ajusted */ if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX) cf_sk->conn_req.priority = CAIF_PRIO_MAX; else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN) cf_sk->conn_req.priority = CAIF_PRIO_MIN; else cf_sk->conn_req.priority = cf_sk->sk.sk_priority; /*ifindex = id of the interface.*/ cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if; cf_sk->layer.receive = caif_sktrecv_cb; err = caif_connect_client(sock_net(sk), &cf_sk->conn_req, &cf_sk->layer, &ifindex, &headroom, &tailroom); if (err < 0) { cf_sk->sk.sk_socket->state = SS_UNCONNECTED; cf_sk->sk.sk_state = CAIF_DISCONNECTED; goto out; } err = -ENODEV; rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), ifindex); if (!dev) { rcu_read_unlock(); goto out; } cf_sk->headroom = LL_RESERVED_SPACE_EXTRA(dev, headroom); mtu = dev->mtu; rcu_read_unlock(); cf_sk->tailroom = tailroom; cf_sk->maxframe = mtu - (headroom + tailroom); if (cf_sk->maxframe < 1) { pr_warn("CAIF Interface MTU too small (%d)\n", dev->mtu); err = -ENODEV; goto out; } err = -EINPROGRESS; wait_connect: if (sk->sk_state != CAIF_CONNECTED && (flags & O_NONBLOCK)) goto out; timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); release_sock(sk); err = -ERESTARTSYS; timeo = wait_event_interruptible_timeout(*sk_sleep(sk), sk->sk_state != CAIF_CONNECTING, timeo); lock_sock(sk); if (timeo < 0) goto out; /* -ERESTARTSYS */ err = -ETIMEDOUT; if (timeo == 0 && sk->sk_state != CAIF_CONNECTED) goto out; if (sk->sk_state != CAIF_CONNECTED) { sock->state = SS_UNCONNECTED; err = sock_error(sk); if (!err) err = -ECONNREFUSED; goto out; } sock->state = SS_CONNECTED; err = 0; out: release_sock(sk); return err; } /* * caif_release() - Disconnect a CAIF Socket * Copied and modified af_irda.c:irda_release(). 
*/
/*
 * Tear down a CAIF socket: mark it dead so no further packets are
 * queued, detach debugfs state, signal CAIF disconnect and release
 * the sock reference. Always returns 0.
 */
static int caif_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

	if (!sk)
		return 0;

	set_tx_flow_off(cf_sk);

	/*
	 * Ensure that packets are not queued after this point in time.
	 * caif_queue_rcv_skb checks SOCK_DEAD holding the queue lock,
	 * this ensures no packets when sock is dead.
	 */
	spin_lock_bh(&sk->sk_receive_queue.lock);
	sock_set_flag(sk, SOCK_DEAD);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	sock->sk = NULL;

	/* debugfs dir was created at connect time; remove it recursively */
	WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
	if (cf_sk->debugfs_socket_dir != NULL)
		debugfs_remove_recursive(cf_sk->debugfs_socket_dir);

	lock_sock(&(cf_sk->sk));
	sk->sk_state = CAIF_DISCONNECTED;
	sk->sk_shutdown = SHUTDOWN_MASK;

	caif_disconnect_client(sock_net(sk), &cf_sk->layer);
	cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
	/* wake any sleepers in poll/recv so they observe the shutdown */
	wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP);

	sock_orphan(sk);
	sk_stream_kill_queues(&cf_sk->sk);
	release_sock(sk);
	sock_put(sk);
	return 0;
}

/* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */
static unsigned int caif_poll(struct file *file,
				struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err)
		mask |= POLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue) ||
		(sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	/*
	 * we set writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 * Writability additionally requires the CAIF tx flow to be on.
	 */
	if (sock_writeable(sk) && tx_flow_is_on(cf_sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

/* SOCK_SEQPACKET ops: unsupported operations use the sock_no_* stubs */
static const struct proto_ops caif_seqpacket_ops = {
	.family = PF_CAIF,
	.owner = THIS_MODULE,
	.release = caif_release,
	.bind = sock_no_bind,
	.connect = caif_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = caif_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg = caif_seqpkt_sendmsg,
	.recvmsg = caif_seqpkt_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

/* SOCK_STREAM ops: identical to seqpacket except for sendmsg/recvmsg */
static const struct proto_ops caif_stream_ops = {
	.family = PF_CAIF,
	.owner = THIS_MODULE,
	.release = caif_release,
	.bind = sock_no_bind,
	.connect = caif_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = caif_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg = caif_stream_sendmsg,
	.recvmsg = caif_stream_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

/* This function is called when a socket is finally destroyed.
*/ static void caif_sock_destructor(struct sock *sk) { struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); caif_assert(!atomic_read(&sk->sk_wmem_alloc)); caif_assert(sk_unhashed(sk)); caif_assert(!sk->sk_socket); if (!sock_flag(sk, SOCK_DEAD)) { pr_debug("Attempt to release alive CAIF socket: %p\n", sk); return; } sk_stream_kill_queues(&cf_sk->sk); caif_free_client(&cf_sk->layer); } static int caif_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk = NULL; struct caifsock *cf_sk = NULL; static struct proto prot = {.name = "PF_CAIF", .owner = THIS_MODULE, .obj_size = sizeof(struct caifsock), }; if (!capable(CAP_SYS_ADMIN) && !capable(CAP_NET_ADMIN)) return -EPERM; /* * The sock->type specifies the socket type to use. * The CAIF socket is a packet stream in the sense * that it is packet based. CAIF trusts the reliability * of the link, no resending is implemented. */ if (sock->type == SOCK_SEQPACKET) sock->ops = &caif_seqpacket_ops; else if (sock->type == SOCK_STREAM) sock->ops = &caif_stream_ops; else return -ESOCKTNOSUPPORT; if (protocol < 0 || protocol >= CAIFPROTO_MAX) return -EPROTONOSUPPORT; /* * Set the socket state to unconnected. The socket state * is really not used at all in the net/core or socket.c but the * initialization makes sure that sock->state is not uninitialized. */ sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot); if (!sk) return -ENOMEM; cf_sk = container_of(sk, struct caifsock, sk); /* Store the protocol */ sk->sk_protocol = (unsigned char) protocol; /* Initialize default priority for well-known cases */ switch (protocol) { case CAIFPROTO_AT: sk->sk_priority = TC_PRIO_CONTROL; break; case CAIFPROTO_RFM: sk->sk_priority = TC_PRIO_INTERACTIVE_BULK; break; default: sk->sk_priority = TC_PRIO_BESTEFFORT; } /* * Lock in order to try to stop someone from opening the socket * too early. */ lock_sock(&(cf_sk->sk)); /* Initialize the nozero default sock structure data. 
*/ sock_init_data(sock, sk); sk->sk_destruct = caif_sock_destructor; mutex_init(&cf_sk->readlock); /* single task reading lock */ cf_sk->layer.ctrlcmd = caif_ctrl_cb; cf_sk->sk.sk_socket->state = SS_UNCONNECTED; cf_sk->sk.sk_state = CAIF_DISCONNECTED; set_tx_flow_off(cf_sk); set_rx_flow_on(cf_sk); /* Set default options on configuration */ cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY; cf_sk->conn_req.protocol = protocol; release_sock(&cf_sk->sk); return 0; } static struct net_proto_family caif_family_ops = { .family = PF_CAIF, .create = caif_create, .owner = THIS_MODULE, }; static int __init caif_sktinit_module(void) { int err = sock_register(&caif_family_ops); if (!err) return err; return 0; } static void __exit caif_sktexit_module(void) { sock_unregister(PF_CAIF); } module_init(caif_sktinit_module); module_exit(caif_sktexit_module);
gpl-2.0
flaming-toast/unrm
net/ipv4/ip_gre.c
405
24919
/* * Linux NET3: GRE over IP protocol decoder. * * Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/capability.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/in.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/if_arp.h> #include <linux/mroute.h> #include <linux/init.h> #include <linux/in6.h> #include <linux/inetdevice.h> #include <linux/igmp.h> #include <linux/netfilter_ipv4.h> #include <linux/etherdevice.h> #include <linux/if_ether.h> #include <net/sock.h> #include <net/ip.h> #include <net/icmp.h> #include <net/protocol.h> #include <net/ip_tunnels.h> #include <net/arp.h> #include <net/checksum.h> #include <net/dsfield.h> #include <net/inet_ecn.h> #include <net/xfrm.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/rtnetlink.h> #include <net/gre.h> #if IS_ENABLED(CONFIG_IPV6) #include <net/ipv6.h> #include <net/ip6_fib.h> #include <net/ip6_route.h> #endif /* Problems & solutions -------------------- 1. The most important issue is detecting local dead loops. They would cause complete host lockup in transmit, which would be "resolved" by stack overflow or, if queueing is enabled, with infinite looping in net_bh. We cannot track such dead loops during route installation, it is infeasible task. The most general solutions would be to keep skb->encapsulation counter (sort of local ttl), and silently drop packet when it expires. It is a good solution, but it supposes maintaining new variable in ALL skb, even if no tunneling is used. 
Current solution: xmit_recursion breaks dead loops. This is a percpu counter, since when we enter the first ndo_xmit(), cpu migration is forbidden. We force an exit if this counter reaches RECURSION_LIMIT 2. Networking dead loops would not kill routers, but would really kill network. IP hop limit plays role of "t->recursion" in this case, if we copy it from packet being encapsulated to upper header. It is very good solution, but it introduces two problems: - Routing protocols, using packets with ttl=1 (OSPF, RIP2), do not work over tunnels. - traceroute does not work. I planned to relay ICMP from tunnel, so that this problem would be solved and traceroute output would even more informative. This idea appeared to be wrong: only Linux complies to rfc1812 now (yes, guys, Linux is the only true router now :-)), all routers (at least, in neighbourhood of mine) return only 8 bytes of payload. It is the end. Hence, if we want that OSPF worked or traceroute said something reasonable, we should search for another solution. One of them is to parse packet trying to detect inner encapsulation made by our node. It is difficult or even impossible, especially, taking into account fragmentation. TO be short, ttl is not solution at all. Current solution: The solution was UNEXPECTEDLY SIMPLE. We force DF flag on tunnels with preconfigured hop limit, that is ALL. :-) Well, it does not remove the problem completely, but exponential growth of network traffic is changed to linear (branches, that exceed pmtu are pruned) and tunnel mtu rapidly degrades to value <68, where looping stops. Yes, it is not good if there exists a router in the loop, which does not force DF, even when encapsulating packets have DF set. But it is not our problem! Nobody could accuse us, we made all that we could make. Even if it is your gated who injected fatal route to network, even if it were you who configured fatal static route: you are innocent. :-) Alexey Kuznetsov. 
*/ static bool log_ecn_error = true; module_param(log_ecn_error, bool, 0644); MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); static struct rtnl_link_ops ipgre_link_ops __read_mostly; static int ipgre_tunnel_init(struct net_device *dev); static int ipgre_net_id __read_mostly; static int gre_tap_net_id __read_mostly; static int ipgre_err(struct sk_buff *skb, u32 info, const struct tnl_ptk_info *tpi) { /* All the routers (except for Linux) return only 8 bytes of packet payload. It means, that precise relaying of ICMP in the real Internet is absolutely infeasible. Moreover, Cisco "wise men" put GRE key to the third word in GRE header. It makes impossible maintaining even soft state for keyed GRE tunnels with enabled checksum. Tell them "thank you". Well, I wonder, rfc1812 was written by Cisco employee, what the hell these idiots break standards established by themselves??? */ struct net *net = dev_net(skb->dev); struct ip_tunnel_net *itn; const struct iphdr *iph; const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; struct ip_tunnel *t; switch (type) { default: case ICMP_PARAMETERPROB: return PACKET_RCVD; case ICMP_DEST_UNREACH: switch (code) { case ICMP_SR_FAILED: case ICMP_PORT_UNREACH: /* Impossible event. */ return PACKET_RCVD; default: /* All others are translated to HOST_UNREACH. rfc2003 contains "deep thoughts" about NET_UNREACH, I believe they are just ether pollution. 
--ANK */ break; } break; case ICMP_TIME_EXCEEDED: if (code != ICMP_EXC_TTL) return PACKET_RCVD; break; case ICMP_REDIRECT: break; } if (tpi->proto == htons(ETH_P_TEB)) itn = net_generic(net, gre_tap_net_id); else itn = net_generic(net, ipgre_net_id); iph = (const struct iphdr *)(icmp_hdr(skb) + 1); t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags, iph->daddr, iph->saddr, tpi->key); if (!t) return PACKET_REJECT; if (t->parms.iph.daddr == 0 || ipv4_is_multicast(t->parms.iph.daddr)) return PACKET_RCVD; if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED) return PACKET_RCVD; if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO)) t->err_count++; else t->err_count = 1; t->err_time = jiffies; return PACKET_RCVD; } static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi) { struct net *net = dev_net(skb->dev); struct ip_tunnel_net *itn; const struct iphdr *iph; struct ip_tunnel *tunnel; if (tpi->proto == htons(ETH_P_TEB)) itn = net_generic(net, gre_tap_net_id); else itn = net_generic(net, ipgre_net_id); iph = ip_hdr(skb); tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags, iph->saddr, iph->daddr, tpi->key); if (tunnel) { skb_pop_mac_header(skb); ip_tunnel_rcv(tunnel, skb, tpi, log_ecn_error); return PACKET_RCVD; } return PACKET_REJECT; } static void __gre_xmit(struct sk_buff *skb, struct net_device *dev, const struct iphdr *tnl_params, __be16 proto) { struct ip_tunnel *tunnel = netdev_priv(dev); struct tnl_ptk_info tpi; tpi.flags = tunnel->parms.o_flags; tpi.proto = proto; tpi.key = tunnel->parms.o_key; if (tunnel->parms.o_flags & TUNNEL_SEQ) tunnel->o_seqno++; tpi.seq = htonl(tunnel->o_seqno); /* Push GRE header. 
*/ gre_build_header(skb, &tpi, tunnel->tun_hlen); skb_set_inner_protocol(skb, tpi.proto); ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol); } static netdev_tx_t ipgre_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip_tunnel *tunnel = netdev_priv(dev); const struct iphdr *tnl_params; if (dev->header_ops) { /* Need space for new headers */ if (skb_cow_head(skb, dev->needed_headroom - (tunnel->hlen + sizeof(struct iphdr)))) goto free_skb; tnl_params = (const struct iphdr *)skb->data; /* Pull skb since ip_tunnel_xmit() needs skb->data pointing * to gre header. */ skb_pull(skb, tunnel->hlen + sizeof(struct iphdr)); skb_reset_mac_header(skb); } else { if (skb_cow_head(skb, dev->needed_headroom)) goto free_skb; tnl_params = &tunnel->parms.iph; } skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM)); if (IS_ERR(skb)) goto out; __gre_xmit(skb, dev, tnl_params, skb->protocol); return NETDEV_TX_OK; free_skb: kfree_skb(skb); out: dev->stats.tx_dropped++; return NETDEV_TX_OK; } static netdev_tx_t gre_tap_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip_tunnel *tunnel = netdev_priv(dev); skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM)); if (IS_ERR(skb)) goto out; if (skb_cow_head(skb, dev->needed_headroom)) goto free_skb; __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB)); return NETDEV_TX_OK; free_skb: kfree_skb(skb); out: dev->stats.tx_dropped++; return NETDEV_TX_OK; } static int ipgre_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { int err; struct ip_tunnel_parm p; if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) return -EFAULT; if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) { if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE || p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) || ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))) return -EINVAL; } p.i_flags = gre_flags_to_tnl_flags(p.i_flags); p.o_flags = gre_flags_to_tnl_flags(p.o_flags); err = 
ip_tunnel_ioctl(dev, &p, cmd); if (err) return err; p.i_flags = tnl_flags_to_gre_flags(p.i_flags); p.o_flags = tnl_flags_to_gre_flags(p.o_flags); if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) return -EFAULT; return 0; } /* Nice toy. Unfortunately, useless in real life :-) It allows to construct virtual multiprotocol broadcast "LAN" over the Internet, provided multicast routing is tuned. I have no idea was this bicycle invented before me, so that I had to set ARPHRD_IPGRE to a random value. I have an impression, that Cisco could make something similar, but this feature is apparently missing in IOS<=11.2(8). I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks with broadcast 224.66.66.66. If you have access to mbone, play with me :-) ping -t 255 224.66.66.66 If nobody answers, mbone does not work. ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255 ip addr add 10.66.66.<somewhat>/24 dev Universe ifconfig Universe up ifconfig Universe add fe80::<Your_real_addr>/10 ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96 ftp 10.66.66.66 ... ftp fec0:6666:6666::193.233.7.65 ... */ static int ipgre_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned int len) { struct ip_tunnel *t = netdev_priv(dev); struct iphdr *iph; struct gre_base_hdr *greh; iph = (struct iphdr *)skb_push(skb, t->hlen + sizeof(*iph)); greh = (struct gre_base_hdr *)(iph+1); greh->flags = tnl_flags_to_gre_flags(t->parms.o_flags); greh->protocol = htons(type); memcpy(iph, &t->parms.iph, sizeof(struct iphdr)); /* Set the source hardware address. 
*/ if (saddr) memcpy(&iph->saddr, saddr, 4); if (daddr) memcpy(&iph->daddr, daddr, 4); if (iph->daddr) return t->hlen + sizeof(*iph); return -(t->hlen + sizeof(*iph)); } static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr) { const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb); memcpy(haddr, &iph->saddr, 4); return 4; } static const struct header_ops ipgre_header_ops = { .create = ipgre_header, .parse = ipgre_header_parse, }; #ifdef CONFIG_NET_IPGRE_BROADCAST static int ipgre_open(struct net_device *dev) { struct ip_tunnel *t = netdev_priv(dev); if (ipv4_is_multicast(t->parms.iph.daddr)) { struct flowi4 fl4; struct rtable *rt; rt = ip_route_output_gre(t->net, &fl4, t->parms.iph.daddr, t->parms.iph.saddr, t->parms.o_key, RT_TOS(t->parms.iph.tos), t->parms.link); if (IS_ERR(rt)) return -EADDRNOTAVAIL; dev = rt->dst.dev; ip_rt_put(rt); if (!__in_dev_get_rtnl(dev)) return -EADDRNOTAVAIL; t->mlink = dev->ifindex; ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr); } return 0; } static int ipgre_close(struct net_device *dev) { struct ip_tunnel *t = netdev_priv(dev); if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) { struct in_device *in_dev; in_dev = inetdev_by_index(t->net, t->mlink); if (in_dev) ip_mc_dec_group(in_dev, t->parms.iph.daddr); } return 0; } #endif static const struct net_device_ops ipgre_netdev_ops = { .ndo_init = ipgre_tunnel_init, .ndo_uninit = ip_tunnel_uninit, #ifdef CONFIG_NET_IPGRE_BROADCAST .ndo_open = ipgre_open, .ndo_stop = ipgre_close, #endif .ndo_start_xmit = ipgre_xmit, .ndo_do_ioctl = ipgre_tunnel_ioctl, .ndo_change_mtu = ip_tunnel_change_mtu, .ndo_get_stats64 = ip_tunnel_get_stats64, .ndo_get_iflink = ip_tunnel_get_iflink, }; #define GRE_FEATURES (NETIF_F_SG | \ NETIF_F_FRAGLIST | \ NETIF_F_HIGHDMA | \ NETIF_F_HW_CSUM) static void ipgre_tunnel_setup(struct net_device *dev) { dev->netdev_ops = &ipgre_netdev_ops; dev->type = ARPHRD_IPGRE; ip_tunnel_setup(dev, ipgre_net_id); } static void 
__gre_tunnel_init(struct net_device *dev) { struct ip_tunnel *tunnel; int t_hlen; tunnel = netdev_priv(dev); tunnel->tun_hlen = ip_gre_calc_hlen(tunnel->parms.o_flags); tunnel->parms.iph.protocol = IPPROTO_GRE; tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen; t_hlen = tunnel->hlen + sizeof(struct iphdr); dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4; dev->mtu = ETH_DATA_LEN - t_hlen - 4; dev->features |= GRE_FEATURES; dev->hw_features |= GRE_FEATURES; if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) { /* TCP offload with GRE SEQ is not supported. */ dev->features |= NETIF_F_GSO_SOFTWARE; dev->hw_features |= NETIF_F_GSO_SOFTWARE; /* Can use a lockless transmit, unless we generate * output sequences */ dev->features |= NETIF_F_LLTX; } } static int ipgre_tunnel_init(struct net_device *dev) { struct ip_tunnel *tunnel = netdev_priv(dev); struct iphdr *iph = &tunnel->parms.iph; __gre_tunnel_init(dev); memcpy(dev->dev_addr, &iph->saddr, 4); memcpy(dev->broadcast, &iph->daddr, 4); dev->flags = IFF_NOARP; netif_keep_dst(dev); dev->addr_len = 4; if (iph->daddr) { #ifdef CONFIG_NET_IPGRE_BROADCAST if (ipv4_is_multicast(iph->daddr)) { if (!iph->saddr) return -EINVAL; dev->flags = IFF_BROADCAST; dev->header_ops = &ipgre_header_ops; } #endif } else dev->header_ops = &ipgre_header_ops; return ip_tunnel_init(dev); } static struct gre_cisco_protocol ipgre_protocol = { .handler = ipgre_rcv, .err_handler = ipgre_err, .priority = 0, }; static int __net_init ipgre_init_net(struct net *net) { return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL); } static void __net_exit ipgre_exit_net(struct net *net) { struct ip_tunnel_net *itn = net_generic(net, ipgre_net_id); ip_tunnel_delete_net(itn, &ipgre_link_ops); } static struct pernet_operations ipgre_net_ops = { .init = ipgre_init_net, .exit = ipgre_exit_net, .id = &ipgre_net_id, .size = sizeof(struct ip_tunnel_net), }; static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[]) { __be16 flags; if (!data) 
return 0; flags = 0; if (data[IFLA_GRE_IFLAGS]) flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]); if (data[IFLA_GRE_OFLAGS]) flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]); if (flags & (GRE_VERSION|GRE_ROUTING)) return -EINVAL; return 0; } static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[]) { __be32 daddr; if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) return -EINVAL; if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) return -EADDRNOTAVAIL; } if (!data) goto out; if (data[IFLA_GRE_REMOTE]) { memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4); if (!daddr) return -EINVAL; } out: return ipgre_tunnel_validate(tb, data); } static void ipgre_netlink_parms(struct nlattr *data[], struct nlattr *tb[], struct ip_tunnel_parm *parms) { memset(parms, 0, sizeof(*parms)); parms->iph.protocol = IPPROTO_GRE; if (!data) return; if (data[IFLA_GRE_LINK]) parms->link = nla_get_u32(data[IFLA_GRE_LINK]); if (data[IFLA_GRE_IFLAGS]) parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS])); if (data[IFLA_GRE_OFLAGS]) parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS])); if (data[IFLA_GRE_IKEY]) parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]); if (data[IFLA_GRE_OKEY]) parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]); if (data[IFLA_GRE_LOCAL]) parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]); if (data[IFLA_GRE_REMOTE]) parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]); if (data[IFLA_GRE_TTL]) parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]); if (data[IFLA_GRE_TOS]) parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]); if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) parms->iph.frag_off = htons(IP_DF); } /* This function returns true when ENCAP attributes are present in the nl msg */ static bool ipgre_netlink_encap_parms(struct nlattr *data[], struct ip_tunnel_encap *ipencap) { bool ret = false; memset(ipencap, 0, sizeof(*ipencap)); if (!data) return ret; if 
(data[IFLA_GRE_ENCAP_TYPE]) { ret = true; ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]); } if (data[IFLA_GRE_ENCAP_FLAGS]) { ret = true; ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]); } if (data[IFLA_GRE_ENCAP_SPORT]) { ret = true; ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]); } if (data[IFLA_GRE_ENCAP_DPORT]) { ret = true; ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]); } return ret; } static int gre_tap_init(struct net_device *dev) { __gre_tunnel_init(dev); dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; return ip_tunnel_init(dev); } static const struct net_device_ops gre_tap_netdev_ops = { .ndo_init = gre_tap_init, .ndo_uninit = ip_tunnel_uninit, .ndo_start_xmit = gre_tap_xmit, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = ip_tunnel_change_mtu, .ndo_get_stats64 = ip_tunnel_get_stats64, .ndo_get_iflink = ip_tunnel_get_iflink, }; static void ipgre_tap_setup(struct net_device *dev) { ether_setup(dev); dev->netdev_ops = &gre_tap_netdev_ops; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; ip_tunnel_setup(dev, gre_tap_net_id); } static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct ip_tunnel_parm p; struct ip_tunnel_encap ipencap; if (ipgre_netlink_encap_parms(data, &ipencap)) { struct ip_tunnel *t = netdev_priv(dev); int err = ip_tunnel_encap_setup(t, &ipencap); if (err < 0) return err; } ipgre_netlink_parms(data, tb, &p); return ip_tunnel_newlink(dev, tb, &p); } static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct ip_tunnel_parm p; struct ip_tunnel_encap ipencap; if (ipgre_netlink_encap_parms(data, &ipencap)) { struct ip_tunnel *t = netdev_priv(dev); int err = ip_tunnel_encap_setup(t, &ipencap); if (err < 0) return err; } ipgre_netlink_parms(data, tb, &p); return ip_tunnel_changelink(dev, tb, &p); } static size_t ipgre_get_size(const struct net_device *dev) { return /* 
IFLA_GRE_LINK */ nla_total_size(4) + /* IFLA_GRE_IFLAGS */ nla_total_size(2) + /* IFLA_GRE_OFLAGS */ nla_total_size(2) + /* IFLA_GRE_IKEY */ nla_total_size(4) + /* IFLA_GRE_OKEY */ nla_total_size(4) + /* IFLA_GRE_LOCAL */ nla_total_size(4) + /* IFLA_GRE_REMOTE */ nla_total_size(4) + /* IFLA_GRE_TTL */ nla_total_size(1) + /* IFLA_GRE_TOS */ nla_total_size(1) + /* IFLA_GRE_PMTUDISC */ nla_total_size(1) + /* IFLA_GRE_ENCAP_TYPE */ nla_total_size(2) + /* IFLA_GRE_ENCAP_FLAGS */ nla_total_size(2) + /* IFLA_GRE_ENCAP_SPORT */ nla_total_size(2) + /* IFLA_GRE_ENCAP_DPORT */ nla_total_size(2) + 0; } static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct ip_tunnel *t = netdev_priv(dev); struct ip_tunnel_parm *p = &t->parms; if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || nla_put_be16(skb, IFLA_GRE_IFLAGS, tnl_flags_to_gre_flags(p->i_flags)) || nla_put_be16(skb, IFLA_GRE_OFLAGS, tnl_flags_to_gre_flags(p->o_flags)) || nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) || nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) || nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) || nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) || nla_put_u8(skb, IFLA_GRE_PMTUDISC, !!(p->iph.frag_off & htons(IP_DF)))) goto nla_put_failure; if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE, t->encap.type) || nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT, t->encap.sport) || nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT, t->encap.dport) || nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS, t->encap.flags)) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = { [IFLA_GRE_LINK] = { .type = NLA_U32 }, [IFLA_GRE_IFLAGS] = { .type = NLA_U16 }, [IFLA_GRE_OFLAGS] = { .type = NLA_U16 }, [IFLA_GRE_IKEY] = { .type = NLA_U32 }, [IFLA_GRE_OKEY] = { .type = NLA_U32 }, [IFLA_GRE_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) }, [IFLA_GRE_REMOTE] = { .len = 
FIELD_SIZEOF(struct iphdr, daddr) }, [IFLA_GRE_TTL] = { .type = NLA_U8 }, [IFLA_GRE_TOS] = { .type = NLA_U8 }, [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 }, [IFLA_GRE_ENCAP_TYPE] = { .type = NLA_U16 }, [IFLA_GRE_ENCAP_FLAGS] = { .type = NLA_U16 }, [IFLA_GRE_ENCAP_SPORT] = { .type = NLA_U16 }, [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 }, }; static struct rtnl_link_ops ipgre_link_ops __read_mostly = { .kind = "gre", .maxtype = IFLA_GRE_MAX, .policy = ipgre_policy, .priv_size = sizeof(struct ip_tunnel), .setup = ipgre_tunnel_setup, .validate = ipgre_tunnel_validate, .newlink = ipgre_newlink, .changelink = ipgre_changelink, .dellink = ip_tunnel_dellink, .get_size = ipgre_get_size, .fill_info = ipgre_fill_info, .get_link_net = ip_tunnel_get_link_net, }; static struct rtnl_link_ops ipgre_tap_ops __read_mostly = { .kind = "gretap", .maxtype = IFLA_GRE_MAX, .policy = ipgre_policy, .priv_size = sizeof(struct ip_tunnel), .setup = ipgre_tap_setup, .validate = ipgre_tap_validate, .newlink = ipgre_newlink, .changelink = ipgre_changelink, .dellink = ip_tunnel_dellink, .get_size = ipgre_get_size, .fill_info = ipgre_fill_info, .get_link_net = ip_tunnel_get_link_net, }; static int __net_init ipgre_tap_init_net(struct net *net) { return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, NULL); } static void __net_exit ipgre_tap_exit_net(struct net *net) { struct ip_tunnel_net *itn = net_generic(net, gre_tap_net_id); ip_tunnel_delete_net(itn, &ipgre_tap_ops); } static struct pernet_operations ipgre_tap_net_ops = { .init = ipgre_tap_init_net, .exit = ipgre_tap_exit_net, .id = &gre_tap_net_id, .size = sizeof(struct ip_tunnel_net), }; static int __init ipgre_init(void) { int err; pr_info("GRE over IPv4 tunneling driver\n"); err = register_pernet_device(&ipgre_net_ops); if (err < 0) return err; err = register_pernet_device(&ipgre_tap_net_ops); if (err < 0) goto pnet_tap_faied; err = gre_cisco_register(&ipgre_protocol); if (err < 0) { pr_info("%s: can't add protocol\n", __func__); goto 
add_proto_failed; } err = rtnl_link_register(&ipgre_link_ops); if (err < 0) goto rtnl_link_failed; err = rtnl_link_register(&ipgre_tap_ops); if (err < 0) goto tap_ops_failed; return 0; tap_ops_failed: rtnl_link_unregister(&ipgre_link_ops); rtnl_link_failed: gre_cisco_unregister(&ipgre_protocol); add_proto_failed: unregister_pernet_device(&ipgre_tap_net_ops); pnet_tap_faied: unregister_pernet_device(&ipgre_net_ops); return err; } static void __exit ipgre_fini(void) { rtnl_link_unregister(&ipgre_tap_ops); rtnl_link_unregister(&ipgre_link_ops); gre_cisco_unregister(&ipgre_protocol); unregister_pernet_device(&ipgre_tap_net_ops); unregister_pernet_device(&ipgre_net_ops); } module_init(ipgre_init); module_exit(ipgre_fini); MODULE_LICENSE("GPL"); MODULE_ALIAS_RTNL_LINK("gre"); MODULE_ALIAS_RTNL_LINK("gretap"); MODULE_ALIAS_NETDEV("gre0"); MODULE_ALIAS_NETDEV("gretap0");
gpl-2.0
selva-simple/galaxyr_cm10_kernel
arch/powerpc/platforms/cell/spu_manage.c
405
13320
/* * spu management operations for of based platforms * * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 * Copyright 2006 Sony Corp. * (C) Copyright 2007 TOSHIBA CORPORATION * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/interrupt.h> #include <linux/list.h> #include <linux/module.h> #include <linux/ptrace.h> #include <linux/wait.h> #include <linux/mm.h> #include <linux/io.h> #include <linux/mutex.h> #include <linux/device.h> #include <asm/spu.h> #include <asm/spu_priv1.h> #include <asm/firmware.h> #include <asm/prom.h> #include "spufs/spufs.h" #include "interrupt.h" struct device_node *spu_devnode(struct spu *spu) { return spu->devnode; } EXPORT_SYMBOL_GPL(spu_devnode); static u64 __init find_spu_unit_number(struct device_node *spe) { const unsigned int *prop; int proplen; /* new device trees should provide the physical-id attribute */ prop = of_get_property(spe, "physical-id", &proplen); if (proplen == 4) return (u64)*prop; /* celleb device tree provides the unit-id */ prop = of_get_property(spe, "unit-id", &proplen); if (proplen == 4) return (u64)*prop; /* legacy device trees provide the id in the reg attribute */ prop = of_get_property(spe, "reg", &proplen); if (proplen == 4) return (u64)*prop; return 0; } static void spu_unmap(struct spu *spu) { if (!firmware_has_feature(FW_FEATURE_LPAR)) iounmap(spu->priv1); iounmap(spu->priv2); 
iounmap(spu->problem); iounmap((__force u8 __iomem *)spu->local_store); } static int __init spu_map_interrupts_old(struct spu *spu, struct device_node *np) { unsigned int isrc; const u32 *tmp; int nid; /* Get the interrupt source unit from the device-tree */ tmp = of_get_property(np, "isrc", NULL); if (!tmp) return -ENODEV; isrc = tmp[0]; tmp = of_get_property(np->parent->parent, "node-id", NULL); if (!tmp) { printk(KERN_WARNING "%s: can't find node-id\n", __func__); nid = spu->node; } else nid = tmp[0]; /* Add the node number */ isrc |= nid << IIC_IRQ_NODE_SHIFT; /* Now map interrupts of all 3 classes */ spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc); spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc); spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc); /* Right now, we only fail if class 2 failed */ return spu->irqs[2] == NO_IRQ ? -EINVAL : 0; } static void __iomem * __init spu_map_prop_old(struct spu *spu, struct device_node *n, const char *name) { const struct address_prop { unsigned long address; unsigned int len; } __attribute__((packed)) *prop; int proplen; prop = of_get_property(n, name, &proplen); if (prop == NULL || proplen != sizeof (struct address_prop)) return NULL; return ioremap(prop->address, prop->len); } static int __init spu_map_device_old(struct spu *spu) { struct device_node *node = spu->devnode; const char *prop; int ret; ret = -ENODEV; spu->name = of_get_property(node, "name", NULL); if (!spu->name) goto out; prop = of_get_property(node, "local-store", NULL); if (!prop) goto out; spu->local_store_phys = *(unsigned long *)prop; /* we use local store as ram, not io memory */ spu->local_store = (void __force *) spu_map_prop_old(spu, node, "local-store"); if (!spu->local_store) goto out; prop = of_get_property(node, "problem", NULL); if (!prop) goto out_unmap; spu->problem_phys = *(unsigned long *)prop; spu->problem = spu_map_prop_old(spu, node, "problem"); if (!spu->problem) goto out_unmap; spu->priv2 = 
spu_map_prop_old(spu, node, "priv2"); if (!spu->priv2) goto out_unmap; if (!firmware_has_feature(FW_FEATURE_LPAR)) { spu->priv1 = spu_map_prop_old(spu, node, "priv1"); if (!spu->priv1) goto out_unmap; } ret = 0; goto out; out_unmap: spu_unmap(spu); out: return ret; } static int __init spu_map_interrupts(struct spu *spu, struct device_node *np) { struct of_irq oirq; int ret; int i; for (i=0; i < 3; i++) { ret = of_irq_map_one(np, i, &oirq); if (ret) { pr_debug("spu_new: failed to get irq %d\n", i); goto err; } ret = -EINVAL; pr_debug(" irq %d no 0x%x on %s\n", i, oirq.specifier[0], oirq.controller->full_name); spu->irqs[i] = irq_create_of_mapping(oirq.controller, oirq.specifier, oirq.size); if (spu->irqs[i] == NO_IRQ) { pr_debug("spu_new: failed to map it !\n"); goto err; } } return 0; err: pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier, spu->name); for (; i >= 0; i--) { if (spu->irqs[i] != NO_IRQ) irq_dispose_mapping(spu->irqs[i]); } return ret; } static int spu_map_resource(struct spu *spu, int nr, void __iomem** virt, unsigned long *phys) { struct device_node *np = spu->devnode; struct resource resource = { }; unsigned long len; int ret; ret = of_address_to_resource(np, nr, &resource); if (ret) return ret; if (phys) *phys = resource.start; len = resource_size(&resource); *virt = ioremap(resource.start, len); if (!*virt) return -EINVAL; return 0; } static int __init spu_map_device(struct spu *spu) { struct device_node *np = spu->devnode; int ret = -ENODEV; spu->name = of_get_property(np, "name", NULL); if (!spu->name) goto out; ret = spu_map_resource(spu, 0, (void __iomem**)&spu->local_store, &spu->local_store_phys); if (ret) { pr_debug("spu_new: failed to map %s resource 0\n", np->full_name); goto out; } ret = spu_map_resource(spu, 1, (void __iomem**)&spu->problem, &spu->problem_phys); if (ret) { pr_debug("spu_new: failed to map %s resource 1\n", np->full_name); goto out_unmap; } ret = spu_map_resource(spu, 2, (void __iomem**)&spu->priv2, NULL); if 
(ret) { pr_debug("spu_new: failed to map %s resource 2\n", np->full_name); goto out_unmap; } if (!firmware_has_feature(FW_FEATURE_LPAR)) ret = spu_map_resource(spu, 3, (void __iomem**)&spu->priv1, NULL); if (ret) { pr_debug("spu_new: failed to map %s resource 3\n", np->full_name); goto out_unmap; } pr_debug("spu_new: %s maps:\n", np->full_name); pr_debug(" local store : 0x%016lx -> 0x%p\n", spu->local_store_phys, spu->local_store); pr_debug(" problem state : 0x%016lx -> 0x%p\n", spu->problem_phys, spu->problem); pr_debug(" priv2 : 0x%p\n", spu->priv2); pr_debug(" priv1 : 0x%p\n", spu->priv1); return 0; out_unmap: spu_unmap(spu); out: pr_debug("failed to map spe %s: %d\n", spu->name, ret); return ret; } static int __init of_enumerate_spus(int (*fn)(void *data)) { int ret; struct device_node *node; unsigned int n = 0; ret = -ENODEV; for (node = of_find_node_by_type(NULL, "spe"); node; node = of_find_node_by_type(node, "spe")) { ret = fn(node); if (ret) { printk(KERN_WARNING "%s: Error initializing %s\n", __func__, node->name); break; } n++; } return ret ? 
ret : n; } static int __init of_create_spu(struct spu *spu, void *data) { int ret; struct device_node *spe = (struct device_node *)data; static int legacy_map = 0, legacy_irq = 0; spu->devnode = of_node_get(spe); spu->spe_id = find_spu_unit_number(spe); spu->node = of_node_to_nid(spe); if (spu->node >= MAX_NUMNODES) { printk(KERN_WARNING "SPE %s on node %d ignored," " node number too big\n", spe->full_name, spu->node); printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n"); ret = -ENODEV; goto out; } ret = spu_map_device(spu); if (ret) { if (!legacy_map) { legacy_map = 1; printk(KERN_WARNING "%s: Legacy device tree found, " "trying to map old style\n", __func__); } ret = spu_map_device_old(spu); if (ret) { printk(KERN_ERR "Unable to map %s\n", spu->name); goto out; } } ret = spu_map_interrupts(spu, spe); if (ret) { if (!legacy_irq) { legacy_irq = 1; printk(KERN_WARNING "%s: Legacy device tree found, " "trying old style irq\n", __func__); } ret = spu_map_interrupts_old(spu, spe); if (ret) { printk(KERN_ERR "%s: could not map interrupts\n", spu->name); goto out_unmap; } } pr_debug("Using SPE %s %p %p %p %p %d\n", spu->name, spu->local_store, spu->problem, spu->priv1, spu->priv2, spu->number); goto out; out_unmap: spu_unmap(spu); out: return ret; } static int of_destroy_spu(struct spu *spu) { spu_unmap(spu); of_node_put(spu->devnode); return 0; } static void enable_spu_by_master_run(struct spu_context *ctx) { ctx->ops->master_start(ctx); } static void disable_spu_by_master_run(struct spu_context *ctx) { ctx->ops->master_stop(ctx); } /* Hardcoded affinity idxs for qs20 */ #define QS20_SPES_PER_BE 8 static int qs20_reg_idxs[QS20_SPES_PER_BE] = { 0, 2, 4, 6, 7, 5, 3, 1 }; static int qs20_reg_memory[QS20_SPES_PER_BE] = { 1, 1, 0, 0, 0, 0, 0, 0 }; static struct spu *spu_lookup_reg(int node, u32 reg) { struct spu *spu; const u32 *spu_reg; list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { spu_reg = of_get_property(spu_devnode(spu), "reg", NULL); if (*spu_reg 
== reg) return spu; } return NULL; } static void init_affinity_qs20_harcoded(void) { int node, i; struct spu *last_spu, *spu; u32 reg; for (node = 0; node < MAX_NUMNODES; node++) { last_spu = NULL; for (i = 0; i < QS20_SPES_PER_BE; i++) { reg = qs20_reg_idxs[i]; spu = spu_lookup_reg(node, reg); if (!spu) continue; spu->has_mem_affinity = qs20_reg_memory[reg]; if (last_spu) list_add_tail(&spu->aff_list, &last_spu->aff_list); last_spu = spu; } } } static int of_has_vicinity(void) { struct device_node *dn; for_each_node_by_type(dn, "spe") { if (of_find_property(dn, "vicinity", NULL)) { of_node_put(dn); return 1; } } return 0; } static struct spu *devnode_spu(int cbe, struct device_node *dn) { struct spu *spu; list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list) if (spu_devnode(spu) == dn) return spu; return NULL; } static struct spu * neighbour_spu(int cbe, struct device_node *target, struct device_node *avoid) { struct spu *spu; struct device_node *spu_dn; const phandle *vic_handles; int lenp, i; list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list) { spu_dn = spu_devnode(spu); if (spu_dn == avoid) continue; vic_handles = of_get_property(spu_dn, "vicinity", &lenp); for (i=0; i < (lenp / sizeof(phandle)); i++) { if (vic_handles[i] == target->phandle) return spu; } } return NULL; } static void init_affinity_node(int cbe) { struct spu *spu, *last_spu; struct device_node *vic_dn, *last_spu_dn; phandle avoid_ph; const phandle *vic_handles; const char *name; int lenp, i, added; last_spu = list_first_entry(&cbe_spu_info[cbe].spus, struct spu, cbe_list); avoid_ph = 0; for (added = 1; added < cbe_spu_info[cbe].n_spus; added++) { last_spu_dn = spu_devnode(last_spu); vic_handles = of_get_property(last_spu_dn, "vicinity", &lenp); /* * Walk through each phandle in vicinity property of the spu * (tipically two vicinity phandles per spe node) */ for (i = 0; i < (lenp / sizeof(phandle)); i++) { if (vic_handles[i] == avoid_ph) continue; vic_dn = 
of_find_node_by_phandle(vic_handles[i]); if (!vic_dn) continue; /* a neighbour might be spe, mic-tm, or bif0 */ name = of_get_property(vic_dn, "name", NULL); if (!name) continue; if (strcmp(name, "spe") == 0) { spu = devnode_spu(cbe, vic_dn); avoid_ph = last_spu_dn->phandle; } else { /* * "mic-tm" and "bif0" nodes do not have * vicinity property. So we need to find the * spe which has vic_dn as neighbour, but * skipping the one we came from (last_spu_dn) */ spu = neighbour_spu(cbe, vic_dn, last_spu_dn); if (!spu) continue; if (!strcmp(name, "mic-tm")) { last_spu->has_mem_affinity = 1; spu->has_mem_affinity = 1; } avoid_ph = vic_dn->phandle; } list_add_tail(&spu->aff_list, &last_spu->aff_list); last_spu = spu; break; } } } static void init_affinity_fw(void) { int cbe; for (cbe = 0; cbe < MAX_NUMNODES; cbe++) init_affinity_node(cbe); } static int __init init_affinity(void) { if (of_has_vicinity()) { init_affinity_fw(); } else { long root = of_get_flat_dt_root(); if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0")) init_affinity_qs20_harcoded(); else printk("No affinity configuration found\n"); } return 0; } const struct spu_management_ops spu_management_of_ops = { .enumerate_spus = of_enumerate_spus, .create_spu = of_create_spu, .destroy_spu = of_destroy_spu, .enable_spu = enable_spu_by_master_run, .disable_spu = disable_spu_by_master_run, .init_affinity = init_affinity, };
gpl-2.0
zjh3123629/linux-4.5.1
drivers/misc/ad525x_dpot-spi.c
661
2883
/* * Driver for the Analog Devices digital potentiometers (SPI bus) * * Copyright (C) 2010-2011 Michael Hennerich, Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/spi/spi.h> #include <linux/module.h> #include "ad525x_dpot.h" /* SPI bus functions */ static int write8(void *client, u8 val) { u8 data = val; return spi_write(client, &data, 1); } static int write16(void *client, u8 reg, u8 val) { u8 data[2] = {reg, val}; return spi_write(client, data, 2); } static int write24(void *client, u8 reg, u16 val) { u8 data[3] = {reg, val >> 8, val}; return spi_write(client, data, 3); } static int read8(void *client) { int ret; u8 data; ret = spi_read(client, &data, 1); if (ret < 0) return ret; return data; } static int read16(void *client, u8 reg) { int ret; u8 buf_rx[2]; write16(client, reg, 0); ret = spi_read(client, buf_rx, 2); if (ret < 0) return ret; return (buf_rx[0] << 8) | buf_rx[1]; } static int read24(void *client, u8 reg) { int ret; u8 buf_rx[3]; write24(client, reg, 0); ret = spi_read(client, buf_rx, 3); if (ret < 0) return ret; return (buf_rx[1] << 8) | buf_rx[2]; } static const struct ad_dpot_bus_ops bops = { .read_d8 = read8, .read_r8d8 = read16, .read_r8d16 = read24, .write_d8 = write8, .write_r8d8 = write16, .write_r8d16 = write24, }; static int ad_dpot_spi_probe(struct spi_device *spi) { struct ad_dpot_bus_data bdata = { .client = spi, .bops = &bops, }; return ad_dpot_probe(&spi->dev, &bdata, spi_get_device_id(spi)->driver_data, spi_get_device_id(spi)->name); } static int ad_dpot_spi_remove(struct spi_device *spi) { return ad_dpot_remove(&spi->dev); } static const struct spi_device_id ad_dpot_spi_id[] = { {"ad5160", AD5160_ID}, {"ad5161", AD5161_ID}, {"ad5162", AD5162_ID}, {"ad5165", AD5165_ID}, {"ad5200", AD5200_ID}, {"ad5201", AD5201_ID}, {"ad5203", AD5203_ID}, {"ad5204", AD5204_ID}, {"ad5206", AD5206_ID}, {"ad5207", AD5207_ID}, {"ad5231", AD5231_ID}, {"ad5232", AD5232_ID}, {"ad5233", AD5233_ID}, {"ad5235", AD5235_ID}, 
{"ad5260", AD5260_ID}, {"ad5262", AD5262_ID}, {"ad5263", AD5263_ID}, {"ad5290", AD5290_ID}, {"ad5291", AD5291_ID}, {"ad5292", AD5292_ID}, {"ad5293", AD5293_ID}, {"ad7376", AD7376_ID}, {"ad8400", AD8400_ID}, {"ad8402", AD8402_ID}, {"ad8403", AD8403_ID}, {"adn2850", ADN2850_ID}, {"ad5270", AD5270_ID}, {"ad5271", AD5271_ID}, {} }; MODULE_DEVICE_TABLE(spi, ad_dpot_spi_id); static struct spi_driver ad_dpot_spi_driver = { .driver = { .name = "ad_dpot", }, .probe = ad_dpot_spi_probe, .remove = ad_dpot_spi_remove, .id_table = ad_dpot_spi_id, }; module_spi_driver(ad_dpot_spi_driver); MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); MODULE_DESCRIPTION("digital potentiometer SPI bus driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("spi:ad_dpot");
gpl-2.0
AntonGitName/au-linux-kernel-spring-2016
linux/arch/x86/mm/pgtable_32.c
1429
2195
#include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/nmi.h> #include <linux/swap.h> #include <linux/smp.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/spinlock.h> #include <linux/module.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/fixmap.h> #include <asm/e820.h> #include <asm/tlb.h> #include <asm/tlbflush.h> #include <asm/io.h> unsigned int __VMALLOC_RESERVE = 128 << 20; /* * Associate a virtual page frame with a given physical page frame * and protection flags for that frame. */ void set_pte_vaddr(unsigned long vaddr, pte_t pteval) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; pgd = swapper_pg_dir + pgd_index(vaddr); if (pgd_none(*pgd)) { BUG(); return; } pud = pud_offset(pgd, vaddr); if (pud_none(*pud)) { BUG(); return; } pmd = pmd_offset(pud, vaddr); if (pmd_none(*pmd)) { BUG(); return; } pte = pte_offset_kernel(pmd, vaddr); if (pte_val(pteval)) set_pte_at(&init_mm, vaddr, pte, pteval); else pte_clear(&init_mm, vaddr, pte); /* * It's enough to flush this one mapping. * (PGE mappings get flushed as well) */ __flush_tlb_one(vaddr); } unsigned long __FIXADDR_TOP = 0xfffff000; EXPORT_SYMBOL(__FIXADDR_TOP); /* * vmalloc=size forces the vmalloc area to be exactly 'size' * bytes. This can be used to increase (or decrease) the * vmalloc area - the default is 128m. */ static int __init parse_vmalloc(char *arg) { if (!arg) return -EINVAL; /* Add VMALLOC_OFFSET to the parsed value due to vm area guard hole*/ __VMALLOC_RESERVE = memparse(arg, &arg) + VMALLOC_OFFSET; return 0; } early_param("vmalloc", parse_vmalloc); /* * reservetop=size reserves a hole at the top of the kernel address space which * a hypervisor can load into later. Needed for dynamically loaded hypervisors, * so relocating the fixmap can be done before paging initialization. 
*/ static int __init parse_reservetop(char *arg) { unsigned long address; if (!arg) return -EINVAL; address = memparse(arg, &arg); reserve_top_address(address); early_ioremap_init(); return 0; } early_param("reservetop", parse_reservetop);
gpl-2.0
limitedev66/android_kernel_shooteru
drivers/mmc/host/mxs-mmc.c
1429
22922
/* * Portions copyright (C) 2003 Russell King, PXA MMCI Driver * Portions copyright (C) 2004-2005 Pierre Ossman, W83L51xD SD/MMC driver * * Copyright 2008 Embedded Alley Solutions, Inc. * Copyright 2009-2011 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/highmem.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/completion.h> #include <linux/mmc/host.h> #include <linux/mmc/mmc.h> #include <linux/mmc/sdio.h> #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <mach/mxs.h> #include <mach/common.h> #include <mach/dma.h> #include <mach/mmc.h> #define DRIVER_NAME "mxs-mmc" /* card detect polling timeout */ #define MXS_MMC_DETECT_TIMEOUT (HZ/2) #define SSP_VERSION_LATEST 4 #define ssp_is_old() (host->version < SSP_VERSION_LATEST) /* SSP registers */ #define HW_SSP_CTRL0 0x000 #define BM_SSP_CTRL0_RUN (1 << 29) #define BM_SSP_CTRL0_SDIO_IRQ_CHECK (1 << 28) #define BM_SSP_CTRL0_IGNORE_CRC (1 << 26) #define BM_SSP_CTRL0_READ (1 << 25) #define BM_SSP_CTRL0_DATA_XFER (1 << 24) #define BP_SSP_CTRL0_BUS_WIDTH (22) #define 
BM_SSP_CTRL0_BUS_WIDTH (0x3 << 22) #define BM_SSP_CTRL0_WAIT_FOR_IRQ (1 << 21) #define BM_SSP_CTRL0_LONG_RESP (1 << 19) #define BM_SSP_CTRL0_GET_RESP (1 << 17) #define BM_SSP_CTRL0_ENABLE (1 << 16) #define BP_SSP_CTRL0_XFER_COUNT (0) #define BM_SSP_CTRL0_XFER_COUNT (0xffff) #define HW_SSP_CMD0 0x010 #define BM_SSP_CMD0_DBL_DATA_RATE_EN (1 << 25) #define BM_SSP_CMD0_SLOW_CLKING_EN (1 << 22) #define BM_SSP_CMD0_CONT_CLKING_EN (1 << 21) #define BM_SSP_CMD0_APPEND_8CYC (1 << 20) #define BP_SSP_CMD0_BLOCK_SIZE (16) #define BM_SSP_CMD0_BLOCK_SIZE (0xf << 16) #define BP_SSP_CMD0_BLOCK_COUNT (8) #define BM_SSP_CMD0_BLOCK_COUNT (0xff << 8) #define BP_SSP_CMD0_CMD (0) #define BM_SSP_CMD0_CMD (0xff) #define HW_SSP_CMD1 0x020 #define HW_SSP_XFER_SIZE 0x030 #define HW_SSP_BLOCK_SIZE 0x040 #define BP_SSP_BLOCK_SIZE_BLOCK_COUNT (4) #define BM_SSP_BLOCK_SIZE_BLOCK_COUNT (0xffffff << 4) #define BP_SSP_BLOCK_SIZE_BLOCK_SIZE (0) #define BM_SSP_BLOCK_SIZE_BLOCK_SIZE (0xf) #define HW_SSP_TIMING (ssp_is_old() ? 0x050 : 0x070) #define BP_SSP_TIMING_TIMEOUT (16) #define BM_SSP_TIMING_TIMEOUT (0xffff << 16) #define BP_SSP_TIMING_CLOCK_DIVIDE (8) #define BM_SSP_TIMING_CLOCK_DIVIDE (0xff << 8) #define BP_SSP_TIMING_CLOCK_RATE (0) #define BM_SSP_TIMING_CLOCK_RATE (0xff) #define HW_SSP_CTRL1 (ssp_is_old() ? 
0x060 : 0x080) #define BM_SSP_CTRL1_SDIO_IRQ (1 << 31) #define BM_SSP_CTRL1_SDIO_IRQ_EN (1 << 30) #define BM_SSP_CTRL1_RESP_ERR_IRQ (1 << 29) #define BM_SSP_CTRL1_RESP_ERR_IRQ_EN (1 << 28) #define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ (1 << 27) #define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN (1 << 26) #define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ (1 << 25) #define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN (1 << 24) #define BM_SSP_CTRL1_DATA_CRC_IRQ (1 << 23) #define BM_SSP_CTRL1_DATA_CRC_IRQ_EN (1 << 22) #define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ (1 << 21) #define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ_EN (1 << 20) #define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ (1 << 17) #define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN (1 << 16) #define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ (1 << 15) #define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ_EN (1 << 14) #define BM_SSP_CTRL1_DMA_ENABLE (1 << 13) #define BM_SSP_CTRL1_POLARITY (1 << 9) #define BP_SSP_CTRL1_WORD_LENGTH (4) #define BM_SSP_CTRL1_WORD_LENGTH (0xf << 4) #define BP_SSP_CTRL1_SSP_MODE (0) #define BM_SSP_CTRL1_SSP_MODE (0xf) #define HW_SSP_SDRESP0 (ssp_is_old() ? 0x080 : 0x0a0) #define HW_SSP_SDRESP1 (ssp_is_old() ? 0x090 : 0x0b0) #define HW_SSP_SDRESP2 (ssp_is_old() ? 0x0a0 : 0x0c0) #define HW_SSP_SDRESP3 (ssp_is_old() ? 0x0b0 : 0x0d0) #define HW_SSP_STATUS (ssp_is_old() ? 0x0c0 : 0x100) #define BM_SSP_STATUS_CARD_DETECT (1 << 28) #define BM_SSP_STATUS_SDIO_IRQ (1 << 17) #define HW_SSP_VERSION (cpu_is_mx23() ? 
0x110 : 0x130) #define BP_SSP_VERSION_MAJOR (24) #define BF_SSP(value, field) (((value) << BP_SSP_##field) & BM_SSP_##field) #define MXS_MMC_IRQ_BITS (BM_SSP_CTRL1_SDIO_IRQ | \ BM_SSP_CTRL1_RESP_ERR_IRQ | \ BM_SSP_CTRL1_RESP_TIMEOUT_IRQ | \ BM_SSP_CTRL1_DATA_TIMEOUT_IRQ | \ BM_SSP_CTRL1_DATA_CRC_IRQ | \ BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ | \ BM_SSP_CTRL1_RECV_TIMEOUT_IRQ | \ BM_SSP_CTRL1_FIFO_OVERRUN_IRQ) #define SSP_PIO_NUM 3 struct mxs_mmc_host { struct mmc_host *mmc; struct mmc_request *mrq; struct mmc_command *cmd; struct mmc_data *data; void __iomem *base; int irq; struct resource *res; struct resource *dma_res; struct clk *clk; unsigned int clk_rate; struct dma_chan *dmach; struct mxs_dma_data dma_data; unsigned int dma_dir; u32 ssp_pio_words[SSP_PIO_NUM]; unsigned int version; unsigned char bus_width; spinlock_t lock; int sdio_irq_en; }; static int mxs_mmc_get_ro(struct mmc_host *mmc) { struct mxs_mmc_host *host = mmc_priv(mmc); struct mxs_mmc_platform_data *pdata = mmc_dev(host->mmc)->platform_data; if (!pdata) return -EFAULT; if (!gpio_is_valid(pdata->wp_gpio)) return -EINVAL; return gpio_get_value(pdata->wp_gpio); } static int mxs_mmc_get_cd(struct mmc_host *mmc) { struct mxs_mmc_host *host = mmc_priv(mmc); return !(readl(host->base + HW_SSP_STATUS) & BM_SSP_STATUS_CARD_DETECT); } static void mxs_mmc_reset(struct mxs_mmc_host *host) { u32 ctrl0, ctrl1; mxs_reset_block(host->base); ctrl0 = BM_SSP_CTRL0_IGNORE_CRC; ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) | BF_SSP(0x7, CTRL1_WORD_LENGTH) | BM_SSP_CTRL1_DMA_ENABLE | BM_SSP_CTRL1_POLARITY | BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN | BM_SSP_CTRL1_DATA_CRC_IRQ_EN | BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN | BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN | BM_SSP_CTRL1_RESP_ERR_IRQ_EN; writel(BF_SSP(0xffff, TIMING_TIMEOUT) | BF_SSP(2, TIMING_CLOCK_DIVIDE) | BF_SSP(0, TIMING_CLOCK_RATE), host->base + HW_SSP_TIMING); if (host->sdio_irq_en) { ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN; } writel(ctrl0, host->base + 
HW_SSP_CTRL0); writel(ctrl1, host->base + HW_SSP_CTRL1); } static void mxs_mmc_start_cmd(struct mxs_mmc_host *host, struct mmc_command *cmd); static void mxs_mmc_request_done(struct mxs_mmc_host *host) { struct mmc_command *cmd = host->cmd; struct mmc_data *data = host->data; struct mmc_request *mrq = host->mrq; if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) { if (mmc_resp_type(cmd) & MMC_RSP_136) { cmd->resp[3] = readl(host->base + HW_SSP_SDRESP0); cmd->resp[2] = readl(host->base + HW_SSP_SDRESP1); cmd->resp[1] = readl(host->base + HW_SSP_SDRESP2); cmd->resp[0] = readl(host->base + HW_SSP_SDRESP3); } else { cmd->resp[0] = readl(host->base + HW_SSP_SDRESP0); } } if (data) { dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir); /* * If there was an error on any block, we mark all * data blocks as being in error. */ if (!data->error) data->bytes_xfered = data->blocks * data->blksz; else data->bytes_xfered = 0; host->data = NULL; if (mrq->stop) { mxs_mmc_start_cmd(host, mrq->stop); return; } } host->mrq = NULL; mmc_request_done(host->mmc, mrq); } static void mxs_mmc_dma_irq_callback(void *param) { struct mxs_mmc_host *host = param; mxs_mmc_request_done(host); } static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id) { struct mxs_mmc_host *host = dev_id; struct mmc_command *cmd = host->cmd; struct mmc_data *data = host->data; u32 stat; spin_lock(&host->lock); stat = readl(host->base + HW_SSP_CTRL1); writel(stat & MXS_MMC_IRQ_BITS, host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR); if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN)) mmc_signal_sdio_irq(host->mmc); spin_unlock(&host->lock); if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ) cmd->error = -ETIMEDOUT; else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ) cmd->error = -EIO; if (data) { if (stat & (BM_SSP_CTRL1_DATA_TIMEOUT_IRQ | BM_SSP_CTRL1_RECV_TIMEOUT_IRQ)) data->error = -ETIMEDOUT; else if (stat & BM_SSP_CTRL1_DATA_CRC_IRQ) data->error = -EILSEQ; else if (stat & 
(BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ | BM_SSP_CTRL1_FIFO_OVERRUN_IRQ)) data->error = -EIO; } return IRQ_HANDLED; } static struct dma_async_tx_descriptor *mxs_mmc_prep_dma( struct mxs_mmc_host *host, unsigned int append) { struct dma_async_tx_descriptor *desc; struct mmc_data *data = host->data; struct scatterlist * sgl; unsigned int sg_len; if (data) { /* data */ dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir); sgl = data->sg; sg_len = data->sg_len; } else { /* pio */ sgl = (struct scatterlist *) host->ssp_pio_words; sg_len = SSP_PIO_NUM; } desc = host->dmach->device->device_prep_slave_sg(host->dmach, sgl, sg_len, host->dma_dir, append); if (desc) { desc->callback = mxs_mmc_dma_irq_callback; desc->callback_param = host; } else { if (data) dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir); } return desc; } static void mxs_mmc_bc(struct mxs_mmc_host *host) { struct mmc_command *cmd = host->cmd; struct dma_async_tx_descriptor *desc; u32 ctrl0, cmd0, cmd1; ctrl0 = BM_SSP_CTRL0_ENABLE | BM_SSP_CTRL0_IGNORE_CRC; cmd0 = BF_SSP(cmd->opcode, CMD0_CMD) | BM_SSP_CMD0_APPEND_8CYC; cmd1 = cmd->arg; if (host->sdio_irq_en) { ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; } host->ssp_pio_words[0] = ctrl0; host->ssp_pio_words[1] = cmd0; host->ssp_pio_words[2] = cmd1; host->dma_dir = DMA_NONE; desc = mxs_mmc_prep_dma(host, 0); if (!desc) goto out; dmaengine_submit(desc); return; out: dev_warn(mmc_dev(host->mmc), "%s: failed to prep dma\n", __func__); } static void mxs_mmc_ac(struct mxs_mmc_host *host) { struct mmc_command *cmd = host->cmd; struct dma_async_tx_descriptor *desc; u32 ignore_crc, get_resp, long_resp; u32 ctrl0, cmd0, cmd1; ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ? 0 : BM_SSP_CTRL0_IGNORE_CRC; get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ? BM_SSP_CTRL0_GET_RESP : 0; long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ? 
BM_SSP_CTRL0_LONG_RESP : 0; ctrl0 = BM_SSP_CTRL0_ENABLE | ignore_crc | get_resp | long_resp; cmd0 = BF_SSP(cmd->opcode, CMD0_CMD); cmd1 = cmd->arg; if (host->sdio_irq_en) { ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; } host->ssp_pio_words[0] = ctrl0; host->ssp_pio_words[1] = cmd0; host->ssp_pio_words[2] = cmd1; host->dma_dir = DMA_NONE; desc = mxs_mmc_prep_dma(host, 0); if (!desc) goto out; dmaengine_submit(desc); return; out: dev_warn(mmc_dev(host->mmc), "%s: failed to prep dma\n", __func__); } static unsigned short mxs_ns_to_ssp_ticks(unsigned clock_rate, unsigned ns) { const unsigned int ssp_timeout_mul = 4096; /* * Calculate ticks in ms since ns are large numbers * and might overflow */ const unsigned int clock_per_ms = clock_rate / 1000; const unsigned int ms = ns / 1000; const unsigned int ticks = ms * clock_per_ms; const unsigned int ssp_ticks = ticks / ssp_timeout_mul; WARN_ON(ssp_ticks == 0); return ssp_ticks; } static void mxs_mmc_adtc(struct mxs_mmc_host *host) { struct mmc_command *cmd = host->cmd; struct mmc_data *data = cmd->data; struct dma_async_tx_descriptor *desc; struct scatterlist *sgl = data->sg, *sg; unsigned int sg_len = data->sg_len; int i; unsigned short dma_data_dir, timeout; unsigned int data_size = 0, log2_blksz; unsigned int blocks = data->blocks; u32 ignore_crc, get_resp, long_resp, read; u32 ctrl0, cmd0, cmd1, val; ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ? 0 : BM_SSP_CTRL0_IGNORE_CRC; get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ? BM_SSP_CTRL0_GET_RESP : 0; long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ? 
BM_SSP_CTRL0_LONG_RESP : 0; if (data->flags & MMC_DATA_WRITE) { dma_data_dir = DMA_TO_DEVICE; read = 0; } else { dma_data_dir = DMA_FROM_DEVICE; read = BM_SSP_CTRL0_READ; } ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) | ignore_crc | get_resp | long_resp | BM_SSP_CTRL0_DATA_XFER | read | BM_SSP_CTRL0_WAIT_FOR_IRQ | BM_SSP_CTRL0_ENABLE; cmd0 = BF_SSP(cmd->opcode, CMD0_CMD); /* get logarithm to base 2 of block size for setting register */ log2_blksz = ilog2(data->blksz); /* * take special care of the case that data size from data->sg * is not equal to blocks x blksz */ for_each_sg(sgl, sg, sg_len, i) data_size += sg->length; if (data_size != data->blocks * data->blksz) blocks = 1; /* xfer count, block size and count need to be set differently */ if (ssp_is_old()) { ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT); cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) | BF_SSP(blocks - 1, CMD0_BLOCK_COUNT); } else { writel(data_size, host->base + HW_SSP_XFER_SIZE); writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) | BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT), host->base + HW_SSP_BLOCK_SIZE); } if ((cmd->opcode == MMC_STOP_TRANSMISSION) || (cmd->opcode == SD_IO_RW_EXTENDED)) cmd0 |= BM_SSP_CMD0_APPEND_8CYC; cmd1 = cmd->arg; if (host->sdio_irq_en) { ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; } /* set the timeout count */ timeout = mxs_ns_to_ssp_ticks(host->clk_rate, data->timeout_ns); val = readl(host->base + HW_SSP_TIMING); val &= ~(BM_SSP_TIMING_TIMEOUT); val |= BF_SSP(timeout, TIMING_TIMEOUT); writel(val, host->base + HW_SSP_TIMING); /* pio */ host->ssp_pio_words[0] = ctrl0; host->ssp_pio_words[1] = cmd0; host->ssp_pio_words[2] = cmd1; host->dma_dir = DMA_NONE; desc = mxs_mmc_prep_dma(host, 0); if (!desc) goto out; /* append data sg */ WARN_ON(host->data != NULL); host->data = data; host->dma_dir = dma_data_dir; desc = mxs_mmc_prep_dma(host, 1); if (!desc) goto out; dmaengine_submit(desc); return; out: 
dev_warn(mmc_dev(host->mmc), "%s: failed to prep dma\n", __func__); } static void mxs_mmc_start_cmd(struct mxs_mmc_host *host, struct mmc_command *cmd) { host->cmd = cmd; switch (mmc_cmd_type(cmd)) { case MMC_CMD_BC: mxs_mmc_bc(host); break; case MMC_CMD_BCR: mxs_mmc_ac(host); break; case MMC_CMD_AC: mxs_mmc_ac(host); break; case MMC_CMD_ADTC: mxs_mmc_adtc(host); break; default: dev_warn(mmc_dev(host->mmc), "%s: unknown MMC command\n", __func__); break; } } static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct mxs_mmc_host *host = mmc_priv(mmc); WARN_ON(host->mrq != NULL); host->mrq = mrq; mxs_mmc_start_cmd(host, mrq->cmd); } static void mxs_mmc_set_clk_rate(struct mxs_mmc_host *host, unsigned int rate) { unsigned int ssp_clk, ssp_sck; u32 clock_divide, clock_rate; u32 val; ssp_clk = clk_get_rate(host->clk); for (clock_divide = 2; clock_divide <= 254; clock_divide += 2) { clock_rate = DIV_ROUND_UP(ssp_clk, rate * clock_divide); clock_rate = (clock_rate > 0) ? 
clock_rate - 1 : 0; if (clock_rate <= 255) break; } if (clock_divide > 254) { dev_err(mmc_dev(host->mmc), "%s: cannot set clock to %d\n", __func__, rate); return; } ssp_sck = ssp_clk / clock_divide / (1 + clock_rate); val = readl(host->base + HW_SSP_TIMING); val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE); val |= BF_SSP(clock_divide, TIMING_CLOCK_DIVIDE); val |= BF_SSP(clock_rate, TIMING_CLOCK_RATE); writel(val, host->base + HW_SSP_TIMING); host->clk_rate = ssp_sck; dev_dbg(mmc_dev(host->mmc), "%s: clock_divide %d, clock_rate %d, ssp_clk %d, rate_actual %d, rate_requested %d\n", __func__, clock_divide, clock_rate, ssp_clk, ssp_sck, rate); } static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct mxs_mmc_host *host = mmc_priv(mmc); if (ios->bus_width == MMC_BUS_WIDTH_8) host->bus_width = 2; else if (ios->bus_width == MMC_BUS_WIDTH_4) host->bus_width = 1; else host->bus_width = 0; if (ios->clock) mxs_mmc_set_clk_rate(host, ios->clock); } static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) { struct mxs_mmc_host *host = mmc_priv(mmc); unsigned long flags; spin_lock_irqsave(&host->lock, flags); host->sdio_irq_en = enable; if (enable) { writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, host->base + HW_SSP_CTRL0 + MXS_SET_ADDR); writel(BM_SSP_CTRL1_SDIO_IRQ_EN, host->base + HW_SSP_CTRL1 + MXS_SET_ADDR); if (readl(host->base + HW_SSP_STATUS) & BM_SSP_STATUS_SDIO_IRQ) mmc_signal_sdio_irq(host->mmc); } else { writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, host->base + HW_SSP_CTRL0 + MXS_CLR_ADDR); writel(BM_SSP_CTRL1_SDIO_IRQ_EN, host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR); } spin_unlock_irqrestore(&host->lock, flags); } static const struct mmc_host_ops mxs_mmc_ops = { .request = mxs_mmc_request, .get_ro = mxs_mmc_get_ro, .get_cd = mxs_mmc_get_cd, .set_ios = mxs_mmc_set_ios, .enable_sdio_irq = mxs_mmc_enable_sdio_irq, }; static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param) { struct mxs_mmc_host *host = param; if 
(!mxs_dma_is_apbh(chan)) return false; if (chan->chan_id != host->dma_res->start) return false; chan->private = &host->dma_data; return true; } static int mxs_mmc_probe(struct platform_device *pdev) { struct mxs_mmc_host *host; struct mmc_host *mmc; struct resource *iores, *dmares, *r; struct mxs_mmc_platform_data *pdata; int ret = 0, irq_err, irq_dma; dma_cap_mask_t mask; iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0); irq_err = platform_get_irq(pdev, 0); irq_dma = platform_get_irq(pdev, 1); if (!iores || !dmares || irq_err < 0 || irq_dma < 0) return -EINVAL; r = request_mem_region(iores->start, resource_size(iores), pdev->name); if (!r) return -EBUSY; mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev); if (!mmc) { ret = -ENOMEM; goto out_release_mem; } host = mmc_priv(mmc); host->base = ioremap(r->start, resource_size(r)); if (!host->base) { ret = -ENOMEM; goto out_mmc_free; } /* only major verion does matter */ host->version = readl(host->base + HW_SSP_VERSION) >> BP_SSP_VERSION_MAJOR; host->mmc = mmc; host->res = r; host->dma_res = dmares; host->irq = irq_err; host->sdio_irq_en = 0; host->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(host->clk)) { ret = PTR_ERR(host->clk); goto out_iounmap; } clk_enable(host->clk); mxs_mmc_reset(host); dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); host->dma_data.chan_irq = irq_dma; host->dmach = dma_request_channel(mask, mxs_mmc_dma_filter, host); if (!host->dmach) { dev_err(mmc_dev(host->mmc), "%s: failed to request dma\n", __func__); goto out_clk_put; } /* set mmc core parameters */ mmc->ops = &mxs_mmc_ops; mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL; pdata = mmc_dev(host->mmc)->platform_data; if (pdata) { if (pdata->flags & SLOTF_8_BIT_CAPABLE) mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA; if (pdata->flags & SLOTF_4_BIT_CAPABLE) mmc->caps |= MMC_CAP_4_BIT_DATA; } mmc->f_min = 400000; 
mmc->f_max = 288000000; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; mmc->max_segs = 52; mmc->max_blk_size = 1 << 0xf; mmc->max_blk_count = (ssp_is_old()) ? 0xff : 0xffffff; mmc->max_req_size = (ssp_is_old()) ? 0xffff : 0xffffffff; mmc->max_seg_size = dma_get_max_seg_size(host->dmach->device->dev); platform_set_drvdata(pdev, mmc); ret = request_irq(host->irq, mxs_mmc_irq_handler, 0, DRIVER_NAME, host); if (ret) goto out_free_dma; spin_lock_init(&host->lock); ret = mmc_add_host(mmc); if (ret) goto out_free_irq; dev_info(mmc_dev(host->mmc), "initialized\n"); return 0; out_free_irq: free_irq(host->irq, host); out_free_dma: if (host->dmach) dma_release_channel(host->dmach); out_clk_put: clk_disable(host->clk); clk_put(host->clk); out_iounmap: iounmap(host->base); out_mmc_free: mmc_free_host(mmc); out_release_mem: release_mem_region(iores->start, resource_size(iores)); return ret; } static int mxs_mmc_remove(struct platform_device *pdev) { struct mmc_host *mmc = platform_get_drvdata(pdev); struct mxs_mmc_host *host = mmc_priv(mmc); struct resource *res = host->res; mmc_remove_host(mmc); free_irq(host->irq, host); platform_set_drvdata(pdev, NULL); if (host->dmach) dma_release_channel(host->dmach); clk_disable(host->clk); clk_put(host->clk); iounmap(host->base); mmc_free_host(mmc); release_mem_region(res->start, resource_size(res)); return 0; } #ifdef CONFIG_PM static int mxs_mmc_suspend(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); struct mxs_mmc_host *host = mmc_priv(mmc); int ret = 0; ret = mmc_suspend_host(mmc); clk_disable(host->clk); return ret; } static int mxs_mmc_resume(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); struct mxs_mmc_host *host = mmc_priv(mmc); int ret = 0; clk_enable(host->clk); ret = mmc_resume_host(mmc); return ret; } static const struct dev_pm_ops mxs_mmc_pm_ops = { .suspend = mxs_mmc_suspend, .resume = mxs_mmc_resume, }; #endif static struct platform_driver mxs_mmc_driver = { .probe = mxs_mmc_probe, 
.remove = mxs_mmc_remove, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, #ifdef CONFIG_PM .pm = &mxs_mmc_pm_ops, #endif }, }; static int __init mxs_mmc_init(void) { return platform_driver_register(&mxs_mmc_driver); } static void __exit mxs_mmc_exit(void) { platform_driver_unregister(&mxs_mmc_driver); } module_init(mxs_mmc_init); module_exit(mxs_mmc_exit); MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral"); MODULE_AUTHOR("Freescale Semiconductor"); MODULE_LICENSE("GPL");
gpl-2.0
ryrzy/g2_4.2.2
drivers/video/msm/mdp4_overlay_mddi.c
2197
24860
/* Copyright (c) 2009-2012, Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/time.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/semaphore.h> #include <linux/spinlock.h> #include <linux/fb.h> #include <asm/system.h> #include <asm/mach-types.h> #include <mach/hardware.h> #include "mdp.h" #include "msm_fb.h" #include "mdp4.h" static int mddi_state; #define TOUT_PERIOD HZ /* 1 second */ #define MS_100 (HZ/10) /* 100 ms */ static int vsync_start_y_adjust = 4; #define MAX_CONTROLLER 1 #define VSYNC_EXPIRE_TICK 8 static struct vsycn_ctrl { struct device *dev; int inited; int update_ndx; int expire_tick; int blt_wait; u32 ov_koff; u32 ov_done; u32 dmap_koff; u32 dmap_done; uint32 rdptr_intr_tot; uint32 rdptr_sirq_tot; atomic_t suspend; int wait_vsync_cnt; int blt_change; int blt_free; int blt_end; int uevent; struct mutex update_lock; struct completion ov_comp; struct completion dmap_comp; struct completion vsync_comp; spinlock_t spin_lock; struct msm_fb_data_type *mfd; struct mdp4_overlay_pipe *base_pipe; struct vsync_update vlist[2]; int vsync_enabled; int clk_enabled; int clk_control; int new_update; ktime_t vsync_time; struct work_struct vsync_work; struct work_struct clk_work; } vsync_ctrl_db[MAX_CONTROLLER]; static void vsync_irq_enable(int intr, int term) { unsigned long flag; spin_lock_irqsave(&mdp_spin_lock, flag); /* no need to clear other interrupts for comamnd 
mode */ mdp_intr_mask |= intr; outp32(MDP_INTR_ENABLE, mdp_intr_mask); mdp_enable_irq(term); spin_unlock_irqrestore(&mdp_spin_lock, flag); } static void vsync_irq_disable(int intr, int term) { unsigned long flag; spin_lock_irqsave(&mdp_spin_lock, flag); /* no need to clrear other interrupts for comamnd mode */ mdp_intr_mask &= ~intr; outp32(MDP_INTR_ENABLE, mdp_intr_mask); mdp_disable_irq_nosync(term); spin_unlock_irqrestore(&mdp_spin_lock, flag); } static void mdp4_mddi_blt_ov_update(struct mdp4_overlay_pipe *pipe) { uint32 off, addr; int bpp; char *overlay_base; if (pipe->ov_blt_addr == 0) return; bpp = 3; /* overlay ouput is RGB888 */ off = 0; if (pipe->ov_cnt & 0x01) off = pipe->src_height * pipe->src_width * bpp; addr = pipe->ov_blt_addr + off; /* overlay 0 */ overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */ outpdw(overlay_base + 0x000c, addr); outpdw(overlay_base + 0x001c, addr); } static void mdp4_mddi_blt_dmap_update(struct mdp4_overlay_pipe *pipe) { uint32 off, addr; int bpp; if (pipe->ov_blt_addr == 0) return; bpp = 3; /* overlay ouput is RGB888 */ off = 0; if (pipe->dmap_cnt & 0x01) off = pipe->src_height * pipe->src_width * bpp; addr = pipe->dma_blt_addr + off; /* dmap */ MDP_OUTP(MDP_BASE + 0x90008, addr); } static void mdp4_mddi_wait4dmap(int cndx); static void mdp4_mddi_wait4ov(int cndx); static void mdp4_mddi_do_blt(struct msm_fb_data_type *mfd, int enable) { unsigned long flags; int cndx = 0; struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; int need_wait; vctrl = &vsync_ctrl_db[cndx]; pipe = vctrl->base_pipe; mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0); if (mfd->ov0_wb_buf->write_addr == 0) { pr_err("%s: no blt_base assigned\n", __func__); return; } spin_lock_irqsave(&vctrl->spin_lock, flags); if (enable && pipe->ov_blt_addr == 0) { vctrl->blt_change++; if (vctrl->dmap_koff != vctrl->dmap_done) { INIT_COMPLETION(vctrl->dmap_comp); need_wait = 1; } } else if (enable == 0 && pipe->ov_blt_addr) { vctrl->blt_change++; if 
(vctrl->ov_koff != vctrl->dmap_done) { INIT_COMPLETION(vctrl->dmap_comp); need_wait = 1; } } spin_unlock_irqrestore(&vctrl->spin_lock, flags); if (need_wait) mdp4_mddi_wait4dmap(0); spin_lock_irqsave(&vctrl->spin_lock, flags); if (enable && pipe->ov_blt_addr == 0) { pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr; pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr; pipe->ov_cnt = 0; pipe->dmap_cnt = 0; vctrl->ov_koff = vctrl->dmap_koff; vctrl->ov_done = vctrl->dmap_done; vctrl->blt_free = 0; vctrl->blt_wait = 0; vctrl->blt_end = 0; mdp4_stat.blt_mddi++; } else if (enable == 0 && pipe->ov_blt_addr) { pipe->ov_blt_addr = 0; pipe->dma_blt_addr = 0; vctrl->blt_end = 1; vctrl->blt_free = 4; /* 4 commits to free wb buf */ } pr_debug("%s: changed=%d enable=%d ov_blt_addr=%x\n", __func__, vctrl->blt_change, enable, (int)pipe->ov_blt_addr); spin_unlock_irqrestore(&vctrl->spin_lock, flags); } /* * mdp4_mddi_do_update: * called from thread context */ void mdp4_mddi_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe) { struct vsycn_ctrl *vctrl; struct vsync_update *vp; struct mdp4_overlay_pipe *pp; int undx; if (cndx >= MAX_CONTROLLER) { pr_err("%s: out or range: cndx=%d\n", __func__, cndx); return; } vctrl = &vsync_ctrl_db[cndx]; if (atomic_read(&vctrl->suspend) > 0) return; mutex_lock(&vctrl->update_lock); undx = vctrl->update_ndx; vp = &vctrl->vlist[undx]; pp = &vp->plist[pipe->pipe_ndx - 1]; /* ndx start form 1 */ pr_debug("%s: vndx=%d pipe_ndx=%d expire=%x pid=%d\n", __func__, undx, pipe->pipe_ndx, vctrl->expire_tick, current->pid); *pp = *pipe; /* clone it */ vp->update_cnt++; mutex_unlock(&vctrl->update_lock); mdp4_stat.overlay_play[pipe->mixer_num]++; } static void mdp4_mddi_blt_ov_update(struct mdp4_overlay_pipe *pipe); int mdp4_mddi_pipe_commit(void) { int i, undx; int mixer = 0; struct vsycn_ctrl *vctrl; struct vsync_update *vp; struct mdp4_overlay_pipe *pipe; struct mdp4_overlay_pipe *real_pipe; unsigned long flags; int need_dmap_wait = 0; int need_ov_wait = 0; int 
cnt = 0; vctrl = &vsync_ctrl_db[0]; mutex_lock(&vctrl->update_lock); undx = vctrl->update_ndx; vp = &vctrl->vlist[undx]; pipe = vctrl->base_pipe; mixer = pipe->mixer_num; if (vp->update_cnt == 0) { mutex_unlock(&vctrl->update_lock); return cnt; } vctrl->update_ndx++; vctrl->update_ndx &= 0x01; vp->update_cnt = 0; /* reset */ if (vctrl->blt_free) { vctrl->blt_free--; if (vctrl->blt_free == 0) mdp4_free_writeback_buf(vctrl->mfd, mixer); } mutex_unlock(&vctrl->update_lock); /* free previous committed iommu back to pool */ mdp4_overlay_iommu_unmap_freelist(mixer); spin_lock_irqsave(&vctrl->spin_lock, flags); if (pipe->ov_blt_addr) { /* Blt */ if (vctrl->blt_wait) need_dmap_wait = 1; if (vctrl->ov_koff != vctrl->ov_done) { INIT_COMPLETION(vctrl->ov_comp); need_ov_wait = 1; } } else { /* direct out */ if (vctrl->dmap_koff != vctrl->dmap_done) { INIT_COMPLETION(vctrl->dmap_comp); pr_debug("%s: wait, ok=%d od=%d dk=%d dd=%d cpu=%d\n", __func__, vctrl->ov_koff, vctrl->ov_done, vctrl->dmap_koff, vctrl->dmap_done, smp_processor_id()); need_dmap_wait = 1; } } spin_unlock_irqrestore(&vctrl->spin_lock, flags); if (need_dmap_wait) { pr_debug("%s: wait4dmap\n", __func__); mdp4_mddi_wait4dmap(0); } if (need_ov_wait) { pr_debug("%s: wait4ov\n", __func__); mdp4_mddi_wait4ov(0); } if (pipe->ov_blt_addr) { if (vctrl->blt_end) { vctrl->blt_end = 0; pipe->ov_blt_addr = 0; pipe->dma_blt_addr = 0; } } if (vctrl->blt_change) { mdp4_overlayproc_cfg(pipe); mdp4_overlay_dmap_xy(pipe); vctrl->blt_change = 0; } pipe = vp->plist; for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) { if (pipe->pipe_used) { cnt++; real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx); if (real_pipe && real_pipe->pipe_used) { /* pipe not unset */ mdp4_overlay_vsync_commit(pipe); } /* free previous iommu to freelist * which will be freed at next * pipe_commit */ mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0); pipe->pipe_used = 0; /* clear */ } } mdp4_mixer_stage_commit(mixer); pipe = vctrl->base_pipe; 
spin_lock_irqsave(&vctrl->spin_lock, flags); if (pipe->ov_blt_addr) { mdp4_mddi_blt_ov_update(pipe); pipe->ov_cnt++; vctrl->ov_koff++; vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM); } else { vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM); vctrl->dmap_koff++; } pr_debug("%s: kickoff\n", __func__); /* kickoff overlay engine */ mdp4_stat.kickoff_ov0++; outpdw(MDP_BASE + 0x0004, 0); mb(); /* make sure kickoff ececuted */ spin_unlock_irqrestore(&vctrl->spin_lock, flags); mdp4_stat.overlay_commit[pipe->mixer_num]++; return cnt; } void mdp4_mddi_vsync_ctrl(struct fb_info *info, int enable) { struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par; struct vsycn_ctrl *vctrl; unsigned long flags; int clk_set_on = 0; int cndx = 0; vctrl = &vsync_ctrl_db[cndx]; pr_debug("%s: clk_enabled=%d vsycn_enabeld=%d req=%d\n", __func__, vctrl->clk_enabled, vctrl->vsync_enabled, enable); mutex_lock(&vctrl->update_lock); if (vctrl->vsync_enabled == enable) { mutex_unlock(&vctrl->update_lock); return; } vctrl->vsync_enabled = enable; if (enable) { if (vctrl->clk_enabled == 0) { pr_debug("%s: SET_CLK_ON\n", __func__); mdp_clk_ctrl(1); vctrl->clk_enabled = 1; clk_set_on = 1; } spin_lock_irqsave(&vctrl->spin_lock, flags); vctrl->clk_control = 0; vctrl->expire_tick = 0; vctrl->uevent = 1; vctrl->new_update = 1; if (clk_set_on) { vsync_irq_enable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM); } spin_unlock_irqrestore(&vctrl->spin_lock, flags); mdp4_overlay_update_mddi(mfd); } else { spin_lock_irqsave(&vctrl->spin_lock, flags); vctrl->clk_control = 1; vctrl->uevent = 0; if (vctrl->clk_enabled) vctrl->expire_tick = VSYNC_EXPIRE_TICK; spin_unlock_irqrestore(&vctrl->spin_lock, flags); } mutex_unlock(&vctrl->update_lock); } void mdp4_mddi_wait4vsync(int cndx, long long *vtime) { struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; unsigned long flags; if (cndx >= MAX_CONTROLLER) { pr_err("%s: out or range: cndx=%d\n", __func__, cndx); return; } vctrl = &vsync_ctrl_db[cndx]; 
pipe = vctrl->base_pipe; if (atomic_read(&vctrl->suspend) > 0) { *vtime = -1; return; } spin_lock_irqsave(&vctrl->spin_lock, flags); if (vctrl->wait_vsync_cnt == 0) INIT_COMPLETION(vctrl->vsync_comp); vctrl->wait_vsync_cnt++; spin_unlock_irqrestore(&vctrl->spin_lock, flags); wait_for_completion(&vctrl->vsync_comp); mdp4_stat.wait4vsync0++; *vtime = ktime_to_ns(vctrl->vsync_time); } static void mdp4_mddi_wait4dmap(int cndx) { struct vsycn_ctrl *vctrl; if (cndx >= MAX_CONTROLLER) { pr_err("%s: out or range: cndx=%d\n", __func__, cndx); return; } vctrl = &vsync_ctrl_db[cndx]; if (atomic_read(&vctrl->suspend) > 0) return; wait_for_completion(&vctrl->dmap_comp); } static void mdp4_mddi_wait4ov(int cndx) { struct vsycn_ctrl *vctrl; if (cndx >= MAX_CONTROLLER) { pr_err("%s: out or range: cndx=%d\n", __func__, cndx); return; } vctrl = &vsync_ctrl_db[cndx]; if (atomic_read(&vctrl->suspend) > 0) return; wait_for_completion(&vctrl->ov_comp); } /* * primary_rdptr_isr: * called from interrupt context */ static void primary_rdptr_isr(int cndx) { struct vsycn_ctrl *vctrl; vctrl = &vsync_ctrl_db[cndx]; pr_debug("%s: ISR, cpu=%d\n", __func__, smp_processor_id()); vctrl->rdptr_intr_tot++; vctrl->vsync_time = ktime_get(); spin_lock(&vctrl->spin_lock); if (vctrl->uevent) schedule_work(&vctrl->vsync_work); if (vctrl->wait_vsync_cnt) { complete(&vctrl->vsync_comp); vctrl->wait_vsync_cnt = 0; } if (vctrl->expire_tick) { vctrl->expire_tick--; if (vctrl->expire_tick == 0) schedule_work(&vctrl->clk_work); } spin_unlock(&vctrl->spin_lock); } void mdp4_dmap_done_mddi(int cndx) { struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; int diff; vctrl = &vsync_ctrl_db[cndx]; pipe = vctrl->base_pipe; /* blt enabled */ spin_lock(&vctrl->spin_lock); vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM); vctrl->dmap_done++; diff = vctrl->ov_done - vctrl->dmap_done; pr_debug("%s: ov_koff=%d ov_done=%d dmap_koff=%d dmap_done=%d cpu=%d\n", __func__, vctrl->ov_koff, vctrl->ov_done, vctrl->dmap_koff, 
vctrl->dmap_done, smp_processor_id()); complete_all(&vctrl->dmap_comp); if (diff <= 0) { if (vctrl->blt_wait) vctrl->blt_wait = 0; spin_unlock(&vctrl->spin_lock); return; } /* kick dmap */ mdp4_mddi_blt_dmap_update(pipe); pipe->dmap_cnt++; mdp4_stat.kickoff_dmap++; vctrl->dmap_koff++; vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM); outpdw(MDP_BASE + 0x000c, 0); /* kickoff dmap engine */ mb(); /* make sure kickoff executed */ spin_unlock(&vctrl->spin_lock); } /* * mdp4_overlay0_done_mddi: called from isr */ void mdp4_overlay0_done_mddi(int cndx) { struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; int diff; vctrl = &vsync_ctrl_db[cndx]; pipe = vctrl->base_pipe; spin_lock(&vctrl->spin_lock); vsync_irq_disable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM); vctrl->ov_done++; complete_all(&vctrl->ov_comp); diff = vctrl->ov_done - vctrl->dmap_done; pr_debug("%s: ov_koff=%d ov_done=%d dmap_koff=%d dmap_done=%d cpu=%d\n", __func__, vctrl->ov_koff, vctrl->ov_done, vctrl->dmap_koff, vctrl->dmap_done, smp_processor_id()); if (pipe->ov_blt_addr == 0) { /* blt disabled */ spin_unlock(&vctrl->spin_lock); return; } if (diff > 1) { /* * two overlay_done and none dmap_done yet * let dmap_done kickoff dmap * and put pipe_commit to wait */ vctrl->blt_wait = 1; pr_debug("%s: blt_wait set\n", __func__); spin_unlock(&vctrl->spin_lock); return; } mdp4_mddi_blt_dmap_update(pipe); pipe->dmap_cnt++; mdp4_stat.kickoff_dmap++; vctrl->dmap_koff++; vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM); outpdw(MDP_BASE + 0x000c, 0); /* kickoff dmap engine */ mb(); /* make sure kickoff executed */ spin_unlock(&vctrl->spin_lock); } static void clk_ctrl_work(struct work_struct *work) { struct vsycn_ctrl *vctrl = container_of(work, typeof(*vctrl), clk_work); unsigned long flags; mutex_lock(&vctrl->update_lock); if (vctrl->clk_control && vctrl->clk_enabled) { pr_debug("%s: SET_CLK_OFF\n", __func__); mdp_clk_ctrl(0); spin_lock_irqsave(&vctrl->spin_lock, flags); vsync_irq_disable(INTR_PRIMARY_RDPTR, 
MDP_PRIM_RDPTR_TERM); vctrl->clk_enabled = 0; vctrl->clk_control = 0; spin_unlock_irqrestore(&vctrl->spin_lock, flags); } mutex_unlock(&vctrl->update_lock); } static void send_vsync_work(struct work_struct *work) { struct vsycn_ctrl *vctrl = container_of(work, typeof(*vctrl), vsync_work); char buf[64]; char *envp[2]; snprintf(buf, sizeof(buf), "VSYNC=%llu", ktime_to_ns(vctrl->vsync_time)); envp[0] = buf; envp[1] = NULL; kobject_uevent_env(&vctrl->dev->kobj, KOBJ_CHANGE, envp); } void mdp4_mddi_rdptr_init(int cndx) { struct vsycn_ctrl *vctrl; if (cndx >= MAX_CONTROLLER) { pr_err("%s: out or range: cndx=%d\n", __func__, cndx); return; } vctrl = &vsync_ctrl_db[cndx]; if (vctrl->inited) return; vctrl->inited = 1; vctrl->update_ndx = 0; mutex_init(&vctrl->update_lock); init_completion(&vctrl->ov_comp); init_completion(&vctrl->dmap_comp); init_completion(&vctrl->vsync_comp); spin_lock_init(&vctrl->spin_lock); INIT_WORK(&vctrl->vsync_work, send_vsync_work); INIT_WORK(&vctrl->clk_work, clk_ctrl_work); } void mdp4_primary_rdptr(void) { primary_rdptr_isr(0); } void mdp4_overlay_mddi_state_set(int state) { unsigned long flag; spin_lock_irqsave(&mdp_spin_lock, flag); mddi_state = state; spin_unlock_irqrestore(&mdp_spin_lock, flag); } int mdp4_overlay_mddi_state_get(void) { return mddi_state; } static __u32 msm_fb_line_length(__u32 fb_index, __u32 xres, int bpp) { /* * The adreno GPU hardware requires that the pitch be aligned to * 32 pixels for color buffers, so for the cases where the GPU * is writing directly to fb0, the framebuffer pitch * also needs to be 32 pixel aligned */ if (fb_index == 0) return ALIGN(xres, 32) * bpp; else return xres * bpp; } void mdp4_mddi_vsync_enable(struct msm_fb_data_type *mfd, struct mdp4_overlay_pipe *pipe, int which) { uint32 start_y, data, tear_en; tear_en = (1 << which); if ((mfd->use_mdp_vsync) && (mfd->ibuf.vsync_enable) && (mfd->panel_info.lcd.vsync_enable)) { if (vsync_start_y_adjust <= pipe->dst_y) start_y = pipe->dst_y - 
vsync_start_y_adjust; else start_y = (mfd->total_lcd_lines - 1) - (vsync_start_y_adjust - pipe->dst_y); if (which == 0) MDP_OUTP(MDP_BASE + 0x210, start_y); /* primary */ else MDP_OUTP(MDP_BASE + 0x214, start_y); /* secondary */ data = inpdw(MDP_BASE + 0x20c); data |= tear_en; MDP_OUTP(MDP_BASE + 0x20c, data); } else { data = inpdw(MDP_BASE + 0x20c); data &= ~tear_en; MDP_OUTP(MDP_BASE + 0x20c, data); } } void mdp4_mddi_base_swap(int cndx, struct mdp4_overlay_pipe *pipe) { struct vsycn_ctrl *vctrl; if (cndx >= MAX_CONTROLLER) { pr_err("%s: out or range: cndx=%d\n", __func__, cndx); return; } vctrl = &vsync_ctrl_db[cndx]; vctrl->base_pipe = pipe; } static void mdp4_overlay_setup_pipe_addr(struct msm_fb_data_type *mfd, struct mdp4_overlay_pipe *pipe) { MDPIBUF *iBuf = &mfd->ibuf; struct fb_info *fbi; int bpp; uint8 *src; /* whole screen for base layer */ src = (uint8 *) iBuf->buf; fbi = mfd->fbi; if (pipe->is_3d) { bpp = fbi->var.bits_per_pixel / 8; pipe->src_height = pipe->src_height_3d; pipe->src_width = pipe->src_width_3d; pipe->src_h = pipe->src_height_3d; pipe->src_w = pipe->src_width_3d; pipe->dst_h = pipe->src_height_3d; pipe->dst_w = pipe->src_width_3d; pipe->srcp0_ystride = msm_fb_line_length(0, pipe->src_width, bpp); } else { /* 2D */ pipe->src_height = fbi->var.yres; pipe->src_width = fbi->var.xres; pipe->src_h = fbi->var.yres; pipe->src_w = fbi->var.xres; pipe->dst_h = fbi->var.yres; pipe->dst_w = fbi->var.xres; pipe->srcp0_ystride = fbi->fix.line_length; } pipe->src_y = 0; pipe->src_x = 0; pipe->dst_y = 0; pipe->dst_x = 0; pipe->srcp0_addr = (uint32)src; } void mdp4_overlay_update_mddi(struct msm_fb_data_type *mfd) { int ptype; uint32 mddi_ld_param; uint16 mddi_vdo_packet_reg; struct mdp4_overlay_pipe *pipe; uint32 data; int ret; int cndx = 0; struct vsycn_ctrl *vctrl; if (mfd->key != MFD_KEY) return; vctrl = &vsync_ctrl_db[cndx]; if (vctrl->base_pipe == NULL) { ptype = mdp4_overlay_format2type(mfd->fb_imgType); if (ptype < 0) pr_err("%s: format2type 
failed\n", __func__); pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER0); if (pipe == NULL) { pr_err("%s: pipe_alloc failed\n", __func__); return; } pipe->pipe_used++; pipe->mixer_stage = MDP4_MIXER_STAGE_BASE; pipe->mixer_num = MDP4_MIXER0; pipe->src_format = mfd->fb_imgType; mdp4_overlay_panel_mode(pipe->mixer_num, MDP4_PANEL_MDDI); ret = mdp4_overlay_format2pipe(pipe); if (ret < 0) pr_err("%s: format2type failed\n", __func__); vctrl->base_pipe = pipe; /* keep it */ mdp4_init_writeback_buf(mfd, MDP4_MIXER0); pipe->ov_blt_addr = 0; pipe->dma_blt_addr = 0; } else { pipe = vctrl->base_pipe; } MDP_OUTP(MDP_BASE + 0x021c, 10); /* read pointer */ mddi_ld_param = 0; mddi_vdo_packet_reg = mfd->panel_info.mddi.vdopkt; if (mdp_hw_revision == MDP4_REVISION_V2_1) { data = inpdw(MDP_BASE + 0x0028); data &= ~0x0300; /* bit 8, 9, MASTER4 */ if (mfd->fbi->var.xres == 540) /* qHD, 540x960 */ data |= 0x0200; else data |= 0x0100; MDP_OUTP(MDP_BASE + 0x00028, data); } if (mfd->panel_info.type == MDDI_PANEL) { if (mfd->panel_info.pdest == DISPLAY_1) mddi_ld_param = 0; else mddi_ld_param = 1; } else { mddi_ld_param = 2; } MDP_OUTP(MDP_BASE + 0x00090, mddi_ld_param); if (mfd->panel_info.bpp == 24) MDP_OUTP(MDP_BASE + 0x00094, (MDDI_VDO_PACKET_DESC_24 << 16) | mddi_vdo_packet_reg); else if (mfd->panel_info.bpp == 16) MDP_OUTP(MDP_BASE + 0x00094, (MDDI_VDO_PACKET_DESC_16 << 16) | mddi_vdo_packet_reg); else MDP_OUTP(MDP_BASE + 0x00094, (MDDI_VDO_PACKET_DESC << 16) | mddi_vdo_packet_reg); MDP_OUTP(MDP_BASE + 0x00098, 0x01); mdp4_overlay_setup_pipe_addr(mfd, pipe); mdp4_overlay_rgb_setup(pipe); mdp4_overlay_reg_flush(pipe, 1); mdp4_mixer_stage_up(pipe, 0); mdp4_overlayproc_cfg(pipe); mdp4_overlay_dmap_xy(pipe); mdp4_overlay_dmap_cfg(mfd, 0); mdp4_mixer_stage_commit(pipe->mixer_num); wmb(); } void mdp4_mddi_blt_start(struct msm_fb_data_type *mfd) { mdp4_mddi_do_blt(mfd, 1); } void mdp4_mddi_blt_stop(struct msm_fb_data_type *mfd) { mdp4_mddi_do_blt(mfd, 0); } void mdp4_mddi_overlay_blt(struct 
msm_fb_data_type *mfd, struct msmfb_overlay_blt *req) { mdp4_mddi_do_blt(mfd, req->enable); } int mdp4_mddi_on(struct platform_device *pdev) { int ret = 0; int cndx = 0; struct msm_fb_data_type *mfd; struct vsycn_ctrl *vctrl; pr_debug("%s+:\n", __func__); mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev); vctrl = &vsync_ctrl_db[cndx]; vctrl->mfd = mfd; vctrl->dev = mfd->fbi->dev; mdp_clk_ctrl(1); mdp4_overlay_update_mddi(mfd); mdp_clk_ctrl(0); mdp4_iommu_attach(); atomic_set(&vctrl->suspend, 0); pr_debug("%s-:\n", __func__); return ret; } int mdp4_mddi_off(struct platform_device *pdev) { int ret = 0; int cndx = 0; struct msm_fb_data_type *mfd; struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; pr_debug("%s+:\n", __func__); mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev); vctrl = &vsync_ctrl_db[cndx]; pipe = vctrl->base_pipe; if (pipe == NULL) { pr_err("%s: NO base pipe\n", __func__); return ret; } atomic_set(&vctrl->suspend, 1); /* sanity check, free pipes besides base layer */ mdp4_overlay_unset_mixer(pipe->mixer_num); mdp4_mixer_stage_down(pipe, 1); mdp4_overlay_pipe_free(pipe); vctrl->base_pipe = NULL; if (vctrl->clk_enabled) { /* * in case of suspend, vsycn_ctrl off is not * received from frame work which left clock on * then, clock need to be turned off here */ mdp_clk_ctrl(0); } vctrl->clk_enabled = 0; vctrl->vsync_enabled = 0; vctrl->clk_control = 0; vctrl->expire_tick = 0; vctrl->uevent = 0; vsync_irq_disable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM); pr_debug("%s-:\n", __func__); /* * footswitch off * this will casue all mdp register * to be reset to default * after footswitch on later */ return ret; } void mdp_mddi_overlay_suspend(struct msm_fb_data_type *mfd) { int cndx = 0; struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; vctrl = &vsync_ctrl_db[cndx]; pipe = vctrl->base_pipe; /* dis-engage rgb0 from mixer0 */ if (pipe) { if (mfd->ref_cnt == 0) { /* adb stop */ if (pipe->pipe_type == OVERLAY_TYPE_BF) 
mdp4_overlay_borderfill_stage_down(pipe); /* pipe == rgb1 */ mdp4_overlay_unset_mixer(pipe->mixer_num); vctrl->base_pipe = NULL; } else { mdp4_mixer_stage_down(pipe, 1); mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 1); } } } void mdp4_mddi_overlay(struct msm_fb_data_type *mfd) { int cndx = 0; struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; unsigned long flags; long long xx; vctrl = &vsync_ctrl_db[cndx]; if (!mfd->panel_power_on) return; pipe = vctrl->base_pipe; if (pipe == NULL) { pr_err("%s: NO base pipe\n", __func__); return; } mutex_lock(&vctrl->update_lock); if (!vctrl->clk_enabled) { pr_err("%s: mdp clocks disabled\n", __func__); mutex_unlock(&vctrl->update_lock); return; } mutex_unlock(&vctrl->update_lock); spin_lock_irqsave(&vctrl->spin_lock, flags); if (vctrl->expire_tick) { /* * in the middle of shutting clocks down * delay to allow pan display to go through */ vctrl->expire_tick = VSYNC_EXPIRE_TICK; } spin_unlock_irqrestore(&vctrl->spin_lock, flags); if (pipe->mixer_stage == MDP4_MIXER_STAGE_BASE) { mdp4_mddi_vsync_enable(mfd, pipe, 0); mdp4_overlay_setup_pipe_addr(mfd, pipe); mdp4_mddi_pipe_queue(0, pipe); } mdp4_overlay_mdp_perf_upd(mfd, 1); mutex_lock(&mfd->dma->ov_mutex); mdp4_mddi_pipe_commit(); mutex_unlock(&mfd->dma->ov_mutex); mdp4_mddi_wait4vsync(0, &xx); mdp4_overlay_mdp_perf_upd(mfd, 0); } int mdp4_mddi_overlay_cursor(struct fb_info *info, struct fb_cursor *cursor) { struct msm_fb_data_type *mfd = info->par; mutex_lock(&mfd->dma->ov_mutex); if (mfd && mfd->panel_power_on) { mdp_hw_cursor_update(info, cursor); } mutex_unlock(&mfd->dma->ov_mutex); return 0; }
gpl-2.0
aatjitra/sgs3
drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
2453
10444
/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, * USA * * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <net/mac80211.h> #include "iwl-commands.h" #include "iwl-dev.h" #include "iwl-core.h" #include "iwl-debug.h" #include "iwl-agn.h" #include "iwl-io.h" /****************************************************************************** * * EEPROM related functions * ******************************************************************************/ int iwl_eeprom_check_version(struct iwl_priv *priv) { u16 eeprom_ver; u16 calib_ver; eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION); calib_ver = iwlagn_eeprom_calib_version(priv); if (eeprom_ver < priv->cfg->eeprom_ver || calib_ver < priv->cfg->eeprom_calib_ver) goto err; IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n", eeprom_ver, calib_ver); return 0; err: IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x " "CALIB=0x%x < 0x%x\n", eeprom_ver, priv->cfg->eeprom_ver, calib_ver, priv->cfg->eeprom_calib_ver); return -EINVAL; } int iwl_eeprom_check_sku(struct iwl_priv *priv) { u16 eeprom_sku; u16 radio_cfg; eeprom_sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP); if 
(!priv->cfg->sku) { /* not using sku overwrite */ priv->cfg->sku = ((eeprom_sku & EEPROM_SKU_CAP_BAND_SELECTION) >> EEPROM_SKU_CAP_BAND_POS); if (eeprom_sku & EEPROM_SKU_CAP_11N_ENABLE) priv->cfg->sku |= IWL_SKU_N; } if (!priv->cfg->sku) { IWL_ERR(priv, "Invalid device sku\n"); return -EINVAL; } IWL_INFO(priv, "Device SKU: 0X%x\n", priv->cfg->sku); if (!priv->cfg->valid_tx_ant && !priv->cfg->valid_rx_ant) { /* not using .cfg overwrite */ radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); priv->cfg->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg); priv->cfg->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg); if (!priv->cfg->valid_tx_ant || !priv->cfg->valid_rx_ant) { IWL_ERR(priv, "Invalid chain (0X%x, 0X%x)\n", priv->cfg->valid_tx_ant, priv->cfg->valid_rx_ant); return -EINVAL; } IWL_INFO(priv, "Valid Tx ant: 0X%x, Valid Rx ant: 0X%x\n", priv->cfg->valid_tx_ant, priv->cfg->valid_rx_ant); } /* * for some special cases, * EEPROM did not reflect the correct antenna setting * so overwrite the valid tx/rx antenna from .cfg */ return 0; } void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac) { const u8 *addr = priv->cfg->ops->lib->eeprom_ops.query_addr(priv, EEPROM_MAC_ADDRESS); memcpy(mac, addr, ETH_ALEN); } /** * iwl_get_max_txpower_avg - get the highest tx power from all chains. 
* find the highest tx power from all chains for the channel */ static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv, struct iwl_eeprom_enhanced_txpwr *enhanced_txpower, int element, s8 *max_txpower_in_half_dbm) { s8 max_txpower_avg = 0; /* (dBm) */ /* Take the highest tx power from any valid chains */ if ((priv->cfg->valid_tx_ant & ANT_A) && (enhanced_txpower[element].chain_a_max > max_txpower_avg)) max_txpower_avg = enhanced_txpower[element].chain_a_max; if ((priv->cfg->valid_tx_ant & ANT_B) && (enhanced_txpower[element].chain_b_max > max_txpower_avg)) max_txpower_avg = enhanced_txpower[element].chain_b_max; if ((priv->cfg->valid_tx_ant & ANT_C) && (enhanced_txpower[element].chain_c_max > max_txpower_avg)) max_txpower_avg = enhanced_txpower[element].chain_c_max; if (((priv->cfg->valid_tx_ant == ANT_AB) | (priv->cfg->valid_tx_ant == ANT_BC) | (priv->cfg->valid_tx_ant == ANT_AC)) && (enhanced_txpower[element].mimo2_max > max_txpower_avg)) max_txpower_avg = enhanced_txpower[element].mimo2_max; if ((priv->cfg->valid_tx_ant == ANT_ABC) && (enhanced_txpower[element].mimo3_max > max_txpower_avg)) max_txpower_avg = enhanced_txpower[element].mimo3_max; /* * max. tx power in EEPROM is in 1/2 dBm format * convert from 1/2 dBm to dBm (round-up convert) * but we also do not want to loss 1/2 dBm resolution which * will impact performance */ *max_txpower_in_half_dbm = max_txpower_avg; return (max_txpower_avg & 0x01) + (max_txpower_avg >> 1); } static void iwlcore_eeprom_enh_txp_read_element(struct iwl_priv *priv, struct iwl_eeprom_enhanced_txpwr *txp, s8 max_txpower_avg) { int ch_idx; bool is_ht40 = txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ; enum ieee80211_band band; band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ? 
IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ; for (ch_idx = 0; ch_idx < priv->channel_count; ch_idx++) { struct iwl_channel_info *ch_info = &priv->channel_info[ch_idx]; /* update matching channel or from common data only */ if (txp->channel != 0 && ch_info->channel != txp->channel) continue; /* update matching band only */ if (band != ch_info->band) continue; if (ch_info->max_power_avg < max_txpower_avg && !is_ht40) { ch_info->max_power_avg = max_txpower_avg; ch_info->curr_txpow = max_txpower_avg; ch_info->scan_power = max_txpower_avg; } if (is_ht40 && ch_info->ht40_max_power_avg < max_txpower_avg) ch_info->ht40_max_power_avg = max_txpower_avg; } } #define EEPROM_TXP_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT) #define EEPROM_TXP_ENTRY_LEN sizeof(struct iwl_eeprom_enhanced_txpwr) #define EEPROM_TXP_SZ_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT_SIZE) #define TXP_CHECK_AND_PRINT(x) ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) \ ? # x " " : "") void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv) { struct iwl_eeprom_enhanced_txpwr *txp_array, *txp; int idx, entries; __le16 *txp_len; s8 max_txp_avg, max_txp_avg_halfdbm; BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8); /* the length is in 16-bit words, but we want entries */ txp_len = (__le16 *) iwlagn_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS); entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN; txp_array = (void *) iwlagn_eeprom_query_addr(priv, EEPROM_TXP_OFFS); for (idx = 0; idx < entries; idx++) { txp = &txp_array[idx]; /* skip invalid entries */ if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID)) continue; IWL_DEBUG_EEPROM(priv, "%s %d:\t %s%s%s%s%s%s%s%s (0x%02x)\n", (txp->channel && (txp->flags & IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE)) ? "Common " : (txp->channel) ? 
"Channel" : "Common", (txp->channel), TXP_CHECK_AND_PRINT(VALID), TXP_CHECK_AND_PRINT(BAND_52G), TXP_CHECK_AND_PRINT(OFDM), TXP_CHECK_AND_PRINT(40MHZ), TXP_CHECK_AND_PRINT(HT_AP), TXP_CHECK_AND_PRINT(RES1), TXP_CHECK_AND_PRINT(RES2), TXP_CHECK_AND_PRINT(COMMON_TYPE), txp->flags); IWL_DEBUG_EEPROM(priv, "\t\t chain_A: 0x%02x " "chain_B: 0X%02x chain_C: 0X%02x\n", txp->chain_a_max, txp->chain_b_max, txp->chain_c_max); IWL_DEBUG_EEPROM(priv, "\t\t MIMO2: 0x%02x " "MIMO3: 0x%02x High 20_on_40: 0x%02x " "Low 20_on_40: 0x%02x\n", txp->mimo2_max, txp->mimo3_max, ((txp->delta_20_in_40 & 0xf0) >> 4), (txp->delta_20_in_40 & 0x0f)); max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx, &max_txp_avg_halfdbm); /* * Update the user limit values values to the highest * power supported by any channel */ if (max_txp_avg > priv->tx_power_user_lmt) priv->tx_power_user_lmt = max_txp_avg; if (max_txp_avg_halfdbm > priv->tx_power_lmt_in_half_dbm) priv->tx_power_lmt_in_half_dbm = max_txp_avg_halfdbm; iwlcore_eeprom_enh_txp_read_element(priv, txp, max_txp_avg); } }
gpl-2.0
szezso/VivoION_kernel
drivers/acpi/acpica/exregion.c
3221
15317
/****************************************************************************** * * Module Name: exregion - ACPI default op_region (address space) handlers * *****************************************************************************/ /* * Copyright (C) 2000 - 2011, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acinterp.h" #define _COMPONENT ACPI_EXECUTER ACPI_MODULE_NAME("exregion") /******************************************************************************* * * FUNCTION: acpi_ex_system_memory_space_handler * * PARAMETERS: Function - Read or Write operation * Address - Where in the space to read or write * bit_width - Field width in bits (8, 16, or 32) * Value - Pointer to in or out value * handler_context - Pointer to Handler's context * region_context - Pointer to context specific to the * accessed region * * RETURN: Status * * DESCRIPTION: Handler for the System Memory address space (Op Region) * ******************************************************************************/ acpi_status acpi_ex_system_memory_space_handler(u32 function, acpi_physical_address address, u32 bit_width, u64 *value, void *handler_context, void *region_context) { acpi_status status = AE_OK; void *logical_addr_ptr = NULL; struct acpi_mem_space_context *mem_info = region_context; u32 length; acpi_size map_length; acpi_size page_boundary_map_length; #ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED u32 remainder; #endif ACPI_FUNCTION_TRACE(ex_system_memory_space_handler); /* Validate and translate the bit width */ switch (bit_width) { case 8: length = 1; break; case 16: length = 2; break; case 32: length = 4; break; case 64: length = 8; break; default: ACPI_ERROR((AE_INFO, "Invalid SystemMemory width %u", bit_width)); 
return_ACPI_STATUS(AE_AML_OPERAND_VALUE); } #ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED /* * Hardware does not support non-aligned data transfers, we must verify * the request. */ (void)acpi_ut_short_divide((u64) address, length, NULL, &remainder); if (remainder != 0) { return_ACPI_STATUS(AE_AML_ALIGNMENT); } #endif /* * Does the request fit into the cached memory mapping? * Is 1) Address below the current mapping? OR * 2) Address beyond the current mapping? */ if ((address < mem_info->mapped_physical_address) || (((u64) address + length) > ((u64) mem_info->mapped_physical_address + mem_info->mapped_length))) { /* * The request cannot be resolved by the current memory mapping; * Delete the existing mapping and create a new one. */ if (mem_info->mapped_length) { /* Valid mapping, delete it */ acpi_os_unmap_memory(mem_info->mapped_logical_address, mem_info->mapped_length); } /* * Attempt to map from the requested address to the end of the region. * However, we will never map more than one page, nor will we cross * a page boundary. */ map_length = (acpi_size) ((mem_info->address + mem_info->length) - address); /* * If mapping the entire remaining portion of the region will cross * a page boundary, just map up to the page boundary, do not cross. 
* On some systems, crossing a page boundary while mapping regions * can cause warnings if the pages have different attributes * due to resource management */ page_boundary_map_length = ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address; if (!page_boundary_map_length) { page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE; } if (map_length > page_boundary_map_length) { map_length = page_boundary_map_length; } /* Create a new mapping starting at the address given */ mem_info->mapped_logical_address = acpi_os_map_memory((acpi_physical_address) address, map_length); if (!mem_info->mapped_logical_address) { ACPI_ERROR((AE_INFO, "Could not map memory at 0x%8.8X%8.8X, size %u", ACPI_FORMAT_NATIVE_UINT(address), (u32) map_length)); mem_info->mapped_length = 0; return_ACPI_STATUS(AE_NO_MEMORY); } /* Save the physical address and mapping size */ mem_info->mapped_physical_address = address; mem_info->mapped_length = map_length; } /* * Generate a logical pointer corresponding to the address we want to * access */ logical_addr_ptr = mem_info->mapped_logical_address + ((u64) address - (u64) mem_info->mapped_physical_address); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "System-Memory (width %u) R/W %u Address=%8.8X%8.8X\n", bit_width, function, ACPI_FORMAT_NATIVE_UINT(address))); /* * Perform the memory read or write * * Note: For machines that do not support non-aligned transfers, the target * address was checked for alignment above. We do not attempt to break the * transfer up into smaller (byte-size) chunks because the AML specifically * asked for a transfer width that the hardware may require. 
*/ switch (function) { case ACPI_READ: *value = 0; switch (bit_width) { case 8: *value = (u64) ACPI_GET8(logical_addr_ptr); break; case 16: *value = (u64) ACPI_GET16(logical_addr_ptr); break; case 32: *value = (u64) ACPI_GET32(logical_addr_ptr); break; case 64: *value = (u64) ACPI_GET64(logical_addr_ptr); break; default: /* bit_width was already validated */ break; } break; case ACPI_WRITE: switch (bit_width) { case 8: ACPI_SET8(logical_addr_ptr) = (u8) * value; break; case 16: ACPI_SET16(logical_addr_ptr) = (u16) * value; break; case 32: ACPI_SET32(logical_addr_ptr) = (u32) * value; break; case 64: ACPI_SET64(logical_addr_ptr) = (u64) * value; break; default: /* bit_width was already validated */ break; } break; default: status = AE_BAD_PARAMETER; break; } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_system_io_space_handler * * PARAMETERS: Function - Read or Write operation * Address - Where in the space to read or write * bit_width - Field width in bits (8, 16, or 32) * Value - Pointer to in or out value * handler_context - Pointer to Handler's context * region_context - Pointer to context specific to the * accessed region * * RETURN: Status * * DESCRIPTION: Handler for the System IO address space (Op Region) * ******************************************************************************/ acpi_status acpi_ex_system_io_space_handler(u32 function, acpi_physical_address address, u32 bit_width, u64 *value, void *handler_context, void *region_context) { acpi_status status = AE_OK; u32 value32; ACPI_FUNCTION_TRACE(ex_system_io_space_handler); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "System-IO (width %u) R/W %u Address=%8.8X%8.8X\n", bit_width, function, ACPI_FORMAT_NATIVE_UINT(address))); /* Decode the function parameter */ switch (function) { case ACPI_READ: status = acpi_hw_read_port((acpi_io_address) address, &value32, bit_width); *value = value32; break; case ACPI_WRITE: status = 
acpi_hw_write_port((acpi_io_address) address, (u32) * value, bit_width); break; default: status = AE_BAD_PARAMETER; break; } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_pci_config_space_handler * * PARAMETERS: Function - Read or Write operation * Address - Where in the space to read or write * bit_width - Field width in bits (8, 16, or 32) * Value - Pointer to in or out value * handler_context - Pointer to Handler's context * region_context - Pointer to context specific to the * accessed region * * RETURN: Status * * DESCRIPTION: Handler for the PCI Config address space (Op Region) * ******************************************************************************/ acpi_status acpi_ex_pci_config_space_handler(u32 function, acpi_physical_address address, u32 bit_width, u64 *value, void *handler_context, void *region_context) { acpi_status status = AE_OK; struct acpi_pci_id *pci_id; u16 pci_register; ACPI_FUNCTION_TRACE(ex_pci_config_space_handler); /* * The arguments to acpi_os(Read|Write)pci_configuration are: * * pci_segment is the PCI bus segment range 0-31 * pci_bus is the PCI bus number range 0-255 * pci_device is the PCI device number range 0-31 * pci_function is the PCI device function number * pci_register is the Config space register range 0-255 bytes * * Value - input value for write, output address for read * */ pci_id = (struct acpi_pci_id *)region_context; pci_register = (u16) (u32) address; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Pci-Config %u (%u) Seg(%04x) Bus(%04x) Dev(%04x) Func(%04x) Reg(%04x)\n", function, bit_width, pci_id->segment, pci_id->bus, pci_id->device, pci_id->function, pci_register)); switch (function) { case ACPI_READ: status = acpi_os_read_pci_configuration(pci_id, pci_register, value, bit_width); break; case ACPI_WRITE: status = acpi_os_write_pci_configuration(pci_id, pci_register, *value, bit_width); break; default: status = AE_BAD_PARAMETER; break; } 
return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_cmos_space_handler * * PARAMETERS: Function - Read or Write operation * Address - Where in the space to read or write * bit_width - Field width in bits (8, 16, or 32) * Value - Pointer to in or out value * handler_context - Pointer to Handler's context * region_context - Pointer to context specific to the * accessed region * * RETURN: Status * * DESCRIPTION: Handler for the CMOS address space (Op Region) * ******************************************************************************/ acpi_status acpi_ex_cmos_space_handler(u32 function, acpi_physical_address address, u32 bit_width, u64 *value, void *handler_context, void *region_context) { acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(ex_cmos_space_handler); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_pci_bar_space_handler * * PARAMETERS: Function - Read or Write operation * Address - Where in the space to read or write * bit_width - Field width in bits (8, 16, or 32) * Value - Pointer to in or out value * handler_context - Pointer to Handler's context * region_context - Pointer to context specific to the * accessed region * * RETURN: Status * * DESCRIPTION: Handler for the PCI bar_target address space (Op Region) * ******************************************************************************/ acpi_status acpi_ex_pci_bar_space_handler(u32 function, acpi_physical_address address, u32 bit_width, u64 *value, void *handler_context, void *region_context) { acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(ex_pci_bar_space_handler); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_data_table_space_handler * * PARAMETERS: Function - Read or Write operation * Address - Where in the space to read or write * bit_width - 
Field width in bits (8, 16, or 32) * Value - Pointer to in or out value * handler_context - Pointer to Handler's context * region_context - Pointer to context specific to the * accessed region * * RETURN: Status * * DESCRIPTION: Handler for the Data Table address space (Op Region) * ******************************************************************************/ acpi_status acpi_ex_data_table_space_handler(u32 function, acpi_physical_address address, u32 bit_width, u64 *value, void *handler_context, void *region_context) { ACPI_FUNCTION_TRACE(ex_data_table_space_handler); /* * Perform the memory read or write. The bit_width was already * validated. */ switch (function) { case ACPI_READ: ACPI_MEMCPY(ACPI_CAST_PTR(char, value), ACPI_PHYSADDR_TO_PTR(address), ACPI_DIV_8(bit_width)); break; case ACPI_WRITE: ACPI_MEMCPY(ACPI_PHYSADDR_TO_PTR(address), ACPI_CAST_PTR(char, value), ACPI_DIV_8(bit_width)); break; default: return_ACPI_STATUS(AE_BAD_PARAMETER); } return_ACPI_STATUS(AE_OK); }
gpl-2.0
dmeadows013/furry-hipster
drivers/video/intelfb/intelfbhw.c
3989
52538
/* * intelfb * * Linux framebuffer driver for Intel(R) 865G integrated graphics chips. * * Copyright © 2002, 2003 David Dawes <dawes@xfree86.org> * 2004 Sylvain Meyer * * This driver consists of two parts. The first part (intelfbdrv.c) provides * the basic fbdev interfaces, is derived in part from the radeonfb and * vesafb drivers, and is covered by the GPL. The second part (intelfbhw.c) * provides the code to program the hardware. Most of it is derived from * the i810/i830 XFree86 driver. The HW-specific code is covered here * under a dual license (GPL and MIT/XFree86 license). * * Author: David Dawes * */ /* $DHD: intelfb/intelfbhw.c,v 1.9 2003/06/27 15:06:25 dawes Exp $ */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/delay.h> #include <linux/fb.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/vmalloc.h> #include <linux/pagemap.h> #include <linux/interrupt.h> #include <asm/io.h> #include "intelfb.h" #include "intelfbhw.h" struct pll_min_max { int min_m, max_m, min_m1, max_m1; int min_m2, max_m2, min_n, max_n; int min_p, max_p, min_p1, max_p1; int min_vco, max_vco, p_transition_clk, ref_clk; int p_inc_lo, p_inc_hi; }; #define PLLS_I8xx 0 #define PLLS_I9xx 1 #define PLLS_MAX 2 static struct pll_min_max plls[PLLS_MAX] = { { 108, 140, 18, 26, 6, 16, 3, 16, 4, 128, 0, 31, 930000, 1400000, 165000, 48000, 4, 2 }, /* I8xx */ { 75, 120, 10, 20, 5, 9, 4, 7, 5, 80, 1, 8, 1400000, 2800000, 200000, 96000, 10, 5 } /* I9xx */ }; int intelfbhw_get_chipset(struct pci_dev *pdev, struct intelfb_info *dinfo) { u32 tmp; if (!pdev || !dinfo) return 1; switch (pdev->device) { case PCI_DEVICE_ID_INTEL_830M: dinfo->name = "Intel(R) 830M"; dinfo->chipset = INTEL_830M; dinfo->mobile = 1; dinfo->pll_index = PLLS_I8xx; return 0; case PCI_DEVICE_ID_INTEL_845G: dinfo->name = "Intel(R) 845G"; dinfo->chipset = INTEL_845G; dinfo->mobile = 0; dinfo->pll_index = 
PLLS_I8xx; return 0; case PCI_DEVICE_ID_INTEL_854: dinfo->mobile = 1; dinfo->name = "Intel(R) 854"; dinfo->chipset = INTEL_854; return 0; case PCI_DEVICE_ID_INTEL_85XGM: tmp = 0; dinfo->mobile = 1; dinfo->pll_index = PLLS_I8xx; pci_read_config_dword(pdev, INTEL_85X_CAPID, &tmp); switch ((tmp >> INTEL_85X_VARIANT_SHIFT) & INTEL_85X_VARIANT_MASK) { case INTEL_VAR_855GME: dinfo->name = "Intel(R) 855GME"; dinfo->chipset = INTEL_855GME; return 0; case INTEL_VAR_855GM: dinfo->name = "Intel(R) 855GM"; dinfo->chipset = INTEL_855GM; return 0; case INTEL_VAR_852GME: dinfo->name = "Intel(R) 852GME"; dinfo->chipset = INTEL_852GME; return 0; case INTEL_VAR_852GM: dinfo->name = "Intel(R) 852GM"; dinfo->chipset = INTEL_852GM; return 0; default: dinfo->name = "Intel(R) 852GM/855GM"; dinfo->chipset = INTEL_85XGM; return 0; } break; case PCI_DEVICE_ID_INTEL_865G: dinfo->name = "Intel(R) 865G"; dinfo->chipset = INTEL_865G; dinfo->mobile = 0; dinfo->pll_index = PLLS_I8xx; return 0; case PCI_DEVICE_ID_INTEL_915G: dinfo->name = "Intel(R) 915G"; dinfo->chipset = INTEL_915G; dinfo->mobile = 0; dinfo->pll_index = PLLS_I9xx; return 0; case PCI_DEVICE_ID_INTEL_915GM: dinfo->name = "Intel(R) 915GM"; dinfo->chipset = INTEL_915GM; dinfo->mobile = 1; dinfo->pll_index = PLLS_I9xx; return 0; case PCI_DEVICE_ID_INTEL_945G: dinfo->name = "Intel(R) 945G"; dinfo->chipset = INTEL_945G; dinfo->mobile = 0; dinfo->pll_index = PLLS_I9xx; return 0; case PCI_DEVICE_ID_INTEL_945GM: dinfo->name = "Intel(R) 945GM"; dinfo->chipset = INTEL_945GM; dinfo->mobile = 1; dinfo->pll_index = PLLS_I9xx; return 0; case PCI_DEVICE_ID_INTEL_945GME: dinfo->name = "Intel(R) 945GME"; dinfo->chipset = INTEL_945GME; dinfo->mobile = 1; dinfo->pll_index = PLLS_I9xx; return 0; case PCI_DEVICE_ID_INTEL_965G: dinfo->name = "Intel(R) 965G"; dinfo->chipset = INTEL_965G; dinfo->mobile = 0; dinfo->pll_index = PLLS_I9xx; return 0; case PCI_DEVICE_ID_INTEL_965GM: dinfo->name = "Intel(R) 965GM"; dinfo->chipset = INTEL_965GM; dinfo->mobile = 
1; dinfo->pll_index = PLLS_I9xx; return 0; default: return 1; } } int intelfbhw_get_memory(struct pci_dev *pdev, int *aperture_size, int *stolen_size) { struct pci_dev *bridge_dev; u16 tmp; int stolen_overhead; if (!pdev || !aperture_size || !stolen_size) return 1; /* Find the bridge device. It is always 0:0.0 */ if (!(bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)))) { ERR_MSG("cannot find bridge device\n"); return 1; } /* Get the fb aperture size and "stolen" memory amount. */ tmp = 0; pci_read_config_word(bridge_dev, INTEL_GMCH_CTRL, &tmp); pci_dev_put(bridge_dev); switch (pdev->device) { case PCI_DEVICE_ID_INTEL_915G: case PCI_DEVICE_ID_INTEL_915GM: case PCI_DEVICE_ID_INTEL_945G: case PCI_DEVICE_ID_INTEL_945GM: case PCI_DEVICE_ID_INTEL_945GME: case PCI_DEVICE_ID_INTEL_965G: case PCI_DEVICE_ID_INTEL_965GM: /* 915, 945 and 965 chipsets support a 256MB aperture. Aperture size is determined by inspected the base address of the aperture. */ if (pci_resource_start(pdev, 2) & 0x08000000) *aperture_size = MB(128); else *aperture_size = MB(256); break; default: if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M) *aperture_size = MB(64); else *aperture_size = MB(128); break; } /* Stolen memory size is reduced by the GTT and the popup. GTT is 1K per MB of aperture size, and popup is 4K. 
*/ stolen_overhead = (*aperture_size / MB(1)) + 4; switch(pdev->device) { case PCI_DEVICE_ID_INTEL_830M: case PCI_DEVICE_ID_INTEL_845G: switch (tmp & INTEL_830_GMCH_GMS_MASK) { case INTEL_830_GMCH_GMS_STOLEN_512: *stolen_size = KB(512) - KB(stolen_overhead); return 0; case INTEL_830_GMCH_GMS_STOLEN_1024: *stolen_size = MB(1) - KB(stolen_overhead); return 0; case INTEL_830_GMCH_GMS_STOLEN_8192: *stolen_size = MB(8) - KB(stolen_overhead); return 0; case INTEL_830_GMCH_GMS_LOCAL: ERR_MSG("only local memory found\n"); return 1; case INTEL_830_GMCH_GMS_DISABLED: ERR_MSG("video memory is disabled\n"); return 1; default: ERR_MSG("unexpected GMCH_GMS value: 0x%02x\n", tmp & INTEL_830_GMCH_GMS_MASK); return 1; } break; default: switch (tmp & INTEL_855_GMCH_GMS_MASK) { case INTEL_855_GMCH_GMS_STOLEN_1M: *stolen_size = MB(1) - KB(stolen_overhead); return 0; case INTEL_855_GMCH_GMS_STOLEN_4M: *stolen_size = MB(4) - KB(stolen_overhead); return 0; case INTEL_855_GMCH_GMS_STOLEN_8M: *stolen_size = MB(8) - KB(stolen_overhead); return 0; case INTEL_855_GMCH_GMS_STOLEN_16M: *stolen_size = MB(16) - KB(stolen_overhead); return 0; case INTEL_855_GMCH_GMS_STOLEN_32M: *stolen_size = MB(32) - KB(stolen_overhead); return 0; case INTEL_915G_GMCH_GMS_STOLEN_48M: *stolen_size = MB(48) - KB(stolen_overhead); return 0; case INTEL_915G_GMCH_GMS_STOLEN_64M: *stolen_size = MB(64) - KB(stolen_overhead); return 0; case INTEL_855_GMCH_GMS_DISABLED: ERR_MSG("video memory is disabled\n"); return 0; default: ERR_MSG("unexpected GMCH_GMS value: 0x%02x\n", tmp & INTEL_855_GMCH_GMS_MASK); return 1; } } } int intelfbhw_check_non_crt(struct intelfb_info *dinfo) { int dvo = 0; if (INREG(LVDS) & PORT_ENABLE) dvo |= LVDS_PORT; if (INREG(DVOA) & PORT_ENABLE) dvo |= DVOA_PORT; if (INREG(DVOB) & PORT_ENABLE) dvo |= DVOB_PORT; if (INREG(DVOC) & PORT_ENABLE) dvo |= DVOC_PORT; return dvo; } const char * intelfbhw_dvo_to_string(int dvo) { if (dvo & DVOA_PORT) return "DVO port A"; else if (dvo & DVOB_PORT) return "DVO 
port B"; else if (dvo & DVOC_PORT) return "DVO port C"; else if (dvo & LVDS_PORT) return "LVDS port"; else return NULL; } int intelfbhw_validate_mode(struct intelfb_info *dinfo, struct fb_var_screeninfo *var) { int bytes_per_pixel; int tmp; #if VERBOSE > 0 DBG_MSG("intelfbhw_validate_mode\n"); #endif bytes_per_pixel = var->bits_per_pixel / 8; if (bytes_per_pixel == 3) bytes_per_pixel = 4; /* Check if enough video memory. */ tmp = var->yres_virtual * var->xres_virtual * bytes_per_pixel; if (tmp > dinfo->fb.size) { WRN_MSG("Not enough video ram for mode " "(%d KByte vs %d KByte).\n", BtoKB(tmp), BtoKB(dinfo->fb.size)); return 1; } /* Check if x/y limits are OK. */ if (var->xres - 1 > HACTIVE_MASK) { WRN_MSG("X resolution too large (%d vs %d).\n", var->xres, HACTIVE_MASK + 1); return 1; } if (var->yres - 1 > VACTIVE_MASK) { WRN_MSG("Y resolution too large (%d vs %d).\n", var->yres, VACTIVE_MASK + 1); return 1; } if (var->xres < 4) { WRN_MSG("X resolution too small (%d vs 4).\n", var->xres); return 1; } if (var->yres < 4) { WRN_MSG("Y resolution too small (%d vs 4).\n", var->yres); return 1; } /* Check for doublescan modes. */ if (var->vmode & FB_VMODE_DOUBLE) { WRN_MSG("Mode is double-scan.\n"); return 1; } if ((var->vmode & FB_VMODE_INTERLACED) && (var->yres & 1)) { WRN_MSG("Odd number of lines in interlaced mode\n"); return 1; } /* Check if clock is OK. 
*/ tmp = 1000000000 / var->pixclock; if (tmp < MIN_CLOCK) { WRN_MSG("Pixel clock is too low (%d MHz vs %d MHz).\n", (tmp + 500) / 1000, MIN_CLOCK / 1000); return 1; } if (tmp > MAX_CLOCK) { WRN_MSG("Pixel clock is too high (%d MHz vs %d MHz).\n", (tmp + 500) / 1000, MAX_CLOCK / 1000); return 1; } return 0; } int intelfbhw_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { struct intelfb_info *dinfo = GET_DINFO(info); u32 offset, xoffset, yoffset; #if VERBOSE > 0 DBG_MSG("intelfbhw_pan_display\n"); #endif xoffset = ROUND_DOWN_TO(var->xoffset, 8); yoffset = var->yoffset; if ((xoffset + var->xres > var->xres_virtual) || (yoffset + var->yres > var->yres_virtual)) return -EINVAL; offset = (yoffset * dinfo->pitch) + (xoffset * var->bits_per_pixel) / 8; offset += dinfo->fb.offset << 12; dinfo->vsync.pan_offset = offset; if ((var->activate & FB_ACTIVATE_VBL) && !intelfbhw_enable_irq(dinfo)) dinfo->vsync.pan_display = 1; else { dinfo->vsync.pan_display = 0; OUTREG(DSPABASE, offset); } return 0; } /* Blank the screen. 
*/ void intelfbhw_do_blank(int blank, struct fb_info *info) { struct intelfb_info *dinfo = GET_DINFO(info); u32 tmp; #if VERBOSE > 0 DBG_MSG("intelfbhw_do_blank: blank is %d\n", blank); #endif /* Turn plane A on or off */ tmp = INREG(DSPACNTR); if (blank) tmp &= ~DISPPLANE_PLANE_ENABLE; else tmp |= DISPPLANE_PLANE_ENABLE; OUTREG(DSPACNTR, tmp); /* Flush */ tmp = INREG(DSPABASE); OUTREG(DSPABASE, tmp); /* Turn off/on the HW cursor */ #if VERBOSE > 0 DBG_MSG("cursor_on is %d\n", dinfo->cursor_on); #endif if (dinfo->cursor_on) { if (blank) intelfbhw_cursor_hide(dinfo); else intelfbhw_cursor_show(dinfo); dinfo->cursor_on = 1; } dinfo->cursor_blanked = blank; /* Set DPMS level */ tmp = INREG(ADPA) & ~ADPA_DPMS_CONTROL_MASK; switch (blank) { case FB_BLANK_UNBLANK: case FB_BLANK_NORMAL: tmp |= ADPA_DPMS_D0; break; case FB_BLANK_VSYNC_SUSPEND: tmp |= ADPA_DPMS_D1; break; case FB_BLANK_HSYNC_SUSPEND: tmp |= ADPA_DPMS_D2; break; case FB_BLANK_POWERDOWN: tmp |= ADPA_DPMS_D3; break; } OUTREG(ADPA, tmp); return; } /* Check which pipe is connected to an active display plane. */ int intelfbhw_active_pipe(const struct intelfb_hwstate *hw) { int pipe = -1; /* keep old default behaviour - prefer PIPE_A */ if (hw->disp_b_ctrl & DISPPLANE_PLANE_ENABLE) { pipe = (hw->disp_b_ctrl >> DISPPLANE_SEL_PIPE_SHIFT); pipe &= PIPE_MASK; if (unlikely(pipe == PIPE_A)) return PIPE_A; } if (hw->disp_a_ctrl & DISPPLANE_PLANE_ENABLE) { pipe = (hw->disp_a_ctrl >> DISPPLANE_SEL_PIPE_SHIFT); pipe &= PIPE_MASK; if (likely(pipe == PIPE_A)) return PIPE_A; } /* Impossible that no pipe is selected - return PIPE_A */ WARN_ON(pipe == -1); if (unlikely(pipe == -1)) pipe = PIPE_A; return pipe; } void intelfbhw_setcolreg(struct intelfb_info *dinfo, unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp) { u32 palette_reg = (dinfo->pipe == PIPE_A) ? 
PALETTE_A : PALETTE_B; #if VERBOSE > 0 DBG_MSG("intelfbhw_setcolreg: %d: (%d, %d, %d)\n", regno, red, green, blue); #endif OUTREG(palette_reg + (regno << 2), (red << PALETTE_8_RED_SHIFT) | (green << PALETTE_8_GREEN_SHIFT) | (blue << PALETTE_8_BLUE_SHIFT)); } int intelfbhw_read_hw_state(struct intelfb_info *dinfo, struct intelfb_hwstate *hw, int flag) { int i; #if VERBOSE > 0 DBG_MSG("intelfbhw_read_hw_state\n"); #endif if (!hw || !dinfo) return -1; /* Read in as much of the HW state as possible. */ hw->vga0_divisor = INREG(VGA0_DIVISOR); hw->vga1_divisor = INREG(VGA1_DIVISOR); hw->vga_pd = INREG(VGAPD); hw->dpll_a = INREG(DPLL_A); hw->dpll_b = INREG(DPLL_B); hw->fpa0 = INREG(FPA0); hw->fpa1 = INREG(FPA1); hw->fpb0 = INREG(FPB0); hw->fpb1 = INREG(FPB1); if (flag == 1) return flag; #if 0 /* This seems to be a problem with the 852GM/855GM */ for (i = 0; i < PALETTE_8_ENTRIES; i++) { hw->palette_a[i] = INREG(PALETTE_A + (i << 2)); hw->palette_b[i] = INREG(PALETTE_B + (i << 2)); } #endif if (flag == 2) return flag; hw->htotal_a = INREG(HTOTAL_A); hw->hblank_a = INREG(HBLANK_A); hw->hsync_a = INREG(HSYNC_A); hw->vtotal_a = INREG(VTOTAL_A); hw->vblank_a = INREG(VBLANK_A); hw->vsync_a = INREG(VSYNC_A); hw->src_size_a = INREG(SRC_SIZE_A); hw->bclrpat_a = INREG(BCLRPAT_A); hw->htotal_b = INREG(HTOTAL_B); hw->hblank_b = INREG(HBLANK_B); hw->hsync_b = INREG(HSYNC_B); hw->vtotal_b = INREG(VTOTAL_B); hw->vblank_b = INREG(VBLANK_B); hw->vsync_b = INREG(VSYNC_B); hw->src_size_b = INREG(SRC_SIZE_B); hw->bclrpat_b = INREG(BCLRPAT_B); if (flag == 3) return flag; hw->adpa = INREG(ADPA); hw->dvoa = INREG(DVOA); hw->dvob = INREG(DVOB); hw->dvoc = INREG(DVOC); hw->dvoa_srcdim = INREG(DVOA_SRCDIM); hw->dvob_srcdim = INREG(DVOB_SRCDIM); hw->dvoc_srcdim = INREG(DVOC_SRCDIM); hw->lvds = INREG(LVDS); if (flag == 4) return flag; hw->pipe_a_conf = INREG(PIPEACONF); hw->pipe_b_conf = INREG(PIPEBCONF); hw->disp_arb = INREG(DISPARB); if (flag == 5) return flag; hw->cursor_a_control = 
INREG(CURSOR_A_CONTROL); hw->cursor_b_control = INREG(CURSOR_B_CONTROL); hw->cursor_a_base = INREG(CURSOR_A_BASEADDR); hw->cursor_b_base = INREG(CURSOR_B_BASEADDR); if (flag == 6) return flag; for (i = 0; i < 4; i++) { hw->cursor_a_palette[i] = INREG(CURSOR_A_PALETTE0 + (i << 2)); hw->cursor_b_palette[i] = INREG(CURSOR_B_PALETTE0 + (i << 2)); } if (flag == 7) return flag; hw->cursor_size = INREG(CURSOR_SIZE); if (flag == 8) return flag; hw->disp_a_ctrl = INREG(DSPACNTR); hw->disp_b_ctrl = INREG(DSPBCNTR); hw->disp_a_base = INREG(DSPABASE); hw->disp_b_base = INREG(DSPBBASE); hw->disp_a_stride = INREG(DSPASTRIDE); hw->disp_b_stride = INREG(DSPBSTRIDE); if (flag == 9) return flag; hw->vgacntrl = INREG(VGACNTRL); if (flag == 10) return flag; hw->add_id = INREG(ADD_ID); if (flag == 11) return flag; for (i = 0; i < 7; i++) { hw->swf0x[i] = INREG(SWF00 + (i << 2)); hw->swf1x[i] = INREG(SWF10 + (i << 2)); if (i < 3) hw->swf3x[i] = INREG(SWF30 + (i << 2)); } for (i = 0; i < 8; i++) hw->fence[i] = INREG(FENCE + (i << 2)); hw->instpm = INREG(INSTPM); hw->mem_mode = INREG(MEM_MODE); hw->fw_blc_0 = INREG(FW_BLC_0); hw->fw_blc_1 = INREG(FW_BLC_1); hw->hwstam = INREG16(HWSTAM); hw->ier = INREG16(IER); hw->iir = INREG16(IIR); hw->imr = INREG16(IMR); return 0; } static int calc_vclock3(int index, int m, int n, int p) { if (p == 0 || n == 0) return 0; return plls[index].ref_clk * m / n / p; } static int calc_vclock(int index, int m1, int m2, int n, int p1, int p2, int lvds) { struct pll_min_max *pll = &plls[index]; u32 m, vco, p; m = (5 * (m1 + 2)) + (m2 + 2); n += 2; vco = pll->ref_clk * m / n; if (index == PLLS_I8xx) p = ((p1 + 2) * (1 << (p2 + 1))); else p = ((p1) * (p2 ? 
5 : 10)); return vco / p; } #if REGDUMP static void intelfbhw_get_p1p2(struct intelfb_info *dinfo, int dpll, int *o_p1, int *o_p2) { int p1, p2; if (IS_I9XX(dinfo)) { if (dpll & DPLL_P1_FORCE_DIV2) p1 = 1; else p1 = (dpll >> DPLL_P1_SHIFT) & 0xff; p1 = ffs(p1); p2 = (dpll >> DPLL_I9XX_P2_SHIFT) & DPLL_P2_MASK; } else { if (dpll & DPLL_P1_FORCE_DIV2) p1 = 0; else p1 = (dpll >> DPLL_P1_SHIFT) & DPLL_P1_MASK; p2 = (dpll >> DPLL_P2_SHIFT) & DPLL_P2_MASK; } *o_p1 = p1; *o_p2 = p2; } #endif void intelfbhw_print_hw_state(struct intelfb_info *dinfo, struct intelfb_hwstate *hw) { #if REGDUMP int i, m1, m2, n, p1, p2; int index = dinfo->pll_index; DBG_MSG("intelfbhw_print_hw_state\n"); if (!hw) return; /* Read in as much of the HW state as possible. */ printk("hw state dump start\n"); printk(" VGA0_DIVISOR: 0x%08x\n", hw->vga0_divisor); printk(" VGA1_DIVISOR: 0x%08x\n", hw->vga1_divisor); printk(" VGAPD: 0x%08x\n", hw->vga_pd); n = (hw->vga0_divisor >> FP_N_DIVISOR_SHIFT) & FP_DIVISOR_MASK; m1 = (hw->vga0_divisor >> FP_M1_DIVISOR_SHIFT) & FP_DIVISOR_MASK; m2 = (hw->vga0_divisor >> FP_M2_DIVISOR_SHIFT) & FP_DIVISOR_MASK; intelfbhw_get_p1p2(dinfo, hw->vga_pd, &p1, &p2); printk(" VGA0: (m1, m2, n, p1, p2) = (%d, %d, %d, %d, %d)\n", m1, m2, n, p1, p2); printk(" VGA0: clock is %d\n", calc_vclock(index, m1, m2, n, p1, p2, 0)); n = (hw->vga1_divisor >> FP_N_DIVISOR_SHIFT) & FP_DIVISOR_MASK; m1 = (hw->vga1_divisor >> FP_M1_DIVISOR_SHIFT) & FP_DIVISOR_MASK; m2 = (hw->vga1_divisor >> FP_M2_DIVISOR_SHIFT) & FP_DIVISOR_MASK; intelfbhw_get_p1p2(dinfo, hw->vga_pd, &p1, &p2); printk(" VGA1: (m1, m2, n, p1, p2) = (%d, %d, %d, %d, %d)\n", m1, m2, n, p1, p2); printk(" VGA1: clock is %d\n", calc_vclock(index, m1, m2, n, p1, p2, 0)); printk(" DPLL_A: 0x%08x\n", hw->dpll_a); printk(" DPLL_B: 0x%08x\n", hw->dpll_b); printk(" FPA0: 0x%08x\n", hw->fpa0); printk(" FPA1: 0x%08x\n", hw->fpa1); printk(" FPB0: 0x%08x\n", hw->fpb0); printk(" FPB1: 0x%08x\n", hw->fpb1); n = (hw->fpa0 >> 
FP_N_DIVISOR_SHIFT) & FP_DIVISOR_MASK; m1 = (hw->fpa0 >> FP_M1_DIVISOR_SHIFT) & FP_DIVISOR_MASK; m2 = (hw->fpa0 >> FP_M2_DIVISOR_SHIFT) & FP_DIVISOR_MASK; intelfbhw_get_p1p2(dinfo, hw->dpll_a, &p1, &p2); printk(" PLLA0: (m1, m2, n, p1, p2) = (%d, %d, %d, %d, %d)\n", m1, m2, n, p1, p2); printk(" PLLA0: clock is %d\n", calc_vclock(index, m1, m2, n, p1, p2, 0)); n = (hw->fpa1 >> FP_N_DIVISOR_SHIFT) & FP_DIVISOR_MASK; m1 = (hw->fpa1 >> FP_M1_DIVISOR_SHIFT) & FP_DIVISOR_MASK; m2 = (hw->fpa1 >> FP_M2_DIVISOR_SHIFT) & FP_DIVISOR_MASK; intelfbhw_get_p1p2(dinfo, hw->dpll_a, &p1, &p2); printk(" PLLA1: (m1, m2, n, p1, p2) = (%d, %d, %d, %d, %d)\n", m1, m2, n, p1, p2); printk(" PLLA1: clock is %d\n", calc_vclock(index, m1, m2, n, p1, p2, 0)); #if 0 printk(" PALETTE_A:\n"); for (i = 0; i < PALETTE_8_ENTRIES) printk(" %3d: 0x%08x\n", i, hw->palette_a[i]); printk(" PALETTE_B:\n"); for (i = 0; i < PALETTE_8_ENTRIES) printk(" %3d: 0x%08x\n", i, hw->palette_b[i]); #endif printk(" HTOTAL_A: 0x%08x\n", hw->htotal_a); printk(" HBLANK_A: 0x%08x\n", hw->hblank_a); printk(" HSYNC_A: 0x%08x\n", hw->hsync_a); printk(" VTOTAL_A: 0x%08x\n", hw->vtotal_a); printk(" VBLANK_A: 0x%08x\n", hw->vblank_a); printk(" VSYNC_A: 0x%08x\n", hw->vsync_a); printk(" SRC_SIZE_A: 0x%08x\n", hw->src_size_a); printk(" BCLRPAT_A: 0x%08x\n", hw->bclrpat_a); printk(" HTOTAL_B: 0x%08x\n", hw->htotal_b); printk(" HBLANK_B: 0x%08x\n", hw->hblank_b); printk(" HSYNC_B: 0x%08x\n", hw->hsync_b); printk(" VTOTAL_B: 0x%08x\n", hw->vtotal_b); printk(" VBLANK_B: 0x%08x\n", hw->vblank_b); printk(" VSYNC_B: 0x%08x\n", hw->vsync_b); printk(" SRC_SIZE_B: 0x%08x\n", hw->src_size_b); printk(" BCLRPAT_B: 0x%08x\n", hw->bclrpat_b); printk(" ADPA: 0x%08x\n", hw->adpa); printk(" DVOA: 0x%08x\n", hw->dvoa); printk(" DVOB: 0x%08x\n", hw->dvob); printk(" DVOC: 0x%08x\n", hw->dvoc); printk(" DVOA_SRCDIM: 0x%08x\n", hw->dvoa_srcdim); printk(" DVOB_SRCDIM: 0x%08x\n", hw->dvob_srcdim); printk(" DVOC_SRCDIM: 0x%08x\n", hw->dvoc_srcdim); 
printk(" LVDS: 0x%08x\n", hw->lvds); printk(" PIPEACONF: 0x%08x\n", hw->pipe_a_conf); printk(" PIPEBCONF: 0x%08x\n", hw->pipe_b_conf); printk(" DISPARB: 0x%08x\n", hw->disp_arb); printk(" CURSOR_A_CONTROL: 0x%08x\n", hw->cursor_a_control); printk(" CURSOR_B_CONTROL: 0x%08x\n", hw->cursor_b_control); printk(" CURSOR_A_BASEADDR: 0x%08x\n", hw->cursor_a_base); printk(" CURSOR_B_BASEADDR: 0x%08x\n", hw->cursor_b_base); printk(" CURSOR_A_PALETTE: "); for (i = 0; i < 4; i++) { printk("0x%08x", hw->cursor_a_palette[i]); if (i < 3) printk(", "); } printk("\n"); printk(" CURSOR_B_PALETTE: "); for (i = 0; i < 4; i++) { printk("0x%08x", hw->cursor_b_palette[i]); if (i < 3) printk(", "); } printk("\n"); printk(" CURSOR_SIZE: 0x%08x\n", hw->cursor_size); printk(" DSPACNTR: 0x%08x\n", hw->disp_a_ctrl); printk(" DSPBCNTR: 0x%08x\n", hw->disp_b_ctrl); printk(" DSPABASE: 0x%08x\n", hw->disp_a_base); printk(" DSPBBASE: 0x%08x\n", hw->disp_b_base); printk(" DSPASTRIDE: 0x%08x\n", hw->disp_a_stride); printk(" DSPBSTRIDE: 0x%08x\n", hw->disp_b_stride); printk(" VGACNTRL: 0x%08x\n", hw->vgacntrl); printk(" ADD_ID: 0x%08x\n", hw->add_id); for (i = 0; i < 7; i++) { printk(" SWF0%d 0x%08x\n", i, hw->swf0x[i]); } for (i = 0; i < 7; i++) { printk(" SWF1%d 0x%08x\n", i, hw->swf1x[i]); } for (i = 0; i < 3; i++) { printk(" SWF3%d 0x%08x\n", i, hw->swf3x[i]); } for (i = 0; i < 8; i++) printk(" FENCE%d 0x%08x\n", i, hw->fence[i]); printk(" INSTPM 0x%08x\n", hw->instpm); printk(" MEM_MODE 0x%08x\n", hw->mem_mode); printk(" FW_BLC_0 0x%08x\n", hw->fw_blc_0); printk(" FW_BLC_1 0x%08x\n", hw->fw_blc_1); printk(" HWSTAM 0x%04x\n", hw->hwstam); printk(" IER 0x%04x\n", hw->ier); printk(" IIR 0x%04x\n", hw->iir); printk(" IMR 0x%04x\n", hw->imr); printk("hw state dump end\n"); #endif } /* Split the M parameter into M1 and M2. 
*/
/*
 * Split a composite M divisor into its M1/M2 register fields so that
 * M = 5*(M1+2) + (M2+2), within this chipset's PLL limits.
 * Returns 0 on success, 1 if no valid pair exists.
 */
static int splitm(int index, unsigned int m, unsigned int *retm1,
		  unsigned int *retm2)
{
	int m1, m2;
	int testm;
	struct pll_min_max *pll = &plls[index];

	/* no point optimising too much - brute force m */
	for (m1 = pll->min_m1; m1 < pll->max_m1 + 1; m1++) {
		for (m2 = pll->min_m2; m2 < pll->max_m2 + 1; m2++) {
			testm = (5 * (m1 + 2)) + (m2 + 2);
			if (testm == m) {
				*retm1 = (unsigned int)m1;
				*retm2 = (unsigned int)m2;
				return 0;
			}
		}
	}
	return 1;
}

/* Split the P parameter into P1 and P2.
 * i9xx encodes P directly (P2 selects a /5 or /10 post divider); i8xx
 * uses P = (P1+2) * 2^(P2+1).  Returns 0 on success, 1 if P cannot be
 * represented within the chipset's P1 limits.
 */
static int splitp(int index, unsigned int p, unsigned int *retp1,
		  unsigned int *retp2)
{
	int p1, p2;
	struct pll_min_max *pll = &plls[index];

	if (index == PLLS_I9xx) {
		p2 = (p % 10) ? 1 : 0;
		p1 = p / (p2 ? 5 : 10);
		*retp1 = (unsigned int)p1;
		*retp2 = (unsigned int)p2;
		return 0;
	}

	/* i8xx: prefer the larger post-divider (p2 = 1) when p is /4-able */
	if (p % 4 == 0)
		p2 = 1;
	else
		p2 = 0;
	p1 = (p / (1 << (p2 + 1))) - 2;
	/* fall back to p2 = 0 if p1 would be below the hardware minimum */
	if (p % 4 == 0 && p1 < pll->min_p1) {
		p2 = 0;
		p1 = (p / (1 << (p2 + 1))) - 2;
	}
	if (p1 < pll->min_p1 || p1 > pll->max_p1 ||
	    (p1 + 2) * (1 << (p2 + 1)) != p) {
		return 1;
	} else {
		*retp1 = (unsigned int)p1;
		*retp2 = (unsigned int)p2;
		return 0;
	}
}

/*
 * Search the PLL parameter space for the divisor set whose output clock
 * is closest to the requested dot clock (kHz).  Writes the register-field
 * encodings (m1/m2, n-2, p1/p2) and the achieved clock via the out
 * parameters.  Returns 0 on success, 1 if no acceptable set was found.
 */
static int calc_pll_params(int index, int clock, u32 *retm1, u32 *retm2,
			   u32 *retn, u32 *retp1, u32 *retp2, u32 *retclock)
{
	u32 m1, m2, n, p1, p2, n1, testm;
	u32 f_vco, p, p_best = 0, m, f_out = 0;
	u32 err_max, err_target, err_best = 10000000;
	u32 n_best = 0, m_best = 0, f_best, f_err;
	u32 p_min, p_max, p_inc, div_max;
	struct pll_min_max *pll = &plls[index];

	/* Accept 0.5% difference, but aim for 0.1% */
	err_max = 5 * clock / 1000;
	err_target = clock / 1000;

	DBG_MSG("Clock is %d\n", clock);

	div_max = pll->max_vco / clock;

	/* P step size depends on which side of the transition clock we are */
	p_inc = (clock <= pll->p_transition_clk) ? pll->p_inc_lo
						 : pll->p_inc_hi;
	p_min = p_inc;
	p_max = ROUND_DOWN_TO(div_max, p_inc);
	if (p_min < pll->min_p)
		p_min = pll->min_p;
	if (p_max > pll->max_p)
		p_max = pll->max_p;

	DBG_MSG("p range is %d-%d (%d)\n", p_min, p_max, p_inc);

	p = p_min;
	do {
		if (splitp(index, p, &p1, &p2)) {
			WRN_MSG("cannot split p = %d\n", p);
			p += p_inc;
			continue;
		}
		n = pll->min_n;
		f_vco = clock * p;	/* required VCO for this post-divider */

		do {
			/* ideal m for this n, clamped to hardware limits */
			m = ROUND_UP_TO(f_vco * n, pll->ref_clk) / pll->ref_clk;
			if (m < pll->min_m)
				m = pll->min_m + 1;
			if (m > pll->max_m)
				m = pll->max_m - 1;
			/* try m-1 and m; keep whichever minimises the error */
			for (testm = m - 1; testm <= m; testm++) {
				f_out = calc_vclock3(index, testm, n, p);
				if (splitm(index, testm, &m1, &m2)) {
					WRN_MSG("cannot split m = %d\n",
						testm);
					continue;
				}
				if (clock > f_out)
					f_err = clock - f_out;
				else /* slightly bias the error for bigger clocks */
					f_err = f_out - clock + 1;

				if (f_err < err_best) {
					m_best = testm;
					n_best = n;
					p_best = p;
					f_best = f_out;
					err_best = f_err;
				}
			}
			n++;
		} while ((n <= pll->max_n) && (f_out >= clock));
		p += p_inc;
	} while ((p <= p_max));

	if (!m_best) {
		WRN_MSG("cannot find parameters for clock %d\n", clock);
		return 1;
	}
	m = m_best;
	n = n_best;
	p = p_best;
	splitm(index, m, &m1, &m2);
	splitp(index, p, &p1, &p2);
	n1 = n - 2;	/* register field stores n - 2 */

	DBG_MSG("m, n, p: %d (%d,%d), %d (%d), %d (%d,%d), "
		"f: %d (%d), VCO: %d\n",
		m, m1, m2, n, n1, p, p1, p2,
		calc_vclock3(index, m, n, p),
		calc_vclock(index, m1, m2, n1, p1, p2, 0),
		calc_vclock3(index, m, n, p) * p);
	*retm1 = m1;
	*retm2 = m2;
	*retn = n1;
	*retp1 = p1;
	*retp2 = p2;
	*retclock = calc_vclock(index, m1, m2, n1, p1, p2, 0);

	return 0;
}

/* Warn and return 1 if value does not fit in the given register field. */
static __inline__ int check_overflow(u32 value, u32 limit,
				     const char *description)
{
	if (value > limit) {
		WRN_MSG("%s value %d exceeds limit %d\n",
			description, value, limit);
		return 1;
	}
	return 0;
}

/* It is assumed that hw is filled in with the initial state information.
 */
/*
 * Translate an fb_var_screeninfo mode into register values in *hw.
 * Only the fields for the currently active pipe (and plane A) are
 * modified; nothing is written to the hardware here.  Returns 0 on
 * success, 1 on any PLL/overflow/alignment failure.
 */
int intelfbhw_mode_to_hw(struct intelfb_info *dinfo,
			 struct intelfb_hwstate *hw,
			 struct fb_var_screeninfo *var)
{
	int pipe = intelfbhw_active_pipe(hw);
	u32 *dpll, *fp0, *fp1;
	u32 m1, m2, n, p1, p2, clock_target, clock;
	u32 hsync_start, hsync_end, hblank_start, hblank_end, htotal, hactive;
	u32 vsync_start, vsync_end, vblank_start, vblank_end, vtotal, vactive;
	u32 vsync_pol, hsync_pol;
	u32 *vs, *vb, *vt, *hs, *hb, *ht, *ss, *pipe_conf;
	u32 stride_alignment;

	DBG_MSG("intelfbhw_mode_to_hw\n");

	/* Disable VGA */
	hw->vgacntrl |= VGA_DISABLE;

	/* Set which pipe's registers will be set. */
	if (pipe == PIPE_B) {
		dpll = &hw->dpll_b;
		fp0 = &hw->fpb0;
		fp1 = &hw->fpb1;
		hs = &hw->hsync_b;
		hb = &hw->hblank_b;
		ht = &hw->htotal_b;
		vs = &hw->vsync_b;
		vb = &hw->vblank_b;
		vt = &hw->vtotal_b;
		ss = &hw->src_size_b;
		pipe_conf = &hw->pipe_b_conf;
	} else {
		dpll = &hw->dpll_a;
		fp0 = &hw->fpa0;
		fp1 = &hw->fpa1;
		hs = &hw->hsync_a;
		hb = &hw->hblank_a;
		ht = &hw->htotal_a;
		vs = &hw->vsync_a;
		vb = &hw->vblank_a;
		vt = &hw->vtotal_a;
		ss = &hw->src_size_a;
		pipe_conf = &hw->pipe_a_conf;
	}

	/* Use ADPA register for sync control. */
	hw->adpa &= ~ADPA_USE_VGA_HVPOLARITY;

	/* sync polarity */
	hsync_pol = (var->sync & FB_SYNC_HOR_HIGH_ACT) ?
			ADPA_SYNC_ACTIVE_HIGH : ADPA_SYNC_ACTIVE_LOW;
	vsync_pol = (var->sync & FB_SYNC_VERT_HIGH_ACT) ?
			ADPA_SYNC_ACTIVE_HIGH : ADPA_SYNC_ACTIVE_LOW;
	hw->adpa &= ~((ADPA_SYNC_ACTIVE_MASK << ADPA_VSYNC_ACTIVE_SHIFT) |
		      (ADPA_SYNC_ACTIVE_MASK << ADPA_HSYNC_ACTIVE_SHIFT));
	hw->adpa |= (hsync_pol << ADPA_HSYNC_ACTIVE_SHIFT) |
		    (vsync_pol << ADPA_VSYNC_ACTIVE_SHIFT);

	/* Connect correct pipe to the analog port DAC */
	hw->adpa &= ~(PIPE_MASK << ADPA_PIPE_SELECT_SHIFT);
	hw->adpa |= (pipe << ADPA_PIPE_SELECT_SHIFT);

	/* Set DPMS state to D0 (on) */
	hw->adpa &= ~ADPA_DPMS_CONTROL_MASK;
	hw->adpa |= ADPA_DPMS_D0;

	hw->adpa |= ADPA_DAC_ENABLE;

	*dpll |= (DPLL_VCO_ENABLE | DPLL_VGA_MODE_DISABLE);
	*dpll &= ~(DPLL_RATE_SELECT_MASK | DPLL_REFERENCE_SELECT_MASK);
	*dpll |= (DPLL_REFERENCE_DEFAULT | DPLL_RATE_SELECT_FP0);

	/* Desired clock in kHz (pixclock is in picoseconds) */
	clock_target = 1000000000 / var->pixclock;

	if (calc_pll_params(dinfo->pll_index, clock_target, &m1, &m2,
			    &n, &p1, &p2, &clock)) {
		WRN_MSG("calc_pll_params failed\n");
		return 1;
	}

	/* Check for overflow of the chosen divisors. */
	if (check_overflow(p1, DPLL_P1_MASK, "PLL P1 parameter"))
		return 1;
	if (check_overflow(p2, DPLL_P2_MASK, "PLL P2 parameter"))
		return 1;
	if (check_overflow(m1, FP_DIVISOR_MASK, "PLL M1 parameter"))
		return 1;
	if (check_overflow(m2, FP_DIVISOR_MASK, "PLL M2 parameter"))
		return 1;
	if (check_overflow(n, FP_DIVISOR_MASK, "PLL N parameter"))
		return 1;

	*dpll &= ~DPLL_P1_FORCE_DIV2;
	*dpll &= ~((DPLL_P2_MASK << DPLL_P2_SHIFT) |
		   (DPLL_P1_MASK << DPLL_P1_SHIFT));

	if (IS_I9XX(dinfo)) {
		*dpll |= (p2 << DPLL_I9XX_P2_SHIFT);
		/* i9xx stores p1 as a one-hot bit */
		*dpll |= (1 << (p1 - 1)) << DPLL_P1_SHIFT;
	} else
		*dpll |= (p2 << DPLL_P2_SHIFT) | (p1 << DPLL_P1_SHIFT);

	*fp0 = (n << FP_N_DIVISOR_SHIFT) |
	       (m1 << FP_M1_DIVISOR_SHIFT) |
	       (m2 << FP_M2_DIVISOR_SHIFT);
	*fp1 = *fp0;

	hw->dvob &= ~PORT_ENABLE;
	hw->dvoc &= ~PORT_ENABLE;

	/* Use display plane A. */
	hw->disp_a_ctrl |= DISPPLANE_PLANE_ENABLE;
	hw->disp_a_ctrl &= ~DISPPLANE_GAMMA_ENABLE;
	hw->disp_a_ctrl &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (intelfb_var_to_depth(var)) {
	case 8:
		hw->disp_a_ctrl |= DISPPLANE_8BPP | DISPPLANE_GAMMA_ENABLE;
		break;
	case 15:
		hw->disp_a_ctrl |= DISPPLANE_15_16BPP;
		break;
	case 16:
		hw->disp_a_ctrl |= DISPPLANE_16BPP;
		break;
	case 24:
		hw->disp_a_ctrl |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	}
	hw->disp_a_ctrl &= ~(PIPE_MASK << DISPPLANE_SEL_PIPE_SHIFT);
	hw->disp_a_ctrl |= (pipe << DISPPLANE_SEL_PIPE_SHIFT);

	/* Set CRTC registers. */
	hactive = var->xres;
	hsync_start = hactive + var->right_margin;
	hsync_end = hsync_start + var->hsync_len;
	htotal = hsync_end + var->left_margin;
	hblank_start = hactive;
	hblank_end = htotal;

	DBG_MSG("H: act %d, ss %d, se %d, tot %d bs %d, be %d\n",
		hactive, hsync_start, hsync_end, htotal, hblank_start,
		hblank_end);

	vactive = var->yres;
	if (var->vmode & FB_VMODE_INTERLACED)
		vactive--; /* the chip adds 2 halflines automatically */
	vsync_start = vactive + var->lower_margin;
	vsync_end = vsync_start + var->vsync_len;
	vtotal = vsync_end + var->upper_margin;
	vblank_start = vactive;
	vblank_end = vtotal;
	/*
	 * NOTE(review): this unconditionally overwrites the vblank_end
	 * just computed above; upstream gates this on interlaced modes.
	 * Looks like a dropped conditional — confirm against upstream
	 * intelfbhw.c before relying on non-interlaced blanking here.
	 */
	vblank_end = vsync_end + 1;

	DBG_MSG("V: act %d, ss %d, se %d, tot %d bs %d, be %d\n",
		vactive, vsync_start, vsync_end, vtotal, vblank_start,
		vblank_end);

	/* Adjust for register values, and check for overflow. */
	hactive--;
	if (check_overflow(hactive, HACTIVE_MASK, "CRTC hactive"))
		return 1;
	hsync_start--;
	if (check_overflow(hsync_start, HSYNCSTART_MASK, "CRTC hsync_start"))
		return 1;
	hsync_end--;
	if (check_overflow(hsync_end, HSYNCEND_MASK, "CRTC hsync_end"))
		return 1;
	htotal--;
	if (check_overflow(htotal, HTOTAL_MASK, "CRTC htotal"))
		return 1;
	hblank_start--;
	if (check_overflow(hblank_start, HBLANKSTART_MASK, "CRTC hblank_start"))
		return 1;
	hblank_end--;
	if (check_overflow(hblank_end, HBLANKEND_MASK, "CRTC hblank_end"))
		return 1;
	vactive--;
	if (check_overflow(vactive, VACTIVE_MASK, "CRTC vactive"))
		return 1;
	vsync_start--;
	if (check_overflow(vsync_start, VSYNCSTART_MASK, "CRTC vsync_start"))
		return 1;
	vsync_end--;
	if (check_overflow(vsync_end, VSYNCEND_MASK, "CRTC vsync_end"))
		return 1;
	vtotal--;
	if (check_overflow(vtotal, VTOTAL_MASK, "CRTC vtotal"))
		return 1;
	vblank_start--;
	if (check_overflow(vblank_start, VBLANKSTART_MASK, "CRTC vblank_start"))
		return 1;
	vblank_end--;
	if (check_overflow(vblank_end, VBLANKEND_MASK, "CRTC vblank_end"))
		return 1;

	*ht = (htotal << HTOTAL_SHIFT) | (hactive << HACTIVE_SHIFT);
	*hb = (hblank_start << HBLANKSTART_SHIFT) |
	      (hblank_end << HSYNCEND_SHIFT);
	*hs = (hsync_start << HSYNCSTART_SHIFT) | (hsync_end << HSYNCEND_SHIFT);

	*vt = (vtotal << VTOTAL_SHIFT) | (vactive << VACTIVE_SHIFT);
	*vb = (vblank_start << VBLANKSTART_SHIFT) |
	      (vblank_end << VSYNCEND_SHIFT);
	*vs = (vsync_start << VSYNCSTART_SHIFT) | (vsync_end << VSYNCEND_SHIFT);
	*ss = (hactive << SRC_SIZE_HORIZ_SHIFT) |
	      (vactive << SRC_SIZE_VERT_SHIFT);

	hw->disp_a_stride = dinfo->pitch;
	DBG_MSG("pitch is %d\n", hw->disp_a_stride);

	hw->disp_a_base = hw->disp_a_stride * var->yoffset +
			  var->xoffset * var->bits_per_pixel / 8;

	hw->disp_a_base += dinfo->fb.offset << 12;

	/* Check stride alignment. */
	stride_alignment = IS_I9XX(dinfo) ? STRIDE_ALIGNMENT_I9XX :
					    STRIDE_ALIGNMENT;
	if (hw->disp_a_stride % stride_alignment != 0) {
		WRN_MSG("display stride %d has bad alignment %d\n",
			hw->disp_a_stride, stride_alignment);
		return 1;
	}

	/* Set the palette to 8-bit mode. */
	*pipe_conf &= ~PIPECONF_GAMMA;

	if (var->vmode & FB_VMODE_INTERLACED)
		*pipe_conf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
	else
		*pipe_conf &= ~PIPECONF_INTERLACE_MASK;

	return 0;
}

/* Program a (non-VGA) video mode. */
/*
 * Write a previously computed hwstate to the hardware: shut the pipe
 * and planes down, reprogram the PLL and CRTC timings, then bring the
 * pipe (and, unless 'blank', the display plane) back up.  Statement
 * order here follows the hardware's required mode-set sequence.
 */
int intelfbhw_program_mode(struct intelfb_info *dinfo,
			   const struct intelfb_hwstate *hw, int blank)
{
	u32 tmp;
	const u32 *dpll, *fp0, *fp1, *pipe_conf;
	const u32 *hs, *ht, *hb, *vs, *vt, *vb, *ss;
	u32 dpll_reg, fp0_reg, fp1_reg, pipe_conf_reg, pipe_stat_reg;
	u32 hsync_reg, htotal_reg, hblank_reg;
	u32 vsync_reg, vtotal_reg, vblank_reg;
	u32 src_size_reg;
	u32 count, tmp_val[3];

	/* Assume single pipe */

#if VERBOSE > 0
	DBG_MSG("intelfbhw_program_mode\n");
#endif

	/* Disable VGA */
	tmp = INREG(VGACNTRL);
	tmp |= VGA_DISABLE;
	OUTREG(VGACNTRL, tmp);

	dinfo->pipe = intelfbhw_active_pipe(hw);

	if (dinfo->pipe == PIPE_B) {
		dpll = &hw->dpll_b;
		fp0 = &hw->fpb0;
		fp1 = &hw->fpb1;
		pipe_conf = &hw->pipe_b_conf;
		hs = &hw->hsync_b;
		hb = &hw->hblank_b;
		ht = &hw->htotal_b;
		vs = &hw->vsync_b;
		vb = &hw->vblank_b;
		vt = &hw->vtotal_b;
		ss = &hw->src_size_b;
		dpll_reg = DPLL_B;
		fp0_reg = FPB0;
		fp1_reg = FPB1;
		pipe_conf_reg = PIPEBCONF;
		pipe_stat_reg = PIPEBSTAT;
		hsync_reg = HSYNC_B;
		htotal_reg = HTOTAL_B;
		hblank_reg = HBLANK_B;
		vsync_reg = VSYNC_B;
		vtotal_reg = VTOTAL_B;
		vblank_reg = VBLANK_B;
		src_size_reg = SRC_SIZE_B;
	} else {
		dpll = &hw->dpll_a;
		fp0 = &hw->fpa0;
		fp1 = &hw->fpa1;
		pipe_conf = &hw->pipe_a_conf;
		hs = &hw->hsync_a;
		hb = &hw->hblank_a;
		ht = &hw->htotal_a;
		vs = &hw->vsync_a;
		vb = &hw->vblank_a;
		vt = &hw->vtotal_a;
		ss = &hw->src_size_a;
		dpll_reg = DPLL_A;
		fp0_reg = FPA0;
		fp1_reg = FPA1;
		pipe_conf_reg = PIPEACONF;
		pipe_stat_reg = PIPEASTAT;
		hsync_reg = HSYNC_A;
		htotal_reg = HTOTAL_A;
		hblank_reg = HBLANK_A;
		vsync_reg = VSYNC_A;
		vtotal_reg = VTOTAL_A;
		vblank_reg = VBLANK_A;
		src_size_reg = SRC_SIZE_A;
	}

	/* turn off pipe */
	tmp = INREG(pipe_conf_reg);
	tmp &= ~PIPECONF_ENABLE;
	OUTREG(pipe_conf_reg, tmp);

	/*
	 * Wait until the scanline counter stops moving (three identical
	 * consecutive reads), re-issuing the pipe-off every 200 polls.
	 * NOTE(review): tmp_val[] is read before all three slots have
	 * been written on the first iterations — confirm this matches
	 * upstream intent.
	 */
	count = 0;
	do {
		tmp_val[count % 3] = INREG(PIPEA_DSL);
		if ((tmp_val[0] == tmp_val[1]) && (tmp_val[1] == tmp_val[2]))
			break;
		count++;
		udelay(1);
		if (count % 200 == 0) {
			tmp = INREG(pipe_conf_reg);
			tmp &= ~PIPECONF_ENABLE;
			OUTREG(pipe_conf_reg, tmp);
		}
	} while (count < 2000);

	OUTREG(ADPA, INREG(ADPA) & ~ADPA_DAC_ENABLE);

	/* Disable planes A and B. */
	tmp = INREG(DSPACNTR);
	tmp &= ~DISPPLANE_PLANE_ENABLE;
	OUTREG(DSPACNTR, tmp);
	tmp = INREG(DSPBCNTR);
	tmp &= ~DISPPLANE_PLANE_ENABLE;
	OUTREG(DSPBCNTR, tmp);

	/* Wait for vblank. For now, just wait for a 50Hz cycle (20ms)) */
	mdelay(20);

	OUTREG(DVOB, INREG(DVOB) & ~PORT_ENABLE);
	OUTREG(DVOC, INREG(DVOC) & ~PORT_ENABLE);
	OUTREG(ADPA, INREG(ADPA) & ~ADPA_DAC_ENABLE);

	/* Disable Sync */
	tmp = INREG(ADPA);
	tmp &= ~ADPA_DPMS_CONTROL_MASK;
	tmp |= ADPA_DPMS_D3;
	OUTREG(ADPA, tmp);

	/* do some funky magic - xyzzy */
	OUTREG(0x61204, 0xabcd0000);

	/* turn off PLL */
	tmp = INREG(dpll_reg);
	tmp &= ~DPLL_VCO_ENABLE;
	OUTREG(dpll_reg, tmp);

	/* Set PLL parameters */
	OUTREG(fp0_reg, *fp0);
	OUTREG(fp1_reg, *fp1);

	/* Enable PLL */
	OUTREG(dpll_reg, *dpll);

	/* Set DVOs B/C */
	OUTREG(DVOB, hw->dvob);
	OUTREG(DVOC, hw->dvoc);

	/* undo funky magic */
	OUTREG(0x61204, 0x00000000);

	/* Set ADPA */
	OUTREG(ADPA, INREG(ADPA) | ADPA_DAC_ENABLE);
	OUTREG(ADPA, (hw->adpa & ~(ADPA_DPMS_CONTROL_MASK)) | ADPA_DPMS_D3);

	/* Set pipe parameters */
	OUTREG(hsync_reg, *hs);
	OUTREG(hblank_reg, *hb);
	OUTREG(htotal_reg, *ht);
	OUTREG(vsync_reg, *vs);
	OUTREG(vblank_reg, *vb);
	OUTREG(vtotal_reg, *vt);
	OUTREG(src_size_reg, *ss);

	/* Enable the field interrupt matching the interlace mode. */
	switch (dinfo->info->var.vmode & (FB_VMODE_INTERLACED |
					  FB_VMODE_ODD_FLD_FIRST)) {
	case FB_VMODE_INTERLACED | FB_VMODE_ODD_FLD_FIRST:
		OUTREG(pipe_stat_reg, 0xFFFF | PIPESTAT_FLD_EVT_ODD_EN);
		break;
	case FB_VMODE_INTERLACED: /* even lines first */
		OUTREG(pipe_stat_reg, 0xFFFF | PIPESTAT_FLD_EVT_EVEN_EN);
		break;
	default: /* non-interlaced */
		OUTREG(pipe_stat_reg, 0xFFFF); /* clear all status bits only */
	}

	/* Enable pipe */
	OUTREG(pipe_conf_reg, *pipe_conf | PIPECONF_ENABLE);

	/* Enable sync */
	tmp = INREG(ADPA);
	tmp &= ~ADPA_DPMS_CONTROL_MASK;
	tmp |= ADPA_DPMS_D0;
	OUTREG(ADPA, tmp);

	/* setup display plane */
	if (dinfo->pdev->device == PCI_DEVICE_ID_INTEL_830M) {
		/*
		 * i830M errata: the display plane must be enabled
		 * to allow writes to the other bits in the plane
		 * control register.
		 */
		tmp = INREG(DSPACNTR);
		if ((tmp & DISPPLANE_PLANE_ENABLE) !=
		    DISPPLANE_PLANE_ENABLE) {
			tmp |= DISPPLANE_PLANE_ENABLE;
			OUTREG(DSPACNTR, tmp);
			OUTREG(DSPACNTR,
			       hw->disp_a_ctrl|DISPPLANE_PLANE_ENABLE);
			mdelay(1);
		}
	}

	OUTREG(DSPACNTR, hw->disp_a_ctrl & ~DISPPLANE_PLANE_ENABLE);
	OUTREG(DSPASTRIDE, hw->disp_a_stride);
	OUTREG(DSPABASE, hw->disp_a_base);

	/* Enable plane */
	if (!blank) {
		tmp = INREG(DSPACNTR);
		tmp |= DISPPLANE_PLANE_ENABLE;
		OUTREG(DSPACNTR, tmp);
		OUTREG(DSPABASE, hw->disp_a_base);
	}

	return 0;
}

/* forward declarations */
static void refresh_ring(struct intelfb_info *dinfo);
static void reset_state(struct intelfb_info *dinfo);
static void do_flush(struct intelfb_info *dinfo);

/*
 * Bytes available for writing in the ring buffer, keeping RING_MIN_FREE
 * bytes in reserve so head and tail never collide.
 */
static u32 get_ring_space(struct intelfb_info *dinfo)
{
	u32 ring_space;

	if (dinfo->ring_tail >= dinfo->ring_head)
		ring_space = dinfo->ring.size -
			(dinfo->ring_tail - dinfo->ring_head);
	else
		ring_space = dinfo->ring_head - dinfo->ring_tail;

	if (ring_space > RING_MIN_FREE)
		ring_space -= RING_MIN_FREE;
	else
		ring_space = 0;

	return ring_space;
}

/*
 * Busy-wait until at least n bytes are free in the ring.  The 3-second
 * timeout is restarted whenever the head advances; on timeout the engine
 * is reset once, and a second timeout disables acceleration entirely
 * (ring_lockup).  Returns the iteration count.
 */
static int wait_ring(struct intelfb_info *dinfo, int n)
{
	int i = 0;
	unsigned long end;
	u32 last_head = INREG(PRI_RING_HEAD) & RING_HEAD_MASK;

#if VERBOSE > 0
	DBG_MSG("wait_ring: %d\n", n);
#endif

	end = jiffies + (HZ * 3);
	while (dinfo->ring_space < n) {
		dinfo->ring_head = INREG(PRI_RING_HEAD) & RING_HEAD_MASK;
		dinfo->ring_space = get_ring_space(dinfo);
		if (dinfo->ring_head != last_head) {
			end = jiffies + (HZ * 3);
			last_head = dinfo->ring_head;
		}
		i++;
		if (time_before(end, jiffies)) {
			/*
			 * NOTE(review): i is incremented before this test,
			 * so the !i "try again" branch appears unreachable;
			 * this matches the code as extracted — verify
			 * against upstream before changing.
			 */
			if (!i) {
				/* Try again */
				reset_state(dinfo);
				refresh_ring(dinfo);
				do_flush(dinfo);
				end = jiffies + (HZ * 3);
				i = 1;
			} else {
				WRN_MSG("ring buffer : space: %d wanted %d\n",
					dinfo->ring_space, n);
				WRN_MSG("lockup - turning off hardware "
					"acceleration\n");
				dinfo->ring_lockup = 1;
				break;
			}
		}
		udelay(1);
	}
	return i;
}

/* Queue an MI_FLUSH (plus NOOP padding) on the ring. */
static void do_flush(struct intelfb_info *dinfo)
{
	START_RING(2);
	OUT_RING(MI_FLUSH | MI_WRITE_DIRTY_STATE | MI_INVALIDATE_MAP_CACHE);
	OUT_RING(MI_NOOP);
	ADVANCE_RING();
}

/* Flush and then wait for the ring to drain completely. */
void intelfbhw_do_sync(struct intelfb_info *dinfo)
{
#if VERBOSE > 0
	DBG_MSG("intelfbhw_do_sync\n");
#endif

	if (!dinfo->accel)
		return;

	/*
	 * Send a flush, then wait until the ring is empty.  This is what
	 * the XFree86 driver does, and actually it doesn't seem a lot worse
	 * than the recommended method (both have problems).
	 */
	do_flush(dinfo);
	wait_ring(dinfo, dinfo->ring.size - RING_MIN_FREE);
	dinfo->ring_space = dinfo->ring.size - RING_MIN_FREE;
}

/* Re-read head/tail from the hardware and recompute free space. */
static void refresh_ring(struct intelfb_info *dinfo)
{
#if VERBOSE > 0
	DBG_MSG("refresh_ring\n");
#endif

	dinfo->ring_head = INREG(PRI_RING_HEAD) & RING_HEAD_MASK;
	dinfo->ring_tail = INREG(PRI_RING_TAIL) & RING_TAIL_MASK;
	dinfo->ring_space = get_ring_space(dinfo);
}

/*
 * Clear all fence registers and disable the ring, draining it first if
 * it was enabled.
 */
static void reset_state(struct intelfb_info *dinfo)
{
	int i;
	u32 tmp;

#if VERBOSE > 0
	DBG_MSG("reset_state\n");
#endif

	for (i = 0; i < FENCE_NUM; i++)
		OUTREG(FENCE + (i << 2), 0);

	/* Flush the ring buffer if it's enabled. */
	tmp = INREG(PRI_RING_LENGTH);
	if (tmp & RING_ENABLE) {
#if VERBOSE > 0
		DBG_MSG("reset_state: ring was enabled\n");
#endif
		refresh_ring(dinfo);
		intelfbhw_do_sync(dinfo);
		DO_RING_IDLE();
	}

	OUTREG(PRI_RING_LENGTH, 0);
	OUTREG(PRI_RING_HEAD, 0);
	OUTREG(PRI_RING_TAIL, 0);
	OUTREG(PRI_RING_START, 0);
}

/* Stop the 2D engine, and turn off the ring buffer.
 */
void intelfbhw_2d_stop(struct intelfb_info *dinfo)
{
#if VERBOSE > 0
	DBG_MSG("intelfbhw_2d_stop: accel: %d, ring_active: %d\n",
		dinfo->accel, dinfo->ring_active);
#endif

	if (!dinfo->accel)
		return;

	dinfo->ring_active = 0;
	reset_state(dinfo);
}

/*
 * Enable the ring buffer, and initialise the 2D engine.
 * It is assumed that the graphics engine has been stopped by previously
 * calling intelfb_2d_stop().
 */
void intelfbhw_2d_start(struct intelfb_info *dinfo)
{
#if VERBOSE > 0
	DBG_MSG("intelfbhw_2d_start: accel: %d, ring_active: %d\n",
		dinfo->accel, dinfo->ring_active);
#endif

	if (!dinfo->accel)
		return;

	/* Initialise the primary ring buffer. */
	OUTREG(PRI_RING_LENGTH, 0);
	OUTREG(PRI_RING_TAIL, 0);
	OUTREG(PRI_RING_HEAD, 0);

	OUTREG(PRI_RING_START, dinfo->ring.physical & RING_START_MASK);
	/* length register holds size minus one page */
	OUTREG(PRI_RING_LENGTH,
	       ((dinfo->ring.size - GTT_PAGE_SIZE) & RING_LENGTH_MASK) |
	       RING_NO_REPORT | RING_ENABLE);
	refresh_ring(dinfo);
	dinfo->ring_active = 1;
}

/* 2D fillrect (solid fill or invert) */
/*
 * Queue a COLOR_BLT command: solid fill (or ROP-based invert) of a
 * w x h rectangle at (x, y) in the framebuffer.
 */
void intelfbhw_do_fillrect(struct intelfb_info *dinfo, u32 x, u32 y, u32 w,
			   u32 h, u32 color, u32 pitch, u32 bpp, u32 rop)
{
	u32 br00, br09, br13, br14, br16;

#if VERBOSE > 0
	DBG_MSG("intelfbhw_do_fillrect: (%d,%d) %dx%d, c 0x%06x, p %d bpp %d, "
		"rop 0x%02x\n", x, y, w, h, color, pitch, bpp, rop);
#endif

	br00 = COLOR_BLT_CMD;			/* command dword */
	br09 = dinfo->fb_start + (y * pitch + x * (bpp / 8)); /* dest addr */
	br13 = (rop << ROP_SHIFT) | pitch;	/* raster op + dest pitch */
	br14 = (h << HEIGHT_SHIFT) | ((w * (bpp / 8)) << WIDTH_SHIFT);
	br16 = color;

	switch (bpp) {
	case 8:
		br13 |= COLOR_DEPTH_8;
		break;
	case 16:
		br13 |= COLOR_DEPTH_16;
		break;
	case 32:
		br13 |= COLOR_DEPTH_32;
		br00 |= WRITE_ALPHA | WRITE_RGB;
		break;
	}

	START_RING(6);
	OUT_RING(br00);
	OUT_RING(br13);
	OUT_RING(br14);
	OUT_RING(br09);
	OUT_RING(br16);
	OUT_RING(MI_NOOP);
	ADVANCE_RING();

#if VERBOSE > 0
	DBG_MSG("ring = 0x%08x, 0x%08x (%d)\n", dinfo->ring_head,
		dinfo->ring_tail, dinfo->ring_space);
#endif
}

/*
 * Queue an XY_SRC_COPY blit of a w x h region from (curx, cury) to
 * (dstx, dsty) within the framebuffer.
 */
void intelfbhw_do_bitblt(struct intelfb_info *dinfo, u32 curx, u32 cury,
			 u32 dstx, u32 dsty, u32 w, u32 h, u32 pitch, u32 bpp)
{
	u32 br00, br09, br11, br12, br13, br22, br23, br26;

#if VERBOSE > 0
	DBG_MSG("intelfbhw_do_bitblt: (%d,%d)->(%d,%d) %dx%d, p %d bpp %d\n",
		curx, cury, dstx, dsty, w, h, pitch, bpp);
#endif

	br00 = XY_SRC_COPY_BLT_CMD;
	br09 = dinfo->fb_start;			/* dest base */
	br11 = (pitch << PITCH_SHIFT);		/* src pitch */
	br12 = dinfo->fb_start;			/* src base */
	br13 = (SRC_ROP_GXCOPY << ROP_SHIFT) | (pitch << PITCH_SHIFT);
	br22 = (dstx << WIDTH_SHIFT) | (dsty << HEIGHT_SHIFT);	  /* dest x1,y1 */
	br23 = ((dstx + w) << WIDTH_SHIFT) |
	       ((dsty + h) << HEIGHT_SHIFT);			  /* dest x2,y2 */
	br26 = (curx << WIDTH_SHIFT) | (cury << HEIGHT_SHIFT);	  /* src x1,y1 */

	switch (bpp) {
	case 8:
		br13 |= COLOR_DEPTH_8;
		break;
	case 16:
		br13 |= COLOR_DEPTH_16;
		break;
	case 32:
		br13 |= COLOR_DEPTH_32;
		br00 |= WRITE_ALPHA | WRITE_RGB;
		break;
	}

	START_RING(8);
	OUT_RING(br00);
	OUT_RING(br13);
	OUT_RING(br22);
	OUT_RING(br23);
	OUT_RING(br09);
	OUT_RING(br26);
	OUT_RING(br11);
	OUT_RING(br12);
	ADVANCE_RING();
}

/*
 * Draw a monochrome glyph via an immediate-mode XY_MONO_SRC blit:
 * the bitmap is packed into the command stream itself.  Returns 1 on
 * success, 0 if the glyph is too large for immediate mode (caller
 * should fall back to software drawing).
 */
int intelfbhw_do_drawglyph(struct intelfb_info *dinfo, u32 fg, u32 bg, u32 w,
			   u32 h, const u8* cdat, u32 x, u32 y, u32 pitch,
			   u32 bpp)
{
	int nbytes, ndwords, pad, tmp;
	u32 br00, br09, br13, br18, br19, br22, br23;
	int dat, ix, iy, iw;
	int i, j;

#if VERBOSE > 0
	DBG_MSG("intelfbhw_do_drawglyph: (%d,%d) %dx%d\n", x, y, w, h);
#endif

	/* size in bytes of a padded scanline */
	nbytes = ROUND_UP_TO(w, 16) / 8;

	/* Total bytes of padded scanline data to write out. */
	nbytes = nbytes * h;

	/*
	 * Check if the glyph data exceeds the immediate mode limit.
	 * It would take a large font (1K pixels) to hit this limit.
	 */
	if (nbytes > MAX_MONO_IMM_SIZE)
		return 0;

	/* Src data is packaged a dword (32-bit) at a time. */
	ndwords = ROUND_UP_TO(nbytes, 4) / 4;

	/*
	 * Ring has to be padded to a quad word.  But because the command
	 * starts with 7 bytes, pad only if there is an even number of ndwords
	 */
	pad = !(ndwords % 2);

	/* patch the command's dword-length field to cover the bitmap data */
	tmp = (XY_MONO_SRC_IMM_BLT_CMD & DW_LENGTH_MASK) + ndwords;
	br00 = (XY_MONO_SRC_IMM_BLT_CMD & ~DW_LENGTH_MASK) | tmp;
	br09 = dinfo->fb_start;
	br13 = (SRC_ROP_GXCOPY << ROP_SHIFT) | (pitch << PITCH_SHIFT);
	br18 = bg;
	br19 = fg;
	br22 = (x << WIDTH_SHIFT) | (y << HEIGHT_SHIFT);
	br23 = ((x + w) << WIDTH_SHIFT) | ((y + h) << HEIGHT_SHIFT);

	switch (bpp) {
	case 8:
		br13 |= COLOR_DEPTH_8;
		break;
	case 16:
		br13 |= COLOR_DEPTH_16;
		break;
	case 32:
		br13 |= COLOR_DEPTH_32;
		br00 |= WRITE_ALPHA | WRITE_RGB;
		break;
	}

	START_RING(8 + ndwords);
	OUT_RING(br00);
	OUT_RING(br13);
	OUT_RING(br22);
	OUT_RING(br23);
	OUT_RING(br09);
	OUT_RING(br18);
	OUT_RING(br19);

	/*
	 * Pack the glyph rows (iw bytes each) into little-endian dwords,
	 * zero-padding each row to a 16-pixel boundary.
	 */
	ix = iy = 0;
	iw = ROUND_UP_TO(w, 8) / 8;
	while (ndwords--) {
		dat = 0;
		for (j = 0; j < 2; ++j) {
			for (i = 0; i < 2; ++i) {
				if (ix != iw || i == 0)
					dat |= cdat[iy*iw + ix++] << (i+j*2)*8;
			}
			if (ix == iw && iy != (h-1)) {
				ix = 0;
				++iy;
			}
		}
		OUT_RING(dat);
	}
	if (pad)
		OUT_RING(MI_NOOP);
	ADVANCE_RING();

	return 1;
}

/* HW cursor functions.
 */
/*
 * One-time cursor setup.  Mobile/i9xx parts use the ARGB cursor (base
 * address points at dinfo->cursor.physical); older desktop parts use the
 * legacy CURSOR_CONTROL interface with a GTT page offset.
 */
void intelfbhw_cursor_init(struct intelfb_info *dinfo)
{
	u32 tmp;

#if VERBOSE > 0
	DBG_MSG("intelfbhw_cursor_init\n");
#endif

	if (dinfo->mobile || IS_I9XX(dinfo)) {
		if (!dinfo->cursor.physical)
			return;
		tmp = INREG(CURSOR_A_CONTROL);
		tmp &= ~(CURSOR_MODE_MASK | CURSOR_MOBILE_GAMMA_ENABLE |
			 CURSOR_MEM_TYPE_LOCAL |
			 (1 << CURSOR_PIPE_SELECT_SHIFT));
		tmp |= CURSOR_MODE_DISABLE;
		OUTREG(CURSOR_A_CONTROL, tmp);
		OUTREG(CURSOR_A_BASEADDR, dinfo->cursor.physical);
	} else {
		tmp = INREG(CURSOR_CONTROL);
		tmp &= ~(CURSOR_FORMAT_MASK | CURSOR_GAMMA_ENABLE |
			 CURSOR_ENABLE | CURSOR_STRIDE_MASK);
		/*
		 * NOTE(review): plain assignment discards the value that
		 * was just read and masked above, making those two lines
		 * dead; upstream uses "tmp |= CURSOR_FORMAT_3C".  Looks
		 * like a dropped '|' — confirm against upstream before
		 * changing.
		 */
		tmp = CURSOR_FORMAT_3C;
		OUTREG(CURSOR_CONTROL, tmp);
		OUTREG(CURSOR_A_BASEADDR, dinfo->cursor.offset << 12);
		tmp = (64 << CURSOR_SIZE_H_SHIFT) |
		      (64 << CURSOR_SIZE_V_SHIFT);
		OUTREG(CURSOR_SIZE, tmp);
	}
}

/* Turn the hardware cursor off (state kept in dinfo->cursor_on). */
void intelfbhw_cursor_hide(struct intelfb_info *dinfo)
{
	u32 tmp;

#if VERBOSE > 0
	DBG_MSG("intelfbhw_cursor_hide\n");
#endif

	dinfo->cursor_on = 0;
	if (dinfo->mobile || IS_I9XX(dinfo)) {
		if (!dinfo->cursor.physical)
			return;
		tmp = INREG(CURSOR_A_CONTROL);
		tmp &= ~CURSOR_MODE_MASK;
		tmp |= CURSOR_MODE_DISABLE;
		OUTREG(CURSOR_A_CONTROL, tmp);
		/* Flush changes */
		OUTREG(CURSOR_A_BASEADDR, dinfo->cursor.physical);
	} else {
		tmp = INREG(CURSOR_CONTROL);
		tmp &= ~CURSOR_ENABLE;
		OUTREG(CURSOR_CONTROL, tmp);
	}
}

/* Turn the hardware cursor on, unless it is administratively blanked. */
void intelfbhw_cursor_show(struct intelfb_info *dinfo)
{
	u32 tmp;

#if VERBOSE > 0
	DBG_MSG("intelfbhw_cursor_show\n");
#endif

	dinfo->cursor_on = 1;

	if (dinfo->cursor_blanked)
		return;

	if (dinfo->mobile || IS_I9XX(dinfo)) {
		if (!dinfo->cursor.physical)
			return;
		tmp = INREG(CURSOR_A_CONTROL);
		tmp &= ~CURSOR_MODE_MASK;
		tmp |= CURSOR_MODE_64_4C_AX;
		OUTREG(CURSOR_A_CONTROL, tmp);
		/* Flush changes */
		OUTREG(CURSOR_A_BASEADDR, dinfo->cursor.physical);
	} else {
		tmp = INREG(CURSOR_CONTROL);
		tmp |= CURSOR_ENABLE;
		OUTREG(CURSOR_CONTROL, tmp);
	}
}

void intelfbhw_cursor_setpos(struct intelfb_info *dinfo, int x, int y)
{
	u32 tmp;

#if VERBOSE > 0
	DBG_MSG("intelfbhw_cursor_setpos: (%d, %d)\n", x, y);
#endif

	/*
	 * Sets the position. The coordinates are assumed to already
	 * have any offset adjusted. Assume that the cursor is never
	 * completely off-screen, and that x, y are always >= 0.
	 */

	tmp = ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT) |
	      ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
	OUTREG(CURSOR_A_POSITION, tmp);

	/* i9xx requires a base-address write to latch the new position */
	if (IS_I9XX(dinfo))
		OUTREG(CURSOR_A_BASEADDR, dinfo->cursor.physical);
}

/* Program the 4-entry cursor palette (entries 0/3 = bg, 1/2 = fg). */
void intelfbhw_cursor_setcolor(struct intelfb_info *dinfo, u32 bg, u32 fg)
{
#if VERBOSE > 0
	DBG_MSG("intelfbhw_cursor_setcolor\n");
#endif

	OUTREG(CURSOR_A_PALETTE0, bg & CURSOR_PALETTE_MASK);
	OUTREG(CURSOR_A_PALETTE1, fg & CURSOR_PALETTE_MASK);
	OUTREG(CURSOR_A_PALETTE2, fg & CURSOR_PALETTE_MASK);
	OUTREG(CURSOR_A_PALETTE3, bg & CURSOR_PALETTE_MASK);
}

/*
 * Load a monochrome cursor image into the cursor memory.  Each 16-byte
 * row holds 8 bytes of AND (transparency) mask followed by 8 bytes of
 * XOR (image) data; partial trailing bytes are masked via t_mask/d_mask.
 */
void intelfbhw_cursor_load(struct intelfb_info *dinfo, int width, int height,
			   u8 *data)
{
	u8 __iomem *addr = (u8 __iomem *)dinfo->cursor.virtual;
	int i, j, w = width / 8;
	int mod = width % 8, t_mask, d_mask;

#if VERBOSE > 0
	DBG_MSG("intelfbhw_cursor_load\n");
#endif

	if (!dinfo->cursor.virtual)
		return;

	t_mask = 0xff >> mod;
	d_mask = ~(0xff >> mod);
	for (i = height; i--; ) {
		for (j = 0; j < w; j++) {
			writeb(0x00, addr + j);
			writeb(*(data++), addr + j+8);
		}
		if (mod) {
			writeb(t_mask, addr + j);
			writeb(*(data++) & d_mask, addr + j+8);
		}
		addr += 16;
	}
}

/* Clear the 64x64 cursor image to fully transparent. */
void intelfbhw_cursor_reset(struct intelfb_info *dinfo)
{
	u8 __iomem *addr = (u8 __iomem *)dinfo->cursor.virtual;
	int i, j;

#if VERBOSE > 0
	DBG_MSG("intelfbhw_cursor_reset\n");
#endif

	if (!dinfo->cursor.virtual)
		return;

	for (i = 64; i--; ) {
		for (j = 0; j < 8; j++) {
			writeb(0xff, addr + j+0);
			writeb(0x00, addr + j+8);
		}
		addr += 16;
	}
}

/*
 * Interrupt handler: services the pipe-A vsync/field event, performs any
 * deferred pan, and wakes vsync waiters.  Runs under dinfo->int_lock.
 */
static irqreturn_t intelfbhw_irq(int irq, void *dev_id)
{
	u16 tmp;
	struct intelfb_info *dinfo = dev_id;

	spin_lock(&dinfo->int_lock);

	tmp = INREG16(IIR);
	if (dinfo->info->var.vmode & FB_VMODE_INTERLACED)
		tmp &= PIPE_A_EVENT_INTERRUPT;
	else
		tmp &= VSYNC_PIPE_A_INTERRUPT; /* non-interlaced */

	if (tmp == 0) {
		spin_unlock(&dinfo->int_lock);
		return IRQ_RETVAL(0); /* not us */
	}

	/* clear status bits 0-15 ASAP and don't touch bits 16-31 */
	OUTREG(PIPEASTAT, INREG(PIPEASTAT));

	OUTREG16(IIR, tmp);
	if (dinfo->vsync.pan_display) {
		dinfo->vsync.pan_display = 0;
		OUTREG(DSPABASE, dinfo->vsync.pan_offset);
	}

	dinfo->vsync.count++;
	wake_up_interruptible(&dinfo->vsync.wait);

	spin_unlock(&dinfo->int_lock);

	return IRQ_RETVAL(1);
}

/*
 * Request the IRQ on first use (irq_flags bit 0 guards registration)
 * and unmask the event matching the current interlace mode.
 */
int intelfbhw_enable_irq(struct intelfb_info *dinfo)
{
	u16 tmp;
	if (!test_and_set_bit(0, &dinfo->irq_flags)) {
		if (request_irq(dinfo->pdev->irq, intelfbhw_irq, IRQF_SHARED,
				"intelfb", dinfo)) {
			clear_bit(0, &dinfo->irq_flags);
			return -EINVAL;
		}

		spin_lock_irq(&dinfo->int_lock);
		OUTREG16(HWSTAM, 0xfffe); /* i830 DRM uses ffff */
		OUTREG16(IMR, 0);
	} else
		spin_lock_irq(&dinfo->int_lock);

	if (dinfo->info->var.vmode & FB_VMODE_INTERLACED)
		tmp = PIPE_A_EVENT_INTERRUPT;
	else
		tmp = VSYNC_PIPE_A_INTERRUPT; /* non-interlaced */
	if (tmp != INREG16(IER)) {
		DBG_MSG("changing IER to 0x%X\n", tmp);
		OUTREG16(IER, tmp);
	}

	spin_unlock_irq(&dinfo->int_lock);
	return 0;
}

/* Mask all interrupts, flush any pending pan, and free the IRQ. */
void intelfbhw_disable_irq(struct intelfb_info *dinfo)
{
	if (test_and_clear_bit(0, &dinfo->irq_flags)) {
		if (dinfo->vsync.pan_display) {
			dinfo->vsync.pan_display = 0;
			OUTREG(DSPABASE, dinfo->vsync.pan_offset);
		}
		spin_lock_irq(&dinfo->int_lock);
		OUTREG16(HWSTAM, 0xffff);
		OUTREG16(IMR, 0xffff);
		OUTREG16(IER, 0x0);

		OUTREG16(IIR, INREG16(IIR)); /* clear IRQ requests */
		spin_unlock_irq(&dinfo->int_lock);

		free_irq(dinfo->pdev->irq, dinfo);
	}
}

/*
 * Sleep until the next vsync on the given pipe (only pipe 0 supported),
 * with a 100ms timeout.  Returns 0 on success, -ENODEV for other pipes,
 * -ETIMEDOUT on timeout, or a negative errno from the wait.
 */
int intelfbhw_wait_for_vsync(struct intelfb_info *dinfo, u32 pipe)
{
	struct intelfb_vsync *vsync;
	unsigned int count;
	int ret;

	switch (pipe) {
	case 0:
		vsync = &dinfo->vsync;
		break;
	default:
		return -ENODEV;
	}

	ret = intelfbhw_enable_irq(dinfo);
	if (ret)
		return ret;

	count = vsync->count;
	ret = wait_event_interruptible_timeout(vsync->wait,
					       count != vsync->count, HZ / 10);
	if (ret < 0)
		return ret;
	if (ret == 0) {
		DBG_MSG("wait_for_vsync timed out!\n");
		return -ETIMEDOUT;
	}

	return 0;
}
gpl-2.0
indodev/kernel-samsung-3.0
drivers/i2c/busses/i2c-versatile.c
4245
3578
/* * i2c-versatile.c * * Copyright (C) 2006 ARM Ltd. * written by Russell King, Deep Blue Solutions Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/io.h> #define I2C_CONTROL 0x00 #define I2C_CONTROLS 0x00 #define I2C_CONTROLC 0x04 #define SCL (1 << 0) #define SDA (1 << 1) struct i2c_versatile { struct i2c_adapter adap; struct i2c_algo_bit_data algo; void __iomem *base; }; static void i2c_versatile_setsda(void *data, int state) { struct i2c_versatile *i2c = data; writel(SDA, i2c->base + (state ? I2C_CONTROLS : I2C_CONTROLC)); } static void i2c_versatile_setscl(void *data, int state) { struct i2c_versatile *i2c = data; writel(SCL, i2c->base + (state ? 
I2C_CONTROLS : I2C_CONTROLC)); } static int i2c_versatile_getsda(void *data) { struct i2c_versatile *i2c = data; return !!(readl(i2c->base + I2C_CONTROL) & SDA); } static int i2c_versatile_getscl(void *data) { struct i2c_versatile *i2c = data; return !!(readl(i2c->base + I2C_CONTROL) & SCL); } static struct i2c_algo_bit_data i2c_versatile_algo = { .setsda = i2c_versatile_setsda, .setscl = i2c_versatile_setscl, .getsda = i2c_versatile_getsda, .getscl = i2c_versatile_getscl, .udelay = 30, .timeout = HZ, }; static int i2c_versatile_probe(struct platform_device *dev) { struct i2c_versatile *i2c; struct resource *r; int ret; r = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!r) { ret = -EINVAL; goto err_out; } if (!request_mem_region(r->start, resource_size(r), "versatile-i2c")) { ret = -EBUSY; goto err_out; } i2c = kzalloc(sizeof(struct i2c_versatile), GFP_KERNEL); if (!i2c) { ret = -ENOMEM; goto err_release; } i2c->base = ioremap(r->start, resource_size(r)); if (!i2c->base) { ret = -ENOMEM; goto err_free; } writel(SCL | SDA, i2c->base + I2C_CONTROLS); i2c->adap.owner = THIS_MODULE; strlcpy(i2c->adap.name, "Versatile I2C adapter", sizeof(i2c->adap.name)); i2c->adap.algo_data = &i2c->algo; i2c->adap.dev.parent = &dev->dev; i2c->algo = i2c_versatile_algo; i2c->algo.data = i2c; if (dev->id >= 0) { /* static bus numbering */ i2c->adap.nr = dev->id; ret = i2c_bit_add_numbered_bus(&i2c->adap); } else /* dynamic bus numbering */ ret = i2c_bit_add_bus(&i2c->adap); if (ret >= 0) { platform_set_drvdata(dev, i2c); return 0; } iounmap(i2c->base); err_free: kfree(i2c); err_release: release_mem_region(r->start, resource_size(r)); err_out: return ret; } static int i2c_versatile_remove(struct platform_device *dev) { struct i2c_versatile *i2c = platform_get_drvdata(dev); platform_set_drvdata(dev, NULL); i2c_del_adapter(&i2c->adap); return 0; } static struct platform_driver i2c_versatile_driver = { .probe = i2c_versatile_probe, .remove = i2c_versatile_remove, .driver = { .name = 
"versatile-i2c", .owner = THIS_MODULE, }, }; static int __init i2c_versatile_init(void) { return platform_driver_register(&i2c_versatile_driver); } static void __exit i2c_versatile_exit(void) { platform_driver_unregister(&i2c_versatile_driver); } subsys_initcall(i2c_versatile_init); module_exit(i2c_versatile_exit); MODULE_DESCRIPTION("ARM Versatile I2C bus driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:versatile-i2c");
gpl-2.0
kogone/android_kernel_oneplus_msm8974
drivers/net/wireless/ath/ath6kl/main.c
4757
32521
/* * Copyright (c) 2004-2011 Atheros Communications Inc. * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "core.h" #include "hif-ops.h" #include "cfg80211.h" #include "target.h" #include "debug.h" struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr) { struct ath6kl *ar = vif->ar; struct ath6kl_sta *conn = NULL; u8 i, max_conn; max_conn = (vif->nw_type == AP_NETWORK) ? 
AP_MAX_NUM_STA : 0; for (i = 0; i < max_conn; i++) { if (memcmp(node_addr, ar->sta_list[i].mac, ETH_ALEN) == 0) { conn = &ar->sta_list[i]; break; } } return conn; } struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid) { struct ath6kl_sta *conn = NULL; u8 ctr; for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) { if (ar->sta_list[ctr].aid == aid) { conn = &ar->sta_list[ctr]; break; } } return conn; } static void ath6kl_add_new_sta(struct ath6kl_vif *vif, u8 *mac, u16 aid, u8 *wpaie, size_t ielen, u8 keymgmt, u8 ucipher, u8 auth, u8 apsd_info) { struct ath6kl *ar = vif->ar; struct ath6kl_sta *sta; u8 free_slot; free_slot = aid - 1; sta = &ar->sta_list[free_slot]; memcpy(sta->mac, mac, ETH_ALEN); if (ielen <= ATH6KL_MAX_IE) memcpy(sta->wpa_ie, wpaie, ielen); sta->aid = aid; sta->keymgmt = keymgmt; sta->ucipher = ucipher; sta->auth = auth; sta->apsd_info = apsd_info; ar->sta_list_index = ar->sta_list_index | (1 << free_slot); ar->ap_stats.sta[free_slot].aid = cpu_to_le32(aid); aggr_conn_init(vif, vif->aggr_cntxt, sta->aggr_conn); } static void ath6kl_sta_cleanup(struct ath6kl *ar, u8 i) { struct ath6kl_sta *sta = &ar->sta_list[i]; struct ath6kl_mgmt_buff *entry, *tmp; /* empty the queued pkts in the PS queue if any */ spin_lock_bh(&sta->psq_lock); skb_queue_purge(&sta->psq); skb_queue_purge(&sta->apsdq); if (sta->mgmt_psq_len != 0) { list_for_each_entry_safe(entry, tmp, &sta->mgmt_psq, list) { kfree(entry); } INIT_LIST_HEAD(&sta->mgmt_psq); sta->mgmt_psq_len = 0; } spin_unlock_bh(&sta->psq_lock); memset(&ar->ap_stats.sta[sta->aid - 1], 0, sizeof(struct wmi_per_sta_stat)); memset(sta->mac, 0, ETH_ALEN); memset(sta->wpa_ie, 0, ATH6KL_MAX_IE); sta->aid = 0; sta->sta_flags = 0; ar->sta_list_index = ar->sta_list_index & ~(1 << i); aggr_reset_state(sta->aggr_conn); } static u8 ath6kl_remove_sta(struct ath6kl *ar, u8 *mac, u16 reason) { u8 i, removed = 0; if (is_zero_ether_addr(mac)) return removed; if (is_broadcast_ether_addr(mac)) { ath6kl_dbg(ATH6KL_DBG_TRC, 
"deleting all station\n"); for (i = 0; i < AP_MAX_NUM_STA; i++) { if (!is_zero_ether_addr(ar->sta_list[i].mac)) { ath6kl_sta_cleanup(ar, i); removed = 1; } } } else { for (i = 0; i < AP_MAX_NUM_STA; i++) { if (memcmp(ar->sta_list[i].mac, mac, ETH_ALEN) == 0) { ath6kl_dbg(ATH6KL_DBG_TRC, "deleting station %pM aid=%d reason=%d\n", mac, ar->sta_list[i].aid, reason); ath6kl_sta_cleanup(ar, i); removed = 1; break; } } } return removed; } enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac) { struct ath6kl *ar = devt; return ar->ac2ep_map[ac]; } struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar) { struct ath6kl_cookie *cookie; cookie = ar->cookie_list; if (cookie != NULL) { ar->cookie_list = cookie->arc_list_next; ar->cookie_count--; } return cookie; } void ath6kl_cookie_init(struct ath6kl *ar) { u32 i; ar->cookie_list = NULL; ar->cookie_count = 0; memset(ar->cookie_mem, 0, sizeof(ar->cookie_mem)); for (i = 0; i < MAX_COOKIE_NUM; i++) ath6kl_free_cookie(ar, &ar->cookie_mem[i]); } void ath6kl_cookie_cleanup(struct ath6kl *ar) { ar->cookie_list = NULL; ar->cookie_count = 0; } void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie) { /* Insert first */ if (!ar || !cookie) return; cookie->arc_list_next = ar->cookie_list; ar->cookie_list = cookie; ar->cookie_count++; } /* * Read from the hardware through its diagnostic window. No cooperation * from the firmware is required for this. */ int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value) { int ret; ret = ath6kl_hif_diag_read32(ar, address, value); if (ret) { ath6kl_warn("failed to read32 through diagnose window: %d\n", ret); return ret; } return 0; } /* * Write to the ATH6KL through its diagnostic window. No cooperation from * the Target is required for this. 
*/ int ath6kl_diag_write32(struct ath6kl *ar, u32 address, __le32 value) { int ret; ret = ath6kl_hif_diag_write32(ar, address, value); if (ret) { ath6kl_err("failed to write 0x%x during diagnose window to 0x%d\n", address, value); return ret; } return 0; } int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length) { u32 count, *buf = data; int ret; if (WARN_ON(length % 4)) return -EINVAL; for (count = 0; count < length / 4; count++, address += 4) { ret = ath6kl_diag_read32(ar, address, &buf[count]); if (ret) return ret; } return 0; } int ath6kl_diag_write(struct ath6kl *ar, u32 address, void *data, u32 length) { u32 count; __le32 *buf = data; int ret; if (WARN_ON(length % 4)) return -EINVAL; for (count = 0; count < length / 4; count++, address += 4) { ret = ath6kl_diag_write32(ar, address, buf[count]); if (ret) return ret; } return 0; } int ath6kl_read_fwlogs(struct ath6kl *ar) { struct ath6kl_dbglog_hdr debug_hdr; struct ath6kl_dbglog_buf debug_buf; u32 address, length, dropped, firstbuf, debug_hdr_addr; int ret, loop; u8 *buf; buf = kmalloc(ATH6KL_FWLOG_PAYLOAD_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; address = TARG_VTOP(ar->target_type, ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_dbglog_hdr))); ret = ath6kl_diag_read32(ar, address, &debug_hdr_addr); if (ret) goto out; /* Get the contents of the ring buffer */ if (debug_hdr_addr == 0) { ath6kl_warn("Invalid address for debug_hdr_addr\n"); ret = -EINVAL; goto out; } address = TARG_VTOP(ar->target_type, debug_hdr_addr); ath6kl_diag_read(ar, address, &debug_hdr, sizeof(debug_hdr)); address = TARG_VTOP(ar->target_type, le32_to_cpu(debug_hdr.dbuf_addr)); firstbuf = address; dropped = le32_to_cpu(debug_hdr.dropped); ath6kl_diag_read(ar, address, &debug_buf, sizeof(debug_buf)); loop = 100; do { address = TARG_VTOP(ar->target_type, le32_to_cpu(debug_buf.buffer_addr)); length = le32_to_cpu(debug_buf.length); if (length != 0 && (le32_to_cpu(debug_buf.length) <= le32_to_cpu(debug_buf.bufsize))) { length = 
ALIGN(length, 4); ret = ath6kl_diag_read(ar, address, buf, length); if (ret) goto out; ath6kl_debug_fwlog_event(ar, buf, length); } address = TARG_VTOP(ar->target_type, le32_to_cpu(debug_buf.next)); ath6kl_diag_read(ar, address, &debug_buf, sizeof(debug_buf)); if (ret) goto out; loop--; if (WARN_ON(loop == 0)) { ret = -ETIMEDOUT; goto out; } } while (address != firstbuf); out: kfree(buf); return ret; } /* FIXME: move to a better place, target.h? */ #define AR6003_RESET_CONTROL_ADDRESS 0x00004000 #define AR6004_RESET_CONTROL_ADDRESS 0x00004000 void ath6kl_reset_device(struct ath6kl *ar, u32 target_type, bool wait_fot_compltn, bool cold_reset) { int status = 0; u32 address; __le32 data; if (target_type != TARGET_TYPE_AR6003 && target_type != TARGET_TYPE_AR6004) return; data = cold_reset ? cpu_to_le32(RESET_CONTROL_COLD_RST) : cpu_to_le32(RESET_CONTROL_MBOX_RST); switch (target_type) { case TARGET_TYPE_AR6003: address = AR6003_RESET_CONTROL_ADDRESS; break; case TARGET_TYPE_AR6004: address = AR6004_RESET_CONTROL_ADDRESS; break; } status = ath6kl_diag_write32(ar, address, data); if (status) ath6kl_err("failed to reset target\n"); } static void ath6kl_install_static_wep_keys(struct ath6kl_vif *vif) { u8 index; u8 keyusage; for (index = 0; index <= WMI_MAX_KEY_INDEX; index++) { if (vif->wep_key_list[index].key_len) { keyusage = GROUP_USAGE; if (index == vif->def_txkey_index) keyusage |= TX_USAGE; ath6kl_wmi_addkey_cmd(vif->ar->wmi, vif->fw_vif_idx, index, WEP_CRYPT, keyusage, vif->wep_key_list[index].key_len, NULL, 0, vif->wep_key_list[index].key, KEY_OP_INIT_VAL, NULL, NO_SYNC_WMIFLAG); } } } void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel) { struct ath6kl *ar = vif->ar; struct ath6kl_req_key *ik; int res; u8 key_rsc[ATH6KL_KEY_SEQ_LEN]; ik = &ar->ap_mode_bkey; ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "AP mode started on %u MHz\n", channel); switch (vif->auth_mode) { case NONE_AUTH: if (vif->prwise_crypto == WEP_CRYPT) ath6kl_install_static_wep_keys(vif); if 
(!ik->valid || ik->key_type != WAPI_CRYPT) break; /* for WAPI, we need to set the delayed group key, continue: */ case WPA_PSK_AUTH: case WPA2_PSK_AUTH: case (WPA_PSK_AUTH | WPA2_PSK_AUTH): if (!ik->valid) break; ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delayed addkey for " "the initial group key for AP mode\n"); memset(key_rsc, 0, sizeof(key_rsc)); res = ath6kl_wmi_addkey_cmd( ar->wmi, vif->fw_vif_idx, ik->key_index, ik->key_type, GROUP_USAGE, ik->key_len, key_rsc, ATH6KL_KEY_SEQ_LEN, ik->key, KEY_OP_INIT_VAL, NULL, SYNC_BOTH_WMIFLAG); if (res) { ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delayed " "addkey failed: %d\n", res); } break; } ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, NONE_BSS_FILTER, 0); set_bit(CONNECTED, &vif->flags); netif_carrier_on(vif->ndev); } void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr, u8 keymgmt, u8 ucipher, u8 auth, u8 assoc_req_len, u8 *assoc_info, u8 apsd_info) { u8 *ies = NULL, *wpa_ie = NULL, *pos; size_t ies_len = 0; struct station_info sinfo; ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n", mac_addr, aid); if (assoc_req_len > sizeof(struct ieee80211_hdr_3addr)) { struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) assoc_info; if (ieee80211_is_assoc_req(mgmt->frame_control) && assoc_req_len >= sizeof(struct ieee80211_hdr_3addr) + sizeof(mgmt->u.assoc_req)) { ies = mgmt->u.assoc_req.variable; ies_len = assoc_info + assoc_req_len - ies; } else if (ieee80211_is_reassoc_req(mgmt->frame_control) && assoc_req_len >= sizeof(struct ieee80211_hdr_3addr) + sizeof(mgmt->u.reassoc_req)) { ies = mgmt->u.reassoc_req.variable; ies_len = assoc_info + assoc_req_len - ies; } } pos = ies; while (pos && pos + 1 < ies + ies_len) { if (pos + 2 + pos[1] > ies + ies_len) break; if (pos[0] == WLAN_EID_RSN) wpa_ie = pos; /* RSN IE */ else if (pos[0] == WLAN_EID_VENDOR_SPECIFIC && pos[1] >= 4 && pos[2] == 0x00 && pos[3] == 0x50 && pos[4] == 0xf2) { if (pos[5] == 0x01) wpa_ie = pos; /* WPA IE */ else if (pos[5] == 0x04) { wpa_ie 
= pos; /* WPS IE */ break; /* overrides WPA/RSN IE */ } } else if (pos[0] == 0x44 && wpa_ie == NULL) { /* * Note: WAPI Parameter Set IE re-uses Element ID that * was officially allocated for BSS AC Access Delay. As * such, we need to be a bit more careful on when * parsing the frame. However, BSS AC Access Delay * element is not supposed to be included in * (Re)Association Request frames, so this should not * cause problems. */ wpa_ie = pos; /* WAPI IE */ break; } pos += 2 + pos[1]; } ath6kl_add_new_sta(vif, mac_addr, aid, wpa_ie, wpa_ie ? 2 + wpa_ie[1] : 0, keymgmt, ucipher, auth, apsd_info); /* send event to application */ memset(&sinfo, 0, sizeof(sinfo)); /* TODO: sinfo.generation */ sinfo.assoc_req_ies = ies; sinfo.assoc_req_ies_len = ies_len; sinfo.filled |= STATION_INFO_ASSOC_REQ_IES; cfg80211_new_sta(vif->ndev, mac_addr, &sinfo, GFP_KERNEL); netif_wake_queue(vif->ndev); } void disconnect_timer_handler(unsigned long ptr) { struct net_device *dev = (struct net_device *)ptr; struct ath6kl_vif *vif = netdev_priv(dev); ath6kl_init_profile_info(vif); ath6kl_disconnect(vif); } void ath6kl_disconnect(struct ath6kl_vif *vif) { if (test_bit(CONNECTED, &vif->flags) || test_bit(CONNECT_PEND, &vif->flags)) { ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx); /* * Disconnect command is issued, clear the connect pending * flag. The connected flag will be cleared in * disconnect event notification. 
*/ clear_bit(CONNECT_PEND, &vif->flags); } } /* WMI Event handlers */ void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver) { struct ath6kl *ar = devt; memcpy(ar->mac_addr, datap, ETH_ALEN); ath6kl_dbg(ATH6KL_DBG_TRC, "%s: mac addr = %pM\n", __func__, ar->mac_addr); ar->version.wlan_ver = sw_ver; ar->version.abi_ver = abi_ver; snprintf(ar->wiphy->fw_version, sizeof(ar->wiphy->fw_version), "%u.%u.%u.%u", (ar->version.wlan_ver & 0xf0000000) >> 28, (ar->version.wlan_ver & 0x0f000000) >> 24, (ar->version.wlan_ver & 0x00ff0000) >> 16, (ar->version.wlan_ver & 0x0000ffff)); /* indicate to the waiting thread that the ready event was received */ set_bit(WMI_READY, &ar->flag); wake_up(&ar->event_wq); } void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status) { struct ath6kl *ar = vif->ar; bool aborted = false; if (status != WMI_SCAN_STATUS_SUCCESS) aborted = true; ath6kl_cfg80211_scan_complete_event(vif, aborted); if (!ar->usr_bss_filter) { clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags); ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, NONE_BSS_FILTER, 0); } ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "scan complete: %d\n", status); } void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid, u16 listen_int, u16 beacon_int, enum network_type net_type, u8 beacon_ie_len, u8 assoc_req_len, u8 assoc_resp_len, u8 *assoc_info) { struct ath6kl *ar = vif->ar; ath6kl_cfg80211_connect_event(vif, channel, bssid, listen_int, beacon_int, net_type, beacon_ie_len, assoc_req_len, assoc_resp_len, assoc_info); memcpy(vif->bssid, bssid, sizeof(vif->bssid)); vif->bss_ch = channel; if ((vif->nw_type == INFRA_NETWORK)) ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx, vif->listen_intvl_t, 0); netif_wake_queue(vif->ndev); /* Update connect & link status atomically */ spin_lock_bh(&vif->if_lock); set_bit(CONNECTED, &vif->flags); clear_bit(CONNECT_PEND, &vif->flags); netif_carrier_on(vif->ndev); spin_unlock_bh(&vif->if_lock); 
aggr_reset_state(vif->aggr_cntxt->aggr_conn); vif->reconnect_flag = 0; if ((vif->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) { memset(ar->node_map, 0, sizeof(ar->node_map)); ar->node_num = 0; ar->next_ep_id = ENDPOINT_2; } if (!ar->usr_bss_filter) { set_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags); ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, CURRENT_BSS_FILTER, 0); } } void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast) { struct ath6kl_sta *sta; struct ath6kl *ar = vif->ar; u8 tsc[6]; /* * For AP case, keyid will have aid of STA which sent pkt with * MIC error. Use this aid to get MAC & send it to hostapd. */ if (vif->nw_type == AP_NETWORK) { sta = ath6kl_find_sta_by_aid(ar, (keyid >> 2)); if (!sta) return; ath6kl_dbg(ATH6KL_DBG_TRC, "ap tkip mic error received from aid=%d\n", keyid); memset(tsc, 0, sizeof(tsc)); /* FIX: get correct TSC */ cfg80211_michael_mic_failure(vif->ndev, sta->mac, NL80211_KEYTYPE_PAIRWISE, keyid, tsc, GFP_KERNEL); } else ath6kl_cfg80211_tkip_micerr_event(vif, keyid, ismcast); } static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len) { struct wmi_target_stats *tgt_stats = (struct wmi_target_stats *) ptr; struct ath6kl *ar = vif->ar; struct target_stats *stats = &vif->target_stats; struct tkip_ccmp_stats *ccmp_stats; u8 ac; if (len < sizeof(*tgt_stats)) return; ath6kl_dbg(ATH6KL_DBG_TRC, "updating target stats\n"); stats->tx_pkt += le32_to_cpu(tgt_stats->stats.tx.pkt); stats->tx_byte += le32_to_cpu(tgt_stats->stats.tx.byte); stats->tx_ucast_pkt += le32_to_cpu(tgt_stats->stats.tx.ucast_pkt); stats->tx_ucast_byte += le32_to_cpu(tgt_stats->stats.tx.ucast_byte); stats->tx_mcast_pkt += le32_to_cpu(tgt_stats->stats.tx.mcast_pkt); stats->tx_mcast_byte += le32_to_cpu(tgt_stats->stats.tx.mcast_byte); stats->tx_bcast_pkt += le32_to_cpu(tgt_stats->stats.tx.bcast_pkt); stats->tx_bcast_byte += le32_to_cpu(tgt_stats->stats.tx.bcast_byte); stats->tx_rts_success_cnt += 
le32_to_cpu(tgt_stats->stats.tx.rts_success_cnt); for (ac = 0; ac < WMM_NUM_AC; ac++) stats->tx_pkt_per_ac[ac] += le32_to_cpu(tgt_stats->stats.tx.pkt_per_ac[ac]); stats->tx_err += le32_to_cpu(tgt_stats->stats.tx.err); stats->tx_fail_cnt += le32_to_cpu(tgt_stats->stats.tx.fail_cnt); stats->tx_retry_cnt += le32_to_cpu(tgt_stats->stats.tx.retry_cnt); stats->tx_mult_retry_cnt += le32_to_cpu(tgt_stats->stats.tx.mult_retry_cnt); stats->tx_rts_fail_cnt += le32_to_cpu(tgt_stats->stats.tx.rts_fail_cnt); stats->tx_ucast_rate = ath6kl_wmi_get_rate(a_sle32_to_cpu(tgt_stats->stats.tx.ucast_rate)); stats->rx_pkt += le32_to_cpu(tgt_stats->stats.rx.pkt); stats->rx_byte += le32_to_cpu(tgt_stats->stats.rx.byte); stats->rx_ucast_pkt += le32_to_cpu(tgt_stats->stats.rx.ucast_pkt); stats->rx_ucast_byte += le32_to_cpu(tgt_stats->stats.rx.ucast_byte); stats->rx_mcast_pkt += le32_to_cpu(tgt_stats->stats.rx.mcast_pkt); stats->rx_mcast_byte += le32_to_cpu(tgt_stats->stats.rx.mcast_byte); stats->rx_bcast_pkt += le32_to_cpu(tgt_stats->stats.rx.bcast_pkt); stats->rx_bcast_byte += le32_to_cpu(tgt_stats->stats.rx.bcast_byte); stats->rx_frgment_pkt += le32_to_cpu(tgt_stats->stats.rx.frgment_pkt); stats->rx_err += le32_to_cpu(tgt_stats->stats.rx.err); stats->rx_crc_err += le32_to_cpu(tgt_stats->stats.rx.crc_err); stats->rx_key_cache_miss += le32_to_cpu(tgt_stats->stats.rx.key_cache_miss); stats->rx_decrypt_err += le32_to_cpu(tgt_stats->stats.rx.decrypt_err); stats->rx_dupl_frame += le32_to_cpu(tgt_stats->stats.rx.dupl_frame); stats->rx_ucast_rate = ath6kl_wmi_get_rate(a_sle32_to_cpu(tgt_stats->stats.rx.ucast_rate)); ccmp_stats = &tgt_stats->stats.tkip_ccmp_stats; stats->tkip_local_mic_fail += le32_to_cpu(ccmp_stats->tkip_local_mic_fail); stats->tkip_cnter_measures_invoked += le32_to_cpu(ccmp_stats->tkip_cnter_measures_invoked); stats->tkip_fmt_err += le32_to_cpu(ccmp_stats->tkip_fmt_err); stats->ccmp_fmt_err += le32_to_cpu(ccmp_stats->ccmp_fmt_err); stats->ccmp_replays += 
le32_to_cpu(ccmp_stats->ccmp_replays); stats->pwr_save_fail_cnt += le32_to_cpu(tgt_stats->pm_stats.pwr_save_failure_cnt); stats->noise_floor_calib = a_sle32_to_cpu(tgt_stats->noise_floor_calib); stats->cs_bmiss_cnt += le32_to_cpu(tgt_stats->cserv_stats.cs_bmiss_cnt); stats->cs_low_rssi_cnt += le32_to_cpu(tgt_stats->cserv_stats.cs_low_rssi_cnt); stats->cs_connect_cnt += le16_to_cpu(tgt_stats->cserv_stats.cs_connect_cnt); stats->cs_discon_cnt += le16_to_cpu(tgt_stats->cserv_stats.cs_discon_cnt); stats->cs_ave_beacon_rssi = a_sle16_to_cpu(tgt_stats->cserv_stats.cs_ave_beacon_rssi); stats->cs_last_roam_msec = tgt_stats->cserv_stats.cs_last_roam_msec; stats->cs_snr = tgt_stats->cserv_stats.cs_snr; stats->cs_rssi = a_sle16_to_cpu(tgt_stats->cserv_stats.cs_rssi); stats->lq_val = le32_to_cpu(tgt_stats->lq_val); stats->wow_pkt_dropped += le32_to_cpu(tgt_stats->wow_stats.wow_pkt_dropped); stats->wow_host_pkt_wakeups += tgt_stats->wow_stats.wow_host_pkt_wakeups; stats->wow_host_evt_wakeups += tgt_stats->wow_stats.wow_host_evt_wakeups; stats->wow_evt_discarded += le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded); if (test_bit(STATS_UPDATE_PEND, &vif->flags)) { clear_bit(STATS_UPDATE_PEND, &vif->flags); wake_up(&ar->event_wq); } } static void ath6kl_add_le32(__le32 *var, __le32 val) { *var = cpu_to_le32(le32_to_cpu(*var) + le32_to_cpu(val)); } void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len) { struct wmi_ap_mode_stat *p = (struct wmi_ap_mode_stat *) ptr; struct ath6kl *ar = vif->ar; struct wmi_ap_mode_stat *ap = &ar->ap_stats; struct wmi_per_sta_stat *st_ap, *st_p; u8 ac; if (vif->nw_type == AP_NETWORK) { if (len < sizeof(*p)) return; for (ac = 0; ac < AP_MAX_NUM_STA; ac++) { st_ap = &ap->sta[ac]; st_p = &p->sta[ac]; ath6kl_add_le32(&st_ap->tx_bytes, st_p->tx_bytes); ath6kl_add_le32(&st_ap->tx_pkts, st_p->tx_pkts); ath6kl_add_le32(&st_ap->tx_error, st_p->tx_error); ath6kl_add_le32(&st_ap->tx_discard, st_p->tx_discard); ath6kl_add_le32(&st_ap->rx_bytes, 
st_p->rx_bytes); ath6kl_add_le32(&st_ap->rx_pkts, st_p->rx_pkts); ath6kl_add_le32(&st_ap->rx_error, st_p->rx_error); ath6kl_add_le32(&st_ap->rx_discard, st_p->rx_discard); } } else { ath6kl_update_target_stats(vif, ptr, len); } } void ath6kl_wakeup_event(void *dev) { struct ath6kl *ar = (struct ath6kl *) dev; wake_up(&ar->event_wq); } void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr) { struct ath6kl *ar = (struct ath6kl *) devt; ar->tx_pwr = tx_pwr; wake_up(&ar->event_wq); } void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid) { struct ath6kl_sta *conn; struct sk_buff *skb; bool psq_empty = false; struct ath6kl *ar = vif->ar; struct ath6kl_mgmt_buff *mgmt_buf; conn = ath6kl_find_sta_by_aid(ar, aid); if (!conn) return; /* * Send out a packet queued on ps queue. When the ps queue * becomes empty update the PVB for this station. */ spin_lock_bh(&conn->psq_lock); psq_empty = skb_queue_empty(&conn->psq) && (conn->mgmt_psq_len == 0); spin_unlock_bh(&conn->psq_lock); if (psq_empty) /* TODO: Send out a NULL data frame */ return; spin_lock_bh(&conn->psq_lock); if (conn->mgmt_psq_len > 0) { mgmt_buf = list_first_entry(&conn->mgmt_psq, struct ath6kl_mgmt_buff, list); list_del(&mgmt_buf->list); conn->mgmt_psq_len--; spin_unlock_bh(&conn->psq_lock); conn->sta_flags |= STA_PS_POLLED; ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, mgmt_buf->id, mgmt_buf->freq, mgmt_buf->wait, mgmt_buf->buf, mgmt_buf->len, mgmt_buf->no_cck); conn->sta_flags &= ~STA_PS_POLLED; kfree(mgmt_buf); } else { skb = skb_dequeue(&conn->psq); spin_unlock_bh(&conn->psq_lock); conn->sta_flags |= STA_PS_POLLED; ath6kl_data_tx(skb, vif->ndev); conn->sta_flags &= ~STA_PS_POLLED; } spin_lock_bh(&conn->psq_lock); psq_empty = skb_queue_empty(&conn->psq) && (conn->mgmt_psq_len == 0); spin_unlock_bh(&conn->psq_lock); if (psq_empty) ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, conn->aid, 0); } void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif) { bool mcastq_empty = false; struct sk_buff *skb; struct ath6kl 
*ar = vif->ar; /* * If there are no associated STAs, ignore the DTIM expiry event. * There can be potential race conditions where the last associated * STA may disconnect & before the host could clear the 'Indicate * DTIM' request to the firmware, the firmware would have just * indicated a DTIM expiry event. The race is between 'clear DTIM * expiry cmd' going from the host to the firmware & the DTIM * expiry event happening from the firmware to the host. */ if (!ar->sta_list_index) return; spin_lock_bh(&ar->mcastpsq_lock); mcastq_empty = skb_queue_empty(&ar->mcastpsq); spin_unlock_bh(&ar->mcastpsq_lock); if (mcastq_empty) return; /* set the STA flag to dtim_expired for the frame to go out */ set_bit(DTIM_EXPIRED, &vif->flags); spin_lock_bh(&ar->mcastpsq_lock); while ((skb = skb_dequeue(&ar->mcastpsq)) != NULL) { spin_unlock_bh(&ar->mcastpsq_lock); ath6kl_data_tx(skb, vif->ndev); spin_lock_bh(&ar->mcastpsq_lock); } spin_unlock_bh(&ar->mcastpsq_lock); clear_bit(DTIM_EXPIRED, &vif->flags); /* clear the LSB of the BitMapCtl field of the TIM IE */ ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, MCAST_AID, 0); } void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid, u8 assoc_resp_len, u8 *assoc_info, u16 prot_reason_status) { struct ath6kl *ar = vif->ar; if (vif->nw_type == AP_NETWORK) { if (!ath6kl_remove_sta(ar, bssid, prot_reason_status)) return; /* if no more associated STAs, empty the mcast PS q */ if (ar->sta_list_index == 0) { spin_lock_bh(&ar->mcastpsq_lock); skb_queue_purge(&ar->mcastpsq); spin_unlock_bh(&ar->mcastpsq_lock); /* clear the LSB of the TIM IE's BitMapCtl field */ if (test_bit(WMI_READY, &ar->flag)) ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, MCAST_AID, 0); } if (!is_broadcast_ether_addr(bssid)) { /* send event to application */ cfg80211_del_sta(vif->ndev, bssid, GFP_KERNEL); } if (memcmp(vif->ndev->dev_addr, bssid, ETH_ALEN) == 0) { memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list)); clear_bit(CONNECTED, &vif->flags); 
} return; } ath6kl_cfg80211_disconnect_event(vif, reason, bssid, assoc_resp_len, assoc_info, prot_reason_status); aggr_reset_state(vif->aggr_cntxt->aggr_conn); del_timer(&vif->disconnect_timer); ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "disconnect reason is %d\n", reason); /* * If the event is due to disconnect cmd from the host, only they * the target would stop trying to connect. Under any other * condition, target would keep trying to connect. */ if (reason == DISCONNECT_CMD) { if (!ar->usr_bss_filter && test_bit(WMI_READY, &ar->flag)) ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, NONE_BSS_FILTER, 0); } else { set_bit(CONNECT_PEND, &vif->flags); if (((reason == ASSOC_FAILED) && (prot_reason_status == 0x11)) || ((reason == ASSOC_FAILED) && (prot_reason_status == 0x0) && (vif->reconnect_flag == 1))) { set_bit(CONNECTED, &vif->flags); return; } } /* update connect & link status atomically */ spin_lock_bh(&vif->if_lock); clear_bit(CONNECTED, &vif->flags); netif_carrier_off(vif->ndev); spin_unlock_bh(&vif->if_lock); if ((reason != CSERV_DISCONNECT) || (vif->reconnect_flag != 1)) vif->reconnect_flag = 0; if (reason != CSERV_DISCONNECT) ar->user_key_ctrl = 0; netif_stop_queue(vif->ndev); memset(vif->bssid, 0, sizeof(vif->bssid)); vif->bss_ch = 0; ath6kl_tx_data_cleanup(ar); } struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar) { struct ath6kl_vif *vif; spin_lock_bh(&ar->list_lock); if (list_empty(&ar->vif_list)) { spin_unlock_bh(&ar->list_lock); return NULL; } vif = list_first_entry(&ar->vif_list, struct ath6kl_vif, list); spin_unlock_bh(&ar->list_lock); return vif; } static int ath6kl_open(struct net_device *dev) { struct ath6kl_vif *vif = netdev_priv(dev); set_bit(WLAN_ENABLED, &vif->flags); if (test_bit(CONNECTED, &vif->flags)) { netif_carrier_on(dev); netif_wake_queue(dev); } else netif_carrier_off(dev); return 0; } static int ath6kl_close(struct net_device *dev) { struct ath6kl_vif *vif = netdev_priv(dev); netif_stop_queue(dev); ath6kl_cfg80211_stop(vif); 
clear_bit(WLAN_ENABLED, &vif->flags); return 0; } static struct net_device_stats *ath6kl_get_stats(struct net_device *dev) { struct ath6kl_vif *vif = netdev_priv(dev); return &vif->net_stats; } static int ath6kl_set_features(struct net_device *dev, netdev_features_t features) { struct ath6kl_vif *vif = netdev_priv(dev); struct ath6kl *ar = vif->ar; int err = 0; if ((features & NETIF_F_RXCSUM) && (ar->rx_meta_ver != WMI_META_VERSION_2)) { ar->rx_meta_ver = WMI_META_VERSION_2; err = ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi, vif->fw_vif_idx, ar->rx_meta_ver, 0, 0); if (err) { dev->features = features & ~NETIF_F_RXCSUM; return err; } } else if (!(features & NETIF_F_RXCSUM) && (ar->rx_meta_ver == WMI_META_VERSION_2)) { ar->rx_meta_ver = 0; err = ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi, vif->fw_vif_idx, ar->rx_meta_ver, 0, 0); if (err) { dev->features = features | NETIF_F_RXCSUM; return err; } } return err; } static void ath6kl_set_multicast_list(struct net_device *ndev) { struct ath6kl_vif *vif = netdev_priv(ndev); bool mc_all_on = false, mc_all_off = false; int mc_count = netdev_mc_count(ndev); struct netdev_hw_addr *ha; bool found; struct ath6kl_mc_filter *mc_filter, *tmp; struct list_head mc_filter_new; int ret; if (!test_bit(WMI_READY, &vif->ar->flag) || !test_bit(WLAN_ENABLED, &vif->flags)) return; mc_all_on = !!(ndev->flags & IFF_PROMISC) || !!(ndev->flags & IFF_ALLMULTI) || !!(mc_count > ATH6K_MAX_MC_FILTERS_PER_LIST); mc_all_off = !(ndev->flags & IFF_MULTICAST) || mc_count == 0; if (mc_all_on || mc_all_off) { /* Enable/disable all multicast */ ath6kl_dbg(ATH6KL_DBG_TRC, "%s multicast filter\n", mc_all_on ? "enabling" : "disabling"); ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi, vif->fw_vif_idx, mc_all_on); if (ret) ath6kl_warn("Failed to %s multicast receive\n", mc_all_on ? 
"enable" : "disable"); return; } list_for_each_entry_safe(mc_filter, tmp, &vif->mc_filter, list) { found = false; netdev_for_each_mc_addr(ha, ndev) { if (memcmp(ha->addr, mc_filter->hw_addr, ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE) == 0) { found = true; break; } } if (!found) { /* * Delete the filter which was previously set * but not in the new request. */ ath6kl_dbg(ATH6KL_DBG_TRC, "Removing %pM from multicast filter\n", mc_filter->hw_addr); ret = ath6kl_wmi_add_del_mcast_filter_cmd(vif->ar->wmi, vif->fw_vif_idx, mc_filter->hw_addr, false); if (ret) { ath6kl_warn("Failed to remove multicast filter:%pM\n", mc_filter->hw_addr); return; } list_del(&mc_filter->list); kfree(mc_filter); } } INIT_LIST_HEAD(&mc_filter_new); netdev_for_each_mc_addr(ha, ndev) { found = false; list_for_each_entry(mc_filter, &vif->mc_filter, list) { if (memcmp(ha->addr, mc_filter->hw_addr, ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE) == 0) { found = true; break; } } if (!found) { mc_filter = kzalloc(sizeof(struct ath6kl_mc_filter), GFP_ATOMIC); if (!mc_filter) { WARN_ON(1); goto out; } memcpy(mc_filter->hw_addr, ha->addr, ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE); /* Set the multicast filter */ ath6kl_dbg(ATH6KL_DBG_TRC, "Adding %pM to multicast filter list\n", mc_filter->hw_addr); ret = ath6kl_wmi_add_del_mcast_filter_cmd(vif->ar->wmi, vif->fw_vif_idx, mc_filter->hw_addr, true); if (ret) { ath6kl_warn("Failed to add multicast filter :%pM\n", mc_filter->hw_addr); kfree(mc_filter); goto out; } list_add_tail(&mc_filter->list, &mc_filter_new); } } out: list_splice_tail(&mc_filter_new, &vif->mc_filter); } static const struct net_device_ops ath6kl_netdev_ops = { .ndo_open = ath6kl_open, .ndo_stop = ath6kl_close, .ndo_start_xmit = ath6kl_data_tx, .ndo_get_stats = ath6kl_get_stats, .ndo_set_features = ath6kl_set_features, .ndo_set_rx_mode = ath6kl_set_multicast_list, }; void init_netdev(struct net_device *dev) { dev->netdev_ops = &ath6kl_netdev_ops; dev->destructor = free_netdev; dev->watchdog_timeo = ATH6KL_TX_TIMEOUT; 
dev->needed_headroom = ETH_HLEN; dev->needed_headroom += sizeof(struct ath6kl_llc_snap_hdr) + sizeof(struct wmi_data_hdr) + HTC_HDR_LENGTH + WMI_MAX_TX_META_SZ + ATH6KL_HTC_ALIGN_BYTES; dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM; return; }
gpl-2.0
PatrickPalm/alcatel-kernel-msm7x30
drivers/hid/hid-twinhan.c
4757
4957
/*
 * HID driver for TwinHan IR remote control
 *
 * Based on hid-gyration.c
 *
 * Copyright (c) 2009 Bruno Prémont <bonbons@linux-vserver.org>
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License.
 */

#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
#include <linux/module.h>

#include "hid-ids.h"

/* Remote control key layout + listing:
 *
 * Full Screen                              Power
 * KEY_SCREEN                               KEY_POWER2
 *
 * 1                       2                       3
 * KEY_NUMERIC_1           KEY_NUMERIC_2           KEY_NUMERIC_3
 *
 * 4                       5                       6
 * KEY_NUMERIC_4           KEY_NUMERIC_5           KEY_NUMERIC_6
 *
 * 7                       8                       9
 * KEY_NUMERIC_7           KEY_NUMERIC_8           KEY_NUMERIC_9
 *
 * REC                     0                       Favorite
 * KEY_RECORD              KEY_NUMERIC_0           KEY_FAVORITES
 *
 * Rewind                                          Forward
 * KEY_REWIND              CH+                     KEY_FORWARD
 *                         KEY_CHANNELUP
 *
 * VOL-                    >                       VOL+
 * KEY_VOLUMEDOWN          KEY_PLAY                KEY_VOLUMEUP
 *
 *                         CH-
 *                         KEY_CHANNELDOWN
 * Recall                                          Stop
 * KEY_RESTART                                     KEY_STOP
 *
 * Timeshift/Pause         Mute                    Cancel
 * KEY_PAUSE               KEY_MUTE                KEY_CANCEL
 *
 * Capture                 Preview                 EPG
 * KEY_PRINT               KEY_PROGRAM             KEY_EPG
 *
 * Record List             Tab                     Teletext
 * KEY_LIST                KEY_TAB                 KEY_TEXT
 */

/* Map a keyboard-page usage onto input key code (c), clearing any
 * mapping the HID core may already have made for it. */
#define th_map_key_clear(c)	hid_map_usage_clear(hi, usage, bit, max, \
					EV_KEY, (c))

/*
 * Translate the remote's HID keyboard usages to the input key codes
 * shown in the layout comment above.
 *
 * Returns 1 when a usage has been mapped here, -1 to tell the HID core
 * to drop the usage entirely (the modifier usages the remote sends as
 * part of its multi-usage "power"/"volume" chords), and 0 for
 * non-keyboard pages so the core applies its default mapping.
 */
static int twinhan_input_mapping(struct hid_device *hdev, struct hid_input *hi,
		struct hid_field *field, struct hid_usage *usage,
		unsigned long **bit, int *max)
{
	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_KEYBOARD)
		return 0;

	switch (usage->hid & HID_USAGE) {
	/* Map all keys from Twinhan Remote */
	case 0x004:
		th_map_key_clear(KEY_TEXT);
		break;
	case 0x006:
		th_map_key_clear(KEY_RESTART);
		break;
	case 0x008:
		th_map_key_clear(KEY_EPG);
		break;
	case 0x00c:
		th_map_key_clear(KEY_REWIND);
		break;
	case 0x00e:
		th_map_key_clear(KEY_PROGRAM);
		break;
	case 0x00f:
		th_map_key_clear(KEY_LIST);
		break;
	case 0x010:
		th_map_key_clear(KEY_MUTE);
		break;
	case 0x011:
		th_map_key_clear(KEY_FORWARD);
		break;
	case 0x013:
		th_map_key_clear(KEY_PRINT);
		break;
	case 0x017:
		th_map_key_clear(KEY_PAUSE);
		break;
	case 0x019:
		th_map_key_clear(KEY_FAVORITES);
		break;
	case 0x01d:
		th_map_key_clear(KEY_SCREEN);
		break;
	case 0x01e:
		th_map_key_clear(KEY_NUMERIC_1);
		break;
	case 0x01f:
		th_map_key_clear(KEY_NUMERIC_2);
		break;
	case 0x020:
		th_map_key_clear(KEY_NUMERIC_3);
		break;
	case 0x021:
		th_map_key_clear(KEY_NUMERIC_4);
		break;
	case 0x022:
		th_map_key_clear(KEY_NUMERIC_5);
		break;
	case 0x023:
		th_map_key_clear(KEY_NUMERIC_6);
		break;
	case 0x024:
		th_map_key_clear(KEY_NUMERIC_7);
		break;
	case 0x025:
		th_map_key_clear(KEY_NUMERIC_8);
		break;
	case 0x026:
		th_map_key_clear(KEY_NUMERIC_9);
		break;
	case 0x027:
		th_map_key_clear(KEY_NUMERIC_0);
		break;
	case 0x028:
		th_map_key_clear(KEY_PLAY);
		break;
	case 0x029:
		th_map_key_clear(KEY_CANCEL);
		break;
	case 0x02b:
		th_map_key_clear(KEY_TAB);
		break;
	/* Power = 0x0e0 + 0x0e1 + 0x0e2 + 0x03f */
	case 0x03f:
		th_map_key_clear(KEY_POWER2);
		break;
	case 0x04a:
		th_map_key_clear(KEY_RECORD);
		break;
	case 0x04b:
		th_map_key_clear(KEY_CHANNELUP);
		break;
	case 0x04d:
		th_map_key_clear(KEY_STOP);
		break;
	case 0x04e:
		th_map_key_clear(KEY_CHANNELDOWN);
		break;
	/* Volume down = 0x0e1 + 0x051 */
	case 0x051:
		th_map_key_clear(KEY_VOLUMEDOWN);
		break;
	/* Volume up = 0x0e1 + 0x052 */
	case 0x052:
		th_map_key_clear(KEY_VOLUMEUP);
		break;
	/* Kill the extra keys used for multi-key "power" and "volume" keys
	 * as well as continuously to release CTRL,ALT,META,... keys */
	case 0x0e0:
	case 0x0e1:
	case 0x0e2:
	case 0x0e3:
	case 0x0e4:
	case 0x0e5:
	case 0x0e6:
	case 0x0e7:
	default:
		return -1;
	}
	return 1;
}

/* USB ids this driver binds to (single TwinHan IR receiver). */
static const struct hid_device_id twinhan_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
	{ }
};
MODULE_DEVICE_TABLE(hid, twinhan_devices);

static struct hid_driver twinhan_driver = {
	.name = "twinhan",
	.id_table = twinhan_devices,
	.input_mapping = twinhan_input_mapping,
};

/* Register/unregister with the HID core at module load/unload. */
static int __init twinhan_init(void)
{
	return hid_register_driver(&twinhan_driver);
}

static void __exit twinhan_exit(void)
{
	hid_unregister_driver(&twinhan_driver);
}

module_init(twinhan_init);
module_exit(twinhan_exit);

MODULE_LICENSE("GPL");
gpl-2.0
Garcia98/kernel-amami
drivers/infiniband/hw/qib/qib_wc_x86_64.c
9109
5372
/* * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* * This file is conditionally built on x86_64 only. Otherwise weak symbol * versions of the functions exported from here are used. */ #include <linux/pci.h> #include <asm/mtrr.h> #include <asm/processor.h> #include "qib.h" /** * qib_enable_wc - enable write combining for MMIO writes to the device * @dd: qlogic_ib device * * This routine is x86_64-specific; it twiddles the CPU's MTRRs to enable * write combining. 
*/ int qib_enable_wc(struct qib_devdata *dd) { int ret = 0; u64 pioaddr, piolen; unsigned bits; const unsigned long addr = pci_resource_start(dd->pcidev, 0); const size_t len = pci_resource_len(dd->pcidev, 0); /* * Set the PIO buffers to be WCCOMB, so we get HT bursts to the * chip. Linux (possibly the hardware) requires it to be on a power * of 2 address matching the length (which has to be a power of 2). * For rev1, that means the base address, for rev2, it will be just * the PIO buffers themselves. * For chips with two sets of buffers, the calculations are * somewhat more complicated; we need to sum, and the piobufbase * register has both offsets, 2K in low 32 bits, 4K in high 32 bits. * The buffers are still packed, so a single range covers both. */ if (dd->piobcnt2k && dd->piobcnt4k) { /* 2 sizes for chip */ unsigned long pio2kbase, pio4kbase; pio2kbase = dd->piobufbase & 0xffffffffUL; pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL; if (pio2kbase < pio4kbase) { /* all current chips */ pioaddr = addr + pio2kbase; piolen = pio4kbase - pio2kbase + dd->piobcnt4k * dd->align4k; } else { pioaddr = addr + pio4kbase; piolen = pio2kbase - pio4kbase + dd->piobcnt2k * dd->palign; } } else { /* single buffer size (2K, currently) */ pioaddr = addr + dd->piobufbase; piolen = dd->piobcnt2k * dd->palign + dd->piobcnt4k * dd->align4k; } for (bits = 0; !(piolen & (1ULL << bits)); bits++) /* do nothing */ ; if (piolen != (1ULL << bits)) { piolen >>= bits; while (piolen >>= 1) bits++; piolen = 1ULL << (bits + 1); } if (pioaddr & (piolen - 1)) { u64 atmp; atmp = pioaddr & ~(piolen - 1); if (atmp < addr || (atmp + piolen) > (addr + len)) { qib_dev_err(dd, "No way to align address/size " "(%llx/%llx), no WC mtrr\n", (unsigned long long) atmp, (unsigned long long) piolen << 1); ret = -ENODEV; } else { pioaddr = atmp; piolen <<= 1; } } if (!ret) { int cookie; cookie = mtrr_add(pioaddr, piolen, MTRR_TYPE_WRCOMB, 0); if (cookie < 0) { { qib_devinfo(dd->pcidev, "mtrr_add() WC for PIO 
bufs " "failed (%d)\n", cookie); ret = -EINVAL; } } else { dd->wc_cookie = cookie; dd->wc_base = (unsigned long) pioaddr; dd->wc_len = (unsigned long) piolen; } } return ret; } /** * qib_disable_wc - disable write combining for MMIO writes to the device * @dd: qlogic_ib device */ void qib_disable_wc(struct qib_devdata *dd) { if (dd->wc_cookie) { int r; r = mtrr_del(dd->wc_cookie, dd->wc_base, dd->wc_len); if (r < 0) qib_devinfo(dd->pcidev, "mtrr_del(%lx, %lx, %lx) failed: %d\n", dd->wc_cookie, dd->wc_base, dd->wc_len, r); dd->wc_cookie = 0; /* even on failure */ } } /** * qib_unordered_wc - indicate whether write combining is ordered * * Because our performance depends on our ability to do write combining mmio * writes in the most efficient way, we need to know if we are on an Intel * or AMD x86_64 processor. AMD x86_64 processors flush WC buffers out in * the order completed, and so no special flushing is required to get * correct ordering. Intel processors, however, will flush write buffers * out in "random" orders, and so explicit ordering is needed at times. */ int qib_unordered_wc(void) { return boot_cpu_data.x86_vendor != X86_VENDOR_AMD; }
gpl-2.0
alvinhochun/sony-nicki-ss-kernel-caf
drivers/firmware/dmi-sysfs.c
10645
17371
/*
 * dmi-sysfs.c
 *
 * This module exports the DMI tables read-only to userspace through the
 * sysfs file system.
 *
 * Data is currently found below
 *    /sys/firmware/dmi/...
 *
 * DMI attributes are presented in attribute files with names
 * formatted using %d-%d, so that the first integer indicates the
 * structure type (0-255), and the second field is the instance of that
 * entry.
 *
 * Copyright 2011 Google, Inc.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kobject.h>
#include <linux/dmi.h>
#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/io.h>

#define MAX_ENTRY_TYPE 255 /* Most of these aren't used, but we consider
			      the top entry type is only 8 bits */

/* One sysfs kobject per DMI table entry; keyed by (dh.type, instance). */
struct dmi_sysfs_entry {
	struct dmi_header dh;	/* copy of the entry's formatted header */
	struct kobject kobj;	/* the /sys/firmware/dmi/entries/<t>-<i> dir */
	int instance;		/* Nth entry of this type in the table */
	int position;		/* absolute position in the DMI table */
	struct list_head list;	/* link on entry_list, for teardown */
	struct kobject *child;	/* optional specialization dir (e.g. SEL) */
};

/*
 * Global list of dmi_sysfs_entry.  Even though this should only be
 * manipulated at setup and teardown, the lazy nature of the kobject
 * system means we get lazy removes.
 */
static LIST_HEAD(entry_list);
static DEFINE_SPINLOCK(entry_list_lock);

/* dmi_sysfs_attribute - Top level attribute. used by all entries. */
struct dmi_sysfs_attribute {
	struct attribute attr;
	ssize_t (*show)(struct dmi_sysfs_entry *entry, char *buf);
};

#define DMI_SYSFS_ATTR(_entry, _name) \
struct dmi_sysfs_attribute dmi_sysfs_attr_##_entry##_##_name = { \
	.attr = {.name = __stringify(_name), .mode = 0400}, \
	.show = dmi_sysfs_##_entry##_##_name, \
}

/*
 * dmi_sysfs_mapped_attribute - Attribute where we require the entry be
 * mapped in.  Use in conjunction with dmi_sysfs_specialize_attr_ops.
 */
struct dmi_sysfs_mapped_attribute {
	struct attribute attr;
	ssize_t (*show)(struct dmi_sysfs_entry *entry,
			const struct dmi_header *dh,
			char *buf);
};

#define DMI_SYSFS_MAPPED_ATTR(_entry, _name) \
struct dmi_sysfs_mapped_attribute dmi_sysfs_attr_##_entry##_##_name = { \
	.attr = {.name = __stringify(_name), .mode = 0400}, \
	.show = dmi_sysfs_##_entry##_##_name, \
}

/*************************************************
 * Generic DMI entry support.
 *************************************************/

/* kobject release: the child kobjects are plain kmalloc'd kobjects. */
static void dmi_entry_free(struct kobject *kobj)
{
	kfree(kobj);
}

static struct dmi_sysfs_entry *to_entry(struct kobject *kobj)
{
	return container_of(kobj, struct dmi_sysfs_entry, kobj);
}

static struct dmi_sysfs_attribute *to_attr(struct attribute *attr)
{
	return container_of(attr, struct dmi_sysfs_attribute, attr);
}

/* sysfs show dispatcher for the per-entry top-level attributes. */
static ssize_t dmi_sysfs_attr_show(struct kobject *kobj,
				   struct attribute *_attr, char *buf)
{
	struct dmi_sysfs_entry *entry = to_entry(kobj);
	struct dmi_sysfs_attribute *attr = to_attr(_attr);

	/* DMI stuff is only ever admin visible */
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	return attr->show(entry, buf);
}

static const struct sysfs_ops dmi_sysfs_attr_ops = {
	.show = dmi_sysfs_attr_show,
};

typedef ssize_t (*dmi_callback)(struct dmi_sysfs_entry *,
				const struct dmi_header *dh, void *);

/* Cursor state for locating one entry during a dmi_walk(). */
struct find_dmi_data {
	struct dmi_sysfs_entry	*entry;
	dmi_callback		callback;
	void			*private;
	int			instance_countdown;
	ssize_t			ret;
};

/* dmi_walk() visitor: skip until the wanted instance, then run callback. */
static void find_dmi_entry_helper(const struct dmi_header *dh,
				  void *_data)
{
	struct find_dmi_data *data = _data;
	struct dmi_sysfs_entry *entry = data->entry;

	/* Is this the entry we want? */
	if (dh->type != entry->dh.type)
		return;

	if (data->instance_countdown != 0) {
		/* try the next instance? */
		data->instance_countdown--;
		return;
	}

	/*
	 * Don't ever revisit the instance.  Short circuit later
	 * instances by letting the instance_countdown run negative
	 */
	data->instance_countdown--;

	/* Found the entry */
	data->ret = data->callback(entry, dh, data->private);
}

/* State for passing the read parameters through dmi_find_entry() */
struct dmi_read_state {
	char *buf;
	loff_t pos;
	size_t count;
};

/*
 * Re-locate @entry's raw data in the DMI table and invoke @callback on
 * it.  The table is re-walked on every access because we only cache the
 * header, not a pointer into the table.
 */
static ssize_t find_dmi_entry(struct dmi_sysfs_entry *entry,
			      dmi_callback callback, void *private)
{
	struct find_dmi_data data = {
		.entry = entry,
		.callback = callback,
		.private = private,
		.instance_countdown = entry->instance,
		.ret = -EIO,  /* To signal the entry disappeared */
	};
	int ret;

	ret = dmi_walk(find_dmi_entry_helper, &data);
	/* This shouldn't happen, but just in case. */
	if (ret)
		return -EINVAL;
	return data.ret;
}

/*
 * Calculate and return the byte length of the dmi entry identified by
 * dh.  This includes both the formatted portion as well as the
 * unformatted string space, including the two trailing nul characters.
 */
static size_t dmi_entry_length(const struct dmi_header *dh)
{
	const char *p = (const char *)dh;

	p += dh->length;

	/* scan for the double-NUL that terminates the string region */
	while (p[0] || p[1])
		p++;

	return 2 + p - (const char *)dh;
}

/*************************************************
 * Support bits for specialized DMI entry support
 *************************************************/
struct dmi_entry_attr_show_data {
	struct attribute *attr;
	char *buf;
};

/* find_dmi_entry() callback that forwards to a mapped attribute's show. */
static ssize_t dmi_entry_attr_show_helper(struct dmi_sysfs_entry *entry,
					  const struct dmi_header *dh,
					  void *_data)
{
	struct dmi_entry_attr_show_data *data = _data;
	struct dmi_sysfs_mapped_attribute *attr;

	attr = container_of(data->attr,
			    struct dmi_sysfs_mapped_attribute, attr);
	return attr->show(entry, dh, data->buf);
}

static ssize_t dmi_entry_attr_show(struct kobject *kobj,
				   struct attribute *attr,
				   char *buf)
{
	struct dmi_entry_attr_show_data data = {
		.attr = attr,
		.buf  = buf,
	};
	/* Find the entry according to our parent and call the
	 * normalized show method hanging off of the attribute */
	return find_dmi_entry(to_entry(kobj->parent),
			      dmi_entry_attr_show_helper, &data);
}

static const struct sysfs_ops dmi_sysfs_specialize_attr_ops = {
	.show = dmi_entry_attr_show,
};

/*************************************************
 * Specialized DMI entry support.
 *************************************************/

/*** Type 15 - System Event Table ***/

#define DMI_SEL_ACCESS_METHOD_IO8	0x00
#define DMI_SEL_ACCESS_METHOD_IO2x8	0x01
#define DMI_SEL_ACCESS_METHOD_IO16	0x02
#define DMI_SEL_ACCESS_METHOD_PHYS32	0x03
#define DMI_SEL_ACCESS_METHOD_GPNV	0x04

/* Wire layout of the SMBIOS type 15 (System Event Log) structure. */
struct dmi_system_event_log {
	struct dmi_header header;
	u16	area_length;
	u16	header_start_offset;
	u16	data_start_offset;
	u8	access_method;
	u8	status;
	u32	change_token;
	union {
		struct {
			u16 index_addr;
			u16 data_addr;
		} io;
		u32	phys_addr32;
		u16	gpnv_handle;
		u32	access_method_address;
	};
	u8	header_format;
	u8	type_descriptors_supported_count;
	u8	per_log_type_descriptor_length;
	u8	supported_log_type_descriptos[0]; /* sic: upstream field-name typo */
} __packed;

#define DMI_SYSFS_SEL_FIELD(_field) \
static ssize_t dmi_sysfs_sel_##_field(struct dmi_sysfs_entry *entry, \
				      const struct dmi_header *dh, \
				      char *buf) \
{ \
	struct dmi_system_event_log sel; \
	if (sizeof(sel) > dmi_entry_length(dh)) \
		return -EIO; \
	memcpy(&sel, dh, sizeof(sel)); \
	return sprintf(buf, "%u\n", sel._field); \
} \
static DMI_SYSFS_MAPPED_ATTR(sel, _field)

DMI_SYSFS_SEL_FIELD(area_length);
DMI_SYSFS_SEL_FIELD(header_start_offset);
DMI_SYSFS_SEL_FIELD(data_start_offset);
DMI_SYSFS_SEL_FIELD(access_method);
DMI_SYSFS_SEL_FIELD(status);
DMI_SYSFS_SEL_FIELD(change_token);
DMI_SYSFS_SEL_FIELD(access_method_address);
DMI_SYSFS_SEL_FIELD(header_format);
DMI_SYSFS_SEL_FIELD(type_descriptors_supported_count);
DMI_SYSFS_SEL_FIELD(per_log_type_descriptor_length);

static struct attribute *dmi_sysfs_sel_attrs[] = {
	&dmi_sysfs_attr_sel_area_length.attr,
	&dmi_sysfs_attr_sel_header_start_offset.attr,
	&dmi_sysfs_attr_sel_data_start_offset.attr,
	&dmi_sysfs_attr_sel_access_method.attr,
	&dmi_sysfs_attr_sel_status.attr,
	&dmi_sysfs_attr_sel_change_token.attr,
	&dmi_sysfs_attr_sel_access_method_address.attr,
	&dmi_sysfs_attr_sel_header_format.attr,
	&dmi_sysfs_attr_sel_type_descriptors_supported_count.attr,
	&dmi_sysfs_attr_sel_per_log_type_descriptor_length.attr,
	NULL,
};

static struct kobj_type dmi_system_event_log_ktype = {
	.release = dmi_entry_free,
	.sysfs_ops = &dmi_sysfs_specialize_attr_ops,
	.default_attrs = dmi_sysfs_sel_attrs,
};

typedef u8 (*sel_io_reader)(const struct dmi_system_event_log *sel,
			    loff_t offset);

/* Serializes the index/data port pairs used by the I/O access methods. */
static DEFINE_MUTEX(io_port_lock);

/* Read one SEL byte via a single 8-bit index port + data port. */
static u8 read_sel_8bit_indexed_io(const struct dmi_system_event_log *sel,
				   loff_t offset)
{
	u8 ret;

	mutex_lock(&io_port_lock);
	outb((u8)offset, sel->io.index_addr);
	ret = inb(sel->io.data_addr);
	mutex_unlock(&io_port_lock);
	return ret;
}

/* Read one SEL byte via two consecutive 8-bit index ports (low, high). */
static u8 read_sel_2x8bit_indexed_io(const struct dmi_system_event_log *sel,
				     loff_t offset)
{
	u8 ret;

	mutex_lock(&io_port_lock);
	outb((u8)offset, sel->io.index_addr);
	outb((u8)(offset >> 8), sel->io.index_addr + 1);
	ret = inb(sel->io.data_addr);
	mutex_unlock(&io_port_lock);
	return ret;
}

/* Read one SEL byte via a 16-bit index port. */
static u8 read_sel_16bit_indexed_io(const struct dmi_system_event_log *sel,
				    loff_t offset)
{
	u8 ret;

	mutex_lock(&io_port_lock);
	outw((u16)offset, sel->io.index_addr);
	ret = inb(sel->io.data_addr);
	mutex_unlock(&io_port_lock);
	return ret;
}

/* Indexed by access_method; only the three I/O methods are present. */
static sel_io_reader sel_io_readers[] = {
	[DMI_SEL_ACCESS_METHOD_IO8] = read_sel_8bit_indexed_io,
	[DMI_SEL_ACCESS_METHOD_IO2x8] = read_sel_2x8bit_indexed_io,
	[DMI_SEL_ACCESS_METHOD_IO16] = read_sel_16bit_indexed_io,
};

/* Copy up to @count bytes of the event log into @buf via port I/O. */
static ssize_t dmi_sel_raw_read_io(struct dmi_sysfs_entry *entry,
				   const struct dmi_system_event_log *sel,
				   char *buf, loff_t pos, size_t count)
{
	ssize_t wrote = 0;

	sel_io_reader io_reader = sel_io_readers[sel->access_method];

	while (count && pos < sel->area_length) {
		count--;
		*(buf++) = io_reader(sel, pos++);
		wrote++;
	}

	return wrote;
}

/* Copy up to @count bytes of the event log via a temporary ioremap(). */
static ssize_t dmi_sel_raw_read_phys32(struct dmi_sysfs_entry *entry,
				       const struct dmi_system_event_log *sel,
				       char *buf, loff_t pos,
				       size_t count)
{
	u8 __iomem *mapped;
	ssize_t wrote = 0;

	mapped = ioremap(sel->access_method_address, sel->area_length);
	if (!mapped)
		return -EIO;

	while (count && pos < sel->area_length) {
		count--;
		*(buf++) = readb(mapped + pos++);
		wrote++;
	}

	iounmap(mapped);
	return wrote;
}

/* Dispatch a raw event-log read to the right access method. */
static ssize_t dmi_sel_raw_read_helper(struct dmi_sysfs_entry *entry,
				       const struct dmi_header *dh,
				       void *_state)
{
	struct dmi_read_state *state = _state;
	struct dmi_system_event_log sel;

	if (sizeof(sel) > dmi_entry_length(dh))
		return -EIO;

	memcpy(&sel, dh, sizeof(sel));

	switch (sel.access_method) {
	case DMI_SEL_ACCESS_METHOD_IO8:
	case DMI_SEL_ACCESS_METHOD_IO2x8:
	case DMI_SEL_ACCESS_METHOD_IO16:
		return dmi_sel_raw_read_io(entry, &sel, state->buf,
					   state->pos, state->count);
	case DMI_SEL_ACCESS_METHOD_PHYS32:
		return dmi_sel_raw_read_phys32(entry, &sel, state->buf,
					       state->pos, state->count);
	case DMI_SEL_ACCESS_METHOD_GPNV:
		pr_info("dmi-sysfs: GPNV support missing.\n");
		return -EIO;
	default:
		pr_info("dmi-sysfs: Unknown access method %02x\n",
			sel.access_method);
		return -EIO;
	}
}

/* sysfs .read for <entry>/system_event_log/raw_event_log. */
static ssize_t dmi_sel_raw_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *bin_attr,
				char *buf, loff_t pos, size_t count)
{
	struct dmi_sysfs_entry *entry = to_entry(kobj->parent);
	struct dmi_read_state state = {
		.buf = buf,
		.pos = pos,
		.count = count,
	};

	return find_dmi_entry(entry, dmi_sel_raw_read_helper, &state);
}

static struct bin_attribute dmi_sel_raw_attr = {
	.attr = {.name = "raw_event_log", .mode = 0400},
	.read = dmi_sel_raw_read,
};

/*
 * Type 15 specialization: create the "system_event_log" child kobject
 * and its raw_event_log binary file under @entry.
 */
static int dmi_system_event_log(struct dmi_sysfs_entry *entry)
{
	int ret;

	entry->child = kzalloc(sizeof(*entry->child), GFP_KERNEL);
	if (!entry->child)
		return -ENOMEM;
	ret = kobject_init_and_add(entry->child,
				   &dmi_system_event_log_ktype,
				   &entry->kobj,
				   "system_event_log");
	if (ret)
		goto out_free;

	ret = sysfs_create_bin_file(entry->child, &dmi_sel_raw_attr);
	if (ret)
		goto out_del;

	return 0;

out_del:
	kobject_del(entry->child);
out_free:
	kfree(entry->child);
	return ret;
}

/*************************************************
 * Generic DMI entry support.
 *************************************************/

static ssize_t dmi_sysfs_entry_length(struct dmi_sysfs_entry *entry, char *buf)
{
	return sprintf(buf, "%d\n", entry->dh.length);
}

static ssize_t dmi_sysfs_entry_handle(struct dmi_sysfs_entry *entry, char *buf)
{
	return sprintf(buf, "%d\n", entry->dh.handle);
}

static ssize_t dmi_sysfs_entry_type(struct dmi_sysfs_entry *entry, char *buf)
{
	return sprintf(buf, "%d\n", entry->dh.type);
}

static ssize_t dmi_sysfs_entry_instance(struct dmi_sysfs_entry *entry,
					char *buf)
{
	return sprintf(buf, "%d\n", entry->instance);
}

static ssize_t dmi_sysfs_entry_position(struct dmi_sysfs_entry *entry,
					char *buf)
{
	return sprintf(buf, "%d\n", entry->position);
}

static DMI_SYSFS_ATTR(entry, length);
static DMI_SYSFS_ATTR(entry, handle);
static DMI_SYSFS_ATTR(entry, type);
static DMI_SYSFS_ATTR(entry, instance);
static DMI_SYSFS_ATTR(entry, position);

static struct attribute *dmi_sysfs_entry_attrs[] = {
	&dmi_sysfs_attr_entry_length.attr,
	&dmi_sysfs_attr_entry_handle.attr,
	&dmi_sysfs_attr_entry_type.attr,
	&dmi_sysfs_attr_entry_instance.attr,
	&dmi_sysfs_attr_entry_position.attr,
	NULL,
};

/* find_dmi_entry() callback: copy the raw entry bytes out to userspace. */
static ssize_t dmi_entry_raw_read_helper(struct dmi_sysfs_entry *entry,
					 const struct dmi_header *dh,
					 void *_state)
{
	struct dmi_read_state *state = _state;
	size_t entry_length;

	entry_length = dmi_entry_length(dh);

	return memory_read_from_buffer(state->buf, state->count,
				       &state->pos, dh, entry_length);
}

/* sysfs .read for each entry's "raw" binary file. */
static ssize_t dmi_entry_raw_read(struct file *filp,
				  struct kobject *kobj,
				  struct bin_attribute *bin_attr,
				  char *buf, loff_t pos, size_t count)
{
	struct dmi_sysfs_entry *entry = to_entry(kobj);
	struct dmi_read_state state = {
		.buf = buf,
		.pos = pos,
		.count = count,
	};

	return find_dmi_entry(entry, dmi_entry_raw_read_helper, &state);
}

static const struct bin_attribute dmi_entry_raw_attr = {
	.attr = {.name = "raw", .mode = 0400},
	.read = dmi_entry_raw_read,
};

/* kobject release: unlink from the global list and free the entry. */
static void dmi_sysfs_entry_release(struct kobject *kobj)
{
	struct dmi_sysfs_entry *entry = to_entry(kobj);

	sysfs_remove_bin_file(&entry->kobj, &dmi_entry_raw_attr);
	spin_lock(&entry_list_lock);
	list_del(&entry->list);
	spin_unlock(&entry_list_lock);
	kfree(entry);
}

static struct kobj_type dmi_sysfs_entry_ktype = {
	.release = dmi_sysfs_entry_release,
	.sysfs_ops = &dmi_sysfs_attr_ops,
	.default_attrs = dmi_sysfs_entry_attrs,
};

static struct kobject *dmi_kobj;
static struct kset *dmi_kset;

/* Global count of all instances seen.  Only for setup */
static int __initdata instance_counts[MAX_ENTRY_TYPE + 1];

/* Global positional count of all entries seen.  Only for setup */
static int __initdata position_count;

/*
 * dmi_walk() visitor run at init: create one dmi_sysfs_entry kobject per
 * table entry.  Errors are reported through *_ret and short-circuit all
 * later entries.
 */
static void __init dmi_sysfs_register_handle(const struct dmi_header *dh,
					     void *_ret)
{
	struct dmi_sysfs_entry *entry;
	int *ret = _ret;

	/* If a previous entry saw an error, short circuit */
	if (*ret)
		return;

	/* Allocate and register a new entry into the entries set */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		*ret = -ENOMEM;
		return;
	}

	/* Set the key */
	memcpy(&entry->dh, dh, sizeof(*dh));
	entry->instance = instance_counts[dh->type]++;
	entry->position = position_count++;

	entry->kobj.kset = dmi_kset;
	*ret = kobject_init_and_add(&entry->kobj, &dmi_sysfs_entry_ktype, NULL,
				    "%d-%d", dh->type, entry->instance);
	if (*ret) {
		kfree(entry);
		return;
	}

	/* Thread on the global list for cleanup */
	spin_lock(&entry_list_lock);
	list_add_tail(&entry->list, &entry_list);
	spin_unlock(&entry_list_lock);

	/* Handle specializations by type */
	switch (dh->type) {
	case DMI_ENTRY_SYSTEM_EVENT_LOG:
		*ret = dmi_system_event_log(entry);
		break;
	default:
		/* No specialization */
		break;
	}
	if (*ret)
		goto out_err;

	/* Create the raw binary file to access the entry */
	*ret = sysfs_create_bin_file(&entry->kobj, &dmi_entry_raw_attr);
	if (*ret)
		goto out_err;

	return;
out_err:
	/* kobject_put(NULL) is a no-op, so no child is fine here */
	kobject_put(entry->child);
	kobject_put(&entry->kobj);
	return;
}

/* Drop every entry's references; release callbacks do the freeing. */
static void cleanup_entry_list(void)
{
	struct dmi_sysfs_entry *entry, *next;

	/* No locks, we are on our way out */
	list_for_each_entry_safe(entry, next, &entry_list, list) {
		kobject_put(entry->child);
		kobject_put(&entry->kobj);
	}
}

/* Build /sys/firmware/dmi/entries and populate it from the DMI table. */
static int __init dmi_sysfs_init(void)
{
	int error = -ENOMEM;
	int val;

	/* Set up our directory */
	dmi_kobj = kobject_create_and_add("dmi", firmware_kobj);
	if (!dmi_kobj)
		goto err;

	dmi_kset = kset_create_and_add("entries", NULL, dmi_kobj);
	if (!dmi_kset)
		goto err;

	val = 0;
	error = dmi_walk(dmi_sysfs_register_handle, &val);
	if (error)
		goto err;
	if (val) {
		error = val;
		goto err;
	}

	pr_debug("dmi-sysfs: loaded.\n");

	return 0;
err:
	cleanup_entry_list();
	kset_unregister(dmi_kset);
	kobject_put(dmi_kobj);
	return error;
}

/* clean up everything. */
static void __exit dmi_sysfs_exit(void)
{
	pr_debug("dmi-sysfs: unloading.\n");
	cleanup_entry_list();
	kset_unregister(dmi_kset);
	kobject_put(dmi_kobj);
}

module_init(dmi_sysfs_init);
module_exit(dmi_sysfs_exit);

MODULE_AUTHOR("Mike Waychison <mikew@google.com>");
MODULE_DESCRIPTION("DMI sysfs support");
MODULE_LICENSE("GPL");
gpl-2.0
Silentlys/android_kernel_zuk_msm8996
net/ipv6/ah6.c
150
18703
/* * Copyright (C)2002 USAGI/WIDE Project * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. * * Authors * * Mitsuru KANDA @USAGI : IPv6 Support * Kazunori MIYAZAWA @USAGI : * Kunihiro Ishiguro <kunihiro@ipinfusion.com> * * This file is derived from net/ipv4/ah.c. */ #define pr_fmt(fmt) "IPv6: " fmt #include <crypto/hash.h> #include <linux/module.h> #include <linux/slab.h> #include <net/ip.h> #include <net/ah.h> #include <linux/crypto.h> #include <linux/pfkeyv2.h> #include <linux/string.h> #include <linux/scatterlist.h> #include <net/ip6_route.h> #include <net/icmp.h> #include <net/ipv6.h> #include <net/protocol.h> #include <net/xfrm.h> #define IPV6HDR_BASELEN 8 struct tmp_ext { #if IS_ENABLED(CONFIG_IPV6_MIP6) struct in6_addr saddr; #endif struct in6_addr daddr; char hdrs[0]; }; struct ah_skb_cb { struct xfrm_skb_cb xfrm; void *tmp; }; #define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0])) static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags, unsigned int size) { unsigned int len; len = size + crypto_ahash_digestsize(ahash) + (crypto_ahash_alignmask(ahash) & ~(crypto_tfm_ctx_alignment() - 1)); len = ALIGN(len, crypto_tfm_ctx_alignment()); len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash); len = ALIGN(len, __alignof__(struct scatterlist)); len += sizeof(struct scatterlist) * nfrags; return kmalloc(len, GFP_ATOMIC); } static inline struct tmp_ext 
*ah_tmp_ext(void *base) { return base + IPV6HDR_BASELEN; } static inline u8 *ah_tmp_auth(u8 *tmp, unsigned int offset) { return tmp + offset; } static inline u8 *ah_tmp_icv(struct crypto_ahash *ahash, void *tmp, unsigned int offset) { return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1); } static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash, u8 *icv) { struct ahash_request *req; req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash), crypto_tfm_ctx_alignment()); ahash_request_set_tfm(req, ahash); return req; } static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash, struct ahash_request *req) { return (void *)ALIGN((unsigned long)(req + 1) + crypto_ahash_reqsize(ahash), __alignof__(struct scatterlist)); } static bool zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr) { u8 *opt = (u8 *)opthdr; int len = ipv6_optlen(opthdr); int off = 0; int optlen = 0; off += 2; len -= 2; while (len > 0) { switch (opt[off]) { case IPV6_TLV_PAD1: optlen = 1; break; default: if (len < 2) goto bad; optlen = opt[off+1]+2; if (len < optlen) goto bad; if (opt[off] & 0x20) memset(&opt[off+2], 0, opt[off+1]); break; } off += optlen; len -= optlen; } if (len == 0) return true; bad: return false; } #if IS_ENABLED(CONFIG_IPV6_MIP6) /** * ipv6_rearrange_destopt - rearrange IPv6 destination options header * @iph: IPv6 header * @destopt: destionation options header */ static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *destopt) { u8 *opt = (u8 *)destopt; int len = ipv6_optlen(destopt); int off = 0; int optlen = 0; off += 2; len -= 2; while (len > 0) { switch (opt[off]) { case IPV6_TLV_PAD1: optlen = 1; break; default: if (len < 2) goto bad; optlen = opt[off+1]+2; if (len < optlen) goto bad; /* Rearrange the source address in @iph and the * addresses in home address option for final source. * See 11.3.2 of RFC 3775 for details. 
*/ if (opt[off] == IPV6_TLV_HAO) { struct in6_addr final_addr; struct ipv6_destopt_hao *hao; hao = (struct ipv6_destopt_hao *)&opt[off]; if (hao->length != sizeof(hao->addr)) { net_warn_ratelimited("destopt hao: invalid header length: %u\n", hao->length); goto bad; } final_addr = hao->addr; hao->addr = iph->saddr; iph->saddr = final_addr; } break; } off += optlen; len -= optlen; } /* Note: ok if len == 0 */ bad: return; } #else static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *destopt) {} #endif /** * ipv6_rearrange_rthdr - rearrange IPv6 routing header * @iph: IPv6 header * @rthdr: routing header * * Rearrange the destination address in @iph and the addresses in @rthdr * so that they appear in the order they will at the final destination. * See Appendix A2 of RFC 2402 for details. */ static void ipv6_rearrange_rthdr(struct ipv6hdr *iph, struct ipv6_rt_hdr *rthdr) { int segments, segments_left; struct in6_addr *addrs; struct in6_addr final_addr; segments_left = rthdr->segments_left; if (segments_left == 0) return; rthdr->segments_left = 0; /* The value of rthdr->hdrlen has been verified either by the system * call if it is locally generated, or by ipv6_rthdr_rcv() for incoming * packets. So we can assume that it is even and that segments is * greater than or equal to segments_left. * * For the same reason we can assume that this option is of type 0. 
*/ segments = rthdr->hdrlen >> 1; addrs = ((struct rt0_hdr *)rthdr)->addr; final_addr = addrs[segments - 1]; addrs += segments - segments_left; memmove(addrs + 1, addrs, (segments_left - 1) * sizeof(*addrs)); addrs[0] = iph->daddr; iph->daddr = final_addr; } static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir) { union { struct ipv6hdr *iph; struct ipv6_opt_hdr *opth; struct ipv6_rt_hdr *rth; char *raw; } exthdr = { .iph = iph }; char *end = exthdr.raw + len; int nexthdr = iph->nexthdr; exthdr.iph++; while (exthdr.raw < end) { switch (nexthdr) { case NEXTHDR_DEST: if (dir == XFRM_POLICY_OUT) ipv6_rearrange_destopt(iph, exthdr.opth); case NEXTHDR_HOP: if (!zero_out_mutable_opts(exthdr.opth)) { LIMIT_NETDEBUG( KERN_WARNING "overrun %sopts\n", nexthdr == NEXTHDR_HOP ? "hop" : "dest"); return -EINVAL; } break; case NEXTHDR_ROUTING: ipv6_rearrange_rthdr(iph, exthdr.rth); break; default: return 0; } nexthdr = exthdr.opth->nexthdr; exthdr.raw += ipv6_optlen(exthdr.opth); } return 0; } static void ah6_output_done(struct crypto_async_request *base, int err) { int extlen; u8 *iph_base; u8 *icv; struct sk_buff *skb = base->data; struct xfrm_state *x = skb_dst(skb)->xfrm; struct ah_data *ahp = x->data; struct ipv6hdr *top_iph = ipv6_hdr(skb); struct ip_auth_hdr *ah = ip_auth_hdr(skb); struct tmp_ext *iph_ext; extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr); if (extlen) extlen += sizeof(*iph_ext); iph_base = AH_SKB_CB(skb)->tmp; iph_ext = ah_tmp_ext(iph_base); icv = ah_tmp_icv(ahp->ahash, iph_ext, extlen); memcpy(ah->auth_data, icv, ahp->icv_trunc_len); memcpy(top_iph, iph_base, IPV6HDR_BASELEN); if (extlen) { #if IS_ENABLED(CONFIG_IPV6_MIP6) memcpy(&top_iph->saddr, iph_ext, extlen); #else memcpy(&top_iph->daddr, iph_ext, extlen); #endif } kfree(AH_SKB_CB(skb)->tmp); xfrm_output_resume(skb, err); } static int ah6_output(struct xfrm_state *x, struct sk_buff *skb) { int err; int nfrags; int extlen; u8 *iph_base; u8 *icv; u8 nexthdr; struct 
sk_buff *trailer; struct crypto_ahash *ahash; struct ahash_request *req; struct scatterlist *sg; struct ipv6hdr *top_iph; struct ip_auth_hdr *ah; struct ah_data *ahp; struct tmp_ext *iph_ext; int seqhi_len = 0; __be32 *seqhi; int sglists = 0; struct scatterlist *seqhisg; ahp = x->data; ahash = ahp->ahash; if ((err = skb_cow_data(skb, 0, &trailer)) < 0) goto out; nfrags = err; skb_push(skb, -skb_network_offset(skb)); extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr); if (extlen) extlen += sizeof(*iph_ext); if (x->props.flags & XFRM_STATE_ESN) { sglists = 1; seqhi_len = sizeof(*seqhi); } err = -ENOMEM; iph_base = ah_alloc_tmp(ahash, nfrags + sglists, IPV6HDR_BASELEN + extlen + seqhi_len); if (!iph_base) goto out; iph_ext = ah_tmp_ext(iph_base); seqhi = (__be32 *)((char *)iph_ext + extlen); icv = ah_tmp_icv(ahash, seqhi, seqhi_len); req = ah_tmp_req(ahash, icv); sg = ah_req_sg(ahash, req); seqhisg = sg + nfrags; ah = ip_auth_hdr(skb); memset(ah->auth_data, 0, ahp->icv_trunc_len); top_iph = ipv6_hdr(skb); top_iph->payload_len = htons(skb->len - sizeof(*top_iph)); nexthdr = *skb_mac_header(skb); *skb_mac_header(skb) = IPPROTO_AH; /* When there are no extension headers, we only need to save the first * 8 bytes of the base IP header. 
*/ memcpy(iph_base, top_iph, IPV6HDR_BASELEN); if (extlen) { #if IS_ENABLED(CONFIG_IPV6_MIP6) memcpy(iph_ext, &top_iph->saddr, extlen); #else memcpy(iph_ext, &top_iph->daddr, extlen); #endif err = ipv6_clear_mutable_options(top_iph, extlen - sizeof(*iph_ext) + sizeof(*top_iph), XFRM_POLICY_OUT); if (err) goto out_free; } ah->nexthdr = nexthdr; top_iph->priority = 0; top_iph->flow_lbl[0] = 0; top_iph->flow_lbl[1] = 0; top_iph->flow_lbl[2] = 0; top_iph->hop_limit = 0; ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2; ah->reserved = 0; ah->spi = x->id.spi; ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); sg_init_table(sg, nfrags + sglists); skb_to_sgvec_nomark(skb, sg, 0, skb->len); if (x->props.flags & XFRM_STATE_ESN) { /* Attach seqhi sg right after packet payload */ *seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi); sg_set_buf(seqhisg, seqhi, seqhi_len); } ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len); ahash_request_set_callback(req, 0, ah6_output_done, skb); AH_SKB_CB(skb)->tmp = iph_base; err = crypto_ahash_digest(req); if (err) { if (err == -EINPROGRESS) goto out; if (err == -EBUSY) err = NET_XMIT_DROP; goto out_free; } memcpy(ah->auth_data, icv, ahp->icv_trunc_len); memcpy(top_iph, iph_base, IPV6HDR_BASELEN); if (extlen) { #if IS_ENABLED(CONFIG_IPV6_MIP6) memcpy(&top_iph->saddr, iph_ext, extlen); #else memcpy(&top_iph->daddr, iph_ext, extlen); #endif } out_free: kfree(iph_base); out: return err; } static void ah6_input_done(struct crypto_async_request *base, int err) { u8 *auth_data; u8 *icv; u8 *work_iph; struct sk_buff *skb = base->data; struct xfrm_state *x = xfrm_input_state(skb); struct ah_data *ahp = x->data; struct ip_auth_hdr *ah = ip_auth_hdr(skb); int hdr_len = skb_network_header_len(skb); int ah_hlen = (ah->hdrlen + 2) << 2; work_iph = AH_SKB_CB(skb)->tmp; auth_data = ah_tmp_auth(work_iph, hdr_len); icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len); err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? 
-EBADMSG : 0; if (err) goto out; err = ah->nexthdr; skb->network_header += ah_hlen; memcpy(skb_network_header(skb), work_iph, hdr_len); __skb_pull(skb, ah_hlen + hdr_len); if (x->props.mode == XFRM_MODE_TUNNEL) skb_reset_transport_header(skb); else skb_set_transport_header(skb, -hdr_len); out: kfree(AH_SKB_CB(skb)->tmp); xfrm_input_resume(skb, err); } static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) { /* * Before process AH * [IPv6][Ext1][Ext2][AH][Dest][Payload] * |<-------------->| hdr_len * * To erase AH: * Keeping copy of cleared headers. After AH processing, * Moving the pointer of skb->network_header by using skb_pull as long * as AH header length. Then copy back the copy as long as hdr_len * If destination header following AH exists, copy it into after [Ext2]. * * |<>|[IPv6][Ext1][Ext2][Dest][Payload] * There is offset of AH before IPv6 header after the process. */ u8 *auth_data; u8 *icv; u8 *work_iph; struct sk_buff *trailer; struct crypto_ahash *ahash; struct ahash_request *req; struct scatterlist *sg; struct ip_auth_hdr *ah; struct ipv6hdr *ip6h; struct ah_data *ahp; u16 hdr_len; u16 ah_hlen; int nexthdr; int nfrags; int err = -ENOMEM; int seqhi_len = 0; __be32 *seqhi; int sglists = 0; struct scatterlist *seqhisg; if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr))) goto out; /* We are going to _remove_ AH header to keep sockets happy, * so... Later this can change. 
*/ if (skb_unclone(skb, GFP_ATOMIC)) goto out; skb->ip_summed = CHECKSUM_NONE; hdr_len = skb_network_header_len(skb); ah = (struct ip_auth_hdr *)skb->data; ahp = x->data; ahash = ahp->ahash; nexthdr = ah->nexthdr; ah_hlen = (ah->hdrlen + 2) << 2; if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) && ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len)) goto out; if (!pskb_may_pull(skb, ah_hlen)) goto out; if ((err = skb_cow_data(skb, 0, &trailer)) < 0) goto out; nfrags = err; ah = (struct ip_auth_hdr *)skb->data; ip6h = ipv6_hdr(skb); skb_push(skb, hdr_len); if (x->props.flags & XFRM_STATE_ESN) { sglists = 1; seqhi_len = sizeof(*seqhi); } work_iph = ah_alloc_tmp(ahash, nfrags + sglists, hdr_len + ahp->icv_trunc_len + seqhi_len); if (!work_iph) goto out; auth_data = ah_tmp_auth((u8 *)work_iph, hdr_len); seqhi = (__be32 *)(auth_data + ahp->icv_trunc_len); icv = ah_tmp_icv(ahash, seqhi, seqhi_len); req = ah_tmp_req(ahash, icv); sg = ah_req_sg(ahash, req); seqhisg = sg + nfrags; memcpy(work_iph, ip6h, hdr_len); memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len); memset(ah->auth_data, 0, ahp->icv_trunc_len); if (ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN)) goto out_free; ip6h->priority = 0; ip6h->flow_lbl[0] = 0; ip6h->flow_lbl[1] = 0; ip6h->flow_lbl[2] = 0; ip6h->hop_limit = 0; sg_init_table(sg, nfrags + sglists); skb_to_sgvec_nomark(skb, sg, 0, skb->len); if (x->props.flags & XFRM_STATE_ESN) { /* Attach seqhi sg right after packet payload */ *seqhi = XFRM_SKB_CB(skb)->seq.input.hi; sg_set_buf(seqhisg, seqhi, seqhi_len); } ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len); ahash_request_set_callback(req, 0, ah6_input_done, skb); AH_SKB_CB(skb)->tmp = work_iph; err = crypto_ahash_digest(req); if (err) { if (err == -EINPROGRESS) goto out; goto out_free; } err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? 
-EBADMSG : 0; if (err) goto out_free; skb->network_header += ah_hlen; memcpy(skb_network_header(skb), work_iph, hdr_len); __skb_pull(skb, ah_hlen + hdr_len); if (x->props.mode == XFRM_MODE_TUNNEL) skb_reset_transport_header(skb); else skb_set_transport_header(skb, -hdr_len); err = nexthdr; out_free: kfree(work_iph); out: return err; } static int ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { struct net *net = dev_net(skb->dev); struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+offset); struct xfrm_state *x; if (type != ICMPV6_PKT_TOOBIG && type != NDISC_REDIRECT) return 0; x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET6); if (!x) return 0; if (type == NDISC_REDIRECT) ip6_redirect(skb, net, skb->dev->ifindex, 0); else ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID); xfrm_state_put(x); return 0; } static int ah6_init_state(struct xfrm_state *x) { struct ah_data *ahp = NULL; struct xfrm_algo_desc *aalg_desc; struct crypto_ahash *ahash; if (!x->aalg) goto error; if (x->encap) goto error; ahp = kzalloc(sizeof(*ahp), GFP_KERNEL); if (ahp == NULL) return -ENOMEM; ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0); if (IS_ERR(ahash)) goto error; ahp->ahash = ahash; if (crypto_ahash_setkey(ahash, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8)) goto error; /* * Lookup the algorithm description maintained by xfrm_algo, * verify crypto transform properties, and store information * we need for AH processing. This lookup cannot fail here * after a successful crypto_alloc_hash(). 
 */
	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
	BUG_ON(!aalg_desc);

	/* Reject algorithms whose full digest size disagrees with the
	 * transform we just allocated — a mismatch would corrupt the ICV.
	 */
	if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
	    crypto_ahash_digestsize(ahash)) {
		pr_info("AH: %s digestsize %u != %hu\n",
			x->aalg->alg_name, crypto_ahash_digestsize(ahash),
			aalg_desc->uinfo.auth.icv_fullbits/8);
		goto error;
	}

	ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
	ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;

	/* AH header carries the truncated ICV, padded to an 8-byte boundary */
	x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
					  ahp->icv_trunc_len);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		/* tunnel mode prepends an outer IPv6 header as well */
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	default:
		goto error;
	}
	x->data = ahp;

	return 0;

error:
	if (ahp) {
		/* ahp->ahash may be NULL or ERR_PTR here depending on which
		 * step failed — NOTE(review): relies on crypto_free_ahash()
		 * tolerating that; confirm against the crypto API in this tree.
		 */
		crypto_free_ahash(ahp->ahash);
		kfree(ahp);
	}
	return -EINVAL;
}

/* Release per-state AH data: free the hash transform and the ah_data blob */
static void ah6_destroy(struct xfrm_state *x)
{
	struct ah_data *ahp = x->data;

	if (!ahp)
		return;

	crypto_free_ahash(ahp->ahash);
	kfree(ahp);
}

/* Post-input callback for the xfrm6 protocol handler; nothing to do for AH */
static int ah6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

/* xfrm type registration: ties AH processing into the IPv6 xfrm stack */
static const struct xfrm_type ah6_type = {
	.description	= "AH6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_AH,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= ah6_init_state,
	.destructor	= ah6_destroy,
	.input		= ah6_input,
	.output		= ah6_output,
	.hdr_offset	= xfrm6_find_1stfragopt,
};

/* IPv6 protocol handler for IPPROTO_AH packets and their ICMPv6 errors */
static struct xfrm6_protocol ah6_protocol = {
	.handler	=	xfrm6_rcv,
	.cb_handler	=	ah6_rcv_cb,
	.err_handler	=	ah6_err,
	.priority	=	0,
};

/* Module init: register the xfrm type first, then the protocol handler;
 * unwind the type registration if the second step fails.
 */
static int __init ah6_init(void)
{
	if (xfrm_register_type(&ah6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}

	if (xfrm6_protocol_register(&ah6_protocol, IPPROTO_AH) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&ah6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}

/* Module exit: deregister in reverse order of ah6_init() */
static void __exit ah6_fini(void)
{
	if (xfrm6_protocol_deregister(&ah6_protocol, IPPROTO_AH) < 0)
		pr_info("%s: can't remove protocol\n", __func__);

	if (xfrm_unregister_type(&ah6_type, AF_INET6) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}
module_init(ah6_init);
module_exit(ah6_fini);

MODULE_LICENSE("GPL");
/* Auto-load this module when an AF_INET6 xfrm state with proto AH is created */
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_AH);
gpl-2.0
yangoliver/linux
drivers/net/wireless/iwlwifi/mvm/tof.c
150
9738
/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, * USA * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2015 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include "mvm.h" #include "fw-api-tof.h" #define IWL_MVM_TOF_RANGE_REQ_MAX_ID 256 void iwl_mvm_tof_init(struct iwl_mvm *mvm) { struct iwl_mvm_tof_data *tof_data = &mvm->tof_data; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) return; memset(tof_data, 0, sizeof(*tof_data)); tof_data->tof_cfg.sub_grp_cmd_id = cpu_to_le32(TOF_CONFIG_CMD); #ifdef CONFIG_IWLWIFI_DEBUGFS if (IWL_MVM_TOF_IS_RESPONDER) { tof_data->responder_cfg.sub_grp_cmd_id = cpu_to_le32(TOF_RESPONDER_CONFIG_CMD); tof_data->responder_cfg.sta_id = IWL_MVM_STATION_COUNT; } #endif tof_data->range_req.sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_REQ_CMD); tof_data->range_req.req_timeout = 1; tof_data->range_req.initiator = 1; tof_data->range_req.report_policy = 3; tof_data->range_req_ext.sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_REQ_EXT_CMD); mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; } void iwl_mvm_tof_clean(struct iwl_mvm *mvm) { struct iwl_mvm_tof_data *tof_data = &mvm->tof_data; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) return; memset(tof_data, 0, sizeof(*tof_data)); 
mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; } static void iwl_tof_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { bool *enabled = _data; /* non bss vif exists */ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) *enabled = false; } int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm) { struct iwl_tof_config_cmd *cmd = &mvm->tof_data.tof_cfg; bool enabled; lockdep_assert_held(&mvm->mutex); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) return -EINVAL; ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_tof_iterator, &enabled); if (!enabled) { IWL_DEBUG_INFO(mvm, "ToF is not supported (non bss vif)\n"); return -EINVAL; } mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, IWL_ALWAYS_LONG_GROUP, 0), 0, sizeof(*cmd), cmd); } int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id) { struct iwl_tof_range_abort_cmd cmd = { .sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_ABORT_CMD), .request_id = id, }; lockdep_assert_held(&mvm->mutex); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) return -EINVAL; if (id != mvm->tof_data.active_range_request) { IWL_ERR(mvm, "Invalid range request id %d (active %d)\n", id, mvm->tof_data.active_range_request); return -EINVAL; } /* after abort is sent there's no active request anymore */ mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, IWL_ALWAYS_LONG_GROUP, 0), 0, sizeof(cmd), &cmd); } #ifdef CONFIG_IWLWIFI_DEBUGFS int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_tof_responder_config_cmd *cmd = &mvm->tof_data.responder_cfg; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); lockdep_assert_held(&mvm->mutex); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) return -EINVAL; if (vif->p2p || vif->type != 
NL80211_IFTYPE_AP || !mvmvif->ap_ibss_active) { IWL_ERR(mvm, "Cannot start responder, not in AP mode\n"); return -EIO; } cmd->sta_id = mvmvif->bcast_sta.sta_id; memcpy(cmd->bssid, vif->addr, ETH_ALEN); return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, IWL_ALWAYS_LONG_GROUP, 0), 0, sizeof(*cmd), cmd); } #endif int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_host_cmd cmd = { .id = iwl_cmd_id(TOF_CMD, IWL_ALWAYS_LONG_GROUP, 0), .len = { sizeof(mvm->tof_data.range_req), }, /* no copy because of the command size */ .dataflags = { IWL_HCMD_DFL_NOCOPY, }, }; lockdep_assert_held(&mvm->mutex); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) return -EINVAL; if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) { IWL_ERR(mvm, "Cannot send range request, not STA mode\n"); return -EIO; } /* nesting of range requests is not supported in FW */ if (mvm->tof_data.active_range_request != IWL_MVM_TOF_RANGE_REQ_MAX_ID) { IWL_ERR(mvm, "Cannot send range req, already active req %d\n", mvm->tof_data.active_range_request); return -EIO; } mvm->tof_data.active_range_request = mvm->tof_data.range_req.request_id; cmd.data[0] = &mvm->tof_data.range_req; return iwl_mvm_send_cmd(mvm, &cmd); } int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { lockdep_assert_held(&mvm->mutex); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) return -EINVAL; if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) { IWL_ERR(mvm, "Cannot send ext range req, not in STA mode\n"); return -EIO; } return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, IWL_ALWAYS_LONG_GROUP, 0), 0, sizeof(mvm->tof_data.range_req_ext), &mvm->tof_data.range_req_ext); } static int iwl_mvm_tof_range_resp(struct iwl_mvm *mvm, void *data) { struct iwl_tof_range_rsp_ntfy *resp = (void *)data; if (resp->request_id != mvm->tof_data.active_range_request) { IWL_ERR(mvm, "Request id mismatch, got %d, active 
%d\n", resp->request_id, mvm->tof_data.active_range_request); return -EIO; } memcpy(&mvm->tof_data.range_resp, resp, sizeof(struct iwl_tof_range_rsp_ntfy)); mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; return 0; } static int iwl_mvm_tof_mcsi_notif(struct iwl_mvm *mvm, void *data) { struct iwl_tof_mcsi_notif *resp = (struct iwl_tof_mcsi_notif *)data; IWL_DEBUG_INFO(mvm, "MCSI notification, token %d\n", resp->token); return 0; } static int iwl_mvm_tof_nb_report_notif(struct iwl_mvm *mvm, void *data) { struct iwl_tof_neighbor_report *report = (struct iwl_tof_neighbor_report *)data; IWL_DEBUG_INFO(mvm, "NB report, bssid %pM, token %d, status 0x%x\n", report->bssid, report->request_token, report->status); return 0; } void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_tof_gen_resp_cmd *resp = (void *)pkt->data; lockdep_assert_held(&mvm->mutex); switch (le32_to_cpu(resp->sub_grp_cmd_id)) { case TOF_RANGE_RESPONSE_NOTIF: iwl_mvm_tof_range_resp(mvm, resp->data); break; case TOF_MCSI_DEBUG_NOTIF: iwl_mvm_tof_mcsi_notif(mvm, resp->data); break; case TOF_NEIGHBOR_REPORT_RSP_NOTIF: iwl_mvm_tof_nb_report_notif(mvm, resp->data); break; default: IWL_ERR(mvm, "Unknown sub-group command 0x%x\n", resp->sub_grp_cmd_id); break; } }
gpl-2.0
miaoxie/linux-btrfs
net/tipc/core.c
406
4893
/* * net/tipc/core.c: TIPC module code * * Copyright (c) 2003-2006, 2013, Ericsson AB * Copyright (c) 2005-2006, 2010-2013, Wind River Systems * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
 */

#include "core.h"
#include "name_table.h"
#include "subscr.h"
#include "config.h"
#include "socket.h"

#include <linux/module.h>

/* global variables used by multiple sub-systems within TIPC */
int tipc_random __read_mostly;

/* configurable TIPC parameters */
u32 tipc_own_addr __read_mostly;
int tipc_max_ports __read_mostly;
int tipc_net_id __read_mostly;
int sysctl_tipc_rmem[3] __read_mostly;	/* min/default/max */

/**
 * tipc_buf_acquire - creates a TIPC message buffer
 * @size: message size (including TIPC header)
 *
 * Returns a new buffer with data pointers set to the specified size,
 * or NULL if allocation fails (GFP_ATOMIC, so callable from any context).
 *
 * NOTE: Headroom is reserved to allow prepending of a data link header.
 *       There may also be unrequested tailroom present at the buffer's end.
 */
struct sk_buff *tipc_buf_acquire(u32 size)
{
	struct sk_buff *skb;
	/* headroom + payload, rounded up to a 4-byte boundary */
	unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;

	skb = alloc_skb_fclone(buf_size, GFP_ATOMIC);
	if (skb) {
		skb_reserve(skb, BUF_HEADROOM);
		skb_put(skb, size);
		skb->next = NULL;
	}
	return skb;
}

/**
 * tipc_core_stop - switch TIPC from SINGLE NODE to NOT RUNNING mode
 *
 * Tears down the sub-systems brought up by tipc_core_start().
 */
static void tipc_core_stop(void)
{
	tipc_net_stop();
	tipc_bearer_cleanup();
	tipc_netlink_stop();
	tipc_subscr_stop();
	tipc_nametbl_stop();
	tipc_sk_ref_table_stop();
	tipc_socket_stop();
	tipc_unregister_sysctl();
}

/**
 * tipc_core_start - switch TIPC from NOT RUNNING to SINGLE NODE mode
 *
 * Brings up each sub-system in order; on failure, unwinds the ones
 * already started via the goto ladder below and returns the error.
 */
static int tipc_core_start(void)
{
	int err;

	get_random_bytes(&tipc_random, sizeof(tipc_random));

	err = tipc_sk_ref_table_init(tipc_max_ports, tipc_random);
	if (err)
		goto out_reftbl;

	err = tipc_nametbl_init();
	if (err)
		goto out_nametbl;

	err = tipc_netlink_start();
	if (err)
		goto out_netlink;

	err = tipc_socket_init();
	if (err)
		goto out_socket;

	err = tipc_register_sysctl();
	if (err)
		goto out_sysctl;

	err = tipc_subscr_start();
	if (err)
		goto out_subscr;

	err = tipc_bearer_setup();
	if (err)
		goto out_bearer;

	return 0;
out_bearer:
	tipc_subscr_stop();
out_subscr:
	tipc_unregister_sysctl();
out_sysctl:
	tipc_socket_stop();
out_socket:
	tipc_netlink_stop();
out_netlink:
	tipc_nametbl_stop();
out_nametbl:
	tipc_sk_ref_table_stop();
out_reftbl:
	return err;
}

/* Module entry point: set default configuration values, size the socket
 * receive-buffer sysctl limits per message importance, and start the core
 * in single node mode.
 */
static int __init tipc_init(void)
{
	int res;

	pr_info("Activated (version " TIPC_MOD_VER ")\n");

	tipc_own_addr = 0;
	tipc_max_ports = CONFIG_TIPC_PORTS;
	tipc_net_id = 4711;

	/* min/default/max rcvbuf limits scaled by importance level */
	sysctl_tipc_rmem[0] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
			      TIPC_LOW_IMPORTANCE;
	sysctl_tipc_rmem[1] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
			      TIPC_CRITICAL_IMPORTANCE;
	sysctl_tipc_rmem[2] = TIPC_CONN_OVERLOAD_LIMIT;

	res = tipc_core_start();
	if (res)
		pr_err("Unable to start in single node mode\n");
	else
		pr_info("Started in single node mode\n");
	return res;
}

/* Module exit point: stop all TIPC sub-systems */
static void __exit tipc_exit(void)
{
	tipc_core_stop();
	pr_info("Deactivated\n");
}

module_init(tipc_init);
module_exit(tipc_exit);

MODULE_DESCRIPTION("TIPC: Transparent Inter Process Communication");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(TIPC_MOD_VER);
gpl-2.0
agx/linux-wpan-next
arch/sparc/kernel/leon_pci_grpci2.c
662
24725
/* * leon_pci_grpci2.c: GRPCI2 Host PCI driver * * Copyright (C) 2011 Aeroflex Gaisler AB, Daniel Hellstrom * */ #include <linux/of_device.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/export.h> #include <asm/io.h> #include <asm/leon.h> #include <asm/vaddrs.h> #include <asm/sections.h> #include <asm/leon_pci.h> #include "irq.h" struct grpci2_barcfg { unsigned long pciadr; /* PCI Space Address */ unsigned long ahbadr; /* PCI Base address mapped to this AHB addr */ }; /* Device Node Configuration options: * - barcfgs : Custom Configuration of Host's 6 target BARs * - irq_mask : Limit which PCI interrupts are enabled * - do_reset : Force PCI Reset on startup * * barcfgs * ======= * * Optional custom Target BAR configuration (see struct grpci2_barcfg). All * addresses are physical. Array always contains 6 elements (len=2*4*6 bytes) * * -1 means not configured (let host driver do default setup). * * [i*2+0] = PCI Address of BAR[i] on target interface * [i*2+1] = Accessing PCI address of BAR[i] result in this AMBA address * * * irq_mask * ======== * * Limit which PCI interrupts are enabled. 0=Disable, 1=Enable. By default * all are enabled. Use this when PCI interrupt pins are floating on PCB. * int, len=4. * bit0 = PCI INTA# * bit1 = PCI INTB# * bit2 = PCI INTC# * bit3 = PCI INTD# * * * reset * ===== * * Force PCI reset on startup. 
int, len=4 */ /* Enable Debugging Configuration Space Access */ #undef GRPCI2_DEBUG_CFGACCESS /* * GRPCI2 APB Register MAP */ struct grpci2_regs { unsigned int ctrl; /* 0x00 Control */ unsigned int sts_cap; /* 0x04 Status / Capabilities */ int res1; /* 0x08 */ unsigned int io_map; /* 0x0C I/O Map address */ unsigned int dma_ctrl; /* 0x10 DMA */ unsigned int dma_bdbase; /* 0x14 DMA */ int res2[2]; /* 0x18 */ unsigned int bars[6]; /* 0x20 read-only PCI BARs */ int res3[2]; /* 0x38 */ unsigned int ahbmst_map[16]; /* 0x40 AHB->PCI Map per AHB Master */ /* PCI Trace Buffer Registers (OPTIONAL) */ unsigned int t_ctrl; /* 0x80 */ unsigned int t_cnt; /* 0x84 */ unsigned int t_adpat; /* 0x88 */ unsigned int t_admask; /* 0x8C */ unsigned int t_sigpat; /* 0x90 */ unsigned int t_sigmask; /* 0x94 */ unsigned int t_adstate; /* 0x98 */ unsigned int t_sigstate; /* 0x9C */ }; #define REGLOAD(a) (be32_to_cpu(__raw_readl(&(a)))) #define REGSTORE(a, v) (__raw_writel(cpu_to_be32(v), &(a))) #define CTRL_BUS_BIT 16 #define CTRL_RESET (1<<31) #define CTRL_SI (1<<27) #define CTRL_PE (1<<26) #define CTRL_EI (1<<25) #define CTRL_ER (1<<24) #define CTRL_BUS (0xff<<CTRL_BUS_BIT) #define CTRL_HOSTINT 0xf #define STS_HOST_BIT 31 #define STS_MST_BIT 30 #define STS_TAR_BIT 29 #define STS_DMA_BIT 28 #define STS_DI_BIT 27 #define STS_HI_BIT 26 #define STS_IRQMODE_BIT 24 #define STS_TRACE_BIT 23 #define STS_CFGERRVALID_BIT 20 #define STS_CFGERR_BIT 19 #define STS_INTTYPE_BIT 12 #define STS_INTSTS_BIT 8 #define STS_FDEPTH_BIT 2 #define STS_FNUM_BIT 0 #define STS_HOST (1<<STS_HOST_BIT) #define STS_MST (1<<STS_MST_BIT) #define STS_TAR (1<<STS_TAR_BIT) #define STS_DMA (1<<STS_DMA_BIT) #define STS_DI (1<<STS_DI_BIT) #define STS_HI (1<<STS_HI_BIT) #define STS_IRQMODE (0x3<<STS_IRQMODE_BIT) #define STS_TRACE (1<<STS_TRACE_BIT) #define STS_CFGERRVALID (1<<STS_CFGERRVALID_BIT) #define STS_CFGERR (1<<STS_CFGERR_BIT) #define STS_INTTYPE (0x3f<<STS_INTTYPE_BIT) #define STS_INTSTS (0xf<<STS_INTSTS_BIT) #define 
STS_FDEPTH (0x7<<STS_FDEPTH_BIT) #define STS_FNUM (0x3<<STS_FNUM_BIT) #define STS_ISYSERR (1<<17) #define STS_IDMA (1<<16) #define STS_IDMAERR (1<<15) #define STS_IMSTABRT (1<<14) #define STS_ITGTABRT (1<<13) #define STS_IPARERR (1<<12) #define STS_ERR_IRQ (STS_ISYSERR | STS_IMSTABRT | STS_ITGTABRT | STS_IPARERR) struct grpci2_bd_chan { unsigned int ctrl; /* 0x00 DMA Control */ unsigned int nchan; /* 0x04 Next DMA Channel Address */ unsigned int nbd; /* 0x08 Next Data Descriptor in chan */ unsigned int res; /* 0x0C Reserved */ }; #define BD_CHAN_EN 0x80000000 #define BD_CHAN_TYPE 0x00300000 #define BD_CHAN_BDCNT 0x0000ffff #define BD_CHAN_EN_BIT 31 #define BD_CHAN_TYPE_BIT 20 #define BD_CHAN_BDCNT_BIT 0 struct grpci2_bd_data { unsigned int ctrl; /* 0x00 DMA Data Control */ unsigned int pci_adr; /* 0x04 PCI Start Address */ unsigned int ahb_adr; /* 0x08 AHB Start address */ unsigned int next; /* 0x0C Next Data Descriptor in chan */ }; #define BD_DATA_EN 0x80000000 #define BD_DATA_IE 0x40000000 #define BD_DATA_DR 0x20000000 #define BD_DATA_TYPE 0x00300000 #define BD_DATA_ER 0x00080000 #define BD_DATA_LEN 0x0000ffff #define BD_DATA_EN_BIT 31 #define BD_DATA_IE_BIT 30 #define BD_DATA_DR_BIT 29 #define BD_DATA_TYPE_BIT 20 #define BD_DATA_ER_BIT 19 #define BD_DATA_LEN_BIT 0 /* GRPCI2 Capability */ struct grpci2_cap_first { unsigned int ctrl; unsigned int pci2ahb_map[6]; unsigned int ext2ahb_map; unsigned int io_map; unsigned int pcibar_size[6]; }; #define CAP9_CTRL_OFS 0 #define CAP9_BAR_OFS 0x4 #define CAP9_IOMAP_OFS 0x20 #define CAP9_BARSIZE_OFS 0x24 #define TGT 256 struct grpci2_priv { struct leon_pci_info info; /* must be on top of this structure */ struct grpci2_regs __iomem *regs; char irq; char irq_mode; /* IRQ Mode from CAPSTS REG */ char bt_enabled; char do_reset; char irq_mask; u32 pciid; /* PCI ID of Host */ unsigned char irq_map[4]; /* Virtual IRQ numbers */ unsigned int virq_err; unsigned int virq_dma; /* AHB PCI Windows */ unsigned long pci_area; /* MEMORY 
*/ unsigned long pci_area_end; unsigned long pci_io; /* I/O */ unsigned long pci_conf; /* CONFIGURATION */ unsigned long pci_conf_end; unsigned long pci_io_va; struct grpci2_barcfg tgtbars[6]; }; static DEFINE_SPINLOCK(grpci2_dev_lock); static struct grpci2_priv *grpci2priv; static int grpci2_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { struct grpci2_priv *priv = dev->bus->sysdata; int irq_group; /* Use default IRQ decoding on PCI BUS0 according slot numbering */ irq_group = slot & 0x3; pin = ((pin - 1) + irq_group) & 0x3; return priv->irq_map[pin]; } static int grpci2_cfg_r32(struct grpci2_priv *priv, unsigned int bus, unsigned int devfn, int where, u32 *val) { unsigned int *pci_conf; unsigned long flags; u32 tmp; if (where & 0x3) return -EINVAL; if (bus == 0) { devfn += (0x8 * 6); /* start at AD16=Device0 */ } else if (bus == TGT) { bus = 0; devfn = 0; /* special case: bridge controller itself */ } /* Select bus */ spin_lock_irqsave(&grpci2_dev_lock, flags); REGSTORE(priv->regs->ctrl, (REGLOAD(priv->regs->ctrl) & ~(0xff << 16)) | (bus << 16)); spin_unlock_irqrestore(&grpci2_dev_lock, flags); /* clear old status */ REGSTORE(priv->regs->sts_cap, (STS_CFGERR | STS_CFGERRVALID)); pci_conf = (unsigned int *) (priv->pci_conf | (devfn << 8) | (where & 0xfc)); tmp = LEON3_BYPASS_LOAD_PA(pci_conf); /* Wait until GRPCI2 signals that CFG access is done, it should be * done instantaneously unless a DMA operation is ongoing... 
*/ while ((REGLOAD(priv->regs->sts_cap) & STS_CFGERRVALID) == 0) ; if (REGLOAD(priv->regs->sts_cap) & STS_CFGERR) { *val = 0xffffffff; } else { /* Bus always little endian (unaffected by byte-swapping) */ *val = swab32(tmp); } return 0; } static int grpci2_cfg_r16(struct grpci2_priv *priv, unsigned int bus, unsigned int devfn, int where, u32 *val) { u32 v; int ret; if (where & 0x1) return -EINVAL; ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v); *val = 0xffff & (v >> (8 * (where & 0x3))); return ret; } static int grpci2_cfg_r8(struct grpci2_priv *priv, unsigned int bus, unsigned int devfn, int where, u32 *val) { u32 v; int ret; ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v); *val = 0xff & (v >> (8 * (where & 3))); return ret; } static int grpci2_cfg_w32(struct grpci2_priv *priv, unsigned int bus, unsigned int devfn, int where, u32 val) { unsigned int *pci_conf; unsigned long flags; if (where & 0x3) return -EINVAL; if (bus == 0) { devfn += (0x8 * 6); /* start at AD16=Device0 */ } else if (bus == TGT) { bus = 0; devfn = 0; /* special case: bridge controller itself */ } /* Select bus */ spin_lock_irqsave(&grpci2_dev_lock, flags); REGSTORE(priv->regs->ctrl, (REGLOAD(priv->regs->ctrl) & ~(0xff << 16)) | (bus << 16)); spin_unlock_irqrestore(&grpci2_dev_lock, flags); /* clear old status */ REGSTORE(priv->regs->sts_cap, (STS_CFGERR | STS_CFGERRVALID)); pci_conf = (unsigned int *) (priv->pci_conf | (devfn << 8) | (where & 0xfc)); LEON3_BYPASS_STORE_PA(pci_conf, swab32(val)); /* Wait until GRPCI2 signals that CFG access is done, it should be * done instantaneously unless a DMA operation is ongoing... 
*/ while ((REGLOAD(priv->regs->sts_cap) & STS_CFGERRVALID) == 0) ; return 0; } static int grpci2_cfg_w16(struct grpci2_priv *priv, unsigned int bus, unsigned int devfn, int where, u32 val) { int ret; u32 v; if (where & 0x1) return -EINVAL; ret = grpci2_cfg_r32(priv, bus, devfn, where&~3, &v); if (ret) return ret; v = (v & ~(0xffff << (8 * (where & 0x3)))) | ((0xffff & val) << (8 * (where & 0x3))); return grpci2_cfg_w32(priv, bus, devfn, where & ~0x3, v); } static int grpci2_cfg_w8(struct grpci2_priv *priv, unsigned int bus, unsigned int devfn, int where, u32 val) { int ret; u32 v; ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v); if (ret != 0) return ret; v = (v & ~(0xff << (8 * (where & 0x3)))) | ((0xff & val) << (8 * (where & 0x3))); return grpci2_cfg_w32(priv, bus, devfn, where & ~0x3, v); } /* Read from Configuration Space. When entering here the PCI layer has taken * the pci_lock spinlock and IRQ is off. */ static int grpci2_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { struct grpci2_priv *priv = grpci2priv; unsigned int busno = bus->number; int ret; if (PCI_SLOT(devfn) > 15 || busno > 255) { *val = ~0; return 0; } switch (size) { case 1: ret = grpci2_cfg_r8(priv, busno, devfn, where, val); break; case 2: ret = grpci2_cfg_r16(priv, busno, devfn, where, val); break; case 4: ret = grpci2_cfg_r32(priv, busno, devfn, where, val); break; default: ret = -EINVAL; break; } #ifdef GRPCI2_DEBUG_CFGACCESS printk(KERN_INFO "grpci2_read_config: [%02x:%02x:%x] ofs=%d val=%x " "size=%d\n", busno, PCI_SLOT(devfn), PCI_FUNC(devfn), where, *val, size); #endif return ret; } /* Write to Configuration Space. When entering here the PCI layer has taken * the pci_lock spinlock and IRQ is off. 
*/ static int grpci2_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { struct grpci2_priv *priv = grpci2priv; unsigned int busno = bus->number; if (PCI_SLOT(devfn) > 15 || busno > 255) return 0; #ifdef GRPCI2_DEBUG_CFGACCESS printk(KERN_INFO "grpci2_write_config: [%02x:%02x:%x] ofs=%d size=%d " "val=%x\n", busno, PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val); #endif switch (size) { default: return -EINVAL; case 1: return grpci2_cfg_w8(priv, busno, devfn, where, val); case 2: return grpci2_cfg_w16(priv, busno, devfn, where, val); case 4: return grpci2_cfg_w32(priv, busno, devfn, where, val); } } static struct pci_ops grpci2_ops = { .read = grpci2_read_config, .write = grpci2_write_config, }; /* GENIRQ IRQ chip implementation for GRPCI2 irqmode=0..2. In configuration * 3 where all PCI Interrupts has a separate IRQ on the system IRQ controller * this is not needed and the standard IRQ controller can be used. */ static void grpci2_mask_irq(struct irq_data *data) { unsigned long flags; unsigned int irqidx; struct grpci2_priv *priv = grpci2priv; irqidx = (unsigned int)data->chip_data - 1; if (irqidx > 3) /* only mask PCI interrupts here */ return; spin_lock_irqsave(&grpci2_dev_lock, flags); REGSTORE(priv->regs->ctrl, REGLOAD(priv->regs->ctrl) & ~(1 << irqidx)); spin_unlock_irqrestore(&grpci2_dev_lock, flags); } static void grpci2_unmask_irq(struct irq_data *data) { unsigned long flags; unsigned int irqidx; struct grpci2_priv *priv = grpci2priv; irqidx = (unsigned int)data->chip_data - 1; if (irqidx > 3) /* only unmask PCI interrupts here */ return; spin_lock_irqsave(&grpci2_dev_lock, flags); REGSTORE(priv->regs->ctrl, REGLOAD(priv->regs->ctrl) | (1 << irqidx)); spin_unlock_irqrestore(&grpci2_dev_lock, flags); } static unsigned int grpci2_startup_irq(struct irq_data *data) { grpci2_unmask_irq(data); return 0; } static void grpci2_shutdown_irq(struct irq_data *data) { grpci2_mask_irq(data); } static struct irq_chip grpci2_irq = { 
.name = "grpci2", .irq_startup = grpci2_startup_irq, .irq_shutdown = grpci2_shutdown_irq, .irq_mask = grpci2_mask_irq, .irq_unmask = grpci2_unmask_irq, }; /* Handle one or multiple IRQs from the PCI core */ static void grpci2_pci_flow_irq(struct irq_desc *desc) { struct grpci2_priv *priv = grpci2priv; int i, ack = 0; unsigned int ctrl, sts_cap, pci_ints; ctrl = REGLOAD(priv->regs->ctrl); sts_cap = REGLOAD(priv->regs->sts_cap); /* Error Interrupt? */ if (sts_cap & STS_ERR_IRQ) { generic_handle_irq(priv->virq_err); ack = 1; } /* PCI Interrupt? */ pci_ints = ((~sts_cap) >> STS_INTSTS_BIT) & ctrl & CTRL_HOSTINT; if (pci_ints) { /* Call respective PCI Interrupt handler */ for (i = 0; i < 4; i++) { if (pci_ints & (1 << i)) generic_handle_irq(priv->irq_map[i]); } ack = 1; } /* * Decode DMA Interrupt only when shared with Err and PCI INTX#, when * the DMA is a unique IRQ the DMA interrupts doesn't end up here, they * goes directly to DMA ISR. */ if ((priv->irq_mode == 0) && (sts_cap & (STS_IDMA | STS_IDMAERR))) { generic_handle_irq(priv->virq_dma); ack = 1; } /* * Call "first level" IRQ chip end-of-irq handler. 
It will ACK LEON IRQ * Controller, this must be done after IRQ sources have been handled to * avoid double IRQ generation */ if (ack) desc->irq_data.chip->irq_eoi(&desc->irq_data); } /* Create a virtual IRQ */ static unsigned int grpci2_build_device_irq(unsigned int irq) { unsigned int virq = 0, pil; pil = 1 << 8; virq = irq_alloc(irq, pil); if (virq == 0) goto out; irq_set_chip_and_handler_name(virq, &grpci2_irq, handle_simple_irq, "pcilvl"); irq_set_chip_data(virq, (void *)irq); out: return virq; } static void grpci2_hw_init(struct grpci2_priv *priv) { u32 ahbadr, pciadr, bar_sz, capptr, io_map, data; struct grpci2_regs __iomem *regs = priv->regs; int i; struct grpci2_barcfg *barcfg = priv->tgtbars; /* Reset any earlier setup */ if (priv->do_reset) { printk(KERN_INFO "GRPCI2: Resetting PCI bus\n"); REGSTORE(regs->ctrl, CTRL_RESET); ssleep(1); /* Wait for boards to settle */ } REGSTORE(regs->ctrl, 0); REGSTORE(regs->sts_cap, ~0); /* Clear Status */ REGSTORE(regs->dma_ctrl, 0); REGSTORE(regs->dma_bdbase, 0); /* Translate I/O accesses to 0, I/O Space always @ PCI low 64Kbytes */ REGSTORE(regs->io_map, REGLOAD(regs->io_map) & 0x0000ffff); /* set 1:1 mapping between AHB -> PCI memory space, for all Masters * Each AHB master has it's own mapping registers. Max 16 AHB masters. */ for (i = 0; i < 16; i++) REGSTORE(regs->ahbmst_map[i], priv->pci_area); /* Get the GRPCI2 Host PCI ID */ grpci2_cfg_r32(priv, TGT, 0, PCI_VENDOR_ID, &priv->pciid); /* Get address to first (always defined) capability structure */ grpci2_cfg_r8(priv, TGT, 0, PCI_CAPABILITY_LIST, &capptr); /* Enable/Disable Byte twisting */ grpci2_cfg_r32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, &io_map); io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0); grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, io_map); /* Setup the Host's PCI Target BARs for other peripherals to access, * and do DMA to the host's memory. The target BARs can be sized and * enabled individually. 
* * User may set custom target BARs, but default is: * The first BARs is used to map kernel low (DMA is part of normal * region on sparc which is SRMMU_MAXMEM big) main memory 1:1 to the * PCI bus, the other BARs are disabled. We assume that the first BAR * is always available. */ for (i = 0; i < 6; i++) { if (barcfg[i].pciadr != ~0 && barcfg[i].ahbadr != ~0) { /* Target BARs must have the proper alignment */ ahbadr = barcfg[i].ahbadr; pciadr = barcfg[i].pciadr; bar_sz = ((pciadr - 1) & ~pciadr) + 1; } else { if (i == 0) { /* Map main memory */ bar_sz = 0xf0000008; /* 256MB prefetchable */ ahbadr = 0xf0000000 & (u32)__pa(PAGE_ALIGN( (unsigned long) &_end)); pciadr = ahbadr; } else { bar_sz = 0; ahbadr = 0; pciadr = 0; } } grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BARSIZE_OFS+i*4, bar_sz); grpci2_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_0+i*4, pciadr); grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr); printk(KERN_INFO " TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n", i, pciadr, ahbadr); } /* set as bus master and enable pci memory responses */ grpci2_cfg_r32(priv, TGT, 0, PCI_COMMAND, &data); data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); grpci2_cfg_w32(priv, TGT, 0, PCI_COMMAND, data); /* Enable Error respone (CPU-TRAP) on illegal memory access. 
*/ REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE); } static irqreturn_t grpci2_jump_interrupt(int irq, void *arg) { printk(KERN_ERR "GRPCI2: Jump IRQ happened\n"); return IRQ_NONE; } /* Handle GRPCI2 Error Interrupt */ static irqreturn_t grpci2_err_interrupt(int irq, void *arg) { struct grpci2_priv *priv = arg; struct grpci2_regs __iomem *regs = priv->regs; unsigned int status; status = REGLOAD(regs->sts_cap); if ((status & STS_ERR_IRQ) == 0) return IRQ_NONE; if (status & STS_IPARERR) printk(KERN_ERR "GRPCI2: Parity Error\n"); if (status & STS_ITGTABRT) printk(KERN_ERR "GRPCI2: Target Abort\n"); if (status & STS_IMSTABRT) printk(KERN_ERR "GRPCI2: Master Abort\n"); if (status & STS_ISYSERR) printk(KERN_ERR "GRPCI2: System Error\n"); /* Clear handled INT TYPE IRQs */ REGSTORE(regs->sts_cap, status & STS_ERR_IRQ); return IRQ_HANDLED; } static int grpci2_of_probe(struct platform_device *ofdev) { struct grpci2_regs __iomem *regs; struct grpci2_priv *priv; int err, i, len; const int *tmp; unsigned int capability; if (grpci2priv) { printk(KERN_ERR "GRPCI2: only one GRPCI2 core supported\n"); return -ENODEV; } if (ofdev->num_resources < 3) { printk(KERN_ERR "GRPCI2: not enough APB/AHB resources\n"); return -EIO; } /* Find Device Address */ regs = of_ioremap(&ofdev->resource[0], 0, resource_size(&ofdev->resource[0]), "grlib-grpci2 regs"); if (regs == NULL) { printk(KERN_ERR "GRPCI2: ioremap failed\n"); return -EIO; } /* * Check that we're in Host Slot and that we can act as a Host Bridge * and not only as target. 
*/ capability = REGLOAD(regs->sts_cap); if ((capability & STS_HOST) || !(capability & STS_MST)) { printk(KERN_INFO "GRPCI2: not in host system slot\n"); err = -EIO; goto err1; } priv = grpci2priv = kzalloc(sizeof(struct grpci2_priv), GFP_KERNEL); if (grpci2priv == NULL) { err = -ENOMEM; goto err1; } priv->regs = regs; priv->irq = ofdev->archdata.irqs[0]; /* BASE IRQ */ priv->irq_mode = (capability & STS_IRQMODE) >> STS_IRQMODE_BIT; printk(KERN_INFO "GRPCI2: host found at %p, irq%d\n", regs, priv->irq); /* Byte twisting should be made configurable from kernel command line */ priv->bt_enabled = 1; /* Let user do custom Target BAR assignment */ tmp = of_get_property(ofdev->dev.of_node, "barcfg", &len); if (tmp && (len == 2*4*6)) memcpy(priv->tgtbars, tmp, 2*4*6); else memset(priv->tgtbars, -1, 2*4*6); /* Limit IRQ unmasking in irq_mode 2 and 3 */ tmp = of_get_property(ofdev->dev.of_node, "irq_mask", &len); if (tmp && (len == 4)) priv->do_reset = *tmp; else priv->irq_mask = 0xf; /* Optional PCI reset. Force PCI reset on startup */ tmp = of_get_property(ofdev->dev.of_node, "reset", &len); if (tmp && (len == 4)) priv->do_reset = *tmp; else priv->do_reset = 0; /* Find PCI Memory, I/O and Configuration Space Windows */ priv->pci_area = ofdev->resource[1].start; priv->pci_area_end = ofdev->resource[1].end+1; priv->pci_io = ofdev->resource[2].start; priv->pci_conf = ofdev->resource[2].start + 0x10000; priv->pci_conf_end = priv->pci_conf + 0x10000; priv->pci_io_va = (unsigned long)ioremap(priv->pci_io, 0x10000); if (!priv->pci_io_va) { err = -EIO; goto err2; } printk(KERN_INFO "GRPCI2: MEMORY SPACE [0x%08lx - 0x%08lx]\n" " I/O SPACE [0x%08lx - 0x%08lx]\n" " CONFIG SPACE [0x%08lx - 0x%08lx]\n", priv->pci_area, priv->pci_area_end-1, priv->pci_io, priv->pci_conf-1, priv->pci_conf, priv->pci_conf_end-1); /* * I/O Space resources in I/O Window mapped into Virtual Adr Space * We never use low 4KB because some devices seem have problems using * address 0. 
*/ memset(&priv->info.io_space, 0, sizeof(struct resource)); priv->info.io_space.name = "GRPCI2 PCI I/O Space"; priv->info.io_space.start = priv->pci_io_va + 0x1000; priv->info.io_space.end = priv->pci_io_va + 0x10000 - 1; priv->info.io_space.flags = IORESOURCE_IO; /* * GRPCI2 has no prefetchable memory, map everything as * non-prefetchable memory */ memset(&priv->info.mem_space, 0, sizeof(struct resource)); priv->info.mem_space.name = "GRPCI2 PCI MEM Space"; priv->info.mem_space.start = priv->pci_area; priv->info.mem_space.end = priv->pci_area_end - 1; priv->info.mem_space.flags = IORESOURCE_MEM; if (request_resource(&iomem_resource, &priv->info.mem_space) < 0) goto err3; if (request_resource(&ioport_resource, &priv->info.io_space) < 0) goto err4; /* setup maximum supported PCI buses */ priv->info.busn.name = "GRPCI2 busn"; priv->info.busn.start = 0; priv->info.busn.end = 255; grpci2_hw_init(priv); /* * Get PCI Interrupt to System IRQ mapping and setup IRQ handling * Error IRQ always on PCI INTA. 
*/ if (priv->irq_mode < 2) { /* All PCI interrupts are shared using the same system IRQ */ leon_update_virq_handling(priv->irq, grpci2_pci_flow_irq, "pcilvl", 0); priv->irq_map[0] = grpci2_build_device_irq(1); priv->irq_map[1] = grpci2_build_device_irq(2); priv->irq_map[2] = grpci2_build_device_irq(3); priv->irq_map[3] = grpci2_build_device_irq(4); priv->virq_err = grpci2_build_device_irq(5); if (priv->irq_mode & 1) priv->virq_dma = ofdev->archdata.irqs[1]; else priv->virq_dma = grpci2_build_device_irq(6); /* Enable IRQs on LEON IRQ controller */ err = request_irq(priv->irq, grpci2_jump_interrupt, 0, "GRPCI2_JUMP", priv); if (err) printk(KERN_ERR "GRPCI2: ERR IRQ request failed\n"); } else { /* All PCI interrupts have an unique IRQ interrupt */ for (i = 0; i < 4; i++) { /* Make LEON IRQ layer handle level IRQ by acking */ leon_update_virq_handling(ofdev->archdata.irqs[i], handle_fasteoi_irq, "pcilvl", 1); priv->irq_map[i] = ofdev->archdata.irqs[i]; } priv->virq_err = priv->irq_map[0]; if (priv->irq_mode & 1) priv->virq_dma = ofdev->archdata.irqs[4]; else priv->virq_dma = priv->irq_map[0]; /* Unmask all PCI interrupts, request_irq will not do that */ REGSTORE(regs->ctrl, REGLOAD(regs->ctrl)|(priv->irq_mask&0xf)); } /* Setup IRQ handler for non-configuration space access errors */ err = request_irq(priv->virq_err, grpci2_err_interrupt, IRQF_SHARED, "GRPCI2_ERR", priv); if (err) { printk(KERN_DEBUG "GRPCI2: ERR VIRQ request failed: %d\n", err); goto err5; } /* * Enable Error Interrupts. 
PCI interrupts are unmasked once request_irq * is called by the PCI Device drivers */ REGSTORE(regs->ctrl, REGLOAD(regs->ctrl) | CTRL_EI | CTRL_SI); /* Init common layer and scan buses */ priv->info.ops = &grpci2_ops; priv->info.map_irq = grpci2_map_irq; leon_pci_init(ofdev, &priv->info); return 0; err5: release_resource(&priv->info.io_space); err4: release_resource(&priv->info.mem_space); err3: err = -ENOMEM; iounmap((void __iomem *)priv->pci_io_va); err2: kfree(priv); err1: of_iounmap(&ofdev->resource[0], regs, resource_size(&ofdev->resource[0])); return err; } static struct of_device_id grpci2_of_match[] = { { .name = "GAISLER_GRPCI2", }, { .name = "01_07c", }, {}, }; static struct platform_driver grpci2_of_driver = { .driver = { .name = "grpci2", .of_match_table = grpci2_of_match, }, .probe = grpci2_of_probe, }; static int __init grpci2_init(void) { return platform_driver_register(&grpci2_of_driver); } subsys_initcall(grpci2_init);
gpl-2.0
bravonova/kenzo
fs/fat/cache.c
1174
8791
/*
 * linux/fs/fat/cache.c
 *
 * Written 1992,1993 by Werner Almesberger
 *
 * Mar 1999. AV. Changed cache, so that it uses the starting cluster instead
 *	of inode number.
 * May 1999. AV. Fixed the bogosity with FAT32 (read "FAT28"). Fscking lusers.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include "fat.h"

/* this must be > 0. */
#define FAT_MAX_CACHE	8

/*
 * One cached run of the cluster chain: nr_contig clusters starting at
 * file cluster 'fcluster', mapped 1:1 onto disk clusters starting at
 * 'dcluster'. Entries live on the per-inode LRU list (cache_lru).
 */
struct fat_cache {
	struct list_head cache_list;
	int nr_contig;	/* number of contiguous clusters */
	int fcluster;	/* cluster number in the file. */
	int dcluster;	/* cluster number on disk. */
};

/*
 * A candidate cache entry built up while walking the chain; 'id' is
 * compared against the inode's cache_valid_id so that entries created
 * before an invalidation are discarded instead of inserted.
 */
struct fat_cache_id {
	unsigned int id;
	int nr_contig;
	int fcluster;
	int dcluster;
};

static inline int fat_max_cache(struct inode *inode)
{
	return FAT_MAX_CACHE;
}

static struct kmem_cache *fat_cache_cachep;

/* Slab constructor: make the entry safe to link/unlink from the start. */
static void init_once(void *foo)
{
	struct fat_cache *cache = (struct fat_cache *)foo;

	INIT_LIST_HEAD(&cache->cache_list);
}

/* Create the slab cache for fat_cache entries; -ENOMEM on failure. */
int __init fat_cache_init(void)
{
	fat_cache_cachep = kmem_cache_create("fat_cache",
				sizeof(struct fat_cache),
				0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
				init_once);
	if (fat_cache_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void fat_cache_destroy(void)
{
	kmem_cache_destroy(fat_cache_cachep);
}

static inline struct fat_cache *fat_cache_alloc(struct inode *inode)
{
	return kmem_cache_alloc(fat_cache_cachep, GFP_NOFS);
}

static inline void fat_cache_free(struct fat_cache *cache)
{
	/* must already be unlinked from the LRU */
	BUG_ON(!list_empty(&cache->cache_list));
	kmem_cache_free(fat_cache_cachep, cache);
}

/* Move 'cache' to the front of the inode's LRU (caller holds lru lock). */
static inline void fat_cache_update_lru(struct inode *inode,
					struct fat_cache *cache)
{
	if (MSDOS_I(inode)->cache_lru.next != &cache->cache_list)
		list_move(&cache->cache_list, &MSDOS_I(inode)->cache_lru);
}

/*
 * Look up the cache entry covering (or nearest below) file cluster 'fclus'.
 * On a hit, fills *cid with the entry, and *cached_fclus/*cached_dclus with
 * the closest cached position, returning the offset into the entry's run.
 * Returns -1 when nothing useful is cached.
 */
static int fat_cache_lookup(struct inode *inode, int fclus,
			    struct fat_cache_id *cid,
			    int *cached_fclus, int *cached_dclus)
{
	static struct fat_cache nohit = { .fcluster = 0, };

	struct fat_cache *hit = &nohit, *p;
	int offset = -1;

	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
		/* Find the cache of "fclus" or nearest cache. */
		if (p->fcluster <= fclus && hit->fcluster < p->fcluster) {
			hit = p;
			if ((hit->fcluster + hit->nr_contig) < fclus) {
				/* entry ends before fclus: best so far,
				 * keep scanning for a closer one */
				offset = hit->nr_contig;
			} else {
				/* fclus is inside this run: exact hit */
				offset = fclus - hit->fcluster;
				break;
			}
		}
	}
	if (hit != &nohit) {
		fat_cache_update_lru(inode, hit);

		cid->id = MSDOS_I(inode)->cache_valid_id;
		cid->nr_contig = hit->nr_contig;
		cid->fcluster = hit->fcluster;
		cid->dcluster = hit->dcluster;
		*cached_fclus = cid->fcluster + offset;
		*cached_dclus = cid->dcluster + offset;
	}
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);

	return offset;
}

/*
 * Find an existing entry that starts at the same file cluster as 'new';
 * if found, extend its run length if 'new' is longer. Caller holds the
 * lru lock. Returns the merged entry or NULL.
 */
static struct fat_cache *fat_cache_merge(struct inode *inode,
					 struct fat_cache_id *new)
{
	struct fat_cache *p;

	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
		/* Find the same part as "new" in cluster-chain. */
		if (p->fcluster == new->fcluster) {
			BUG_ON(p->dcluster != new->dcluster);
			if (new->nr_contig > p->nr_contig)
				p->nr_contig = new->nr_contig;
			return p;
		}
	}
	return NULL;
}

/*
 * Insert (or merge) 'new' into the inode's cache. The lru lock must be
 * dropped around the allocation (GFP_NOFS can sleep), so a second merge
 * attempt is made after re-taking it. If the cache is full, the LRU tail
 * entry is recycled in place. Stale ids (entries built before an
 * invalidation) are dropped.
 */
static void fat_cache_add(struct inode *inode, struct fat_cache_id *new)
{
	struct fat_cache *cache, *tmp;

	if (new->fcluster == -1) /* dummy cache */
		return;

	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	if (new->id != FAT_CACHE_VALID &&
	    new->id != MSDOS_I(inode)->cache_valid_id)
		goto out;	/* this cache was invalidated */

	cache = fat_cache_merge(inode, new);
	if (cache == NULL) {
		if (MSDOS_I(inode)->nr_caches < fat_max_cache(inode)) {
			/* reserve a slot, then allocate outside the lock */
			MSDOS_I(inode)->nr_caches++;
			spin_unlock(&MSDOS_I(inode)->cache_lru_lock);

			tmp = fat_cache_alloc(inode);
			if (!tmp) {
				/* allocation failed: give the slot back */
				spin_lock(&MSDOS_I(inode)->cache_lru_lock);
				MSDOS_I(inode)->nr_caches--;
				spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
				return;
			}

			spin_lock(&MSDOS_I(inode)->cache_lru_lock);
			/* someone may have added it while unlocked */
			cache = fat_cache_merge(inode, new);
			if (cache != NULL) {
				MSDOS_I(inode)->nr_caches--;
				fat_cache_free(tmp);
				goto out_update_lru;
			}
			cache = tmp;
		} else {
			/* cache full: recycle the least-recently-used entry */
			struct list_head *p = MSDOS_I(inode)->cache_lru.prev;
			cache = list_entry(p, struct fat_cache, cache_list);
		}
		cache->fcluster = new->fcluster;
		cache->dcluster = new->dcluster;
		cache->nr_contig = new->nr_contig;
	}
out_update_lru:
	fat_cache_update_lru(inode, cache);
out:
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
}

/*
 * Cache invalidation occurs rarely, thus the LRU chain is not updated. It
 * fixes itself after a while.
 */
static void __fat_cache_inval_inode(struct inode *inode)
{
	struct msdos_inode_info *i = MSDOS_I(inode);
	struct fat_cache *cache;

	while (!list_empty(&i->cache_lru)) {
		cache = list_entry(i->cache_lru.next,
				   struct fat_cache, cache_list);
		list_del_init(&cache->cache_list);
		i->nr_caches--;
		fat_cache_free(cache);
	}
	/* Update. The copy of caches before this id is discarded. */
	i->cache_valid_id++;
	if (i->cache_valid_id == FAT_CACHE_VALID)
		i->cache_valid_id++;
}

void fat_cache_inval_inode(struct inode *inode)
{
	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	__fat_cache_inval_inode(inode);
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
}

/*
 * Extend the run by one and test whether 'dclus' continues it. Note the
 * increment happens unconditionally, before the comparison.
 */
static inline int cache_contiguous(struct fat_cache_id *cid, int dclus)
{
	cid->nr_contig++;
	return ((cid->dcluster + cid->nr_contig) == dclus);
}

/* Start a new candidate run at (fclus, dclus). */
static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus)
{
	cid->id = FAT_CACHE_VALID;
	cid->fcluster = fclus;
	cid->dcluster = dclus;
	cid->nr_contig = 0;
}

/*
 * Walk the FAT chain of 'inode' to file cluster 'cluster', using the cache
 * to skip ahead when possible and caching the runs it discovers.
 * On success *fclus/*dclus hold the reached file/disk cluster; returns 0,
 * FAT_ENT_EOF if the chain ends first, or -EIO on a corrupt chain.
 */
int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
{
	struct super_block *sb = inode->i_sb;
	const int limit = sb->s_maxbytes >> MSDOS_SB(sb)->cluster_bits;
	struct fat_entry fatent;
	struct fat_cache_id cid;
	int nr;

	BUG_ON(MSDOS_I(inode)->i_start == 0);

	*fclus = 0;
	*dclus = MSDOS_I(inode)->i_start;
	if (cluster == 0)
		return 0;

	if (fat_cache_lookup(inode, cluster, &cid, fclus, dclus) < 0) {
		/*
		 * dummy, always not contiguous
		 * This is reinitialized by cache_init(), later.
		 */
		cache_init(&cid, -1, -1);
	}

	fatent_init(&fatent);
	while (*fclus < cluster) {
		/* prevent the infinite loop of cluster chain */
		if (*fclus > limit) {
			fat_fs_error_ratelimit(sb,
					"%s: detected the cluster chain loop"
					" (i_pos %lld)", __func__,
					MSDOS_I(inode)->i_pos);
			nr = -EIO;
			goto out;
		}

		nr = fat_ent_read(inode, &fatent, *dclus);
		if (nr < 0)
			goto out;
		else if (nr == FAT_ENT_FREE) {
			/* a free cluster inside a chain is corruption */
			fat_fs_error_ratelimit(sb,
				       "%s: invalid cluster chain (i_pos %lld)",
				       __func__,
				       MSDOS_I(inode)->i_pos);
			nr = -EIO;
			goto out;
		} else if (nr == FAT_ENT_EOF) {
			fat_cache_add(inode, &cid);
			goto out;
		}
		(*fclus)++;
		*dclus = nr;
		if (!cache_contiguous(&cid, *dclus))
			cache_init(&cid, *fclus, *dclus);
	}
	nr = 0;
	fat_cache_add(inode, &cid);
out:
	fatent_brelse(&fatent);
	return nr;
}

/*
 * Map file cluster 'cluster' to its disk cluster. Returns the disk
 * cluster, 0 for an empty file, or a negative error (EOF before the
 * requested cluster is reported as -EIO).
 */
static int fat_bmap_cluster(struct inode *inode, int cluster)
{
	struct super_block *sb = inode->i_sb;
	int ret, fclus, dclus;

	if (MSDOS_I(inode)->i_start == 0)
		return 0;

	ret = fat_get_cluster(inode, cluster, &fclus, &dclus);
	if (ret < 0)
		return ret;
	else if (ret == FAT_ENT_EOF) {
		fat_fs_error_ratelimit(sb,
				       "%s: request beyond EOF (i_pos %lld)",
				       __func__, MSDOS_I(inode)->i_pos);
		return -EIO;
	}
	return dclus;
}

/*
 * Map logical sector 'sector' of 'inode' to a physical block number in
 * *phys, with *mapped_blocks set to how many following sectors are
 * contiguous. *phys stays 0 when the sector is unmapped. With 'create',
 * sectors up to ->mmu_private (caller must hold ->i_mutex) are mapped
 * too. The FAT12/16 root directory is handled as a special fixed area.
 */
int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
	     unsigned long *mapped_blocks, int create)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const unsigned long blocksize = sb->s_blocksize;
	const unsigned char blocksize_bits = sb->s_blocksize_bits;
	sector_t last_block;
	int cluster, offset;

	*phys = 0;
	*mapped_blocks = 0;
	if ((sbi->fat_bits != 32) && (inode->i_ino == MSDOS_ROOT_INO)) {
		if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits)) {
			*phys = sector + sbi->dir_start;
			*mapped_blocks = 1;
		}
		return 0;
	}

	last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits;
	if (sector >= last_block) {
		if (!create)
			return 0;

		/*
		 * ->mmu_private can access on only allocation path.
		 * (caller must hold ->i_mutex)
		 */
		last_block = (MSDOS_I(inode)->mmu_private + (blocksize - 1))
			>> blocksize_bits;
		if (sector >= last_block)
			return 0;
	}

	cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits);
	offset = sector & (sbi->sec_per_clus - 1);
	cluster = fat_bmap_cluster(inode, cluster);
	if (cluster < 0)
		return cluster;
	else if (cluster) {
		*phys = fat_clus_to_blknr(sbi, cluster) + offset;
		*mapped_blocks = sbi->sec_per_clus - offset;
		if (*mapped_blocks > last_block - sector)
			*mapped_blocks = last_block - sector;
	}
	return 0;
}
gpl-2.0
assusdan/cyanogenmod_kernel_hs_zerasrs
tools/power/x86/turbostat/turbostat.c
1686
58458
/* * turbostat -- show CPU frequency and C-state residency * on modern Intel turbo-capable processors. * * Copyright (c) 2012 Intel Corporation. * Len Brown <len.brown@intel.com> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ #define _GNU_SOURCE #include MSRHEADER #include <stdio.h> #include <unistd.h> #include <sys/types.h> #include <sys/wait.h> #include <sys/stat.h> #include <sys/resource.h> #include <fcntl.h> #include <signal.h> #include <sys/time.h> #include <stdlib.h> #include <dirent.h> #include <string.h> #include <ctype.h> #include <sched.h> #include <cpuid.h> char *proc_stat = "/proc/stat"; unsigned int interval_sec = 5; /* set with -i interval_sec */ unsigned int verbose; /* set with -v */ unsigned int rapl_verbose; /* set with -R */ unsigned int thermal_verbose; /* set with -T */ unsigned int summary_only; /* set with -s */ unsigned int skip_c0; unsigned int skip_c1; unsigned int do_nhm_cstates; unsigned int do_snb_cstates; unsigned int do_c8_c9_c10; unsigned int has_aperf; unsigned int has_epb; unsigned int units = 1000000000; /* Ghz etc */ unsigned int genuine_intel; unsigned int has_invariant_tsc; unsigned int do_nehalem_platform_info; unsigned int do_nehalem_turbo_ratio_limit; unsigned int do_ivt_turbo_ratio_limit; unsigned int extra_msr_offset32; unsigned int extra_msr_offset64; unsigned int extra_delta_offset32; unsigned int extra_delta_offset64; int 
do_smi; double bclk; unsigned int show_pkg; unsigned int show_core; unsigned int show_cpu; unsigned int show_pkg_only; unsigned int show_core_only; char *output_buffer, *outp; unsigned int do_rapl; unsigned int do_dts; unsigned int do_ptm; unsigned int tcc_activation_temp; unsigned int tcc_activation_temp_override; double rapl_power_units, rapl_energy_units, rapl_time_units; double rapl_joule_counter_range; #define RAPL_PKG (1 << 0) #define RAPL_CORES (1 << 1) #define RAPL_GFX (1 << 2) #define RAPL_DRAM (1 << 3) #define RAPL_PKG_PERF_STATUS (1 << 4) #define RAPL_DRAM_PERF_STATUS (1 << 5) #define TJMAX_DEFAULT 100 #define MAX(a, b) ((a) > (b) ? (a) : (b)) int aperf_mperf_unstable; int backwards_count; char *progname; cpu_set_t *cpu_present_set, *cpu_affinity_set; size_t cpu_present_setsize, cpu_affinity_setsize; struct thread_data { unsigned long long tsc; unsigned long long aperf; unsigned long long mperf; unsigned long long c1; /* derived */ unsigned long long extra_msr64; unsigned long long extra_delta64; unsigned long long extra_msr32; unsigned long long extra_delta32; unsigned int smi_count; unsigned int cpu_id; unsigned int flags; #define CPU_IS_FIRST_THREAD_IN_CORE 0x2 #define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4 } *thread_even, *thread_odd; struct core_data { unsigned long long c3; unsigned long long c6; unsigned long long c7; unsigned int core_temp_c; unsigned int core_id; } *core_even, *core_odd; struct pkg_data { unsigned long long pc2; unsigned long long pc3; unsigned long long pc6; unsigned long long pc7; unsigned long long pc8; unsigned long long pc9; unsigned long long pc10; unsigned int package_id; unsigned int energy_pkg; /* MSR_PKG_ENERGY_STATUS */ unsigned int energy_dram; /* MSR_DRAM_ENERGY_STATUS */ unsigned int energy_cores; /* MSR_PP0_ENERGY_STATUS */ unsigned int energy_gfx; /* MSR_PP1_ENERGY_STATUS */ unsigned int rapl_pkg_perf_status; /* MSR_PKG_PERF_STATUS */ unsigned int rapl_dram_perf_status; /* MSR_DRAM_PERF_STATUS */ unsigned int 
pkg_temp_c; } *package_even, *package_odd; #define ODD_COUNTERS thread_odd, core_odd, package_odd #define EVEN_COUNTERS thread_even, core_even, package_even #define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \ (thread_base + (pkg_no) * topo.num_cores_per_pkg * \ topo.num_threads_per_core + \ (core_no) * topo.num_threads_per_core + (thread_no)) #define GET_CORE(core_base, core_no, pkg_no) \ (core_base + (pkg_no) * topo.num_cores_per_pkg + (core_no)) #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no) struct system_summary { struct thread_data threads; struct core_data cores; struct pkg_data packages; } sum, average; struct topo_params { int num_packages; int num_cpus; int num_cores; int max_cpu_num; int num_cores_per_pkg; int num_threads_per_core; } topo; struct timeval tv_even, tv_odd, tv_delta; void setup_all_buffers(void); int cpu_is_not_present(int cpu) { return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set); } /* * run func(thread, core, package) in topology order * skip non-present cpus */ int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *), struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base) { int retval, pkg_no, core_no, thread_no; for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) { for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) { for (thread_no = 0; thread_no < topo.num_threads_per_core; ++thread_no) { struct thread_data *t; struct core_data *c; struct pkg_data *p; t = GET_THREAD(thread_base, thread_no, core_no, pkg_no); if (cpu_is_not_present(t->cpu_id)) continue; c = GET_CORE(core_base, core_no, pkg_no); p = GET_PKG(pkg_base, pkg_no); retval = func(t, c, p); if (retval) return retval; } } } return 0; } int cpu_migrate(int cpu) { CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set); CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set); if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1) return -1; else return 0; } int 
get_msr(int cpu, off_t offset, unsigned long long *msr)
{
	/*
	 * Read one 64-bit MSR at register address 'offset' from the given CPU
	 * via the msr(4) character device /dev/cpu/<cpu>/msr.
	 * Returns 0 on success, -1 if the device can't be opened or the read
	 * comes back short.
	 */
	ssize_t retval;
	char pathname[32];
	int fd;

	sprintf(pathname, "/dev/cpu/%d/msr", cpu);
	fd = open(pathname, O_RDONLY);
	if (fd < 0)
		return -1;

	/* pread at 'offset' selects the MSR number; no seek state to manage */
	retval = pread(fd, msr, sizeof *msr, offset);
	close(fd);

	if (retval != sizeof *msr) {
		/* NOTE(review): %zx is for size_t; 'offset' is off_t — format may mismatch on some ABIs */
		fprintf(stderr, "%s offset 0x%zx read failed\n", pathname, offset);
		return -1;
	}

	return 0;
}

/*
 * Emit one header line into the global output buffer ('outp' cursor),
 * one column per counter enabled by the do_*/show_* feature flags.
 * Column order here must match the value order in format_counters().
 */
void print_header(void)
{
	if (show_pkg)
		outp += sprintf(outp, "pk");
	if (show_pkg)
		outp += sprintf(outp, " ");
	if (show_core)
		outp += sprintf(outp, "cor");
	if (show_cpu)
		outp += sprintf(outp, " CPU");
	if (show_pkg || show_core || show_cpu)
		outp += sprintf(outp, " ");
	if (do_nhm_cstates)
		outp += sprintf(outp, " %%c0");
	if (has_aperf)
		outp += sprintf(outp, " GHz");
	outp += sprintf(outp, " TSC");
	if (do_smi)
		outp += sprintf(outp, " SMI");
	if (extra_delta_offset32)
		outp += sprintf(outp, " count 0x%03X", extra_delta_offset32);
	if (extra_delta_offset64)
		outp += sprintf(outp, " COUNT 0x%03X", extra_delta_offset64);
	if (extra_msr_offset32)
		outp += sprintf(outp, " MSR 0x%03X", extra_msr_offset32);
	if (extra_msr_offset64)
		outp += sprintf(outp, " MSR 0x%03X", extra_msr_offset64);
	if (do_nhm_cstates)
		outp += sprintf(outp, " %%c1");
	if (do_nhm_cstates)
		outp += sprintf(outp, " %%c3");
	if (do_nhm_cstates)
		outp += sprintf(outp, " %%c6");
	if (do_snb_cstates)
		outp += sprintf(outp, " %%c7");
	if (do_dts)
		outp += sprintf(outp, " CTMP");
	if (do_ptm)
		outp += sprintf(outp, " PTMP");
	if (do_snb_cstates)
		outp += sprintf(outp, " %%pc2");
	if (do_nhm_cstates)
		outp += sprintf(outp, " %%pc3");
	if (do_nhm_cstates)
		outp += sprintf(outp, " %%pc6");
	if (do_snb_cstates)
		outp += sprintf(outp, " %%pc7");
	if (do_c8_c9_c10) {
		outp += sprintf(outp, " %%pc8");
		outp += sprintf(outp, " %%pc9");
		outp += sprintf(outp, " %%pc10");
	}

	if (do_rapl & RAPL_PKG)
		outp += sprintf(outp, " Pkg_W");
	if (do_rapl & RAPL_CORES)
		outp += sprintf(outp, " Cor_W");
	if (do_rapl & RAPL_GFX)
		outp += sprintf(outp, " GFX_W");
	if (do_rapl & RAPL_DRAM)
		outp += sprintf(outp, " RAM_W");
	if (do_rapl & RAPL_PKG_PERF_STATUS)
		outp += sprintf(outp, " PKG_%%");
	if (do_rapl & RAPL_DRAM_PERF_STATUS)
		outp += sprintf(outp, " RAM_%%");

	outp += sprintf(outp, "\n");
}

/*
 * Raw debug dump of a thread/core/package counter set to stderr.
 * Any of t/c/p may be NULL; only non-NULL levels are printed.
 */
int dump_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
	fprintf(stderr, "t %p, c %p, p %p\n", t, c, p);

	if (t) {
		fprintf(stderr, "CPU: %d flags 0x%x\n", t->cpu_id, t->flags);
		fprintf(stderr, "TSC: %016llX\n", t->tsc);
		fprintf(stderr, "aperf: %016llX\n", t->aperf);
		fprintf(stderr, "mperf: %016llX\n", t->mperf);
		fprintf(stderr, "c1: %016llX\n", t->c1);
		fprintf(stderr, "msr0x%x: %08llX\n", extra_delta_offset32, t->extra_delta32);
		fprintf(stderr, "msr0x%x: %016llX\n", extra_delta_offset64, t->extra_delta64);
		fprintf(stderr, "msr0x%x: %08llX\n", extra_msr_offset32, t->extra_msr32);
		fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset64, t->extra_msr64);
		if (do_smi)
			fprintf(stderr, "SMI: %08X\n", t->smi_count);
	}
	if (c) {
		fprintf(stderr, "core: %d\n", c->core_id);
		fprintf(stderr, "c3: %016llX\n", c->c3);
		fprintf(stderr, "c6: %016llX\n", c->c6);
		fprintf(stderr, "c7: %016llX\n", c->c7);
		fprintf(stderr, "DTS: %dC\n", c->core_temp_c);
	}
	if (p) {
		fprintf(stderr, "package: %d\n", p->package_id);
		fprintf(stderr, "pc2: %016llX\n", p->pc2);
		fprintf(stderr, "pc3: %016llX\n", p->pc3);
		fprintf(stderr, "pc6: %016llX\n", p->pc6);
		fprintf(stderr, "pc7: %016llX\n", p->pc7);
		fprintf(stderr, "pc8: %016llX\n", p->pc8);
		fprintf(stderr, "pc9: %016llX\n", p->pc9);
		fprintf(stderr, "pc10: %016llX\n", p->pc10);
		fprintf(stderr, "Joules PKG: %0X\n", p->energy_pkg);
		fprintf(stderr, "Joules COR: %0X\n", p->energy_cores);
		fprintf(stderr, "Joules GFX: %0X\n", p->energy_gfx);
		fprintf(stderr, "Joules RAM: %0X\n", p->energy_dram);
		fprintf(stderr, "Throttle PKG: %0X\n", p->rapl_pkg_perf_status);
		fprintf(stderr, "Throttle RAM: %0X\n", p->rapl_dram_perf_status);
		fprintf(stderr, "PTM: %dC\n", p->pkg_temp_c);
	}
	return 0;
}

/*
 * column formatting convention & formats
 * package: "pk" 2 columns %2d
 * core: "cor" 3 columns %3d
 * CPU: "CPU" 3 columns %3d
 * Pkg_W: %6.2
 * Cor_W: %6.2
 * GFX_W: %5.2
 * RAM_W: %5.2
 * GHz: "GHz" 3 columns %3.2
 * TSC: "TSC" 3 columns %3.2
 * SMI: "SMI" 4 columns %4d
 * percentage " %pc3" %6.2
 * Perf Status percentage: %5.2
 * "CTMP" 4 columns %4d
 */
/*
 * Format one row of counter values into the output buffer.
 * Per-core fields are emitted only for the first thread in a core,
 * per-package fields only for the first core in a package.
 * When t points at the global 'average' set, the topology columns
 * are printed blank.
 */
int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
	double interval_float;
	char *fmt5, *fmt6;

	/* if showing only 1st thread in core and this isn't one, bail out */
	if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
		return 0;

	/* if showing only 1st thread in pkg and this isn't one, bail out */
	if (show_pkg_only && !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
		return 0;

	interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;

	/* topo columns, print blanks on 1st (average) line */
	if (t == &average.threads) {
		if (show_pkg)
			outp += sprintf(outp, " ");
		if (show_pkg && show_core)
			outp += sprintf(outp, " ");
		if (show_core)
			outp += sprintf(outp, " ");
		if (show_cpu)
			outp += sprintf(outp, " " " ");
	} else {
		if (show_pkg) {
			if (p)
				outp += sprintf(outp, "%2d", p->package_id);
			else
				outp += sprintf(outp, " ");
		}
		if (show_pkg && show_core)
			outp += sprintf(outp, " ");
		if (show_core) {
			if (c)
				outp += sprintf(outp, "%3d", c->core_id);
			else
				outp += sprintf(outp, " ");
		}
		if (show_cpu)
			outp += sprintf(outp, " %3d", t->cpu_id);
	}

	/* %c0 — non-halted cycles as a fraction of TSC cycles */
	if (do_nhm_cstates) {
		if (show_pkg || show_core || show_cpu)
			outp += sprintf(outp, " ");
		if (!skip_c0)
			outp += sprintf(outp, "%6.2f", 100.0 * t->mperf/t->tsc);
		else
			outp += sprintf(outp, " ****");	/* mperf went backwards this interval */
	}

	/* GHz — average frequency over the interval from APERF/MPERF ratio */
	if (has_aperf) {
		if (!aperf_mperf_unstable) {
			outp += sprintf(outp, " %3.2f",
				1.0 * t->tsc / units * t->aperf / t->mperf / interval_float);
		} else {
			if (t->aperf > t->tsc || t->mperf > t->tsc) {
				outp += sprintf(outp, " ***");
			} else {
				/* '*' suffix flags a result measured over a partial interval */
				outp += sprintf(outp, "%3.1f*",
					1.0 * t->tsc / units * t->aperf / t->mperf / interval_float);
			}
		}
	}

	/* TSC */
	outp += sprintf(outp, "%5.2f", 1.0 * t->tsc/units/interval_float);

	/* SMI */
	if (do_smi)
		outp += sprintf(outp, "%4d", t->smi_count);

	/* delta */
	if (extra_delta_offset32)
		outp += sprintf(outp, " %11llu", t->extra_delta32);

	/* DELTA */
	if (extra_delta_offset64)
		outp += sprintf(outp, " %11llu", t->extra_delta64);
	/* msr */
	if (extra_msr_offset32)
		outp += sprintf(outp, " 0x%08llx", t->extra_msr32);

	/* MSR */
	if (extra_msr_offset64)
		outp += sprintf(outp, " 0x%016llx", t->extra_msr64);

	if (do_nhm_cstates) {
		if (!skip_c1)
			outp += sprintf(outp, " %6.2f", 100.0 * t->c1/t->tsc);
		else
			outp += sprintf(outp, " ****");
	}

	/* print per-core data only for 1st thread in core */
	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
		goto done;

	if (do_nhm_cstates)
		outp += sprintf(outp, " %6.2f", 100.0 * c->c3/t->tsc);
	if (do_nhm_cstates)
		outp += sprintf(outp, " %6.2f", 100.0 * c->c6/t->tsc);
	if (do_snb_cstates)
		outp += sprintf(outp, " %6.2f", 100.0 * c->c7/t->tsc);

	if (do_dts)
		outp += sprintf(outp, " %4d", c->core_temp_c);

	/* print per-package data only for 1st core in package */
	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
		goto done;

	if (do_ptm)
		outp += sprintf(outp, " %4d", p->pkg_temp_c);

	if (do_snb_cstates)
		outp += sprintf(outp, " %6.2f", 100.0 * p->pc2/t->tsc);
	if (do_nhm_cstates)
		outp += sprintf(outp, " %6.2f", 100.0 * p->pc3/t->tsc);
	if (do_nhm_cstates)
		outp += sprintf(outp, " %6.2f", 100.0 * p->pc6/t->tsc);
	if (do_snb_cstates)
		outp += sprintf(outp, " %6.2f", 100.0 * p->pc7/t->tsc);
	if (do_c8_c9_c10) {
		outp += sprintf(outp, " %6.2f", 100.0 * p->pc8/t->tsc);
		outp += sprintf(outp, " %6.2f", 100.0 * p->pc9/t->tsc);
		outp += sprintf(outp, " %6.2f", 100.0 * p->pc10/t->tsc);
	}

	/*
	 * If measurement interval exceeds minimum RAPL Joule Counter range,
	 * indicate that results are suspect by printing "**" in fraction place.
	 */
	if (interval_float < rapl_joule_counter_range) {
		fmt5 = " %5.2f";
		fmt6 = " %6.2f";
	} else {
		fmt5 = " %3.0f**";
		fmt6 = " %4.0f**";
	}

	if (do_rapl & RAPL_PKG)
		outp += sprintf(outp, fmt6, p->energy_pkg * rapl_energy_units / interval_float);
	if (do_rapl & RAPL_CORES)
		outp += sprintf(outp, fmt6, p->energy_cores * rapl_energy_units / interval_float);
	if (do_rapl & RAPL_GFX)
		outp += sprintf(outp, fmt5, p->energy_gfx * rapl_energy_units / interval_float);
	if (do_rapl & RAPL_DRAM)
		outp += sprintf(outp, fmt5, p->energy_dram * rapl_energy_units / interval_float);
	if (do_rapl & RAPL_PKG_PERF_STATUS)
		outp += sprintf(outp, fmt5, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
	if (do_rapl & RAPL_DRAM_PERF_STATUS)
		outp += sprintf(outp, fmt5, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
done:
	outp += sprintf(outp, "\n");

	return 0;
}

/* Flush the accumulated output buffer to stdout and reset the cursor. */
void flush_stdout()
{
	fputs(output_buffer, stdout);
	fflush(stdout);
	outp = output_buffer;
}
/* Flush the accumulated output buffer to stderr and reset the cursor. */
void flush_stderr()
{
	fputs(output_buffer, stderr);
	outp = output_buffer;
}

/*
 * Format the header (first time, or every interval unless summary_only),
 * the system-wide average row, and then one row per CPU in topology order.
 */
void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
	static int printed;

	if (!printed || !summary_only)
		print_header();

	if (topo.num_cpus > 1)
		format_counters(&average.threads, &average.cores, &average.packages);

	printed = 1;

	if (summary_only)
		return;

	for_all_cpus(format_counters, t, c, p);
}

/*
 * old = new - old, assuming the counter is a 32-bit value that may have
 * wrapped once between samples.
 * NOTE(review): bare if/else multi-statement macro — not do{}while(0)
 * wrapped and arguments unparenthesized; callers must not use it in an
 * un-braced else branch or pass expressions with low-precedence operators.
 */
#define DELTA_WRAP32(new, old) \
	if (new > old) { \
		old = new - old; \
	} else { \
		old = 0x100000000 + new - old; \
	}

/* old = new - old for every per-package counter; temperature is copied, not differenced */
void delta_package(struct pkg_data *new, struct pkg_data *old)
{
	old->pc2 = new->pc2 - old->pc2;
	old->pc3 = new->pc3 - old->pc3;
	old->pc6 = new->pc6 - old->pc6;
	old->pc7 = new->pc7 - old->pc7;
	old->pc8 = new->pc8 - old->pc8;
	old->pc9 = new->pc9 - old->pc9;
	old->pc10 = new->pc10 - old->pc10;
	old->pkg_temp_c = new->pkg_temp_c;

	/* RAPL energy/throttle counters are 32-bit and may wrap */
	DELTA_WRAP32(new->energy_pkg, old->energy_pkg);
	DELTA_WRAP32(new->energy_cores, old->energy_cores);
	DELTA_WRAP32(new->energy_gfx, old->energy_gfx);
	DELTA_WRAP32(new->energy_dram, old->energy_dram);
	DELTA_WRAP32(new->rapl_pkg_perf_status, old->rapl_pkg_perf_status);
	DELTA_WRAP32(new->rapl_dram_perf_status, old->rapl_dram_perf_status);
}

/* old = new - old for every per-core counter; temperature is copied, not differenced */
void delta_core(struct core_data *new, struct core_data *old)
{
	old->c3 = new->c3 - old->c3;
	old->c6 = new->c6 - old->c6;
	old->c7 = new->c7 - old->c7;
	old->core_temp_c = new->core_temp_c;
}

/*
 * old = new - old
 */
void delta_thread(struct thread_data *new, struct thread_data *old, struct core_data *core_delta)
{
	old->tsc = new->tsc - old->tsc;

	/* check for TSC < 1 Mcycles over interval */
	if (old->tsc < (1000 * 1000)) {
		fprintf(stderr, "Insanely slow TSC rate, TSC stops in idle?\n");
		fprintf(stderr, "You can disable all c-states by booting with \"idle=poll\"\n");
		fprintf(stderr, "or just the deep ones with \"processor.max_cstate=1\"\n");
		exit(-3);
	}

	old->c1 = new->c1 - old->c1;

	if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
		old->aperf = new->aperf - old->aperf;
		old->mperf = new->mperf - old->mperf;
	} else {
		/* APERF/MPERF not monotonic: warn once, then suppress %c0/%c1 columns */
		if (!aperf_mperf_unstable) {
			fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname);
			fprintf(stderr, "* Frequency results do not cover entire interval *\n");
			fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n");

			aperf_mperf_unstable = 1;
		}
		/*
		 * mperf delta is likely a huge "positive" number
		 * can not use it for calculating c0 time
		 */
		skip_c0 = 1;
		skip_c1 = 1;
	}

	/*
	 * As counter collection is not atomic,
	 * it is possible for mperf's non-halted cycles + idle states
	 * to exceed TSC's all cycles: show c1 = 0% in that case.
	 */
	if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
		old->c1 = 0;
	else {
		/* normal case, derive c1 */
		old->c1 = old->tsc - old->mperf - core_delta->c3
			- core_delta->c6 - core_delta->c7;
	}

	if (old->mperf == 0) {
		if (verbose > 1)
			fprintf(stderr, "cpu%d MPERF 0!\n", old->cpu_id);
		old->mperf = 1;	/* divide by 0 protection */
	}

	old->extra_delta32 = new->extra_delta32 - old->extra_delta32;
	old->extra_delta32 &= 0xFFFFFFFF;

	old->extra_delta64 = new->extra_delta64 - old->extra_delta64;

	/*
	 * Extra MSR is just a snapshot, simply copy latest w/o subtracting
	 */
	old->extra_msr32 = new->extra_msr32;
	old->extra_msr64 = new->extra_msr64;

	if (do_smi)
		old->smi_count = new->smi_count - old->smi_count;
}

/*
 * Compute deltas at every topology level for one CPU:
 * core delta first (delta_thread consumes it to derive c1), then
 * thread delta, then package delta. Results land in the *2 set.
 */
int delta_cpu(struct thread_data *t, struct core_data *c, struct pkg_data *p,
	struct thread_data *t2, struct core_data *c2, struct pkg_data *p2)
{
	/* calculate core delta only for 1st thread in core */
	if (t->flags & CPU_IS_FIRST_THREAD_IN_CORE)
		delta_core(c, c2);

	/* always calculate thread delta */
	delta_thread(t, t2, c2);	/* c2 is core delta */

	/* calculate package delta only for 1st core in package */
	if (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)
		delta_package(p, p2);

	return 0;
}

/* Zero a full thread/core/package counter set (used to seed the averages). */
void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
	t->tsc = 0;
	t->aperf = 0;
	t->mperf = 0;
	t->c1 = 0;
	t->smi_count = 0;
	t->extra_delta32 = 0;
	t->extra_delta64 = 0;

	/* tells format_counters to dump all fields from this set */
	t->flags = CPU_IS_FIRST_THREAD_IN_CORE | CPU_IS_FIRST_CORE_IN_PACKAGE;

	c->c3 = 0;
	c->c6 = 0;
	c->c7 = 0;
	c->core_temp_c = 0;

	p->pc2 = 0;
	p->pc3 = 0;
	p->pc6 = 0;
	p->pc7 = 0;
	p->pc8 = 0;
	p->pc9 = 0;
	p->pc10 = 0;

	p->energy_pkg = 0;
	p->energy_dram = 0;
	p->energy_cores = 0;
	p->energy_gfx = 0;
	p->rapl_pkg_perf_status = 0;
	p->rapl_dram_perf_status = 0;
	p->pkg_temp_c = 0;
}

/*
 * Accumulate one CPU's counters into the global 'average' set.
 * Temperatures take the MAX rather than a sum; per-core/per-package
 * values are added only once per core/package.
 */
int sum_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
	average.threads.tsc += t->tsc;
	average.threads.aperf += t->aperf;
	average.threads.mperf += t->mperf;
	average.threads.c1 += t->c1;

	average.threads.extra_delta32 += t->extra_delta32;
	average.threads.extra_delta64 += t->extra_delta64;

	/* sum per-core values only for 1st thread in core */
	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
		return 0;

	average.cores.c3 += c->c3;
	average.cores.c6 += c->c6;
	average.cores.c7 += c->c7;

	average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c);

	/* sum per-pkg values only for 1st core in pkg */
	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
		return 0;

	average.packages.pc2 += p->pc2;
	average.packages.pc3 += p->pc3;
	average.packages.pc6 += p->pc6;
	average.packages.pc7 += p->pc7;
	average.packages.pc8 += p->pc8;
	average.packages.pc9 += p->pc9;
	average.packages.pc10 += p->pc10;

	average.packages.energy_pkg += p->energy_pkg;
	average.packages.energy_dram += p->energy_dram;
	average.packages.energy_cores += p->energy_cores;
	average.packages.energy_gfx += p->energy_gfx;

	average.packages.pkg_temp_c = MAX(average.packages.pkg_temp_c, p->pkg_temp_c);

	average.packages.rapl_pkg_perf_status += p->rapl_pkg_perf_status;
	average.packages.rapl_dram_perf_status += p->rapl_dram_perf_status;
	return 0;
}
/*
 * sum the counters for all cpus in the system
 * compute the weighted average
 */
void compute_average(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
	clear_counters(&average.threads, &average.cores, &average.packages);

	for_all_cpus(sum_counters, t, c, p);

	average.threads.tsc /= topo.num_cpus;
	average.threads.aperf /= topo.num_cpus;
	average.threads.mperf /= topo.num_cpus;
	average.threads.c1 /= topo.num_cpus;

	average.threads.extra_delta32 /= topo.num_cpus;
	average.threads.extra_delta32 &= 0xFFFFFFFF;

	average.threads.extra_delta64 /= topo.num_cpus;

	average.cores.c3 /= topo.num_cores;
	average.cores.c6 /= topo.num_cores;
	average.cores.c7 /= topo.num_cores;

	average.packages.pc2 /= topo.num_packages;
	average.packages.pc3 /= topo.num_packages;
	average.packages.pc6 /= topo.num_packages;
	average.packages.pc7 /= topo.num_packages;

	average.packages.pc8 /= topo.num_packages;
	average.packages.pc9 /= topo.num_packages;
	average.packages.pc10 /= topo.num_packages;
}

/* Read the time-stamp counter of the CPU we are currently running on. */
static unsigned long long rdtsc(void)
{
	unsigned int low, high;

	asm volatile("rdtsc" : "=a" (low), "=d" (high));

	return low | ((unsigned long long)high) << 32;
}


/*
 * get_counters(...)
 * migrate to cpu
 * acquire and record local counters for that cpu
 * Returns 0 on success, a distinct negative code identifying which
 * MSR read failed otherwise (-1 means the migrate itself failed).
 */
int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
	int cpu = t->cpu_id;
	unsigned long long msr;

	if (cpu_migrate(cpu)) {
		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
		return -1;
	}

	t->tsc = rdtsc();	/* we are running on local CPU of interest */

	if (has_aperf) {
		if (get_msr(cpu, MSR_IA32_APERF, &t->aperf))
			return -3;
		if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf))
			return -4;
	}

	if (do_smi) {
		if (get_msr(cpu, MSR_SMI_COUNT, &msr))
			return -5;
		t->smi_count = msr & 0xFFFFFFFF;
	}
	if (extra_delta_offset32) {
		if (get_msr(cpu, extra_delta_offset32, &msr))
			return -5;
		t->extra_delta32 = msr & 0xFFFFFFFF;
	}

	if (extra_delta_offset64)
		if (get_msr(cpu, extra_delta_offset64, &t->extra_delta64))
			return -5;

	if (extra_msr_offset32) {
		if (get_msr(cpu, extra_msr_offset32, &msr))
			return -5;
		t->extra_msr32 = msr & 0xFFFFFFFF;
	}

	if (extra_msr_offset64)
		if (get_msr(cpu, extra_msr_offset64, &t->extra_msr64))
			return -5;

	/* collect core counters only for 1st thread in core */
	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
		return 0;

	if (do_nhm_cstates) {
		if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
			return -6;
		if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
			return -7;
	}

	if (do_snb_cstates)
		if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7))
			return -8;

	if (do_dts) {
		if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
			return -9;
		/* DTS reports degrees below the TCC activation point */
		c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
	}


	/* collect package counters only for 1st core in package */
	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
		return 0;

	if (do_nhm_cstates) {
		if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3))
			return -9;
		if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6))
			return -10;
	}
	if (do_snb_cstates) {
		if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2))
			return -11;
		if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7))
			return -12;
	}
	if (do_c8_c9_c10) {
		if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8))
			return -13;
		if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9))
			return -13;
		if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10))
			return -13;
	}
	if (do_rapl & RAPL_PKG) {
		if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr))
			return -13;
		p->energy_pkg = msr & 0xFFFFFFFF;
	}
	if (do_rapl & RAPL_CORES) {
		if (get_msr(cpu, MSR_PP0_ENERGY_STATUS, &msr))
			return -14;
		p->energy_cores = msr & 0xFFFFFFFF;
	}
	if (do_rapl & RAPL_DRAM) {
		if (get_msr(cpu, MSR_DRAM_ENERGY_STATUS, &msr))
			return -15;
		p->energy_dram = msr & 0xFFFFFFFF;
	}
	if (do_rapl & RAPL_GFX) {
		if (get_msr(cpu, MSR_PP1_ENERGY_STATUS, &msr))
			return -16;
		p->energy_gfx = msr & 0xFFFFFFFF;
	}
	if (do_rapl & RAPL_PKG_PERF_STATUS) {
		if (get_msr(cpu, MSR_PKG_PERF_STATUS, &msr))
			return -16;
		p->rapl_pkg_perf_status = msr & 0xFFFFFFFF;
	}
	if (do_rapl & RAPL_DRAM_PERF_STATUS) {
		if (get_msr(cpu, MSR_DRAM_PERF_STATUS, &msr))
			return -16;
		p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
	}
	if (do_ptm) {
		if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
			return -17;
		p->pkg_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
	}
	return 0;
}

/*
 * Decode and print platform-info, power-control, C-state-config and
 * turbo-ratio-limit MSRs of cpu0 to stderr (verbose startup banner).
 */
void print_verbose_header(void)
{
	unsigned long long msr;
	unsigned int ratio;

	if (!do_nehalem_platform_info)
		return;

	get_msr(0, MSR_NHM_PLATFORM_INFO, &msr);

	fprintf(stderr, "cpu0: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", msr);

	ratio = (msr >> 40) & 0xFF;
	fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency\n",
		ratio, bclk, ratio * bclk);

	ratio = (msr >> 8) & 0xFF;
	fprintf(stderr, "%d * %.0f = %.0f MHz TSC frequency\n",
		ratio, bclk, ratio * bclk);

	get_msr(0, MSR_IA32_POWER_CTL, &msr);
	fprintf(stderr, "cpu0: MSR_IA32_POWER_CTL: 0x%08llx (C1E: %sabled)\n",
		msr, msr & 0x2 ? "EN" : "DIS");

	if (!do_ivt_turbo_ratio_limit)
		goto print_nhm_turbo_ratio_limits;

	/* IVT has a second turbo-limit MSR covering 9..16 active cores */
	get_msr(0, MSR_IVT_TURBO_RATIO_LIMIT, &msr);

	fprintf(stderr, "cpu0: MSR_IVT_TURBO_RATIO_LIMIT: 0x%08llx\n", msr);

	ratio = (msr >> 56) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 16 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 48) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 15 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 40) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 14 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 32) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 13 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 24) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 12 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 16) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 11 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 8) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 10 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 0) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 9 active cores\n",
			ratio, bclk, ratio * bclk);

print_nhm_turbo_ratio_limits:
	get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);

#define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
#define SNB_C3_AUTO_UNDEMOTE (1UL << 28)

	fprintf(stderr, "cpu0: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x%08llx", msr);

	fprintf(stderr, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: ",
		(msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "",
		(msr & SNB_C1_AUTO_UNDEMOTE) ? "UNdemote-C1, " : "",
		(msr & NHM_C3_AUTO_DEMOTE) ? "demote-C3, " : "",
		(msr & NHM_C1_AUTO_DEMOTE) ? "demote-C1, " : "",
		(msr & (1 << 15)) ? "" : "UN",
		(unsigned int)msr & 7);

	/* low 3 bits encode the deepest allowed package C-state */
	switch(msr & 0x7) {
	case 0:
		fprintf(stderr, "pc0");
		break;
	case 1:
		fprintf(stderr, do_snb_cstates ? "pc2" : "pc0");
		break;
	case 2:
		fprintf(stderr, do_snb_cstates ? "pc6-noret" : "pc3");
		break;
	case 3:
		fprintf(stderr, "pc6");
		break;
	case 4:
		fprintf(stderr, "pc7");
		break;
	case 5:
		fprintf(stderr, do_snb_cstates ? "pc7s" : "invalid");
		break;
	case 7:
		fprintf(stderr, "unlimited");
		break;
	default:
		fprintf(stderr, "invalid");
	}
	fprintf(stderr, ")\n");

	if (!do_nehalem_turbo_ratio_limit)
		return;

	get_msr(0, MSR_NHM_TURBO_RATIO_LIMIT, &msr);

	fprintf(stderr, "cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x%08llx\n", msr);

	ratio = (msr >> 56) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 8 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 48) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 7 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 40) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 6 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 32) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 5 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 24) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 4 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 16) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 3 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 8) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 2 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 0) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 1 active cores\n",
			ratio, bclk, ratio * bclk);
}

/* Release every dynamically allocated buffer so topology can be re-probed. */
void free_all_buffers(void)
{
	CPU_FREE(cpu_present_set);
	cpu_present_set = NULL;
	/* NOTE(review): this second assignment looks like it was meant to be
	 * "cpu_present_setsize = 0;" (cf. the cpu_affinity_set lines below) —
	 * as written it redundantly re-clears the pointer and leaves the
	 * stale size behind. Confirm against upstream turbostat. */
	cpu_present_set = 0;

	CPU_FREE(cpu_affinity_set);
	cpu_affinity_set = NULL;
	cpu_affinity_setsize = 0;

	free(thread_even);
	free(core_even);
	free(package_even);

	thread_even = NULL;
	core_even = NULL;
	package_even = NULL;

	free(thread_odd);
	free(core_odd);
	free(package_odd);

	thread_odd = NULL;
	core_odd = NULL;
	package_odd = NULL;

	free(output_buffer);
	output_buffer = NULL;
	outp = NULL;
}

/*
 * cpu_is_first_sibling_in_core(cpu)
 * return 1 if given CPU is 1st HT sibling in the core
 */
int cpu_is_first_sibling_in_core(int cpu)
{
	char path[64];
	FILE *filep;
	int first_cpu;

	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
	filep = fopen(path, "r");
	if (filep == NULL) {
		perror(path);
		exit(1);
	}
	/* first number in the siblings list is the lowest-numbered sibling */
	fscanf(filep, "%d", &first_cpu);
	fclose(filep);
	return (cpu == first_cpu);
}

/*
 * cpu_is_first_core_in_package(cpu)
 * return 1 if given CPU is 1st core in package
 */
int cpu_is_first_core_in_package(int cpu)
{
	char path[64];
	FILE *filep;
	int first_cpu;

	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu);
	filep = fopen(path, "r");
	if (filep == NULL) {
		perror(path);
		exit(1);
	}
	fscanf(filep, "%d", &first_cpu);
	fclose(filep);
	return (cpu == first_cpu);
}

/* Read this CPU's package id from sysfs; exits on open failure. */
int get_physical_package_id(int cpu)
{
	char path[80];
	FILE *filep;
	int pkg;

	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
	filep = fopen(path, "r");
	if (filep == NULL) {
		perror(path);
		exit(1);
	}
	fscanf(filep, "%d", &pkg);
	fclose(filep);
	return pkg;
}

/* Read this CPU's core id from sysfs; exits on open failure. */
int get_core_id(int cpu)
{
	char path[80];
	FILE *filep;
	int core;

	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
	filep = fopen(path, "r");
	if (filep == NULL) {
		perror(path);
		exit(1);
	}
	fscanf(filep, "%d", &core);
	fclose(filep);
	return core;
}

/* Return 2 if this CPU has an HT sibling, 1 otherwise. */
int get_num_ht_siblings(int cpu)
{
	char path[80];
	FILE *filep;
	int sib1, sib2;
	int matches;
	char character;

	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
	filep = fopen(path, "r");
	if (filep == NULL) {
		perror(path);
		exit(1);
	}
	/*
	 * file format:
	 * if a pair of number with a character between: 2 siblings (eg. 1-2, or 1,4)
	 * otherwise 1 sibling (self).
	 */
	matches = fscanf(filep, "%d%c%d\n", &sib1, &character, &sib2);

	fclose(filep);

	if (matches == 3)
		return 2;
	else
		return 1;
}

/*
 * run func(thread, core, package) in topology order
 * skip non-present cpus
 * Pairs up two counter sets (base and base2) so 'func' can compute
 * deltas; stops and propagates the first non-zero return.
 */
int for_all_cpus_2(int (func)(struct thread_data *, struct core_data *,
	struct pkg_data *, struct thread_data *, struct core_data *,
	struct pkg_data *), struct thread_data *thread_base,
	struct core_data *core_base, struct pkg_data *pkg_base,
	struct thread_data *thread_base2, struct core_data *core_base2,
	struct pkg_data *pkg_base2)
{
	int retval, pkg_no, core_no, thread_no;

	for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
		for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
			for (thread_no = 0; thread_no <
				topo.num_threads_per_core; ++thread_no) {
				struct thread_data *t, *t2;
				struct core_data *c, *c2;
				struct pkg_data *p, *p2;

				t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);

				if (cpu_is_not_present(t->cpu_id))
					continue;

				t2 = GET_THREAD(thread_base2, thread_no, core_no, pkg_no);

				c = GET_CORE(core_base, core_no, pkg_no);
				c2 = GET_CORE(core_base2, core_no, pkg_no);

				p = GET_PKG(pkg_base, pkg_no);
				p2 = GET_PKG(pkg_base2, pkg_no);

				retval = func(t, c, p, t2, c2, p2);
				if (retval)
					return retval;
			}
		}
	}
	return 0;
}

/*
 * run func(cpu) on every cpu in /proc/stat
 * return max_cpu number
 */
int for_all_proc_cpus(int (func)(int))
{
	FILE *fp;
	int cpu_num;
	int retval;

	fp = fopen(proc_stat, "r");
	if (fp == NULL) {
		perror(proc_stat);
		exit(1);
	}

	/* skip the aggregate "cpu " line */
	retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
	if (retval != 0) {
		perror("/proc/stat format");
		exit(1);
	}

	while (1) {
		retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num);
		if (retval != 1)
			break;

		retval = func(cpu_num);
		if (retval) {
			fclose(fp);
			return(retval);
		}
	}
	fclose(fp);
	return 0;
}

/* Tear down and rebuild all topology-sized buffers (after CPU hotplug). */
void re_initialize(void)
{
	free_all_buffers();
	setup_all_buffers();
	printf("turbostat: re-initialized with num_cpus %d\n", topo.num_cpus);
}


/*
 * count_cpus()
 * remember the last one seen, it will be the max
 */
int count_cpus(int cpu)
{
	if (topo.max_cpu_num < cpu)
		topo.max_cpu_num = cpu;

	topo.num_cpus += 1;
	return 0;
}
/* Record a CPU as present in the global cpu_present_set. */
int mark_cpu_present(int cpu)
{
	CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
	return 0;
}

/*
 * Main measurement loop: ping-pong between the EVEN and ODD counter
 * sets every interval_sec, compute deltas/averages, and print a report.
 * A topology change (get_counters failure or /proc/stat mismatch)
 * triggers re_initialize() and a restart; two consecutive failures exit.
 */
void turbostat_loop()
{
	int retval;
	int restarted = 0;

restart:
	restarted++;

	retval = for_all_cpus(get_counters, EVEN_COUNTERS);
	if (retval < -1) {
		exit(retval);
	} else if (retval == -1) {
		if (restarted > 1) {
			exit(retval);
		}
		re_initialize();
		goto restart;
	}
	restarted = 0;
	gettimeofday(&tv_even, (struct timezone *)NULL);

	while (1) {
		if (for_all_proc_cpus(cpu_is_not_present)) {
			re_initialize();
			goto restart;
		}
		sleep(interval_sec);
		retval = for_all_cpus(get_counters, ODD_COUNTERS);
		if (retval < -1) {
			exit(retval);
		} else if (retval == -1) {
			re_initialize();
			goto restart;
		}
		gettimeofday(&tv_odd, (struct timezone *)NULL);
		timersub(&tv_odd, &tv_even, &tv_delta);
		for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS);
		compute_average(EVEN_COUNTERS);
		format_all_counters(EVEN_COUNTERS);
		flush_stdout();
		sleep(interval_sec);
		retval = for_all_cpus(get_counters, EVEN_COUNTERS);
		if (retval < -1) {
			exit(retval);
		} else if (retval == -1) {
			re_initialize();
			goto restart;
		}
		gettimeofday(&tv_even, (struct timezone *)NULL);
		timersub(&tv_even, &tv_odd, &tv_delta);
		for_all_cpus_2(delta_cpu, EVEN_COUNTERS, ODD_COUNTERS);
		compute_average(ODD_COUNTERS);
		format_all_counters(ODD_COUNTERS);
		flush_stdout();
	}
}

/* Exit unless the msr driver's device node exists. */
void check_dev_msr()
{
	struct stat sb;

	if (stat("/dev/cpu/0/msr", &sb)) {
		fprintf(stderr, "no /dev/cpu/0/msr\n");
		fprintf(stderr, "Try \"# modprobe msr\"\n");
		exit(-5);
	}
}

/* Exit unless running as root (required for MSR access). */
void check_super_user()
{
	if (getuid() != 0) {
		fprintf(stderr, "must be root\n");
		exit(-6);
	}
}

/* Model check: does this CPU have MSR_NHM_TURBO_RATIO_LIMIT? */
int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	if (family != 6)
		return 0;

	switch (model) {
	case 0x1A:	/* Core i7, Xeon 5500 series - Bloomfield, Gainstown NHM-EP */
	case 0x1E:	/* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
	case 0x1F:	/* Core i7 and i5 Processor - Nehalem */
	case 0x25:	/* Westmere Client - Clarkdale, Arrandale */
	case 0x2C:	/* Westmere EP - Gulftown */
	case 0x2A:	/* SNB */
	case 0x2D:	/* SNB Xeon */
	case 0x3A:	/* IVB */
	case 0x3E:	/* IVB Xeon */
	case 0x3C:	/* HSW */
	case 0x3F:	/* HSW */
	case 0x45:	/* HSW */
	case 0x46:	/* HSW */
		return 1;
	case 0x2E:	/* Nehalem-EX Xeon - Beckton */
	case 0x2F:	/* Westmere-EX Xeon - Eagleton */
	default:
		return 0;
	}
}
/* Model check: does this CPU have MSR_IVT_TURBO_RATIO_LIMIT? */
int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	if (family != 6)
		return 0;

	switch (model) {
	case 0x3E:	/* IVB Xeon */
		return 1;
	default:
		return 0;
	}
}

/*
 * print_epb()
 * Decode the ENERGY_PERF_BIAS MSR
 */
int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
	unsigned long long msr;
	char *epb_string;
	int cpu;

	if (!has_epb)
		return 0;

	cpu = t->cpu_id;

	/* EPB is per-package */
	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
		return 0;

	if (cpu_migrate(cpu)) {
		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
		return -1;
	}

	if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr))
		return 0;

	switch (msr & 0x7) {
	case ENERGY_PERF_BIAS_PERFORMANCE:
		epb_string = "performance";
		break;
	case ENERGY_PERF_BIAS_NORMAL:
		epb_string = "balanced";
		break;
	case ENERGY_PERF_BIAS_POWERSAVE:
		epb_string = "powersave";
		break;
	default:
		epb_string = "custom";
		break;
	}
	fprintf(stderr, "cpu%d: MSR_IA32_ENERGY_PERF_BIAS: 0x%08llx (%s)\n", cpu, msr, epb_string);

	return 0;
}

#define	RAPL_POWER_GRANULARITY	0x7FFF	/* 15 bit power granularity */
#define	RAPL_TIME_GRANULARITY	0x3F	/* 6 bit time granularity */

/*
 * rapl_probe()
 *
 * sets do_rapl
 * Also reads the RAPL unit MSR on package 0 and derives
 * rapl_joule_counter_range from TDP (seconds until the 32-bit
 * energy counter can wrap at sustained TDP draw).
 */
void rapl_probe(unsigned int family, unsigned int model)
{
	unsigned long long msr;
	double tdp;

	if (!genuine_intel)
		return;

	if (family != 6)
		return;

	switch (model) {
	case 0x2A:
	case 0x3A:
	case 0x3C:	/* HSW */
	case 0x3F:	/* HSW */
	case 0x45:	/* HSW */
	case 0x46:	/* HSW */
		do_rapl = RAPL_PKG | RAPL_CORES | RAPL_GFX;
		break;
	case 0x2D:
	case 0x3E:
		do_rapl = RAPL_PKG | RAPL_CORES | RAPL_DRAM | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS;
		break;
	default:
		return;
	}

	/* units on package 0, verify later other packages match */
	if (get_msr(0, MSR_RAPL_POWER_UNIT, &msr))
		return;

	rapl_power_units = 1.0 / (1 << (msr & 0xF));
	rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
	rapl_time_units = 1.0 / (1 << (msr >> 16 & 0xF));

	/* get TDP to determine energy counter range */
	if (get_msr(0, MSR_PKG_POWER_INFO, &msr))
		return;

	tdp = ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;

	rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;

	if (verbose)
		fprintf(stderr, "RAPL: %.0f sec. Joule Counter Range\n", rapl_joule_counter_range);

	return;
}

/*
 * Decode and print the core DTS and package PTM thermal status MSRs.
 * Printed once per core (DTS) / once per package (PTM).
 */
int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
	unsigned long long msr;
	unsigned int dts;
	int cpu;

	if (!(do_dts || do_ptm))
		return 0;

	cpu = t->cpu_id;

	/* DTS is per-core, no need to print for each thread */
	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
		return 0;

	if (cpu_migrate(cpu)) {
		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
		return -1;
	}

	if (do_ptm && (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) {
		if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
			return 0;

		dts = (msr >> 16) & 0x7F;
		fprintf(stderr, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n",
			cpu, msr, tcc_activation_temp - dts);

#ifdef	THERM_DEBUG
		if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr))
			return 0;

		dts = (msr >> 16) & 0x7F;
		dts2 = (msr >> 8) & 0x7F;
		fprintf(stderr, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
			cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
#endif
	}


	if (do_dts) {
		unsigned int resolution;

		if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
			return 0;

		dts = (msr >> 16) & 0x7F;
		resolution = (msr >> 27) & 0xF;
		fprintf(stderr, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n",
			cpu, msr, tcc_activation_temp - dts, resolution);

#ifdef	THERM_DEBUG
		if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr))
			return 0;

		dts = (msr >> 16) & 0x7F;
		dts2 = (msr >> 8) & 0x7F;
		fprintf(stderr, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
			cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
#endif
	}

	return 0;
}

/* Decode one RAPL power-limit MSR word (enable/limit/window/clamp) to stderr. */
void print_power_limit_msr(int cpu, unsigned long long msr, char *label)
{
	fprintf(stderr, "cpu%d: %s: %sabled (%f Watts, %f sec, clamp %sabled)\n",
		cpu, label,
		((msr >> 15) & 1) ? "EN" : "DIS",
		((msr >> 0) & 0x7FFF) * rapl_power_units,
		(1.0 + (((msr >> 22) & 0x3)/4.0)) * (1 << ((msr >> 17) & 0x1F)) * rapl_time_units,
		(((msr >> 16) & 1) ? "EN" : "DIS"));

	return;
}

/*
 * Print the RAPL unit, power-info and power-limit MSRs for this CPU's
 * package; also cross-checks that the per-package units match the
 * values probed on package 0 in rapl_probe().
 */
int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
	unsigned long long msr;
	int cpu;
	double local_rapl_power_units, local_rapl_energy_units, local_rapl_time_units;

	if (!do_rapl)
		return 0;

	/* RAPL counters are per package, so print only for 1st thread/package */
	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
		return 0;

	cpu = t->cpu_id;
	if (cpu_migrate(cpu)) {
		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
		return -1;
	}

	if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
		return -1;

	local_rapl_power_units = 1.0 / (1 << (msr & 0xF));
	local_rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
	local_rapl_time_units = 1.0 / (1 << (msr >> 16 & 0xF));

	if (local_rapl_power_units != rapl_power_units)
		fprintf(stderr, "cpu%d, ERROR: Power units mis-match\n", cpu);
	if (local_rapl_energy_units != rapl_energy_units)
		fprintf(stderr, "cpu%d, ERROR: Energy units mis-match\n", cpu);
	if (local_rapl_time_units != rapl_time_units)
		fprintf(stderr, "cpu%d, ERROR: Time units mis-match\n", cpu);

	if (verbose) {
		fprintf(stderr, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx "
			"(%f Watts, %f Joules, %f sec.)\n", cpu, msr,
			local_rapl_power_units, local_rapl_energy_units, local_rapl_time_units);
	}
	if (do_rapl & RAPL_PKG) {
		if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr))
			return -5;

		fprintf(stderr, "cpu%d: MSR_PKG_POWER_INFO: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
			cpu, msr,
			((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
			((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
			((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
			((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);

		if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr))
			return -9;

		fprintf(stderr, "cpu%d: MSR_PKG_POWER_LIMIT: 0x%08llx (%slocked)\n",
			cpu, msr, (msr >> 63) & 1 ? "": "UN");

		print_power_limit_msr(cpu, msr, "PKG Limit #1");
		fprintf(stderr, "cpu%d: PKG Limit #2: %sabled (%f Watts, %f* sec, clamp %sabled)\n",
			cpu,
			((msr >> 47) & 1) ? "EN" : "DIS",
			((msr >> 32) & 0x7FFF) * rapl_power_units,
			(1.0 + (((msr >> 54) & 0x3)/4.0)) * (1 << ((msr >> 49) & 0x1F)) * rapl_time_units,
			((msr >> 48) & 1) ? "EN" : "DIS");
	}
	if (do_rapl & RAPL_DRAM) {
		if (get_msr(cpu, MSR_DRAM_POWER_INFO, &msr))
			return -6;


		fprintf(stderr, "cpu%d: MSR_DRAM_POWER_INFO,: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
			cpu, msr,
			((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
			((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
			((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
			((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);


		if (get_msr(cpu, MSR_DRAM_POWER_LIMIT, &msr))
			return -9;
		fprintf(stderr, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n",
			cpu, msr, (msr >> 31) & 1 ? "": "UN");

		print_power_limit_msr(cpu, msr, "DRAM Limit");
	}
	if (do_rapl & RAPL_CORES) {
		if (verbose) {
			if (get_msr(cpu, MSR_PP0_POLICY, &msr))
				return -7;

			fprintf(stderr, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF);

			if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr))
				return -9;
			fprintf(stderr, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n",
				cpu, msr, (msr >> 31) & 1 ? "": "UN");
			print_power_limit_msr(cpu, msr, "Cores Limit");
		}
	}
	if (do_rapl & RAPL_GFX) {
		if (verbose) {
			if (get_msr(cpu, MSR_PP1_POLICY, &msr))
				return -8;

			fprintf(stderr, "cpu%d: MSR_PP1_POLICY: %lld\n", cpu, msr & 0xF);

			if (get_msr(cpu, MSR_PP1_POWER_LIMIT, &msr))
				return -9;
			fprintf(stderr, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n",
				cpu, msr, (msr >> 31) & 1 ? "": "UN");
			print_power_limit_msr(cpu, msr, "GFX Limit");
		}
	}
	return 0;
}

/* Model check: SNB-or-later (100 MHz bclk, pc2/pc7 support, etc.). */
int is_snb(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	switch (model) {
	case 0x2A:
	case 0x2D:
	case 0x3A:	/* IVB */
	case 0x3E:	/* IVB Xeon */
	case 0x3C:	/* HSW */
	case 0x3F:	/* HSW */
	case 0x45:	/* HSW */
	case 0x46:	/* HSW */
		return 1;
	}
	return 0;
}

/* Model check: has PC8/PC9/PC10 residency counters (Haswell ULT). */
int has_c8_c9_c10(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	switch (model) {
	case 0x45:
		return 1;
	}
	return 0;
}

/* Base clock: 100 MHz on SNB and later, 133 MHz before. */
double discover_bclk(unsigned int family, unsigned int model)
{
	if (is_snb(family, model))
		return 100.00;
	else
		return 133.33;
}

/*
 * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where
 * the Thermal Control Circuit (TCC) activates.
 * This is usually equal to tjMax.
 *
 * Older processors do not have this MSR, so there we guess,
 * but also allow cmdline over-ride with -T.
 *
 * Several MSR temperature values are in units of degrees-C
 * below this value, including the Digital Thermal Sensor (DTS),
 * Package Thermal Management Sensor (PTM), and thermal event thresholds.
 */
int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
	unsigned long long msr;
	unsigned int target_c_local;
	int cpu;

	/* tcc_activation_temp is used only for dts or ptm */
	if (!(do_dts || do_ptm))
		return 0;

	/* this is a per-package concept */
	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
		return 0;

	cpu = t->cpu_id;
	if (cpu_migrate(cpu)) {
		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
		return -1;
	}

	if (tcc_activation_temp_override != 0) {
		tcc_activation_temp = tcc_activation_temp_override;
		fprintf(stderr, "cpu%d: Using cmdline TCC Target (%d C)\n",
			cpu, tcc_activation_temp);
		return 0;
	}

	/* Temperature Target MSR is Nehalem and newer only */
	if (!do_nehalem_platform_info)
		goto guess;

	/* NOTE(review): reads the MSR on cpu 0 despite migrating to 'cpu'
	 * above — per-package targets other than package 0's are never
	 * read. Confirm whether this should be get_msr(cpu, ...). */
	if (get_msr(0, MSR_IA32_TEMPERATURE_TARGET, &msr))
		goto guess;

	target_c_local = (msr >> 16) & 0x7F;

	if (verbose)
		fprintf(stderr, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
			cpu, msr, target_c_local);

	if (target_c_local < 85 || target_c_local > 120)
		goto guess;

	tcc_activation_temp = target_c_local;

	return 0;

guess:
	tcc_activation_temp = TJMAX_DEFAULT;
	fprintf(stderr, "cpu%d: Guessing tjMax %d C, Please use -T to specify\n",
		cpu, tcc_activation_temp);

	return 0;
}

/* Probe CPUID: vendor, family/model/stepping, and feature levels. */
void check_cpuid()
{
	unsigned int eax, ebx, ecx, edx, max_level;
	unsigned int fms, family, model, stepping;

	eax = ebx = ecx = edx = 0;

	__get_cpuid(0, &max_level, &ebx, &ecx, &edx);

	/* "Genu" "ineI" "ntel" in EBX/EDX/ECX */
	if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
		genuine_intel = 1;

	if (verbose)
		fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
			(char *)&ebx, (char *)&edx, (char *)&ecx);

	__get_cpuid(1, &fms, &ebx, &ecx, &edx);
	family = (fms >> 8) & 0xf;
	model = (fms >> 4) & 0xf;
	stepping = fms & 0xf;
	/* extended model bits apply for family 6 and 0xf */
	if (family == 6 || family == 0xf)
		model += ((fms >> 16) & 0xf) << 4;

	if (verbose)
		fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
			max_level, family, model, stepping, family, model, stepping);

	if (!(edx & (1 << 5))) {
fprintf(stderr, "CPUID: no MSR\n"); exit(1); } /* * check max extended function levels of CPUID. * This is needed to check for invariant TSC. * This check is valid for both Intel and AMD. */ ebx = ecx = edx = 0; __get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx); if (max_level < 0x80000007) { fprintf(stderr, "CPUID: no invariant TSC (max_level 0x%x)\n", max_level); exit(1); } /* * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8 * this check is valid for both Intel and AMD */ __get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx); has_invariant_tsc = edx & (1 << 8); if (!has_invariant_tsc) { fprintf(stderr, "No invariant TSC\n"); exit(1); } /* * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0 * this check is valid for both Intel and AMD */ __get_cpuid(0x6, &eax, &ebx, &ecx, &edx); has_aperf = ecx & (1 << 0); do_dts = eax & (1 << 0); do_ptm = eax & (1 << 6); has_epb = ecx & (1 << 3); if (verbose) fprintf(stderr, "CPUID(6): %s%s%s%s\n", has_aperf ? "APERF" : "No APERF!", do_dts ? ", DTS" : "", do_ptm ? ", PTM": "", has_epb ? ", EPB": ""); if (!has_aperf) exit(-1); do_nehalem_platform_info = genuine_intel && has_invariant_tsc; do_nhm_cstates = genuine_intel; /* all Intel w/ non-stop TSC have NHM counters */ do_smi = do_nhm_cstates; do_snb_cstates = is_snb(family, model); do_c8_c9_c10 = has_c8_c9_c10(family, model); bclk = discover_bclk(family, model); do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model); do_ivt_turbo_ratio_limit = has_ivt_turbo_ratio_limit(family, model); rapl_probe(family, model); return; } void usage() { fprintf(stderr, "%s: [-v][-R][-T][-p|-P|-S][-c MSR# | -s]][-C MSR#][-m MSR#][-M MSR#][-i interval_sec | command ...]\n", progname); exit(1); } /* * in /dev/cpu/ return success for names that are numbers * ie. filter out ".", "..", "microcode". 
*/ int dir_filter(const struct dirent *dirp) { if (isdigit(dirp->d_name[0])) return 1; else return 0; } int open_dev_cpu_msr(int dummy1) { return 0; } void topology_probe() { int i; int max_core_id = 0; int max_package_id = 0; int max_siblings = 0; struct cpu_topology { int core_id; int physical_package_id; } *cpus; /* Initialize num_cpus, max_cpu_num */ topo.num_cpus = 0; topo.max_cpu_num = 0; for_all_proc_cpus(count_cpus); if (!summary_only && topo.num_cpus > 1) show_cpu = 1; if (verbose > 1) fprintf(stderr, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num); cpus = calloc(1, (topo.max_cpu_num + 1) * sizeof(struct cpu_topology)); if (cpus == NULL) { perror("calloc cpus"); exit(1); } /* * Allocate and initialize cpu_present_set */ cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1)); if (cpu_present_set == NULL) { perror("CPU_ALLOC"); exit(3); } cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1)); CPU_ZERO_S(cpu_present_setsize, cpu_present_set); for_all_proc_cpus(mark_cpu_present); /* * Allocate and initialize cpu_affinity_set */ cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1)); if (cpu_affinity_set == NULL) { perror("CPU_ALLOC"); exit(3); } cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1)); CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set); /* * For online cpus * find max_core_id, max_package_id */ for (i = 0; i <= topo.max_cpu_num; ++i) { int siblings; if (cpu_is_not_present(i)) { if (verbose > 1) fprintf(stderr, "cpu%d NOT PRESENT\n", i); continue; } cpus[i].core_id = get_core_id(i); if (cpus[i].core_id > max_core_id) max_core_id = cpus[i].core_id; cpus[i].physical_package_id = get_physical_package_id(i); if (cpus[i].physical_package_id > max_package_id) max_package_id = cpus[i].physical_package_id; siblings = get_num_ht_siblings(i); if (siblings > max_siblings) max_siblings = siblings; if (verbose > 1) fprintf(stderr, "cpu %d pkg %d core %d\n", i, cpus[i].physical_package_id, cpus[i].core_id); } topo.num_cores_per_pkg = 
max_core_id + 1; if (verbose > 1) fprintf(stderr, "max_core_id %d, sizing for %d cores per package\n", max_core_id, topo.num_cores_per_pkg); if (!summary_only && topo.num_cores_per_pkg > 1) show_core = 1; topo.num_packages = max_package_id + 1; if (verbose > 1) fprintf(stderr, "max_package_id %d, sizing for %d packages\n", max_package_id, topo.num_packages); if (!summary_only && topo.num_packages > 1) show_pkg = 1; topo.num_threads_per_core = max_siblings; if (verbose > 1) fprintf(stderr, "max_siblings %d\n", max_siblings); free(cpus); } void allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data **p) { int i; *t = calloc(topo.num_threads_per_core * topo.num_cores_per_pkg * topo.num_packages, sizeof(struct thread_data)); if (*t == NULL) goto error; for (i = 0; i < topo.num_threads_per_core * topo.num_cores_per_pkg * topo.num_packages; i++) (*t)[i].cpu_id = -1; *c = calloc(topo.num_cores_per_pkg * topo.num_packages, sizeof(struct core_data)); if (*c == NULL) goto error; for (i = 0; i < topo.num_cores_per_pkg * topo.num_packages; i++) (*c)[i].core_id = -1; *p = calloc(topo.num_packages, sizeof(struct pkg_data)); if (*p == NULL) goto error; for (i = 0; i < topo.num_packages; i++) (*p)[i].package_id = i; return; error: perror("calloc counters"); exit(1); } /* * init_counter() * * set cpu_id, core_num, pkg_num * set FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE * * increment topo.num_cores when 1st core in pkg seen */ void init_counter(struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base, int thread_num, int core_num, int pkg_num, int cpu_id) { struct thread_data *t; struct core_data *c; struct pkg_data *p; t = GET_THREAD(thread_base, thread_num, core_num, pkg_num); c = GET_CORE(core_base, core_num, pkg_num); p = GET_PKG(pkg_base, pkg_num); t->cpu_id = cpu_id; if (thread_num == 0) { t->flags |= CPU_IS_FIRST_THREAD_IN_CORE; if (cpu_is_first_core_in_package(cpu_id)) t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE; } 
c->core_id = core_num; p->package_id = pkg_num; } int initialize_counters(int cpu_id) { int my_thread_id, my_core_id, my_package_id; my_package_id = get_physical_package_id(cpu_id); my_core_id = get_core_id(cpu_id); if (cpu_is_first_sibling_in_core(cpu_id)) { my_thread_id = 0; topo.num_cores++; } else { my_thread_id = 1; } init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id); init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id); return 0; } void allocate_output_buffer() { output_buffer = calloc(1, (1 + topo.num_cpus) * 256); outp = output_buffer; if (outp == NULL) { perror("calloc"); exit(-1); } } void setup_all_buffers(void) { topology_probe(); allocate_counters(&thread_even, &core_even, &package_even); allocate_counters(&thread_odd, &core_odd, &package_odd); allocate_output_buffer(); for_all_proc_cpus(initialize_counters); } void turbostat_init() { check_cpuid(); check_dev_msr(); check_super_user(); setup_all_buffers(); if (verbose) print_verbose_header(); if (verbose) for_all_cpus(print_epb, ODD_COUNTERS); if (verbose) for_all_cpus(print_rapl, ODD_COUNTERS); for_all_cpus(set_temperature_target, ODD_COUNTERS); if (verbose) for_all_cpus(print_thermal, ODD_COUNTERS); } int fork_it(char **argv) { pid_t child_pid; int status; status = for_all_cpus(get_counters, EVEN_COUNTERS); if (status) exit(status); /* clear affinity side-effect of get_counters() */ sched_setaffinity(0, cpu_present_setsize, cpu_present_set); gettimeofday(&tv_even, (struct timezone *)NULL); child_pid = fork(); if (!child_pid) { /* child */ execvp(argv[0], argv); } else { /* parent */ if (child_pid == -1) { perror("fork"); exit(1); } signal(SIGINT, SIG_IGN); signal(SIGQUIT, SIG_IGN); if (waitpid(child_pid, &status, 0) == -1) { perror("wait"); exit(status); } } /* * n.b. 
fork_it() does not check for errors from for_all_cpus() * because re-starting is problematic when forking */ for_all_cpus(get_counters, ODD_COUNTERS); gettimeofday(&tv_odd, (struct timezone *)NULL); timersub(&tv_odd, &tv_even, &tv_delta); for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS); compute_average(EVEN_COUNTERS); format_all_counters(EVEN_COUNTERS); flush_stderr(); fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0); return status; } void cmdline(int argc, char **argv) { int opt; progname = argv[0]; while ((opt = getopt(argc, argv, "+pPSvi:sc:sC:m:M:RT:")) != -1) { switch (opt) { case 'p': show_core_only++; break; case 'P': show_pkg_only++; break; case 'S': summary_only++; break; case 'v': verbose++; break; case 'i': interval_sec = atoi(optarg); break; case 'c': sscanf(optarg, "%x", &extra_delta_offset32); break; case 'C': sscanf(optarg, "%x", &extra_delta_offset64); break; case 'm': sscanf(optarg, "%x", &extra_msr_offset32); break; case 'M': sscanf(optarg, "%x", &extra_msr_offset64); break; case 'R': rapl_verbose++; break; case 'T': tcc_activation_temp_override = atoi(optarg); break; default: usage(); } } } int main(int argc, char **argv) { cmdline(argc, argv); if (verbose) fprintf(stderr, "turbostat v3.4 April 17, 2013" " - Len Brown <lenb@kernel.org>\n"); turbostat_init(); /* * if any params left, it must be a command to fork */ if (argc - optind) return fork_it(argv + optind); else turbostat_loop(); return 0; }
gpl-2.0
aospo/platform_kernel_oneplus_msm8974
arch/x86/kernel/acpi/boot.c
1942
40882
/* * boot.c - Architecture-Specific Low-Level ACPI Boot Support * * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/init.h> #include <linux/acpi.h> #include <linux/acpi_pmtmr.h> #include <linux/efi.h> #include <linux/cpumask.h> #include <linux/module.h> #include <linux/dmi.h> #include <linux/irq.h> #include <linux/slab.h> #include <linux/bootmem.h> #include <linux/ioport.h> #include <linux/pci.h> #include <asm/pci_x86.h> #include <asm/pgtable.h> #include <asm/io_apic.h> #include <asm/apic.h> #include <asm/io.h> #include <asm/mpspec.h> #include <asm/smp.h> static int __initdata acpi_force = 0; u32 acpi_rsdt_forced; int acpi_disabled; EXPORT_SYMBOL(acpi_disabled); #ifdef CONFIG_X86_64 # include <asm/proto.h> # include <asm/numa_64.h> #endif /* X86 */ #define BAD_MADT_ENTRY(entry, end) ( \ (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ ((struct acpi_subtable_header *)entry)->length < sizeof(*entry)) #define PREFIX "ACPI: " int acpi_noirq; /* skip ACPI IRQ initialization */ int acpi_pci_disabled; /* skip ACPI PCI 
scan and IRQ initialization */ EXPORT_SYMBOL(acpi_pci_disabled); int acpi_lapic; int acpi_ioapic; int acpi_strict; u8 acpi_sci_flags __initdata; int acpi_sci_override_gsi __initdata; int acpi_skip_timer_override __initdata; int acpi_use_timer_override __initdata; int acpi_fix_pin2_polarity __initdata; #ifdef CONFIG_X86_LOCAL_APIC static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; #endif #ifndef __HAVE_ARCH_CMPXCHG #warning ACPI uses CMPXCHG, i486 and later hardware #endif /* -------------------------------------------------------------------------- Boot-time Configuration -------------------------------------------------------------------------- */ /* * The default interrupt routing model is PIC (8259). This gets * overridden if IOAPICs are enumerated (below). */ enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC; /* * ISA irqs by default are the first 16 gsis but can be * any gsi as specified by an interrupt source override. */ static u32 isa_irq_to_gsi[NR_IRQS_LEGACY] __read_mostly = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }; static unsigned int gsi_to_irq(unsigned int gsi) { unsigned int irq = gsi + NR_IRQS_LEGACY; unsigned int i; for (i = 0; i < NR_IRQS_LEGACY; i++) { if (isa_irq_to_gsi[i] == gsi) { return i; } } /* Provide an identity mapping of gsi == irq * except on truly weird platforms that have * non isa irqs in the first 16 gsis. */ if (gsi >= NR_IRQS_LEGACY) irq = gsi; else irq = gsi_top + gsi; return irq; } static u32 irq_to_gsi(int irq) { unsigned int gsi; if (irq < NR_IRQS_LEGACY) gsi = isa_irq_to_gsi[irq]; else if (irq < gsi_top) gsi = irq; else if (irq < (gsi_top + NR_IRQS_LEGACY)) gsi = irq - gsi_top; else gsi = 0xffffffff; return gsi; } /* * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END, * to map the target physical address. The problem is that set_fixmap() * provides a single page, and it is possible that the page is not * sufficient. 
* By using this area, we can map up to MAX_IO_APICS pages temporarily, * i.e. until the next __va_range() call. * * Important Safety Note: The fixed I/O APIC page numbers are *subtracted* * from the fixed base. That's why we start at FIX_IO_APIC_BASE_END and * count idx down while incrementing the phys address. */ char *__init __acpi_map_table(unsigned long phys, unsigned long size) { if (!phys || !size) return NULL; return early_ioremap(phys, size); } void __init __acpi_unmap_table(char *map, unsigned long size) { if (!map || !size) return; early_iounmap(map, size); } #ifdef CONFIG_X86_LOCAL_APIC static int __init acpi_parse_madt(struct acpi_table_header *table) { struct acpi_table_madt *madt = NULL; if (!cpu_has_apic) return -EINVAL; madt = (struct acpi_table_madt *)table; if (!madt) { printk(KERN_WARNING PREFIX "Unable to map MADT\n"); return -ENODEV; } if (madt->address) { acpi_lapic_addr = (u64) madt->address; printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n", madt->address); } default_acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id); return 0; } static void __cpuinit acpi_register_lapic(int id, u8 enabled) { unsigned int ver = 0; if (id >= (MAX_LOCAL_APIC-1)) { printk(KERN_INFO PREFIX "skipped apicid that is too big\n"); return; } if (!enabled) { ++disabled_cpus; return; } if (boot_cpu_physical_apicid != -1U) ver = apic_version[boot_cpu_physical_apicid]; generic_processor_info(id, ver); } static int __init acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end) { struct acpi_madt_local_x2apic *processor = NULL; int apic_id; u8 enabled; processor = (struct acpi_madt_local_x2apic *)header; if (BAD_MADT_ENTRY(processor, end)) return -EINVAL; acpi_table_print_madt_entry(header); apic_id = processor->local_apic_id; enabled = processor->lapic_flags & ACPI_MADT_ENABLED; #ifdef CONFIG_X86_X2APIC /* * We need to register disabled CPU as well to permit * counting disabled CPUs. 
This allows us to size * cpus_possible_map more accurately, to permit * to not preallocating memory for all NR_CPUS * when we use CPU hotplug. */ if (!apic->apic_id_valid(apic_id) && enabled) printk(KERN_WARNING PREFIX "x2apic entry ignored\n"); else acpi_register_lapic(apic_id, enabled); #else printk(KERN_WARNING PREFIX "x2apic entry ignored\n"); #endif return 0; } static int __init acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end) { struct acpi_madt_local_apic *processor = NULL; processor = (struct acpi_madt_local_apic *)header; if (BAD_MADT_ENTRY(processor, end)) return -EINVAL; acpi_table_print_madt_entry(header); /* * We need to register disabled CPU as well to permit * counting disabled CPUs. This allows us to size * cpus_possible_map more accurately, to permit * to not preallocating memory for all NR_CPUS * when we use CPU hotplug. */ acpi_register_lapic(processor->id, /* APIC ID */ processor->lapic_flags & ACPI_MADT_ENABLED); return 0; } static int __init acpi_parse_sapic(struct acpi_subtable_header *header, const unsigned long end) { struct acpi_madt_local_sapic *processor = NULL; processor = (struct acpi_madt_local_sapic *)header; if (BAD_MADT_ENTRY(processor, end)) return -EINVAL; acpi_table_print_madt_entry(header); acpi_register_lapic((processor->id << 8) | processor->eid,/* APIC ID */ processor->lapic_flags & ACPI_MADT_ENABLED); return 0; } static int __init acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header, const unsigned long end) { struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL; lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header; if (BAD_MADT_ENTRY(lapic_addr_ovr, end)) return -EINVAL; acpi_lapic_addr = lapic_addr_ovr->address; return 0; } static int __init acpi_parse_x2apic_nmi(struct acpi_subtable_header *header, const unsigned long end) { struct acpi_madt_local_x2apic_nmi *x2apic_nmi = NULL; x2apic_nmi = (struct acpi_madt_local_x2apic_nmi *)header; if 
(BAD_MADT_ENTRY(x2apic_nmi, end)) return -EINVAL; acpi_table_print_madt_entry(header); if (x2apic_nmi->lint != 1) printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n"); return 0; } static int __init acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end) { struct acpi_madt_local_apic_nmi *lapic_nmi = NULL; lapic_nmi = (struct acpi_madt_local_apic_nmi *)header; if (BAD_MADT_ENTRY(lapic_nmi, end)) return -EINVAL; acpi_table_print_madt_entry(header); if (lapic_nmi->lint != 1) printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n"); return 0; } #endif /*CONFIG_X86_LOCAL_APIC */ #ifdef CONFIG_X86_IO_APIC static int __init acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end) { struct acpi_madt_io_apic *ioapic = NULL; ioapic = (struct acpi_madt_io_apic *)header; if (BAD_MADT_ENTRY(ioapic, end)) return -EINVAL; acpi_table_print_madt_entry(header); mp_register_ioapic(ioapic->id, ioapic->address, ioapic->global_irq_base); return 0; } /* * Parse Interrupt Source Override for the ACPI SCI */ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger, u32 gsi) { if (trigger == 0) /* compatible SCI trigger is level */ trigger = 3; if (polarity == 0) /* compatible SCI polarity is low */ polarity = 3; /* Command-line over-ride via acpi_sci= */ if (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) trigger = (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2; if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK) polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK; /* * mp_config_acpi_legacy_irqs() already setup IRQs < 16 * If GSI is < 16, this will update its flags, * else it will create a new mp_irqs[] entry. 
*/ mp_override_legacy_irq(bus_irq, polarity, trigger, gsi); /* * stash over-ride to indicate we've been here * and for later update of acpi_gbl_FADT */ acpi_sci_override_gsi = gsi; return; } static int __init acpi_parse_int_src_ovr(struct acpi_subtable_header * header, const unsigned long end) { struct acpi_madt_interrupt_override *intsrc = NULL; intsrc = (struct acpi_madt_interrupt_override *)header; if (BAD_MADT_ENTRY(intsrc, end)) return -EINVAL; acpi_table_print_madt_entry(header); if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) { acpi_sci_ioapic_setup(intsrc->source_irq, intsrc->inti_flags & ACPI_MADT_POLARITY_MASK, (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2, intsrc->global_irq); return 0; } if (intsrc->source_irq == 0) { if (acpi_skip_timer_override) { printk(PREFIX "BIOS IRQ0 override ignored.\n"); return 0; } if ((intsrc->global_irq == 2) && acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) { intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK; printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n"); } } mp_override_legacy_irq(intsrc->source_irq, intsrc->inti_flags & ACPI_MADT_POLARITY_MASK, (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2, intsrc->global_irq); return 0; } static int __init acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end) { struct acpi_madt_nmi_source *nmi_src = NULL; nmi_src = (struct acpi_madt_nmi_source *)header; if (BAD_MADT_ENTRY(nmi_src, end)) return -EINVAL; acpi_table_print_madt_entry(header); /* TBD: Support nimsrc entries? */ return 0; } #endif /* CONFIG_X86_IO_APIC */ /* * acpi_pic_sci_set_trigger() * * use ELCR to set PIC-mode trigger type for SCI * * If a PIC-mode SCI is not recognized or gives spurious IRQ7's * it may require Edge Trigger -- use "acpi_sci=edge" * * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers * for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge. 
* ECLR1 is IRQs 0-7 (IRQ 0, 1, 2 must be 0) * ECLR2 is IRQs 8-15 (IRQ 8, 13 must be 0) */ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger) { unsigned int mask = 1 << irq; unsigned int old, new; /* Real old ELCR mask */ old = inb(0x4d0) | (inb(0x4d1) << 8); /* * If we use ACPI to set PCI IRQs, then we should clear ELCR * since we will set it correctly as we enable the PCI irq * routing. */ new = acpi_noirq ? old : 0; /* * Update SCI information in the ELCR, it isn't in the PCI * routing tables.. */ switch (trigger) { case 1: /* Edge - clear */ new &= ~mask; break; case 3: /* Level - set */ new |= mask; break; } if (old == new) return; printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old); outb(new, 0x4d0); outb(new >> 8, 0x4d1); } int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) { *irq = gsi_to_irq(gsi); #ifdef CONFIG_X86_IO_APIC if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) setup_IO_APIC_irq_extra(gsi); #endif return 0; } EXPORT_SYMBOL_GPL(acpi_gsi_to_irq); int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi) { if (isa_irq >= 16) return -1; *gsi = irq_to_gsi(isa_irq); return 0; } static int acpi_register_gsi_pic(struct device *dev, u32 gsi, int trigger, int polarity) { #ifdef CONFIG_PCI /* * Make sure all (legacy) PCI IRQs are set as level-triggered. 
*/ if (trigger == ACPI_LEVEL_SENSITIVE) eisa_set_level_irq(gsi); #endif return gsi; } static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi, int trigger, int polarity) { #ifdef CONFIG_X86_IO_APIC gsi = mp_register_gsi(dev, gsi, trigger, polarity); #endif return gsi; } int (*__acpi_register_gsi)(struct device *dev, u32 gsi, int trigger, int polarity) = acpi_register_gsi_pic; /* * success: return IRQ number (>=0) * failure: return < 0 */ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) { unsigned int irq; unsigned int plat_gsi = gsi; plat_gsi = (*__acpi_register_gsi)(dev, gsi, trigger, polarity); irq = gsi_to_irq(plat_gsi); return irq; } void __init acpi_set_irq_model_pic(void) { acpi_irq_model = ACPI_IRQ_MODEL_PIC; __acpi_register_gsi = acpi_register_gsi_pic; acpi_ioapic = 0; } void __init acpi_set_irq_model_ioapic(void) { acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC; __acpi_register_gsi = acpi_register_gsi_ioapic; acpi_ioapic = 1; } /* * ACPI based hotplug support for CPU */ #ifdef CONFIG_ACPI_HOTPLUG_CPU #include <acpi/processor.h> static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) { #ifdef CONFIG_ACPI_NUMA int nid; nid = acpi_get_node(handle); if (nid == -1 || !node_online(nid)) return; set_apicid_to_node(physid, nid); numa_set_node(cpu, nid); #endif } static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu) { struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; struct acpi_madt_local_apic *lapic; cpumask_var_t tmp_map, new_map; u8 physid; int cpu; int retval = -ENOMEM; if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) return -EINVAL; if (!buffer.length || !buffer.pointer) return -EINVAL; obj = buffer.pointer; if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < sizeof(*lapic)) { kfree(buffer.pointer); return -EINVAL; } lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer; if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC || 
!(lapic->lapic_flags & ACPI_MADT_ENABLED)) { kfree(buffer.pointer); return -EINVAL; } physid = lapic->id; kfree(buffer.pointer); buffer.length = ACPI_ALLOCATE_BUFFER; buffer.pointer = NULL; lapic = NULL; if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL)) goto out; if (!alloc_cpumask_var(&new_map, GFP_KERNEL)) goto free_tmp_map; cpumask_copy(tmp_map, cpu_present_mask); acpi_register_lapic(physid, ACPI_MADT_ENABLED); /* * If mp_register_lapic successfully generates a new logical cpu * number, then the following will get us exactly what was mapped */ cpumask_andnot(new_map, cpu_present_mask, tmp_map); if (cpumask_empty(new_map)) { printk ("Unable to map lapic to logical cpu number\n"); retval = -EINVAL; goto free_new_map; } acpi_processor_set_pdc(handle); cpu = cpumask_first(new_map); acpi_map_cpu2node(handle, cpu, physid); *pcpu = cpu; retval = 0; free_new_map: free_cpumask_var(new_map); free_tmp_map: free_cpumask_var(tmp_map); out: return retval; } /* wrapper to silence section mismatch warning */ int __ref acpi_map_lsapic(acpi_handle handle, int *pcpu) { return _acpi_map_lsapic(handle, pcpu); } EXPORT_SYMBOL(acpi_map_lsapic); int acpi_unmap_lsapic(int cpu) { per_cpu(x86_cpu_to_apicid, cpu) = -1; set_cpu_present(cpu, false); num_processors--; return (0); } EXPORT_SYMBOL(acpi_unmap_lsapic); #endif /* CONFIG_ACPI_HOTPLUG_CPU */ int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base) { /* TBD */ return -EINVAL; } EXPORT_SYMBOL(acpi_register_ioapic); int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base) { /* TBD */ return -EINVAL; } EXPORT_SYMBOL(acpi_unregister_ioapic); static int __init acpi_parse_sbf(struct acpi_table_header *table) { struct acpi_table_boot *sb; sb = (struct acpi_table_boot *)table; if (!sb) { printk(KERN_WARNING PREFIX "Unable to map SBF\n"); return -ENODEV; } sbf_port = sb->cmos_index; /* Save CMOS port */ return 0; } #ifdef CONFIG_HPET_TIMER #include <asm/hpet.h> static struct __initdata resource *hpet_res; static int __init 
acpi_parse_hpet(struct acpi_table_header *table) { struct acpi_table_hpet *hpet_tbl; hpet_tbl = (struct acpi_table_hpet *)table; if (!hpet_tbl) { printk(KERN_WARNING PREFIX "Unable to map HPET\n"); return -ENODEV; } if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) { printk(KERN_WARNING PREFIX "HPET timers must be located in " "memory.\n"); return -1; } hpet_address = hpet_tbl->address.address; hpet_blockid = hpet_tbl->sequence; /* * Some broken BIOSes advertise HPET at 0x0. We really do not * want to allocate a resource there. */ if (!hpet_address) { printk(KERN_WARNING PREFIX "HPET id: %#x base: %#lx is invalid\n", hpet_tbl->id, hpet_address); return 0; } #ifdef CONFIG_X86_64 /* * Some even more broken BIOSes advertise HPET at * 0xfed0000000000000 instead of 0xfed00000. Fix it up and add * some noise: */ if (hpet_address == 0xfed0000000000000UL) { if (!hpet_force_user) { printk(KERN_WARNING PREFIX "HPET id: %#x " "base: 0xfed0000000000000 is bogus\n " "try hpet=force on the kernel command line to " "fix it up to 0xfed00000.\n", hpet_tbl->id); hpet_address = 0; return 0; } printk(KERN_WARNING PREFIX "HPET id: %#x base: 0xfed0000000000000 fixed up " "to 0xfed00000.\n", hpet_tbl->id); hpet_address >>= 32; } #endif printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n", hpet_tbl->id, hpet_address); /* * Allocate and initialize the HPET firmware resource for adding into * the resource tree during the lateinit timeframe. */ #define HPET_RESOURCE_NAME_SIZE 9 hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE); hpet_res->name = (void *)&hpet_res[1]; hpet_res->flags = IORESOURCE_MEM; snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE, "HPET %u", hpet_tbl->sequence); hpet_res->start = hpet_address; hpet_res->end = hpet_address + (1 * 1024) - 1; return 0; } /* * hpet_insert_resource inserts the HPET resources used into the resource * tree. 
*/ static __init int hpet_insert_resource(void) { if (!hpet_res) return 1; return insert_resource(&iomem_resource, hpet_res); } late_initcall(hpet_insert_resource); #else #define acpi_parse_hpet NULL #endif static int __init acpi_parse_fadt(struct acpi_table_header *table) { #ifdef CONFIG_X86_PM_TIMER /* detect the location of the ACPI PM Timer */ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) { /* FADT rev. 2 */ if (acpi_gbl_FADT.xpm_timer_block.space_id != ACPI_ADR_SPACE_SYSTEM_IO) return 0; pmtmr_ioport = acpi_gbl_FADT.xpm_timer_block.address; /* * "X" fields are optional extensions to the original V1.0 * fields, so we must selectively expand V1.0 fields if the * corresponding X field is zero. */ if (!pmtmr_ioport) pmtmr_ioport = acpi_gbl_FADT.pm_timer_block; } else { /* FADT rev. 1 */ pmtmr_ioport = acpi_gbl_FADT.pm_timer_block; } if (pmtmr_ioport) printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n", pmtmr_ioport); #endif return 0; } #ifdef CONFIG_X86_LOCAL_APIC /* * Parse LAPIC entries in MADT * returns 0 on success, < 0 on error */ static int __init early_acpi_parse_madt_lapic_addr_ovr(void) { int count; if (!cpu_has_apic) return -ENODEV; /* * Note that the LAPIC address is obtained from the MADT (32-bit value) * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value). */ count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, acpi_parse_lapic_addr_ovr, 0); if (count < 0) { printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n"); return count; } register_lapic_address(acpi_lapic_addr); return count; } static int __init acpi_parse_madt_lapic_entries(void) { int count; int x2count = 0; if (!cpu_has_apic) return -ENODEV; /* * Note that the LAPIC address is obtained from the MADT (32-bit value) * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value). 
*/ count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, acpi_parse_lapic_addr_ovr, 0); if (count < 0) { printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n"); return count; } register_lapic_address(acpi_lapic_addr); count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, acpi_parse_sapic, MAX_LOCAL_APIC); if (!count) { x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC, acpi_parse_x2apic, MAX_LOCAL_APIC); count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_parse_lapic, MAX_LOCAL_APIC); } if (!count && !x2count) { printk(KERN_ERR PREFIX "No LAPIC entries present\n"); /* TBD: Cleanup to allow fallback to MPS */ return -ENODEV; } else if (count < 0 || x2count < 0) { printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n"); /* TBD: Cleanup to allow fallback to MPS */ return count; } x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC_NMI, acpi_parse_x2apic_nmi, 0); count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0); if (count < 0 || x2count < 0) { printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n"); /* TBD: Cleanup to allow fallback to MPS */ return count; } return 0; } #endif /* CONFIG_X86_LOCAL_APIC */ #ifdef CONFIG_X86_IO_APIC #define MP_ISA_BUS 0 #ifdef CONFIG_X86_ES7000 extern int es7000_plat; #endif void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) { int ioapic; int pin; struct mpc_intsrc mp_irq; /* * Convert 'gsi' to 'ioapic.pin'. */ ioapic = mp_find_ioapic(gsi); if (ioapic < 0) return; pin = mp_find_ioapic_pin(ioapic, gsi); /* * TBD: This check is for faulty timer entries, where the override * erroneously sets the trigger to level, resulting in a HUGE * increase of timer interrupts! 
*/ if ((bus_irq == 0) && (trigger == 3)) trigger = 1; mp_irq.type = MP_INTSRC; mp_irq.irqtype = mp_INT; mp_irq.irqflag = (trigger << 2) | polarity; mp_irq.srcbus = MP_ISA_BUS; mp_irq.srcbusirq = bus_irq; /* IRQ */ mp_irq.dstapic = mpc_ioapic_id(ioapic); /* APIC ID */ mp_irq.dstirq = pin; /* INTIN# */ mp_save_irq(&mp_irq); isa_irq_to_gsi[bus_irq] = gsi; } void __init mp_config_acpi_legacy_irqs(void) { int i; struct mpc_intsrc mp_irq; #if defined (CONFIG_MCA) || defined (CONFIG_EISA) /* * Fabricate the legacy ISA bus (bus #31). */ mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA; #endif set_bit(MP_ISA_BUS, mp_bus_not_pci); pr_debug("Bus #%d is ISA\n", MP_ISA_BUS); #ifdef CONFIG_X86_ES7000 /* * Older generations of ES7000 have no legacy identity mappings */ if (es7000_plat == 1) return; #endif /* * Use the default configuration for the IRQs 0-15. Unless * overridden by (MADT) interrupt source override entries. */ for (i = 0; i < 16; i++) { int ioapic, pin; unsigned int dstapic; int idx; u32 gsi; /* Locate the gsi that irq i maps to. */ if (acpi_isa_irq_to_gsi(i, &gsi)) continue; /* * Locate the IOAPIC that manages the ISA IRQ. */ ioapic = mp_find_ioapic(gsi); if (ioapic < 0) continue; pin = mp_find_ioapic_pin(ioapic, gsi); dstapic = mpc_ioapic_id(ioapic); for (idx = 0; idx < mp_irq_entries; idx++) { struct mpc_intsrc *irq = mp_irqs + idx; /* Do we already have a mapping for this ISA IRQ? 
*/ if (irq->srcbus == MP_ISA_BUS && irq->srcbusirq == i) break; /* Do we already have a mapping for this IOAPIC pin */ if (irq->dstapic == dstapic && irq->dstirq == pin) break; } if (idx != mp_irq_entries) { printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i); continue; /* IRQ already used */ } mp_irq.type = MP_INTSRC; mp_irq.irqflag = 0; /* Conforming */ mp_irq.srcbus = MP_ISA_BUS; mp_irq.dstapic = dstapic; mp_irq.irqtype = mp_INT; mp_irq.srcbusirq = i; /* Identity mapped */ mp_irq.dstirq = pin; mp_save_irq(&mp_irq); } } static int mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger, int polarity) { #ifdef CONFIG_X86_MPPARSE struct mpc_intsrc mp_irq; struct pci_dev *pdev; unsigned char number; unsigned int devfn; int ioapic; u8 pin; if (!acpi_ioapic) return 0; if (!dev) return 0; if (dev->bus != &pci_bus_type) return 0; pdev = to_pci_dev(dev); number = pdev->bus->number; devfn = pdev->devfn; pin = pdev->pin; /* print the entry should happen on mptable identically */ mp_irq.type = MP_INTSRC; mp_irq.irqtype = mp_INT; mp_irq.irqflag = (trigger == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) | (polarity == ACPI_ACTIVE_HIGH ? 
1 : 3); mp_irq.srcbus = number; mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3); ioapic = mp_find_ioapic(gsi); mp_irq.dstapic = mpc_ioapic_id(ioapic); mp_irq.dstirq = mp_find_ioapic_pin(ioapic, gsi); mp_save_irq(&mp_irq); #endif return 0; } int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) { int ioapic; int ioapic_pin; struct io_apic_irq_attr irq_attr; if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC) return gsi; /* Don't set up the ACPI SCI because it's already set up */ if (acpi_gbl_FADT.sci_interrupt == gsi) return gsi; ioapic = mp_find_ioapic(gsi); if (ioapic < 0) { printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi); return gsi; } ioapic_pin = mp_find_ioapic_pin(ioapic, gsi); if (ioapic_pin > MP_MAX_IOAPIC_PIN) { printk(KERN_ERR "Invalid reference to IOAPIC pin " "%d-%d\n", mpc_ioapic_id(ioapic), ioapic_pin); return gsi; } if (enable_update_mptable) mp_config_acpi_gsi(dev, gsi, trigger, polarity); set_io_apic_irq_attr(&irq_attr, ioapic, ioapic_pin, trigger == ACPI_EDGE_SENSITIVE ? 0 : 1, polarity == ACPI_ACTIVE_HIGH ? 0 : 1); io_apic_set_pci_routing(dev, gsi_to_irq(gsi), &irq_attr); return gsi; } /* * Parse IOAPIC related entries in MADT * returns 0 on success, < 0 on error */ static int __init acpi_parse_madt_ioapic_entries(void) { int count; /* * ACPI interpreter is required to complete interrupt setup, * so if it is off, don't enumerate the io-apics with ACPI. 
* If MPS is present, it will handle them, * otherwise the system will stay in PIC mode */ if (acpi_disabled || acpi_noirq) return -ENODEV; if (!cpu_has_apic) return -ENODEV; /* * if "noapic" boot option, don't look for IO-APICs */ if (skip_ioapic_setup) { printk(KERN_INFO PREFIX "Skipping IOAPIC probe " "due to 'noapic' option.\n"); return -ENODEV; } count = acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic, MAX_IO_APICS); if (!count) { printk(KERN_ERR PREFIX "No IOAPIC entries present\n"); return -ENODEV; } else if (count < 0) { printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n"); return count; } count = acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr, nr_irqs); if (count < 0) { printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n"); /* TBD: Cleanup to allow fallback to MPS */ return count; } /* * If BIOS did not supply an INT_SRC_OVR for the SCI * pretend we got one so we can set the SCI flags. */ if (!acpi_sci_override_gsi) acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0, acpi_gbl_FADT.sci_interrupt); /* Fill in identity legacy mappings where no override */ mp_config_acpi_legacy_irqs(); count = acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, nr_irqs); if (count < 0) { printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n"); /* TBD: Cleanup to allow fallback to MPS */ return count; } return 0; } #else static inline int acpi_parse_madt_ioapic_entries(void) { return -1; } #endif /* !CONFIG_X86_IO_APIC */ static void __init early_acpi_process_madt(void) { #ifdef CONFIG_X86_LOCAL_APIC int error; if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) { /* * Parse MADT LAPIC entries */ error = early_acpi_parse_madt_lapic_addr_ovr(); if (!error) { acpi_lapic = 1; smp_found_config = 1; } if (error == -EINVAL) { /* * Dell Precision Workstation 410, 610 come here. 
*/ printk(KERN_ERR PREFIX "Invalid BIOS MADT, disabling ACPI\n"); disable_acpi(); } } #endif } static void __init acpi_process_madt(void) { #ifdef CONFIG_X86_LOCAL_APIC int error; if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) { /* * Parse MADT LAPIC entries */ error = acpi_parse_madt_lapic_entries(); if (!error) { acpi_lapic = 1; /* * Parse MADT IO-APIC entries */ error = acpi_parse_madt_ioapic_entries(); if (!error) { acpi_set_irq_model_ioapic(); smp_found_config = 1; } } if (error == -EINVAL) { /* * Dell Precision Workstation 410, 610 come here. */ printk(KERN_ERR PREFIX "Invalid BIOS MADT, disabling ACPI\n"); disable_acpi(); } } else { /* * ACPI found no MADT, and so ACPI wants UP PIC mode. * In the event an MPS table was found, forget it. * Boot with "acpi=off" to use MPS on such a system. */ if (smp_found_config) { printk(KERN_WARNING PREFIX "No APIC-table, disabling MPS\n"); smp_found_config = 0; } } /* * ACPI supports both logical (e.g. Hyper-Threading) and physical * processors, where MPS only supports physical. 
*/ if (acpi_lapic && acpi_ioapic) printk(KERN_INFO "Using ACPI (MADT) for SMP configuration " "information\n"); else if (acpi_lapic) printk(KERN_INFO "Using ACPI for processor (LAPIC) " "configuration information\n"); #endif return; } static int __init disable_acpi_irq(const struct dmi_system_id *d) { if (!acpi_force) { printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n", d->ident); acpi_noirq_set(); } return 0; } static int __init disable_acpi_pci(const struct dmi_system_id *d) { if (!acpi_force) { printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n", d->ident); acpi_disable_pci(); } return 0; } static int __init dmi_disable_acpi(const struct dmi_system_id *d) { if (!acpi_force) { printk(KERN_NOTICE "%s detected: acpi off\n", d->ident); disable_acpi(); } else { printk(KERN_NOTICE "Warning: DMI blacklist says broken, but acpi forced\n"); } return 0; } /* * Force ignoring BIOS IRQ0 override */ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d) { if (!acpi_skip_timer_override) { pr_notice("%s detected: Ignoring BIOS IRQ0 override\n", d->ident); acpi_skip_timer_override = 1; } return 0; } /* * If your system is blacklisted here, but you find that acpi=force * works for you, please contact linux-acpi@vger.kernel.org */ static struct dmi_system_id __initdata acpi_dmi_table[] = { /* * Boxes that need ACPI disabled */ { .callback = dmi_disable_acpi, .ident = "IBM Thinkpad", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), DMI_MATCH(DMI_BOARD_NAME, "2629H1G"), }, }, /* * Boxes that need ACPI PCI IRQ routing disabled */ { .callback = disable_acpi_irq, .ident = "ASUS A7V", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"), DMI_MATCH(DMI_BOARD_NAME, "<A7V>"), /* newer BIOS, Revision 1011, does work */ DMI_MATCH(DMI_BIOS_VERSION, "ASUS A7V ACPI BIOS Revision 1007"), }, }, { /* * Latest BIOS for IBM 600E (1.16) has bad pcinum * for LPC bridge, which is needed for the PCI * interrupt links to work. 
DSDT fix is in bug 5966. * 2645, 2646 model numbers are shared with 600/600E/600X */ .callback = disable_acpi_irq, .ident = "IBM Thinkpad 600 Series 2645", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), DMI_MATCH(DMI_BOARD_NAME, "2645"), }, }, { .callback = disable_acpi_irq, .ident = "IBM Thinkpad 600 Series 2646", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), DMI_MATCH(DMI_BOARD_NAME, "2646"), }, }, /* * Boxes that need ACPI PCI IRQ routing and PCI scan disabled */ { /* _BBN 0 bug */ .callback = disable_acpi_pci, .ident = "ASUS PR-DLS", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"), DMI_MATCH(DMI_BIOS_VERSION, "ASUS PR-DLS ACPI BIOS Revision 1010"), DMI_MATCH(DMI_BIOS_DATE, "03/21/2003") }, }, { .callback = disable_acpi_pci, .ident = "Acer TravelMate 36x Laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), }, }, {} }; /* second table for DMI checks that should run after early-quirks */ static struct dmi_system_id __initdata acpi_dmi_table_late[] = { /* * HP laptops which use a DSDT reporting as HP/SB400/10000, * which includes some code which overrides all temperature * trip points to 16C if the INTIN2 input of the I/O APIC * is enabled. This input is incorrectly designated the * ISA IRQ 0 via an interrupt source override even though * it is wired to the output of the master 8259A and INTIN0 * is not connected at all. Force ignoring BIOS IRQ0 * override in that cases. 
*/ { .callback = dmi_ignore_irq0_timer_override, .ident = "HP nx6115 laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6115"), }, }, { .callback = dmi_ignore_irq0_timer_override, .ident = "HP NX6125 laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6125"), }, }, { .callback = dmi_ignore_irq0_timer_override, .ident = "HP NX6325 laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"), }, }, { .callback = dmi_ignore_irq0_timer_override, .ident = "HP 6715b laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"), }, }, { .callback = dmi_ignore_irq0_timer_override, .ident = "FUJITSU SIEMENS", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"), }, }, {} }; /* * acpi_boot_table_init() and acpi_boot_init() * called from setup_arch(), always. * 1. checksums all tables * 2. enumerates lapics * 3. enumerates io-apics * * acpi_table_init() is separate to allow reading SRAT without * other side effects. * * side effects of acpi_boot_init: * acpi_lapic = 1 if LAPIC found * acpi_ioapic = 1 if IOAPIC found * if (acpi_lapic && acpi_ioapic) smp_found_config = 1; * if acpi_blacklisted() acpi_disabled = 1; * acpi_irq_model=... * ... */ void __init acpi_boot_table_init(void) { dmi_check_system(acpi_dmi_table); /* * If acpi_disabled, bail out */ if (acpi_disabled) return; /* * Initialize the ACPI boot-time table parser. 
*/
	if (acpi_table_init()) {
		disable_acpi();
		return;
	}

	acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);

	/*
	 * blacklist may disable ACPI entirely
	 */
	if (acpi_blacklisted()) {
		if (acpi_force) {
			printk(KERN_WARNING PREFIX "acpi=force override\n");
		} else {
			printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
			disable_acpi();
			return;
		}
	}
}

/*
 * Early ACPI processing: only walks the MADT to discover the local APIC
 * (via early_acpi_process_madt()).  Returns 1 when ACPI is disabled so
 * the caller can fall back, 0 otherwise.
 */
int __init early_acpi_boot_init(void)
{
	/*
	 * If acpi_disabled, bail out
	 */
	if (acpi_disabled)
		return 1;

	/*
	 * Process the Multiple APIC Description Table (MADT), if present
	 */
	early_acpi_process_madt();

	return 0;
}

/*
 * Main ACPI boot-time table walk: re-checks the late DMI quirk table,
 * then parses BOOT (SBF), FADT (SCI interrupt / PM timer), MADT
 * (LAPIC + IOAPIC enumeration) and HPET.  Finally hooks the ACPI PCI
 * init routine unless interrupt routing was disabled.
 */
int __init acpi_boot_init(void)
{
	/* those are executed after early-quirks are executed */
	dmi_check_system(acpi_dmi_table_late);

	/*
	 * If acpi_disabled, bail out
	 */
	if (acpi_disabled)
		return 1;

	acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);

	/*
	 * set sci_int and PM timer address
	 */
	acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt);

	/*
	 * Process the Multiple APIC Description Table (MADT), if present
	 */
	acpi_process_madt();

	acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);

	if (!acpi_noirq)
		x86_init.pci.init = pci_acpi_init;

	return 0;
}

/* Handler for the "acpi=" kernel command-line parameter. */
static int __init parse_acpi(char *arg)
{
	if (!arg)
		return -EINVAL;

	/* "acpi=off" disables both ACPI table parsing and interpreter */
	if (strcmp(arg, "off") == 0) {
		disable_acpi();
	}
	/* acpi=force to over-ride black-list */
	else if (strcmp(arg, "force") == 0) {
		acpi_force = 1;
		acpi_disabled = 0;
	}
	/* acpi=strict disables out-of-spec workarounds */
	else if (strcmp(arg, "strict") == 0) {
		acpi_strict = 1;
	}
	/* acpi=rsdt use RSDT instead of XSDT */
	else if (strcmp(arg, "rsdt") == 0) {
		acpi_rsdt_forced = 1;
	}
	/* "acpi=noirq" disables ACPI interrupt routing */
	else if (strcmp(arg, "noirq") == 0) {
		acpi_noirq_set();
	}
	/* "acpi=copy_dsdt" copys DSDT */
	else if (strcmp(arg, "copy_dsdt") == 0) {
		acpi_gbl_copy_dsdt_locally = 1;
	} else {
		/* Core will printk when we return error. */
		return -EINVAL;
	}
	return 0;
}
early_param("acpi", parse_acpi);

/* FIXME: Using pci= for an ACPI parameter is a travesty.
*/
/* "pci=noacpi" is the only recognised value: it calls acpi_disable_pci(). */
static int __init parse_pci(char *arg)
{
	if (arg && strcmp(arg, "noacpi") == 0)
		acpi_disable_pci();
	return 0;
}
early_param("pci", parse_pci);

/*
 * Warn (and return 1) when the MPS/mptable fallback is not built in but
 * ACPI has been disabled or its IRQ routing turned off.
 */
int __init acpi_mps_check(void)
{
#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_X86_MPPARSE)
/* mptable code is not built-in*/
	if (acpi_disabled || acpi_noirq) {
		printk(KERN_WARNING
		       "MPS support code is not built-in.\n"
		       "Using acpi=off or acpi=noirq or pci=noacpi "
		       "may have problem\n");
		return 1;
	}
#endif
	return 0;
}

#ifdef CONFIG_X86_IO_APIC
/* "acpi_skip_timer_override": ignore the BIOS IRQ0 timer override. */
static int __init parse_acpi_skip_timer_override(char *arg)
{
	acpi_skip_timer_override = 1;
	return 0;
}
early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override);

/* "acpi_use_timer_override": force using the BIOS IRQ0 timer override. */
static int __init parse_acpi_use_timer_override(char *arg)
{
	acpi_use_timer_override = 1;
	return 0;
}
early_param("acpi_use_timer_override", parse_acpi_use_timer_override);
#endif /* CONFIG_X86_IO_APIC */

/*
 * "acpi_sci=edge|level|high|low": override the SCI trigger mode or
 * polarity bits in acpi_sci_flags (only the relevant mask is replaced).
 */
static int __init setup_acpi_sci(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "edge"))
		acpi_sci_flags = ACPI_MADT_TRIGGER_EDGE |
			(acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
	else if (!strcmp(s, "level"))
		acpi_sci_flags = ACPI_MADT_TRIGGER_LEVEL |
			(acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
	else if (!strcmp(s, "high"))
		acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_HIGH |
			(acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
	else if (!strcmp(s, "low"))
		acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_LOW |
			(acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
	else
		return -EINVAL;
	return 0;
}
early_param("acpi_sci", setup_acpi_sci);

/*
 * ACPI Global Lock acquire (the standard algorithm from the ACPI spec).
 * Lock word layout: bit 0 = pending, bit 1 = owned.
 * Returns -1 (nonzero = acquired) when we took ownership, 0 when the
 * lock was already owned and we only set the pending bit.
 */
int __acpi_acquire_global_lock(unsigned int *lock)
{
	unsigned int old, new, val;
	do {
		old = *lock;
		new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
		val = cmpxchg(lock, old, new);
	} while (unlikely (val != old));
	return (new < 3) ? -1 : 0;
}

/*
 * ACPI Global Lock release: clears the owned and pending bits and
 * returns the old pending bit so the caller knows whether firmware
 * must be signalled.
 */
int __acpi_release_global_lock(unsigned int *lock)
{
	unsigned int old, new, val;
	do {
		old = *lock;
		new = old & ~0x3;
		val = cmpxchg(lock, old, new);
	} while (unlikely (val != old));
	return old & 0x1;
}
gpl-2.0
NebulaOy/linux
drivers/video/fm2fb.c
2198
9247
/* * linux/drivers/video/fm2fb.c -- BSC FrameMaster II/Rainbow II frame buffer * device * * Copyright (C) 1998 Steffen A. Mork (linux-dev@morknet.de) * Copyright (C) 1999 Geert Uytterhoeven * * Written for 2.0.x by Steffen A. Mork * Ported to 2.1.x by Geert Uytterhoeven * Ported to new api by James Simmons * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/mm.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/zorro.h> #include <asm/io.h> /* * Some technical notes: * * The BSC FrameMaster II (or Rainbow II) is a simple very dumb * frame buffer which allows to display 24 bit true color images. * Each pixel is 32 bit width so it's very easy to maintain the * frame buffer. One long word has the following layout: * AARRGGBB which means: AA the alpha channel byte, RR the red * channel, GG the green channel and BB the blue channel. * * The FrameMaster II supports the following video modes. * - PAL/NTSC * - interlaced/non interlaced * - composite sync/sync/sync over green * * The resolution is to the following both ones: * - 768x576 (PAL) * - 768x480 (NTSC) * * This means that pixel access per line is fixed due to the * fixed line width. In case of maximal resolution the frame * buffer needs an amount of memory of 1.769.472 bytes which * is near to 2 MByte (the allocated address space of Zorro2). * The memory is channel interleaved. That means every channel * owns four VRAMs. Unfortunately most FrameMasters II are * not assembled with memory for the alpha channel. In this * case it could be possible to add the frame buffer into the * normal memory pool. * * At relative address 0x1ffff8 of the frame buffers base address * there exists a control register with the number of * four control bits. 
They have the following meaning: * bit value meaning * * 0 1 0=interlaced/1=non interlaced * 1 2 0=video out disabled/1=video out enabled * 2 4 0=normal mode as jumpered via JP8/1=complement mode * 3 8 0=read onboard ROM/1 normal operation (required) * * As mentioned above there are several jumper. I think there * is not very much information about the FrameMaster II in * the world so I add these information for completeness. * * JP1 interlace selection (1-2 non interlaced/2-3 interlaced) * JP2 wait state creation (leave as is!) * JP3 wait state creation (leave as is!) * JP4 modulate composite sync on green output (1-2 composite * sync on green channel/2-3 normal composite sync) * JP5 create test signal, shorting this jumper will create * a white screen * JP6 sync creation (1-2 composite sync/2-3 H-sync output) * JP8 video mode (1-2 PAL/2-3 NTSC) * * With the following jumpering table you can connect the * FrameMaster II to a normal TV via SCART connector: * JP1: 2-3 * JP4: 2-3 * JP6: 2-3 * JP8: 1-2 (means PAL for Europe) * * NOTE: * There is no other possibility to change the video timings * except the interlaced/non interlaced, sync control and the * video mode PAL (50 Hz)/NTSC (60 Hz). Inside this * FrameMaster II driver are assumed values to avoid anomalies * to a future X server. Except the pixel clock is really * constant at 30 MHz. * * 9 pin female video connector: * * 1 analog red 0.7 Vss * 2 analog green 0.7 Vss * 3 analog blue 0.7 Vss * 4 H-sync TTL * 5 V-sync TTL * 6 ground * 7 ground * 8 ground * 9 ground * * Some performance notes: * The FrameMaster II was not designed to display a console * this driver would do! It was designed to display still true * color images. Imagine: When scroll up a text line there * must copied ca. 1.7 MBytes to another place inside this * frame buffer. This means 1.7 MByte read and 1.7 MByte write * over the slow 16 bit wide Zorro2 bus! 
A scroll of one * line needs 1 second so do not expect to much from this * driver - he is at the limit! * */ /* * definitions */ #define FRAMEMASTER_SIZE 0x200000 #define FRAMEMASTER_REG 0x1ffff8 #define FRAMEMASTER_NOLACE 1 #define FRAMEMASTER_ENABLE 2 #define FRAMEMASTER_COMPL 4 #define FRAMEMASTER_ROM 8 static volatile unsigned char *fm2fb_reg; static struct fb_fix_screeninfo fb_fix = { .smem_len = FRAMEMASTER_REG, .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_TRUECOLOR, .line_length = (768 << 2), .mmio_len = (8), .accel = FB_ACCEL_NONE, }; static int fm2fb_mode = -1; #define FM2FB_MODE_PAL 0 #define FM2FB_MODE_NTSC 1 static struct fb_var_screeninfo fb_var_modes[] = { { /* 768 x 576, 32 bpp (PAL) */ 768, 576, 768, 576, 0, 0, 32, 0, { 16, 8, 0 }, { 8, 8, 0 }, { 0, 8, 0 }, { 24, 8, 0 }, 0, FB_ACTIVATE_NOW, -1, -1, FB_ACCEL_NONE, 33333, 10, 102, 10, 5, 80, 34, FB_SYNC_COMP_HIGH_ACT, 0 }, { /* 768 x 480, 32 bpp (NTSC - not supported yet */ 768, 480, 768, 480, 0, 0, 32, 0, { 16, 8, 0 }, { 8, 8, 0 }, { 0, 8, 0 }, { 24, 8, 0 }, 0, FB_ACTIVATE_NOW, -1, -1, FB_ACCEL_NONE, 33333, 10, 102, 10, 5, 80, 34, FB_SYNC_COMP_HIGH_ACT, 0 } }; /* * Interface used by the world */ static int fm2fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info); static int fm2fb_blank(int blank, struct fb_info *info); static struct fb_ops fm2fb_ops = { .owner = THIS_MODULE, .fb_setcolreg = fm2fb_setcolreg, .fb_blank = fm2fb_blank, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, }; /* * Blank the display. */ static int fm2fb_blank(int blank, struct fb_info *info) { unsigned char t = FRAMEMASTER_ROM; if (!blank) t |= FRAMEMASTER_ENABLE | FRAMEMASTER_NOLACE; fm2fb_reg[0] = t; return 0; } /* * Set a single color register. The values supplied are already * rounded down to the hardware's capabilities (according to the * entries in the var structure). Return != 0 for invalid regno. 
*/
static int fm2fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
                           u_int transp, struct fb_info *info)
{
	if (regno < 16) {
		/* inputs are 16-bit; keep only the top 8 bits per channel */
		red >>= 8;
		green >>= 8;
		blue >>= 8;

		/* truecolor visual: store a packed 0xRRGGBB palette entry */
		((u32*)(info->pseudo_palette))[regno] = (red << 16) |
			(green << 8) | blue;
	}
	return 0;
}

/*
 *    Initialisation
 */

static int fm2fb_probe(struct zorro_dev *z, const struct zorro_device_id *id);

/* Zorro IDs this driver binds to (FrameMaster II and Rainbow II boards) */
static struct zorro_device_id fm2fb_devices[] = {
	{ ZORRO_PROD_BSC_FRAMEMASTER_II },
	{ ZORRO_PROD_HELFRICH_RAINBOW_II },
	{ 0 }
};
MODULE_DEVICE_TABLE(zorro, fm2fb_devices);

static struct zorro_driver fm2fb_driver = {
	.name		= "fm2fb",
	.id_table	= fm2fb_devices,
	.probe		= fm2fb_probe,
};

/*
 * Probe one Zorro board: map its frame buffer, draw EBU colour bars and
 * register the fbdev device.
 */
static int fm2fb_probe(struct zorro_dev *z, const struct zorro_device_id *id)
{
	struct fb_info *info;
	unsigned long *ptr;
	int is_fm;
	int x, y;

	is_fm = z->id == ZORRO_PROD_BSC_FRAMEMASTER_II;

	if (!zorro_request_device(z,"fm2fb"))
		return -ENXIO;

	/* fb_info plus room for a 16-entry u32 pseudo palette */
	info = framebuffer_alloc(16 * sizeof(u32), &z->dev);
	if (!info) {
		zorro_release_device(z);
		return -ENOMEM;
	}

	if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
		framebuffer_release(info);
		zorro_release_device(z);
		return -ENOMEM;
	}

	/* assigning memory to kernel space */
	fb_fix.smem_start = zorro_resource_start(z);
	info->screen_base = ioremap(fb_fix.smem_start, FRAMEMASTER_SIZE);
	fb_fix.mmio_start = fb_fix.smem_start + FRAMEMASTER_REG;
	/* control register lives at the end of the mapped aperture */
	fm2fb_reg = (unsigned char *)(info->screen_base+FRAMEMASTER_REG);

	strcpy(fb_fix.id, is_fm ?
"FrameMaster II" : "Rainbow II"); /* make EBU color bars on display */ ptr = (unsigned long *)fb_fix.smem_start; for (y = 0; y < 576; y++) { for (x = 0; x < 96; x++) *ptr++ = 0xffffff;/* white */ for (x = 0; x < 96; x++) *ptr++ = 0xffff00;/* yellow */ for (x = 0; x < 96; x++) *ptr++ = 0x00ffff;/* cyan */ for (x = 0; x < 96; x++) *ptr++ = 0x00ff00;/* green */ for (x = 0; x < 96; x++) *ptr++ = 0xff00ff;/* magenta */ for (x = 0; x < 96; x++) *ptr++ = 0xff0000;/* red */ for (x = 0; x < 96; x++) *ptr++ = 0x0000ff;/* blue */ for (x = 0; x < 96; x++) *ptr++ = 0x000000;/* black */ } fm2fb_blank(0, info); if (fm2fb_mode == -1) fm2fb_mode = FM2FB_MODE_PAL; info->fbops = &fm2fb_ops; info->var = fb_var_modes[fm2fb_mode]; info->pseudo_palette = info->par; info->par = NULL; info->fix = fb_fix; info->flags = FBINFO_DEFAULT; if (register_framebuffer(info) < 0) { fb_dealloc_cmap(&info->cmap); iounmap(info->screen_base); framebuffer_release(info); zorro_release_device(z); return -EINVAL; } fb_info(info, "%s frame buffer device\n", fb_fix.id); return 0; } int __init fm2fb_setup(char *options) { char *this_opt; if (!options || !*options) return 0; while ((this_opt = strsep(&options, ",")) != NULL) { if (!strncmp(this_opt, "pal", 3)) fm2fb_mode = FM2FB_MODE_PAL; else if (!strncmp(this_opt, "ntsc", 4)) fm2fb_mode = FM2FB_MODE_NTSC; } return 0; } int __init fm2fb_init(void) { char *option = NULL; if (fb_get_options("fm2fb", &option)) return -ENODEV; fm2fb_setup(option); return zorro_register_driver(&fm2fb_driver); } module_init(fm2fb_init); MODULE_LICENSE("GPL");
gpl-2.0
googyanas/Googy-Max-N4-TW511-Kernel
drivers/media/rc/streamzap.c
2710
14121
/* * Streamzap Remote Control driver * * Copyright (c) 2005 Christoph Bartelmus <lirc@bartelmus.de> * Copyright (c) 2010 Jarod Wilson <jarod@wilsonet.com> * * This driver was based on the work of Greg Wickham and Adrian * Dewhurst. It was substantially rewritten to support correct signal * gaps and now maintains a delay buffer, which is used to present * consistent timing behaviour to user space applications. Without the * delay buffer an ugly hack would be required in lircd, which can * cause sluggish signal decoding in certain situations. * * Ported to in-kernel ir-core interface by Jarod Wilson * * This driver is based on the USB skeleton driver packaged with the * kernel; copyright (C) 2001-2003 Greg Kroah-Hartman (greg@kroah.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/device.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/usb/input.h> #include <media/rc-core.h> #define DRIVER_VERSION "1.61" #define DRIVER_NAME "streamzap" #define DRIVER_DESC "Streamzap Remote Control driver" #ifdef CONFIG_USB_DEBUG static bool debug = 1; #else static bool debug; #endif #define USB_STREAMZAP_VENDOR_ID 0x0e9c #define USB_STREAMZAP_PRODUCT_ID 0x0000 /* table of devices that work with this driver */ static struct usb_device_id streamzap_table[] = { /* Streamzap Remote Control */ { USB_DEVICE(USB_STREAMZAP_VENDOR_ID, USB_STREAMZAP_PRODUCT_ID) }, /* Terminating entry */ { } }; MODULE_DEVICE_TABLE(usb, streamzap_table); #define SZ_PULSE_MASK 0xf0 #define SZ_SPACE_MASK 0x0f #define SZ_TIMEOUT 0xff #define SZ_RESOLUTION 256 /* number of samples buffered */ #define SZ_BUF_LEN 128 /* from ir-rc5-sz-decoder.c */ #ifdef CONFIG_IR_RC5_SZ_DECODER_MODULE #define load_rc5_sz_decode() request_module("ir-rc5-sz-decoder") #else #define load_rc5_sz_decode() {} #endif enum StreamzapDecoderState { PulseSpace, FullPulse, FullSpace, IgnorePulse }; /* structure to hold our device specific stuff */ struct streamzap_ir { /* ir-core */ struct rc_dev *rdev; /* core device info */ struct device *dev; /* usb */ struct usb_device *usbdev; struct usb_interface *interface; struct usb_endpoint_descriptor *endpoint; struct urb *urb_in; /* buffer & dma */ unsigned char *buf_in; dma_addr_t dma_in; unsigned int buf_in_len; /* track what state we're in */ enum StreamzapDecoderState decoder_state; /* tracks whether we are currently receiving some signal */ bool idle; /* sum of signal lengths received since signal start */ unsigned long sum; /* start time of signal; necessary for gap tracking */ struct timeval signal_last; 
struct timeval signal_start; bool timeout_enabled; char name[128]; char phys[64]; }; /* local function prototypes */ static int streamzap_probe(struct usb_interface *interface, const struct usb_device_id *id); static void streamzap_disconnect(struct usb_interface *interface); static void streamzap_callback(struct urb *urb); static int streamzap_suspend(struct usb_interface *intf, pm_message_t message); static int streamzap_resume(struct usb_interface *intf); /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver streamzap_driver = { .name = DRIVER_NAME, .probe = streamzap_probe, .disconnect = streamzap_disconnect, .suspend = streamzap_suspend, .resume = streamzap_resume, .id_table = streamzap_table, }; static void sz_push(struct streamzap_ir *sz, struct ir_raw_event rawir) { dev_dbg(sz->dev, "Storing %s with duration %u us\n", (rawir.pulse ? "pulse" : "space"), rawir.duration); ir_raw_event_store_with_filter(sz->rdev, &rawir); } static void sz_push_full_pulse(struct streamzap_ir *sz, unsigned char value) { DEFINE_IR_RAW_EVENT(rawir); if (sz->idle) { long deltv; sz->signal_last = sz->signal_start; do_gettimeofday(&sz->signal_start); deltv = sz->signal_start.tv_sec - sz->signal_last.tv_sec; rawir.pulse = false; if (deltv > 15) { /* really long time */ rawir.duration = IR_MAX_DURATION; } else { rawir.duration = (int)(deltv * 1000000 + sz->signal_start.tv_usec - sz->signal_last.tv_usec); rawir.duration -= sz->sum; rawir.duration = US_TO_NS(rawir.duration); rawir.duration &= IR_MAX_DURATION; } sz_push(sz, rawir); sz->idle = false; sz->sum = 0; } rawir.pulse = true; rawir.duration = ((int) value) * SZ_RESOLUTION; rawir.duration += SZ_RESOLUTION / 2; sz->sum += rawir.duration; rawir.duration = US_TO_NS(rawir.duration); rawir.duration &= IR_MAX_DURATION; sz_push(sz, rawir); } static void sz_push_half_pulse(struct streamzap_ir *sz, unsigned char value) { sz_push_full_pulse(sz, (value & SZ_PULSE_MASK) >> 4); } static void 
sz_push_full_space(struct streamzap_ir *sz, unsigned char value) { DEFINE_IR_RAW_EVENT(rawir); rawir.pulse = false; rawir.duration = ((int) value) * SZ_RESOLUTION; rawir.duration += SZ_RESOLUTION / 2; sz->sum += rawir.duration; rawir.duration = US_TO_NS(rawir.duration); sz_push(sz, rawir); } static void sz_push_half_space(struct streamzap_ir *sz, unsigned long value) { sz_push_full_space(sz, value & SZ_SPACE_MASK); } /** * streamzap_callback - usb IRQ handler callback * * This procedure is invoked on reception of data from * the usb remote. */ static void streamzap_callback(struct urb *urb) { struct streamzap_ir *sz; unsigned int i; int len; if (!urb) return; sz = urb->context; len = urb->actual_length; switch (urb->status) { case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* * this urb is terminated, clean up. * sz might already be invalid at this point */ dev_err(sz->dev, "urb terminated, status: %d\n", urb->status); return; default: break; } dev_dbg(sz->dev, "%s: received urb, len %d\n", __func__, len); for (i = 0; i < len; i++) { dev_dbg(sz->dev, "sz->buf_in[%d]: %x\n", i, (unsigned char)sz->buf_in[i]); switch (sz->decoder_state) { case PulseSpace: if ((sz->buf_in[i] & SZ_PULSE_MASK) == SZ_PULSE_MASK) { sz->decoder_state = FullPulse; continue; } else if ((sz->buf_in[i] & SZ_SPACE_MASK) == SZ_SPACE_MASK) { sz_push_half_pulse(sz, sz->buf_in[i]); sz->decoder_state = FullSpace; continue; } else { sz_push_half_pulse(sz, sz->buf_in[i]); sz_push_half_space(sz, sz->buf_in[i]); } break; case FullPulse: sz_push_full_pulse(sz, sz->buf_in[i]); sz->decoder_state = IgnorePulse; break; case FullSpace: if (sz->buf_in[i] == SZ_TIMEOUT) { DEFINE_IR_RAW_EVENT(rawir); rawir.pulse = false; rawir.duration = sz->rdev->timeout; sz->idle = true; if (sz->timeout_enabled) sz_push(sz, rawir); ir_raw_event_handle(sz->rdev); ir_raw_event_reset(sz->rdev); } else { sz_push_full_space(sz, sz->buf_in[i]); } sz->decoder_state = PulseSpace; break; case IgnorePulse: if ((sz->buf_in[i] & 
SZ_SPACE_MASK) == SZ_SPACE_MASK) { sz->decoder_state = FullSpace; continue; } sz_push_half_space(sz, sz->buf_in[i]); sz->decoder_state = PulseSpace; break; } } ir_raw_event_handle(sz->rdev); usb_submit_urb(urb, GFP_ATOMIC); return; } static struct rc_dev *streamzap_init_rc_dev(struct streamzap_ir *sz) { struct rc_dev *rdev; struct device *dev = sz->dev; int ret; rdev = rc_allocate_device(); if (!rdev) { dev_err(dev, "remote dev allocation failed\n"); goto out; } snprintf(sz->name, sizeof(sz->name), "Streamzap PC Remote Infrared " "Receiver (%04x:%04x)", le16_to_cpu(sz->usbdev->descriptor.idVendor), le16_to_cpu(sz->usbdev->descriptor.idProduct)); usb_make_path(sz->usbdev, sz->phys, sizeof(sz->phys)); strlcat(sz->phys, "/input0", sizeof(sz->phys)); rdev->input_name = sz->name; rdev->input_phys = sz->phys; usb_to_input_id(sz->usbdev, &rdev->input_id); rdev->dev.parent = dev; rdev->priv = sz; rdev->driver_type = RC_DRIVER_IR_RAW; rdev->allowed_protos = RC_BIT_ALL; rdev->driver_name = DRIVER_NAME; rdev->map_name = RC_MAP_STREAMZAP; ret = rc_register_device(rdev); if (ret < 0) { dev_err(dev, "remote input device register failed\n"); goto out; } return rdev; out: rc_free_device(rdev); return NULL; } /** * streamzap_probe * * Called by usb-core to associated with a candidate device * On any failure the return value is the ERROR * On success return 0 */ static int streamzap_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *usbdev = interface_to_usbdev(intf); struct usb_host_interface *iface_host; struct streamzap_ir *sz = NULL; char buf[63], name[128] = ""; int retval = -ENOMEM; int pipe, maxp; /* Allocate space for device driver specific data */ sz = kzalloc(sizeof(struct streamzap_ir), GFP_KERNEL); if (!sz) return -ENOMEM; sz->usbdev = usbdev; sz->interface = intf; /* Check to ensure endpoint information matches requirements */ iface_host = intf->cur_altsetting; if (iface_host->desc.bNumEndpoints != 1) { dev_err(&intf->dev, "%s: 
Unexpected desc.bNumEndpoints (%d)\n", __func__, iface_host->desc.bNumEndpoints); retval = -ENODEV; goto free_sz; } sz->endpoint = &(iface_host->endpoint[0].desc); if ((sz->endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK) != USB_DIR_IN) { dev_err(&intf->dev, "%s: endpoint doesn't match input device " "02%02x\n", __func__, sz->endpoint->bEndpointAddress); retval = -ENODEV; goto free_sz; } if ((sz->endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_INT) { dev_err(&intf->dev, "%s: endpoint attributes don't match xfer " "02%02x\n", __func__, sz->endpoint->bmAttributes); retval = -ENODEV; goto free_sz; } pipe = usb_rcvintpipe(usbdev, sz->endpoint->bEndpointAddress); maxp = usb_maxpacket(usbdev, pipe, usb_pipeout(pipe)); if (maxp == 0) { dev_err(&intf->dev, "%s: endpoint Max Packet Size is 0!?!\n", __func__); retval = -ENODEV; goto free_sz; } /* Allocate the USB buffer and IRQ URB */ sz->buf_in = usb_alloc_coherent(usbdev, maxp, GFP_ATOMIC, &sz->dma_in); if (!sz->buf_in) goto free_sz; sz->urb_in = usb_alloc_urb(0, GFP_KERNEL); if (!sz->urb_in) goto free_buf_in; sz->dev = &intf->dev; sz->buf_in_len = maxp; if (usbdev->descriptor.iManufacturer && usb_string(usbdev, usbdev->descriptor.iManufacturer, buf, sizeof(buf)) > 0) strlcpy(name, buf, sizeof(name)); if (usbdev->descriptor.iProduct && usb_string(usbdev, usbdev->descriptor.iProduct, buf, sizeof(buf)) > 0) snprintf(name + strlen(name), sizeof(name) - strlen(name), " %s", buf); sz->rdev = streamzap_init_rc_dev(sz); if (!sz->rdev) goto rc_dev_fail; sz->idle = true; sz->decoder_state = PulseSpace; /* FIXME: don't yet have a way to set this */ sz->timeout_enabled = true; sz->rdev->timeout = ((US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION) & IR_MAX_DURATION) | 0x03000000); #if 0 /* not yet supported, depends on patches from maxim */ /* see also: LIRC_GET_REC_RESOLUTION and LIRC_SET_REC_TIMEOUT */ sz->min_timeout = US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION); sz->max_timeout = US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION); 
#endif do_gettimeofday(&sz->signal_start); /* Complete final initialisations */ usb_fill_int_urb(sz->urb_in, usbdev, pipe, sz->buf_in, maxp, (usb_complete_t)streamzap_callback, sz, sz->endpoint->bInterval); sz->urb_in->transfer_dma = sz->dma_in; sz->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_set_intfdata(intf, sz); if (usb_submit_urb(sz->urb_in, GFP_ATOMIC)) dev_err(sz->dev, "urb submit failed\n"); dev_info(sz->dev, "Registered %s on usb%d:%d\n", name, usbdev->bus->busnum, usbdev->devnum); /* Load the streamzap not-quite-rc5 decoder too */ load_rc5_sz_decode(); return 0; rc_dev_fail: usb_free_urb(sz->urb_in); free_buf_in: usb_free_coherent(usbdev, maxp, sz->buf_in, sz->dma_in); free_sz: kfree(sz); return retval; } /** * streamzap_disconnect * * Called by the usb core when the device is removed from the system. * * This routine guarantees that the driver will not submit any more urbs * by clearing dev->usbdev. It is also supposed to terminate any currently * active urbs. Unfortunately, usb_bulk_msg(), used in streamzap_read(), * does not provide any way to do this. 
*/ static void streamzap_disconnect(struct usb_interface *interface) { struct streamzap_ir *sz = usb_get_intfdata(interface); struct usb_device *usbdev = interface_to_usbdev(interface); usb_set_intfdata(interface, NULL); if (!sz) return; sz->usbdev = NULL; rc_unregister_device(sz->rdev); usb_kill_urb(sz->urb_in); usb_free_urb(sz->urb_in); usb_free_coherent(usbdev, sz->buf_in_len, sz->buf_in, sz->dma_in); kfree(sz); } static int streamzap_suspend(struct usb_interface *intf, pm_message_t message) { struct streamzap_ir *sz = usb_get_intfdata(intf); usb_kill_urb(sz->urb_in); return 0; } static int streamzap_resume(struct usb_interface *intf) { struct streamzap_ir *sz = usb_get_intfdata(intf); if (usb_submit_urb(sz->urb_in, GFP_ATOMIC)) { dev_err(sz->dev, "Error sumbiting urb\n"); return -EIO; } return 0; } module_usb_driver(streamzap_driver); MODULE_AUTHOR("Jarod Wilson <jarod@wilsonet.com>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Enable debugging messages");
gpl-2.0
c0d3x42/P8000-Kernel
drivers/scsi/aic94xx/aic94xx_init.c
3222
28469
/* * Aic94xx SAS/SATA driver initialization. * * Copyright (C) 2005 Adaptec, Inc. All rights reserved. * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> * * This file is licensed under GPLv2. * * This file is part of the aic94xx driver. * * The aic94xx driver is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; version 2 of the * License. * * The aic94xx driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with the aic94xx driver; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/firmware.h> #include <linux/slab.h> #include <scsi/scsi_host.h> #include "aic94xx.h" #include "aic94xx_reg.h" #include "aic94xx_hwi.h" #include "aic94xx_seq.h" #include "aic94xx_sds.h" /* The format is "version.release.patchlevel" */ #define ASD_DRIVER_VERSION "1.0.3" static int use_msi = 0; module_param_named(use_msi, use_msi, int, S_IRUGO); MODULE_PARM_DESC(use_msi, "\n" "\tEnable(1) or disable(0) using PCI MSI.\n" "\tDefault: 0"); static int lldd_max_execute_num = 0; module_param_named(collector, lldd_max_execute_num, int, S_IRUGO); MODULE_PARM_DESC(collector, "\n" "\tIf greater than one, tells the SAS Layer to run in Task Collector\n" "\tMode. 
If 1 or 0, tells the SAS Layer to run in Direct Mode.\n" "\tThe aic94xx SAS LLDD supports both modes.\n" "\tDefault: 0 (Direct Mode).\n"); static struct scsi_transport_template *aic94xx_transport_template; static int asd_scan_finished(struct Scsi_Host *, unsigned long); static void asd_scan_start(struct Scsi_Host *); static struct scsi_host_template aic94xx_sht = { .module = THIS_MODULE, /* .name is initialized */ .name = "aic94xx", .queuecommand = sas_queuecommand, .target_alloc = sas_target_alloc, .slave_configure = sas_slave_configure, .scan_finished = asd_scan_finished, .scan_start = asd_scan_start, .change_queue_depth = sas_change_queue_depth, .change_queue_type = sas_change_queue_type, .bios_param = sas_bios_param, .can_queue = 1, .cmd_per_lun = 1, .this_id = -1, .sg_tablesize = SG_ALL, .max_sectors = SCSI_DEFAULT_MAX_SECTORS, .use_clustering = ENABLE_CLUSTERING, .eh_device_reset_handler = sas_eh_device_reset_handler, .eh_bus_reset_handler = sas_eh_bus_reset_handler, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, }; static int asd_map_memio(struct asd_ha_struct *asd_ha) { int err, i; struct asd_ha_addrspace *io_handle; asd_ha->iospace = 0; for (i = 0; i < 3; i += 2) { io_handle = &asd_ha->io_handle[i==0?0:1]; io_handle->start = pci_resource_start(asd_ha->pcidev, i); io_handle->len = pci_resource_len(asd_ha->pcidev, i); io_handle->flags = pci_resource_flags(asd_ha->pcidev, i); err = -ENODEV; if (!io_handle->start || !io_handle->len) { asd_printk("MBAR%d start or length for %s is 0.\n", i==0?0:1, pci_name(asd_ha->pcidev)); goto Err; } err = pci_request_region(asd_ha->pcidev, i, ASD_DRIVER_NAME); if (err) { asd_printk("couldn't reserve memory region for %s\n", pci_name(asd_ha->pcidev)); goto Err; } if (io_handle->flags & IORESOURCE_CACHEABLE) io_handle->addr = ioremap(io_handle->start, io_handle->len); else io_handle->addr = ioremap_nocache(io_handle->start, io_handle->len); if (!io_handle->addr) { asd_printk("couldn't map MBAR%d of %s\n", i==0?0:1, 
pci_name(asd_ha->pcidev)); goto Err_unreq; } } return 0; Err_unreq: pci_release_region(asd_ha->pcidev, i); Err: if (i > 0) { io_handle = &asd_ha->io_handle[0]; iounmap(io_handle->addr); pci_release_region(asd_ha->pcidev, 0); } return err; } static void asd_unmap_memio(struct asd_ha_struct *asd_ha) { struct asd_ha_addrspace *io_handle; io_handle = &asd_ha->io_handle[1]; iounmap(io_handle->addr); pci_release_region(asd_ha->pcidev, 2); io_handle = &asd_ha->io_handle[0]; iounmap(io_handle->addr); pci_release_region(asd_ha->pcidev, 0); } static int asd_map_ioport(struct asd_ha_struct *asd_ha) { int i = PCI_IOBAR_OFFSET, err; struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0]; asd_ha->iospace = 1; io_handle->start = pci_resource_start(asd_ha->pcidev, i); io_handle->len = pci_resource_len(asd_ha->pcidev, i); io_handle->flags = pci_resource_flags(asd_ha->pcidev, i); io_handle->addr = (void __iomem *) io_handle->start; if (!io_handle->start || !io_handle->len) { asd_printk("couldn't get IO ports for %s\n", pci_name(asd_ha->pcidev)); return -ENODEV; } err = pci_request_region(asd_ha->pcidev, i, ASD_DRIVER_NAME); if (err) { asd_printk("couldn't reserve io space for %s\n", pci_name(asd_ha->pcidev)); } return err; } static void asd_unmap_ioport(struct asd_ha_struct *asd_ha) { pci_release_region(asd_ha->pcidev, PCI_IOBAR_OFFSET); } static int asd_map_ha(struct asd_ha_struct *asd_ha) { int err; u16 cmd_reg; err = pci_read_config_word(asd_ha->pcidev, PCI_COMMAND, &cmd_reg); if (err) { asd_printk("couldn't read command register of %s\n", pci_name(asd_ha->pcidev)); goto Err; } err = -ENODEV; if (cmd_reg & PCI_COMMAND_MEMORY) { if ((err = asd_map_memio(asd_ha))) goto Err; } else if (cmd_reg & PCI_COMMAND_IO) { if ((err = asd_map_ioport(asd_ha))) goto Err; asd_printk("%s ioport mapped -- upgrade your hardware\n", pci_name(asd_ha->pcidev)); } else { asd_printk("no proper device access to %s\n", pci_name(asd_ha->pcidev)); goto Err; } return 0; Err: return err; } static void 
asd_unmap_ha(struct asd_ha_struct *asd_ha) { if (asd_ha->iospace) asd_unmap_ioport(asd_ha); else asd_unmap_memio(asd_ha); } static const char *asd_dev_rev[30] = { [0] = "A0", [1] = "A1", [8] = "B0", }; static int asd_common_setup(struct asd_ha_struct *asd_ha) { int err, i; asd_ha->revision_id = asd_ha->pcidev->revision; err = -ENODEV; if (asd_ha->revision_id < AIC9410_DEV_REV_B0) { asd_printk("%s is revision %s (%X), which is not supported\n", pci_name(asd_ha->pcidev), asd_dev_rev[asd_ha->revision_id], asd_ha->revision_id); goto Err; } /* Provide some sane default values. */ asd_ha->hw_prof.max_scbs = 512; asd_ha->hw_prof.max_ddbs = ASD_MAX_DDBS; asd_ha->hw_prof.num_phys = ASD_MAX_PHYS; /* All phys are enabled, by default. */ asd_ha->hw_prof.enabled_phys = 0xFF; for (i = 0; i < ASD_MAX_PHYS; i++) { asd_ha->hw_prof.phy_desc[i].max_sas_lrate = SAS_LINK_RATE_3_0_GBPS; asd_ha->hw_prof.phy_desc[i].min_sas_lrate = SAS_LINK_RATE_1_5_GBPS; asd_ha->hw_prof.phy_desc[i].max_sata_lrate = SAS_LINK_RATE_1_5_GBPS; asd_ha->hw_prof.phy_desc[i].min_sata_lrate = SAS_LINK_RATE_1_5_GBPS; } return 0; Err: return err; } static int asd_aic9410_setup(struct asd_ha_struct *asd_ha) { int err = asd_common_setup(asd_ha); if (err) return err; asd_ha->hw_prof.addr_range = 8; asd_ha->hw_prof.port_name_base = 0; asd_ha->hw_prof.dev_name_base = 8; asd_ha->hw_prof.sata_name_base = 16; return 0; } static int asd_aic9405_setup(struct asd_ha_struct *asd_ha) { int err = asd_common_setup(asd_ha); if (err) return err; asd_ha->hw_prof.addr_range = 4; asd_ha->hw_prof.port_name_base = 0; asd_ha->hw_prof.dev_name_base = 4; asd_ha->hw_prof.sata_name_base = 8; return 0; } static ssize_t asd_show_dev_rev(struct device *dev, struct device_attribute *attr, char *buf) { struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev); return snprintf(buf, PAGE_SIZE, "%s\n", asd_dev_rev[asd_ha->revision_id]); } static DEVICE_ATTR(revision, S_IRUGO, asd_show_dev_rev, NULL); static ssize_t asd_show_dev_bios_build(struct device *dev, 
struct device_attribute *attr,char *buf) { struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev); return snprintf(buf, PAGE_SIZE, "%d\n", asd_ha->hw_prof.bios.bld); } static DEVICE_ATTR(bios_build, S_IRUGO, asd_show_dev_bios_build, NULL); static ssize_t asd_show_dev_pcba_sn(struct device *dev, struct device_attribute *attr, char *buf) { struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev); return snprintf(buf, PAGE_SIZE, "%s\n", asd_ha->hw_prof.pcba_sn); } static DEVICE_ATTR(pcba_sn, S_IRUGO, asd_show_dev_pcba_sn, NULL); #define FLASH_CMD_NONE 0x00 #define FLASH_CMD_UPDATE 0x01 #define FLASH_CMD_VERIFY 0x02 struct flash_command { u8 command[8]; int code; }; static struct flash_command flash_command_table[] = { {"verify", FLASH_CMD_VERIFY}, {"update", FLASH_CMD_UPDATE}, {"", FLASH_CMD_NONE} /* Last entry should be NULL. */ }; struct error_bios { char *reason; int err_code; }; static struct error_bios flash_error_table[] = { {"Failed to open bios image file", FAIL_OPEN_BIOS_FILE}, {"PCI ID mismatch", FAIL_CHECK_PCI_ID}, {"Checksum mismatch", FAIL_CHECK_SUM}, {"Unknown Error", FAIL_UNKNOWN}, {"Failed to verify.", FAIL_VERIFY}, {"Failed to reset flash chip.", FAIL_RESET_FLASH}, {"Failed to find flash chip type.", FAIL_FIND_FLASH_ID}, {"Failed to erash flash chip.", FAIL_ERASE_FLASH}, {"Failed to program flash chip.", FAIL_WRITE_FLASH}, {"Flash in progress", FLASH_IN_PROGRESS}, {"Image file size Error", FAIL_FILE_SIZE}, {"Input parameter error", FAIL_PARAMETERS}, {"Out of memory", FAIL_OUT_MEMORY}, {"OK", 0} /* Last entry err_code = 0. 
*/ }; static ssize_t asd_store_update_bios(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev); char *cmd_ptr, *filename_ptr; struct bios_file_header header, *hdr_ptr; int res, i; u32 csum = 0; int flash_command = FLASH_CMD_NONE; int err = 0; cmd_ptr = kzalloc(count*2, GFP_KERNEL); if (!cmd_ptr) { err = FAIL_OUT_MEMORY; goto out; } filename_ptr = cmd_ptr + count; res = sscanf(buf, "%s %s", cmd_ptr, filename_ptr); if (res != 2) { err = FAIL_PARAMETERS; goto out1; } for (i = 0; flash_command_table[i].code != FLASH_CMD_NONE; i++) { if (!memcmp(flash_command_table[i].command, cmd_ptr, strlen(cmd_ptr))) { flash_command = flash_command_table[i].code; break; } } if (flash_command == FLASH_CMD_NONE) { err = FAIL_PARAMETERS; goto out1; } if (asd_ha->bios_status == FLASH_IN_PROGRESS) { err = FLASH_IN_PROGRESS; goto out1; } err = request_firmware(&asd_ha->bios_image, filename_ptr, &asd_ha->pcidev->dev); if (err) { asd_printk("Failed to load bios image file %s, error %d\n", filename_ptr, err); err = FAIL_OPEN_BIOS_FILE; goto out1; } hdr_ptr = (struct bios_file_header *)asd_ha->bios_image->data; if ((hdr_ptr->contrl_id.vendor != asd_ha->pcidev->vendor || hdr_ptr->contrl_id.device != asd_ha->pcidev->device) && (hdr_ptr->contrl_id.sub_vendor != asd_ha->pcidev->vendor || hdr_ptr->contrl_id.sub_device != asd_ha->pcidev->device)) { ASD_DPRINTK("The PCI vendor or device id does not match\n"); ASD_DPRINTK("vendor=%x dev=%x sub_vendor=%x sub_dev=%x" " pci vendor=%x pci dev=%x\n", hdr_ptr->contrl_id.vendor, hdr_ptr->contrl_id.device, hdr_ptr->contrl_id.sub_vendor, hdr_ptr->contrl_id.sub_device, asd_ha->pcidev->vendor, asd_ha->pcidev->device); err = FAIL_CHECK_PCI_ID; goto out2; } if (hdr_ptr->filelen != asd_ha->bios_image->size) { err = FAIL_FILE_SIZE; goto out2; } /* calculate checksum */ for (i = 0; i < hdr_ptr->filelen; i++) csum += asd_ha->bios_image->data[i]; if ((csum & 0x0000ffff) != 
hdr_ptr->checksum) { ASD_DPRINTK("BIOS file checksum mismatch\n"); err = FAIL_CHECK_SUM; goto out2; } if (flash_command == FLASH_CMD_UPDATE) { asd_ha->bios_status = FLASH_IN_PROGRESS; err = asd_write_flash_seg(asd_ha, &asd_ha->bios_image->data[sizeof(*hdr_ptr)], 0, hdr_ptr->filelen-sizeof(*hdr_ptr)); if (!err) err = asd_verify_flash_seg(asd_ha, &asd_ha->bios_image->data[sizeof(*hdr_ptr)], 0, hdr_ptr->filelen-sizeof(*hdr_ptr)); } else { asd_ha->bios_status = FLASH_IN_PROGRESS; err = asd_verify_flash_seg(asd_ha, &asd_ha->bios_image->data[sizeof(header)], 0, hdr_ptr->filelen-sizeof(header)); } out2: release_firmware(asd_ha->bios_image); out1: kfree(cmd_ptr); out: asd_ha->bios_status = err; if (!err) return count; else return -err; } static ssize_t asd_show_update_bios(struct device *dev, struct device_attribute *attr, char *buf) { int i; struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev); for (i = 0; flash_error_table[i].err_code != 0; i++) { if (flash_error_table[i].err_code == asd_ha->bios_status) break; } if (asd_ha->bios_status != FLASH_IN_PROGRESS) asd_ha->bios_status = FLASH_OK; return snprintf(buf, PAGE_SIZE, "status=%x %s\n", flash_error_table[i].err_code, flash_error_table[i].reason); } static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR, asd_show_update_bios, asd_store_update_bios); static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha) { int err; err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision); if (err) return err; err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); if (err) goto err_rev; err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn); if (err) goto err_biosb; err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_update_bios); if (err) goto err_update_bios; return 0; err_update_bios: device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn); err_biosb: device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); err_rev: device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision); return 
err; } static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha) { device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision); device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn); device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios); } /* The first entry, 0, is used for dynamic ids, the rest for devices * we know about. */ static const struct asd_pcidev_struct { const char * name; int (*setup)(struct asd_ha_struct *asd_ha); } asd_pcidev_data[] = { /* Id 0 is used for dynamic ids. */ { .name = "Adaptec AIC-94xx SAS/SATA Host Adapter", .setup = asd_aic9410_setup }, { .name = "Adaptec AIC-9410W SAS/SATA Host Adapter", .setup = asd_aic9410_setup }, { .name = "Adaptec AIC-9405W SAS/SATA Host Adapter", .setup = asd_aic9405_setup }, }; static int asd_create_ha_caches(struct asd_ha_struct *asd_ha) { asd_ha->scb_pool = dma_pool_create(ASD_DRIVER_NAME "_scb_pool", &asd_ha->pcidev->dev, sizeof(struct scb), 8, 0); if (!asd_ha->scb_pool) { asd_printk("couldn't create scb pool\n"); return -ENOMEM; } return 0; } /** * asd_free_edbs -- free empty data buffers * asd_ha: pointer to host adapter structure */ static void asd_free_edbs(struct asd_ha_struct *asd_ha) { struct asd_seq_data *seq = &asd_ha->seq; int i; for (i = 0; i < seq->num_edbs; i++) asd_free_coherent(asd_ha, seq->edb_arr[i]); kfree(seq->edb_arr); seq->edb_arr = NULL; } static void asd_free_escbs(struct asd_ha_struct *asd_ha) { struct asd_seq_data *seq = &asd_ha->seq; int i; for (i = 0; i < seq->num_escbs; i++) { if (!list_empty(&seq->escb_arr[i]->list)) list_del_init(&seq->escb_arr[i]->list); asd_ascb_free(seq->escb_arr[i]); } kfree(seq->escb_arr); seq->escb_arr = NULL; } static void asd_destroy_ha_caches(struct asd_ha_struct *asd_ha) { int i; if (asd_ha->hw_prof.ddb_ext) asd_free_coherent(asd_ha, asd_ha->hw_prof.ddb_ext); if (asd_ha->hw_prof.scb_ext) asd_free_coherent(asd_ha, asd_ha->hw_prof.scb_ext); if 
(asd_ha->hw_prof.ddb_bitmap) kfree(asd_ha->hw_prof.ddb_bitmap); asd_ha->hw_prof.ddb_bitmap = NULL; for (i = 0; i < ASD_MAX_PHYS; i++) { struct asd_phy *phy = &asd_ha->phys[i]; asd_free_coherent(asd_ha, phy->id_frm_tok); } if (asd_ha->seq.escb_arr) asd_free_escbs(asd_ha); if (asd_ha->seq.edb_arr) asd_free_edbs(asd_ha); if (asd_ha->hw_prof.ue.area) { kfree(asd_ha->hw_prof.ue.area); asd_ha->hw_prof.ue.area = NULL; } if (asd_ha->seq.tc_index_array) { kfree(asd_ha->seq.tc_index_array); kfree(asd_ha->seq.tc_index_bitmap); asd_ha->seq.tc_index_array = NULL; asd_ha->seq.tc_index_bitmap = NULL; } if (asd_ha->seq.actual_dl) { asd_free_coherent(asd_ha, asd_ha->seq.actual_dl); asd_ha->seq.actual_dl = NULL; asd_ha->seq.dl = NULL; } if (asd_ha->seq.next_scb.vaddr) { dma_pool_free(asd_ha->scb_pool, asd_ha->seq.next_scb.vaddr, asd_ha->seq.next_scb.dma_handle); asd_ha->seq.next_scb.vaddr = NULL; } dma_pool_destroy(asd_ha->scb_pool); asd_ha->scb_pool = NULL; } struct kmem_cache *asd_dma_token_cache; struct kmem_cache *asd_ascb_cache; static int asd_create_global_caches(void) { if (!asd_dma_token_cache) { asd_dma_token_cache = kmem_cache_create(ASD_DRIVER_NAME "_dma_token", sizeof(struct asd_dma_tok), 0, SLAB_HWCACHE_ALIGN, NULL); if (!asd_dma_token_cache) { asd_printk("couldn't create dma token cache\n"); return -ENOMEM; } } if (!asd_ascb_cache) { asd_ascb_cache = kmem_cache_create(ASD_DRIVER_NAME "_ascb", sizeof(struct asd_ascb), 0, SLAB_HWCACHE_ALIGN, NULL); if (!asd_ascb_cache) { asd_printk("couldn't create ascb cache\n"); goto Err; } } return 0; Err: kmem_cache_destroy(asd_dma_token_cache); asd_dma_token_cache = NULL; return -ENOMEM; } static void asd_destroy_global_caches(void) { if (asd_dma_token_cache) kmem_cache_destroy(asd_dma_token_cache); asd_dma_token_cache = NULL; if (asd_ascb_cache) kmem_cache_destroy(asd_ascb_cache); asd_ascb_cache = NULL; } static int asd_register_sas_ha(struct asd_ha_struct *asd_ha) { int i; struct asd_sas_phy **sas_phys = kcalloc(ASD_MAX_PHYS, 
sizeof(*sas_phys), GFP_KERNEL); struct asd_sas_port **sas_ports = kcalloc(ASD_MAX_PHYS, sizeof(*sas_ports), GFP_KERNEL); if (!sas_phys || !sas_ports) { kfree(sas_phys); kfree(sas_ports); return -ENOMEM; } asd_ha->sas_ha.sas_ha_name = (char *) asd_ha->name; asd_ha->sas_ha.lldd_module = THIS_MODULE; asd_ha->sas_ha.sas_addr = &asd_ha->hw_prof.sas_addr[0]; for (i = 0; i < ASD_MAX_PHYS; i++) { sas_phys[i] = &asd_ha->phys[i].sas_phy; sas_ports[i] = &asd_ha->ports[i]; } asd_ha->sas_ha.sas_phy = sas_phys; asd_ha->sas_ha.sas_port= sas_ports; asd_ha->sas_ha.num_phys= ASD_MAX_PHYS; asd_ha->sas_ha.lldd_queue_size = asd_ha->seq.can_queue; asd_ha->sas_ha.lldd_max_execute_num = lldd_max_execute_num; return sas_register_ha(&asd_ha->sas_ha); } static int asd_unregister_sas_ha(struct asd_ha_struct *asd_ha) { int err; err = sas_unregister_ha(&asd_ha->sas_ha); sas_remove_host(asd_ha->sas_ha.core.shost); scsi_remove_host(asd_ha->sas_ha.core.shost); scsi_host_put(asd_ha->sas_ha.core.shost); kfree(asd_ha->sas_ha.sas_phy); kfree(asd_ha->sas_ha.sas_port); return err; } static int asd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { const struct asd_pcidev_struct *asd_dev; unsigned asd_id = (unsigned) id->driver_data; struct asd_ha_struct *asd_ha; struct Scsi_Host *shost; int err; if (asd_id >= ARRAY_SIZE(asd_pcidev_data)) { asd_printk("wrong driver_data in PCI table\n"); return -ENODEV; } if ((err = pci_enable_device(dev))) { asd_printk("couldn't enable device %s\n", pci_name(dev)); return err; } pci_set_master(dev); err = -ENOMEM; shost = scsi_host_alloc(&aic94xx_sht, sizeof(void *)); if (!shost) goto Err; asd_dev = &asd_pcidev_data[asd_id]; asd_ha = kzalloc(sizeof(*asd_ha), GFP_KERNEL); if (!asd_ha) { asd_printk("out of memory\n"); goto Err_put; } asd_ha->pcidev = dev; asd_ha->sas_ha.dev = &asd_ha->pcidev->dev; asd_ha->sas_ha.lldd_ha = asd_ha; asd_ha->bios_status = FLASH_OK; asd_ha->name = asd_dev->name; asd_printk("found %s, device %s\n", asd_ha->name, pci_name(dev)); 
SHOST_TO_SAS_HA(shost) = &asd_ha->sas_ha; asd_ha->sas_ha.core.shost = shost; shost->transportt = aic94xx_transport_template; shost->max_id = ~0; shost->max_lun = ~0; shost->max_cmd_len = 16; err = scsi_add_host(shost, &dev->dev); if (err) goto Err_free; err = asd_dev->setup(asd_ha); if (err) goto Err_remove; err = -ENODEV; if (!pci_set_dma_mask(dev, DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64))) ; else if (!pci_set_dma_mask(dev, DMA_BIT_MASK(32)) && !pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32))) ; else { asd_printk("no suitable DMA mask for %s\n", pci_name(dev)); goto Err_remove; } pci_set_drvdata(dev, asd_ha); err = asd_map_ha(asd_ha); if (err) goto Err_remove; err = asd_create_ha_caches(asd_ha); if (err) goto Err_unmap; err = asd_init_hw(asd_ha); if (err) goto Err_free_cache; asd_printk("device %s: SAS addr %llx, PCBA SN %s, %d phys, %d enabled " "phys, flash %s, BIOS %s%d\n", pci_name(dev), SAS_ADDR(asd_ha->hw_prof.sas_addr), asd_ha->hw_prof.pcba_sn, asd_ha->hw_prof.max_phys, asd_ha->hw_prof.num_phys, asd_ha->hw_prof.flash.present ? "present" : "not present", asd_ha->hw_prof.bios.present ? 
"build " : "not present", asd_ha->hw_prof.bios.bld); shost->can_queue = asd_ha->seq.can_queue; if (use_msi) pci_enable_msi(asd_ha->pcidev); err = request_irq(asd_ha->pcidev->irq, asd_hw_isr, IRQF_SHARED, ASD_DRIVER_NAME, asd_ha); if (err) { asd_printk("couldn't get irq %d for %s\n", asd_ha->pcidev->irq, pci_name(asd_ha->pcidev)); goto Err_irq; } asd_enable_ints(asd_ha); err = asd_init_post_escbs(asd_ha); if (err) { asd_printk("couldn't post escbs for %s\n", pci_name(asd_ha->pcidev)); goto Err_escbs; } ASD_DPRINTK("escbs posted\n"); err = asd_create_dev_attrs(asd_ha); if (err) goto Err_dev_attrs; err = asd_register_sas_ha(asd_ha); if (err) goto Err_reg_sas; scsi_scan_host(shost); return 0; Err_reg_sas: asd_remove_dev_attrs(asd_ha); Err_dev_attrs: Err_escbs: asd_disable_ints(asd_ha); free_irq(dev->irq, asd_ha); Err_irq: if (use_msi) pci_disable_msi(dev); asd_chip_hardrst(asd_ha); Err_free_cache: asd_destroy_ha_caches(asd_ha); Err_unmap: asd_unmap_ha(asd_ha); Err_remove: scsi_remove_host(shost); Err_free: kfree(asd_ha); Err_put: scsi_host_put(shost); Err: pci_disable_device(dev); return err; } static void asd_free_queues(struct asd_ha_struct *asd_ha) { unsigned long flags; LIST_HEAD(pending); struct list_head *n, *pos; spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags); asd_ha->seq.pending = 0; list_splice_init(&asd_ha->seq.pend_q, &pending); spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); if (!list_empty(&pending)) ASD_DPRINTK("Uh-oh! Pending is not empty!\n"); list_for_each_safe(pos, n, &pending) { struct asd_ascb *ascb = list_entry(pos, struct asd_ascb, list); /* * Delete unexpired ascb timers. This may happen if we issue * a CONTROL PHY scb to an adapter and rmmod before the scb * times out. Apparently we don't wait for the CONTROL PHY * to complete, so it doesn't matter if we kill the timer. 
*/ del_timer_sync(&ascb->timer); WARN_ON(ascb->scb->header.opcode != CONTROL_PHY); list_del_init(pos); ASD_DPRINTK("freeing from pending\n"); asd_ascb_free(ascb); } } static void asd_turn_off_leds(struct asd_ha_struct *asd_ha) { u8 phy_mask = asd_ha->hw_prof.enabled_phys; u8 i; for_each_phy(phy_mask, phy_mask, i) { asd_turn_led(asd_ha, i, 0); asd_control_led(asd_ha, i, 0); } } static void asd_pci_remove(struct pci_dev *dev) { struct asd_ha_struct *asd_ha = pci_get_drvdata(dev); if (!asd_ha) return; asd_unregister_sas_ha(asd_ha); asd_disable_ints(asd_ha); asd_remove_dev_attrs(asd_ha); /* XXX more here as needed */ free_irq(dev->irq, asd_ha); if (use_msi) pci_disable_msi(asd_ha->pcidev); asd_turn_off_leds(asd_ha); asd_chip_hardrst(asd_ha); asd_free_queues(asd_ha); asd_destroy_ha_caches(asd_ha); asd_unmap_ha(asd_ha); kfree(asd_ha); pci_disable_device(dev); return; } static void asd_scan_start(struct Scsi_Host *shost) { struct asd_ha_struct *asd_ha; int err; asd_ha = SHOST_TO_SAS_HA(shost)->lldd_ha; err = asd_enable_phys(asd_ha, asd_ha->hw_prof.enabled_phys); if (err) asd_printk("Couldn't enable phys, err:%d\n", err); } static int asd_scan_finished(struct Scsi_Host *shost, unsigned long time) { /* give the phy enabling interrupt event time to come in (1s * is empirically about all it takes) */ if (time < HZ) return 0; /* Wait for discovery to finish */ sas_drain_work(SHOST_TO_SAS_HA(shost)); return 1; } static ssize_t asd_version_show(struct device_driver *driver, char *buf) { return snprintf(buf, PAGE_SIZE, "%s\n", ASD_DRIVER_VERSION); } static DRIVER_ATTR(version, S_IRUGO, asd_version_show, NULL); static int asd_create_driver_attrs(struct device_driver *driver) { return driver_create_file(driver, &driver_attr_version); } static void asd_remove_driver_attrs(struct device_driver *driver) { driver_remove_file(driver, &driver_attr_version); } static struct sas_domain_function_template aic94xx_transport_functions = { .lldd_dev_found = asd_dev_found, .lldd_dev_gone = 
asd_dev_gone, .lldd_execute_task = asd_execute_task, .lldd_abort_task = asd_abort_task, .lldd_abort_task_set = asd_abort_task_set, .lldd_clear_aca = asd_clear_aca, .lldd_clear_task_set = asd_clear_task_set, .lldd_I_T_nexus_reset = asd_I_T_nexus_reset, .lldd_lu_reset = asd_lu_reset, .lldd_query_task = asd_query_task, .lldd_clear_nexus_port = asd_clear_nexus_port, .lldd_clear_nexus_ha = asd_clear_nexus_ha, .lldd_control_phy = asd_control_phy, .lldd_ata_set_dmamode = asd_set_dmamode, }; static const struct pci_device_id aic94xx_pci_table[] = { {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1}, {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1}, {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1}, {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x41E),0, 0, 1}, {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x41F),0, 0, 1}, {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x430),0, 0, 2}, {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x432),0, 0, 2}, {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x43E),0, 0, 2}, {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x43F),0, 0, 2}, {} }; MODULE_DEVICE_TABLE(pci, aic94xx_pci_table); static struct pci_driver aic94xx_pci_driver = { .name = ASD_DRIVER_NAME, .id_table = aic94xx_pci_table, .probe = asd_pci_probe, .remove = asd_pci_remove, }; static int __init aic94xx_init(void) { int err; asd_printk("%s version %s loaded\n", ASD_DRIVER_DESCRIPTION, ASD_DRIVER_VERSION); err = asd_create_global_caches(); if (err) return err; aic94xx_transport_template = sas_domain_attach_transport(&aic94xx_transport_functions); if (!aic94xx_transport_template) goto out_destroy_caches; err = pci_register_driver(&aic94xx_pci_driver); if (err) goto out_release_transport; err = asd_create_driver_attrs(&aic94xx_pci_driver.driver); if (err) goto out_unregister_pcidrv; return err; out_unregister_pcidrv: pci_unregister_driver(&aic94xx_pci_driver); out_release_transport: sas_release_transport(aic94xx_transport_template); out_destroy_caches: asd_destroy_global_caches(); return err; } static void __exit aic94xx_exit(void) { 
asd_remove_driver_attrs(&aic94xx_pci_driver.driver); pci_unregister_driver(&aic94xx_pci_driver); sas_release_transport(aic94xx_transport_template); asd_release_firmware(); asd_destroy_global_caches(); asd_printk("%s version %s unloaded\n", ASD_DRIVER_DESCRIPTION, ASD_DRIVER_VERSION); } module_init(aic94xx_init); module_exit(aic94xx_exit); MODULE_AUTHOR("Luben Tuikov <luben_tuikov@adaptec.com>"); MODULE_DESCRIPTION(ASD_DRIVER_DESCRIPTION); MODULE_LICENSE("GPL v2"); MODULE_VERSION(ASD_DRIVER_VERSION);
gpl-2.0
Swapnil133609/Zeus_bacon
drivers/staging/rtl8712/rtl871x_recv.c
3990
21656
/****************************************************************************** * rtl871x_recv.c * * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved. * Linux device driver for RTL8192SU * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * Modifications for inclusion into the Linux staging tree are * Copyright(c) 2010 Larry Finger. All rights reserved. * * Contact information: * WLAN FAE <wlanfae@realtek.com> * Larry Finger <Larry.Finger@lwfinger.net> * ******************************************************************************/ #define _RTL871X_RECV_C_ #include <linux/slab.h> #include <linux/kmemleak.h> #include "osdep_service.h" #include "drv_types.h" #include "recv_osdep.h" #include "mlme_osdep.h" #include "ip.h" #include "if_ether.h" #include "ethernet.h" #include "usb_ops.h" #include "wifi.h" static const u8 SNAP_ETH_TYPE_IPX[2] = {0x81, 0x37}; /* Datagram Delivery Protocol */ static const u8 SNAP_ETH_TYPE_APPLETALK_AARP[2] = {0x80, 0xf3}; /* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */ static const u8 bridge_tunnel_header[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8}; /* Ethernet-II snap header (RFC1042 for most EtherTypes) */ static const u8 rfc1042_header[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00}; void _r8712_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv) { memset((u8 *)psta_recvpriv, 0, sizeof(struct sta_recv_priv)); 
spin_lock_init(&psta_recvpriv->lock); _init_queue(&psta_recvpriv->defrag_q); } sint _r8712_init_recv_priv(struct recv_priv *precvpriv, struct _adapter *padapter) { sint i; union recv_frame *precvframe; memset((unsigned char *)precvpriv, 0, sizeof(struct recv_priv)); spin_lock_init(&precvpriv->lock); _init_queue(&precvpriv->free_recv_queue); _init_queue(&precvpriv->recv_pending_queue); precvpriv->adapter = padapter; precvpriv->free_recvframe_cnt = NR_RECVFRAME; precvpriv->pallocated_frame_buf = _malloc(NR_RECVFRAME * sizeof(union recv_frame) + RXFRAME_ALIGN_SZ); if (precvpriv->pallocated_frame_buf == NULL) return _FAIL; kmemleak_not_leak(precvpriv->pallocated_frame_buf); memset(precvpriv->pallocated_frame_buf, 0, NR_RECVFRAME * sizeof(union recv_frame) + RXFRAME_ALIGN_SZ); precvpriv->precv_frame_buf = precvpriv->pallocated_frame_buf + RXFRAME_ALIGN_SZ - ((addr_t)(precvpriv->pallocated_frame_buf) & (RXFRAME_ALIGN_SZ-1)); precvframe = (union recv_frame *)precvpriv->precv_frame_buf; for (i = 0; i < NR_RECVFRAME; i++) { _init_listhead(&(precvframe->u.list)); list_insert_tail(&(precvframe->u.list), &(precvpriv->free_recv_queue.queue)); r8712_os_recv_resource_alloc(padapter, precvframe); precvframe->u.hdr.adapter = padapter; precvframe++; } precvpriv->rx_pending_cnt = 1; return r8712_init_recv_priv(precvpriv, padapter); } void _r8712_free_recv_priv(struct recv_priv *precvpriv) { kfree(precvpriv->pallocated_frame_buf); r8712_free_recv_priv(precvpriv); } union recv_frame *r8712_alloc_recvframe(struct __queue *pfree_recv_queue) { unsigned long irqL; union recv_frame *precvframe; struct list_head *plist, *phead; struct _adapter *padapter; struct recv_priv *precvpriv; spin_lock_irqsave(&pfree_recv_queue->lock, irqL); if (_queue_empty(pfree_recv_queue) == true) precvframe = NULL; else { phead = get_list_head(pfree_recv_queue); plist = get_next(phead); precvframe = LIST_CONTAINOR(plist, union recv_frame, u); list_delete(&precvframe->u.hdr.list); padapter = 
precvframe->u.hdr.adapter; if (padapter != NULL) { precvpriv = &padapter->recvpriv; if (pfree_recv_queue == &precvpriv->free_recv_queue) precvpriv->free_recvframe_cnt--; } } spin_unlock_irqrestore(&pfree_recv_queue->lock, irqL); return precvframe; } /* caller : defrag; recvframe_chk_defrag in recv_thread (passive) pframequeue: defrag_queue : will be accessed in recv_thread (passive) using spin_lock to protect */ void r8712_free_recvframe_queue(struct __queue *pframequeue, struct __queue *pfree_recv_queue) { union recv_frame *precvframe; struct list_head *plist, *phead; spin_lock(&pframequeue->lock); phead = get_list_head(pframequeue); plist = get_next(phead); while (end_of_queue_search(phead, plist) == false) { precvframe = LIST_CONTAINOR(plist, union recv_frame, u); plist = get_next(plist); r8712_free_recvframe(precvframe, pfree_recv_queue); } spin_unlock(&pframequeue->lock); } sint r8712_recvframe_chkmic(struct _adapter *adapter, union recv_frame *precvframe) { sint i, res = _SUCCESS; u32 datalen; u8 miccode[8]; u8 bmic_err = false; u8 *pframe, *payload, *pframemic; u8 *mickey, idx, *iv; struct sta_info *stainfo; struct rx_pkt_attrib *prxattrib = &precvframe->u.hdr.attrib; struct security_priv *psecuritypriv = &adapter->securitypriv; stainfo = r8712_get_stainfo(&adapter->stapriv, &prxattrib->ta[0]); if (prxattrib->encrypt == _TKIP_) { /* calculate mic code */ if (stainfo != NULL) { if (IS_MCAST(prxattrib->ra)) { iv = precvframe->u.hdr.rx_data + prxattrib->hdrlen; idx = iv[3]; mickey = &psecuritypriv->XGrprxmickey[(((idx >> 6) & 0x3)) - 1].skey[0]; if (psecuritypriv->binstallGrpkey == false) return _FAIL; } else mickey = &stainfo->tkiprxmickey.skey[0]; /*icv_len included the mic code*/ datalen = precvframe->u.hdr.len - prxattrib->hdrlen - prxattrib->iv_len - prxattrib->icv_len - 8; pframe = precvframe->u.hdr.rx_data; payload = pframe + prxattrib->hdrlen + prxattrib->iv_len; seccalctkipmic(mickey, pframe, payload, datalen, &miccode[0], (unsigned 
char)prxattrib->priority); pframemic = payload + datalen; bmic_err = false; for (i = 0; i < 8; i++) { if (miccode[i] != *(pframemic + i)) bmic_err = true; } if (bmic_err == true) { if (prxattrib->bdecrypted == true) r8712_handle_tkip_mic_err(adapter, (u8)IS_MCAST(prxattrib->ra)); res = _FAIL; } else { /* mic checked ok */ if ((psecuritypriv->bcheck_grpkey == false) && (IS_MCAST(prxattrib->ra) == true)) psecuritypriv->bcheck_grpkey = true; } recvframe_pull_tail(precvframe, 8); } } return res; } /* decrypt and set the ivlen,icvlen of the recv_frame */ union recv_frame *r8712_decryptor(struct _adapter *padapter, union recv_frame *precv_frame) { struct rx_pkt_attrib *prxattrib = &precv_frame->u.hdr.attrib; struct security_priv *psecuritypriv = &padapter->securitypriv; union recv_frame *return_packet = precv_frame; if ((prxattrib->encrypt > 0) && ((prxattrib->bdecrypted == 0) || (psecuritypriv->sw_decrypt == true))) { psecuritypriv->hw_decrypted = false; switch (prxattrib->encrypt) { case _WEP40_: case _WEP104_: r8712_wep_decrypt(padapter, (u8 *)precv_frame); break; case _TKIP_: r8712_tkip_decrypt(padapter, (u8 *)precv_frame); break; case _AES_: r8712_aes_decrypt(padapter, (u8 *)precv_frame); break; default: break; } } else if (prxattrib->bdecrypted == 1) psecuritypriv->hw_decrypted = true; return return_packet; } /*###set the security information in the recv_frame */ union recv_frame *r8712_portctrl(struct _adapter *adapter, union recv_frame *precv_frame) { u8 *psta_addr, *ptr; uint auth_alg; struct recv_frame_hdr *pfhdr; struct sta_info *psta; struct sta_priv *pstapriv; union recv_frame *prtnframe; u16 ether_type = 0; pstapriv = &adapter->stapriv; ptr = get_recvframe_data(precv_frame); pfhdr = &precv_frame->u.hdr; psta_addr = pfhdr->attrib.ta; psta = r8712_get_stainfo(pstapriv, psta_addr); auth_alg = adapter->securitypriv.AuthAlgrthm; if (auth_alg == 2) { if ((psta != NULL) && (psta->ieee8021x_blocked)) { /* blocked * only accept EAPOL frame */ prtnframe = 
precv_frame; /*get ether_type */ ptr = ptr + pfhdr->attrib.hdrlen + pfhdr->attrib.iv_len + LLC_HEADER_SIZE; memcpy(&ether_type, ptr, 2); ether_type = ntohs((unsigned short)ether_type); if (ether_type == 0x888e) prtnframe = precv_frame; else { /*free this frame*/ r8712_free_recvframe(precv_frame, &adapter->recvpriv.free_recv_queue); prtnframe = NULL; } } else { /* allowed * check decryption status, and decrypt the * frame if needed */ prtnframe = precv_frame; /* check is the EAPOL frame or not (Rekey) */ if (ether_type == 0x888e) { /* check Rekey */ prtnframe = precv_frame; } } } else prtnframe = precv_frame; return prtnframe; } static sint recv_decache(union recv_frame *precv_frame, u8 bretry, struct stainfo_rxcache *prxcache) { sint tid = precv_frame->u.hdr.attrib.priority; u16 seq_ctrl = ((precv_frame->u.hdr.attrib.seq_num&0xffff) << 4) | (precv_frame->u.hdr.attrib.frag_num & 0xf); if (tid > 15) return _FAIL; if (seq_ctrl == prxcache->tid_rxseq[tid]) return _FAIL; prxcache->tid_rxseq[tid] = seq_ctrl; return _SUCCESS; } static sint sta2sta_data_frame(struct _adapter *adapter, union recv_frame *precv_frame, struct sta_info **psta) { u8 *ptr = precv_frame->u.hdr.rx_data; sint ret = _SUCCESS; struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib; struct sta_priv *pstapriv = &adapter->stapriv; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; u8 *mybssid = get_bssid(pmlmepriv); u8 *myhwaddr = myid(&adapter->eeprompriv); u8 *sta_addr = NULL; sint bmcast = IS_MCAST(pattrib->dst); if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) || (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)) { /* filter packets that SA is myself or multicast or broadcast */ if (!memcmp(myhwaddr, pattrib->src, ETH_ALEN)) return _FAIL; if ((memcmp(myhwaddr, pattrib->dst, ETH_ALEN)) && (!bmcast)) return _FAIL; if (!memcmp(pattrib->bssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) || !memcmp(mybssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) || (memcmp(pattrib->bssid, mybssid, ETH_ALEN))) return 
_FAIL; sta_addr = pattrib->src; } else if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) { /* For Station mode, sa and bssid should always be BSSID, * and DA is my mac-address */ if (memcmp(pattrib->bssid, pattrib->src, ETH_ALEN)) return _FAIL; sta_addr = pattrib->bssid; } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) { if (bmcast) { /* For AP mode, if DA == MCAST, then BSSID should * be also MCAST */ if (!IS_MCAST(pattrib->bssid)) return _FAIL; } else { /* not mc-frame */ /* For AP mode, if DA is non-MCAST, then it must be * BSSID, and bssid == BSSID */ if (memcmp(pattrib->bssid, pattrib->dst, ETH_ALEN)) return _FAIL; sta_addr = pattrib->src; } } else if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) { memcpy(pattrib->dst, GetAddr1Ptr(ptr), ETH_ALEN); memcpy(pattrib->src, GetAddr2Ptr(ptr), ETH_ALEN); memcpy(pattrib->bssid, GetAddr3Ptr(ptr), ETH_ALEN); memcpy(pattrib->ra, pattrib->dst, ETH_ALEN); memcpy(pattrib->ta, pattrib->src, ETH_ALEN); sta_addr = mybssid; } else ret = _FAIL; if (bmcast) *psta = r8712_get_bcmc_stainfo(adapter); else *psta = r8712_get_stainfo(pstapriv, sta_addr); /* get ap_info */ if (*psta == NULL) { if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) adapter->mppriv.rx_pktloss++; return _FAIL; } return ret; } static sint ap2sta_data_frame(struct _adapter *adapter, union recv_frame *precv_frame, struct sta_info **psta) { u8 *ptr = precv_frame->u.hdr.rx_data; struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib; struct sta_priv *pstapriv = &adapter->stapriv; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; u8 *mybssid = get_bssid(pmlmepriv); u8 *myhwaddr = myid(&adapter->eeprompriv); sint bmcast = IS_MCAST(pattrib->dst); if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) && (check_fwstate(pmlmepriv, _FW_LINKED) == true)) { /* if NULL-frame, drop packet */ if ((GetFrameSubType(ptr)) == WIFI_DATA_NULL) return _FAIL; /* drop QoS-SubType Data, including QoS NULL, * excluding QoS-Data */ if 
((GetFrameSubType(ptr) & WIFI_QOS_DATA_TYPE) == WIFI_QOS_DATA_TYPE) { if (GetFrameSubType(ptr) & (BIT(4) | BIT(5) | BIT(6))) return _FAIL; } /* filter packets that SA is myself or multicast or broadcast */ if (!memcmp(myhwaddr, pattrib->src, ETH_ALEN)) return _FAIL; /* da should be for me */ if ((memcmp(myhwaddr, pattrib->dst, ETH_ALEN)) && (!bmcast)) return _FAIL; /* check BSSID */ if (!memcmp(pattrib->bssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) || !memcmp(mybssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) || (memcmp(pattrib->bssid, mybssid, ETH_ALEN))) return _FAIL; if (bmcast) *psta = r8712_get_bcmc_stainfo(adapter); else *psta = r8712_get_stainfo(pstapriv, pattrib->bssid); if (*psta == NULL) return _FAIL; } else if ((check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) && (check_fwstate(pmlmepriv, _FW_LINKED) == true)) { memcpy(pattrib->dst, GetAddr1Ptr(ptr), ETH_ALEN); memcpy(pattrib->src, GetAddr2Ptr(ptr), ETH_ALEN); memcpy(pattrib->bssid, GetAddr3Ptr(ptr), ETH_ALEN); memcpy(pattrib->ra, pattrib->dst, ETH_ALEN); memcpy(pattrib->ta, pattrib->src, ETH_ALEN); memcpy(pattrib->bssid, mybssid, ETH_ALEN); *psta = r8712_get_stainfo(pstapriv, pattrib->bssid); if (*psta == NULL) return _FAIL; } else return _FAIL; return _SUCCESS; } static sint sta2ap_data_frame(struct _adapter *adapter, union recv_frame *precv_frame, struct sta_info **psta) { struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib; struct sta_priv *pstapriv = &adapter->stapriv; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; unsigned char *mybssid = get_bssid(pmlmepriv); if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) { /* For AP mode, if DA is non-MCAST, then it must be BSSID, * and bssid == BSSID * For AP mode, RA=BSSID, TX=STA(SRC_ADDR), A3=DST_ADDR */ if (memcmp(pattrib->bssid, mybssid, ETH_ALEN)) return _FAIL; *psta = r8712_get_stainfo(pstapriv, pattrib->src); if (*psta == NULL) return _FAIL; } return _SUCCESS; } static sint validate_recv_ctrl_frame(struct _adapter *adapter, union recv_frame *precv_frame) 
{ return _FAIL; } static sint validate_recv_mgnt_frame(struct _adapter *adapter, union recv_frame *precv_frame) { return _FAIL; } static sint validate_recv_data_frame(struct _adapter *adapter, union recv_frame *precv_frame) { int res; u8 bretry; u8 *psa, *pda, *pbssid; struct sta_info *psta = NULL; u8 *ptr = precv_frame->u.hdr.rx_data; struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib; struct security_priv *psecuritypriv = &adapter->securitypriv; bretry = GetRetry(ptr); pda = get_da(ptr); psa = get_sa(ptr); pbssid = get_hdr_bssid(ptr); if (pbssid == NULL) return _FAIL; memcpy(pattrib->dst, pda, ETH_ALEN); memcpy(pattrib->src, psa, ETH_ALEN); memcpy(pattrib->bssid, pbssid, ETH_ALEN); switch (pattrib->to_fr_ds) { case 0: memcpy(pattrib->ra, pda, ETH_ALEN); memcpy(pattrib->ta, psa, ETH_ALEN); res = sta2sta_data_frame(adapter, precv_frame, &psta); break; case 1: memcpy(pattrib->ra, pda, ETH_ALEN); memcpy(pattrib->ta, pbssid, ETH_ALEN); res = ap2sta_data_frame(adapter, precv_frame, &psta); break; case 2: memcpy(pattrib->ra, pbssid, ETH_ALEN); memcpy(pattrib->ta, psa, ETH_ALEN); res = sta2ap_data_frame(adapter, precv_frame, &psta); break; case 3: memcpy(pattrib->ra, GetAddr1Ptr(ptr), ETH_ALEN); memcpy(pattrib->ta, GetAddr2Ptr(ptr), ETH_ALEN); return _FAIL; default: return _FAIL; } if (res == _FAIL) return _FAIL; if (psta == NULL) return _FAIL; else precv_frame->u.hdr.psta = psta; pattrib->amsdu = 0; /* parsing QC field */ if (pattrib->qos == 1) { pattrib->priority = GetPriority((ptr + 24)); pattrib->ack_policy = GetAckpolicy((ptr + 24)); pattrib->amsdu = GetAMsdu((ptr + 24)); pattrib->hdrlen = pattrib->to_fr_ds == 3 ? 32 : 26; } else { pattrib->priority = 0; pattrib->hdrlen = (pattrib->to_fr_ds == 3) ? 
30 : 24; } if (pattrib->order)/*HT-CTRL 11n*/ pattrib->hdrlen += 4; precv_frame->u.hdr.preorder_ctrl = &psta->recvreorder_ctrl[pattrib->priority]; /* decache, drop duplicate recv packets */ if (recv_decache(precv_frame, bretry, &psta->sta_recvpriv.rxcache) == _FAIL) return _FAIL; if (pattrib->privacy) { GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt, IS_MCAST(pattrib->ra)); SET_ICE_IV_LEN(pattrib->iv_len, pattrib->icv_len, pattrib->encrypt); } else { pattrib->encrypt = 0; pattrib->iv_len = pattrib->icv_len = 0; } return _SUCCESS; } sint r8712_validate_recv_frame(struct _adapter *adapter, union recv_frame *precv_frame) { /*shall check frame subtype, to / from ds, da, bssid */ /*then call check if rx seq/frag. duplicated.*/ u8 type; u8 subtype; sint retval = _SUCCESS; struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib; u8 *ptr = precv_frame->u.hdr.rx_data; u8 ver = (unsigned char)(*ptr) & 0x3; /*add version chk*/ if (ver != 0) return _FAIL; type = GetFrameType(ptr); subtype = GetFrameSubType(ptr); /*bit(7)~bit(2)*/ pattrib->to_fr_ds = get_tofr_ds(ptr); pattrib->frag_num = GetFragNum(ptr); pattrib->seq_num = GetSequence(ptr); pattrib->pw_save = GetPwrMgt(ptr); pattrib->mfrag = GetMFrag(ptr); pattrib->mdata = GetMData(ptr); pattrib->privacy = GetPrivacy(ptr); pattrib->order = GetOrder(ptr); switch (type) { case WIFI_MGT_TYPE: /*mgnt*/ retval = validate_recv_mgnt_frame(adapter, precv_frame); break; case WIFI_CTRL_TYPE:/*ctrl*/ retval = validate_recv_ctrl_frame(adapter, precv_frame); break; case WIFI_DATA_TYPE: /*data*/ pattrib->qos = (subtype & BIT(7)) ? 
1 : 0; retval = validate_recv_data_frame(adapter, precv_frame); break; default: return _FAIL; } return retval; } sint r8712_wlanhdr_to_ethhdr(union recv_frame *precvframe) { /*remove the wlanhdr and add the eth_hdr*/ sint rmv_len; u16 eth_type, len; u8 bsnaphdr; u8 *psnap_type; struct ieee80211_snap_hdr *psnap; sint ret = _SUCCESS; struct _adapter *adapter = precvframe->u.hdr.adapter; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; u8 *ptr = get_recvframe_data(precvframe); /*point to frame_ctrl field*/ struct rx_pkt_attrib *pattrib = &precvframe->u.hdr.attrib; if (pattrib->encrypt) recvframe_pull_tail(precvframe, pattrib->icv_len); psnap = (struct ieee80211_snap_hdr *)(ptr + pattrib->hdrlen + pattrib->iv_len); psnap_type = ptr + pattrib->hdrlen + pattrib->iv_len + SNAP_SIZE; /* convert hdr + possible LLC headers into Ethernet header */ if ((!memcmp(psnap, (void *)rfc1042_header, SNAP_SIZE) && (memcmp(psnap_type, (void *)SNAP_ETH_TYPE_IPX, 2)) && (memcmp(psnap_type, (void *)SNAP_ETH_TYPE_APPLETALK_AARP, 2))) || !memcmp(psnap, (void *)bridge_tunnel_header, SNAP_SIZE)) { /* remove RFC1042 or Bridge-Tunnel encapsulation and * replace EtherType */ bsnaphdr = true; } else { /* Leave Ethernet header part of hdr and full payload */ bsnaphdr = false; } rmv_len = pattrib->hdrlen + pattrib->iv_len + (bsnaphdr ? SNAP_SIZE : 0); len = precvframe->u.hdr.len - rmv_len; if ((check_fwstate(pmlmepriv, WIFI_MP_STATE) == true)) { ptr += rmv_len; *ptr = 0x87; *(ptr+1) = 0x12; eth_type = 0x8712; /* append rx status for mp test packets */ ptr = recvframe_pull(precvframe, (rmv_len - sizeof(struct ethhdr) + 2) - 24); memcpy(ptr, get_rxmem(precvframe), 24); ptr += 24; } else ptr = recvframe_pull(precvframe, (rmv_len - sizeof(struct ethhdr) + (bsnaphdr ? 
2 : 0))); memcpy(ptr, pattrib->dst, ETH_ALEN); memcpy(ptr+ETH_ALEN, pattrib->src, ETH_ALEN); if (!bsnaphdr) { len = htons(len); memcpy(ptr + 12, &len, 2); } return ret; } s32 r8712_recv_entry(union recv_frame *precvframe) { struct _adapter *padapter; struct recv_priv *precvpriv; struct mlme_priv *pmlmepriv; struct recv_stat *prxstat; struct dvobj_priv *pdev; u8 *phead, *pdata, *ptail, *pend; struct __queue *pfree_recv_queue, *ppending_recv_queue; s32 ret = _SUCCESS; struct intf_hdl *pintfhdl; padapter = precvframe->u.hdr.adapter; pintfhdl = &padapter->pio_queue->intf; pmlmepriv = &padapter->mlmepriv; precvpriv = &(padapter->recvpriv); pdev = &padapter->dvobjpriv; pfree_recv_queue = &(precvpriv->free_recv_queue); ppending_recv_queue = &(precvpriv->recv_pending_queue); phead = precvframe->u.hdr.rx_head; pdata = precvframe->u.hdr.rx_data; ptail = precvframe->u.hdr.rx_tail; pend = precvframe->u.hdr.rx_end; prxstat = (struct recv_stat *)phead; padapter->ledpriv.LedControlHandler(padapter, LED_CTL_RX); ret = recv_func(padapter, precvframe); if (ret == _FAIL) goto _recv_entry_drop; precvpriv->rx_pkts++; precvpriv->rx_bytes += (uint)(precvframe->u.hdr.rx_tail - precvframe->u.hdr.rx_data); return ret; _recv_entry_drop: precvpriv->rx_drop++; padapter->mppriv.rx_pktloss = precvpriv->rx_drop; return ret; }
gpl-2.0
kim6515516/khypervisor_native_linux_3.8_for_rtsm
sound/usb/misc/ua101.c
4246
38105
/* * Edirol UA-101/UA-1000 driver * Copyright (c) Clemens Ladisch <clemens@ladisch.de> * * This driver is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2. * * This driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this driver. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/usb/audio.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include "../usbaudio.h" #include "../midi.h" MODULE_DESCRIPTION("Edirol UA-101/1000 driver"); MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); MODULE_LICENSE("GPL v2"); MODULE_SUPPORTED_DEVICE("{{Edirol,UA-101},{Edirol,UA-1000}}"); /* * Should not be lower than the minimum scheduling delay of the host * controller. Some Intel controllers need more than one frame; as long as * that driver doesn't tell us about this, use 1.5 frames just to be sure. */ #define MIN_QUEUE_LENGTH 12 /* Somewhat random. */ #define MAX_QUEUE_LENGTH 30 /* * This magic value optimizes memory usage efficiency for the UA-101's packet * sizes at all sample rates, taking into account the stupid cache pool sizes * that usb_alloc_coherent() uses. 
*/ #define DEFAULT_QUEUE_LENGTH 21 #define MAX_PACKET_SIZE 672 /* hardware specific */ #define MAX_MEMORY_BUFFERS DIV_ROUND_UP(MAX_QUEUE_LENGTH, \ PAGE_SIZE / MAX_PACKET_SIZE) static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; static unsigned int queue_length = 21; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "card index"); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string"); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "enable card"); module_param(queue_length, uint, 0644); MODULE_PARM_DESC(queue_length, "USB queue length in microframes, " __stringify(MIN_QUEUE_LENGTH)"-"__stringify(MAX_QUEUE_LENGTH)); enum { INTF_PLAYBACK, INTF_CAPTURE, INTF_MIDI, INTF_COUNT }; /* bits in struct ua101::states */ enum { USB_CAPTURE_RUNNING, USB_PLAYBACK_RUNNING, ALSA_CAPTURE_OPEN, ALSA_PLAYBACK_OPEN, ALSA_CAPTURE_RUNNING, ALSA_PLAYBACK_RUNNING, CAPTURE_URB_COMPLETED, PLAYBACK_URB_COMPLETED, DISCONNECTED, }; struct ua101 { struct usb_device *dev; struct snd_card *card; struct usb_interface *intf[INTF_COUNT]; int card_index; struct snd_pcm *pcm; struct list_head midi_list; u64 format_bit; unsigned int rate; unsigned int packets_per_second; spinlock_t lock; struct mutex mutex; unsigned long states; /* FIFO to synchronize playback rate to capture rate */ unsigned int rate_feedback_start; unsigned int rate_feedback_count; u8 rate_feedback[MAX_QUEUE_LENGTH]; struct list_head ready_playback_urbs; struct tasklet_struct playback_tasklet; wait_queue_head_t alsa_capture_wait; wait_queue_head_t rate_feedback_wait; wait_queue_head_t alsa_playback_wait; struct ua101_stream { struct snd_pcm_substream *substream; unsigned int usb_pipe; unsigned int channels; unsigned int frame_bytes; unsigned int max_packet_bytes; unsigned int period_pos; unsigned int buffer_pos; unsigned int queue_length; struct ua101_urb { struct 
/*
 * Tail of the per-device structure: each stream owns a queue of
 * isochronous URBs and the coherent DMA buffers they point into.
 * (The head of this structure begins before this chunk.)
 */
urb urb;
		struct usb_iso_packet_descriptor iso_frame_desc[1];
		struct list_head ready_list;
	} *urbs[MAX_QUEUE_LENGTH];
	struct {
		unsigned int size;
		void *addr;
		dma_addr_t dma;
	} buffers[MAX_MEMORY_BUFFERS];
} capture, playback; };

/* serializes probing/removal and the bitmap of used card slots */
static DEFINE_MUTEX(devices_mutex);
static unsigned int devices_used;
static struct usb_driver ua101_driver;

static void abort_alsa_playback(struct ua101 *ua);
static void abort_alsa_capture(struct ua101 *ua);

/* map a USB submission/completion error code to a human-readable string */
static const char *usb_error_string(int err)
{
	switch (err) {
	case -ENODEV:
		return "no device";
	case -ENOENT:
		return "endpoint not enabled";
	case -EPIPE:
		return "endpoint stalled";
	case -ENOSPC:
		return "not enough bandwidth";
	case -ESHUTDOWN:
		return "device disabled";
	case -EHOSTUNREACH:
		return "device suspended";
	case -EINVAL:
	case -EAGAIN:
	case -EFBIG:
	case -EMSGSIZE:
		return "internal error";
	default:
		return "unknown error";
	}
}

/* mark USB capture as stopped and wake all waiters on capture progress */
static void abort_usb_capture(struct ua101 *ua)
{
	if (test_and_clear_bit(USB_CAPTURE_RUNNING, &ua->states)) {
		wake_up(&ua->alsa_capture_wait);
		wake_up(&ua->rate_feedback_wait);
	}
}

/* mark USB playback as stopped and wake all waiters on playback progress */
static void abort_usb_playback(struct ua101 *ua)
{
	if (test_and_clear_bit(USB_PLAYBACK_RUNNING, &ua->states))
		wake_up(&ua->alsa_playback_wait);
}

/*
 * Completion handler for playback URBs: on a fatal status the stream is
 * torn down; otherwise the URB is appended to the ready FIFO, and the
 * tasklet is kicked if a capture packet size is already queued.
 */
static void playback_urb_complete(struct urb *usb_urb)
{
	struct ua101_urb *urb = (struct ua101_urb *)usb_urb;
	struct ua101 *ua = urb->urb.context;
	unsigned long flags;

	if (unlikely(urb->urb.status == -ENOENT ||	/* unlinked */
		     urb->urb.status == -ENODEV ||	/* device removed */
		     urb->urb.status == -ECONNRESET ||	/* unlinked */
		     urb->urb.status == -ESHUTDOWN)) {	/* device disabled */
		abort_usb_playback(ua);
		abort_alsa_playback(ua);
		return;
	}

	if (test_bit(USB_PLAYBACK_RUNNING, &ua->states)) {
		/* append URB to FIFO */
		spin_lock_irqsave(&ua->lock, flags);
		list_add_tail(&urb->ready_list, &ua->ready_playback_urbs);
		if (ua->rate_feedback_count > 0)
			tasklet_schedule(&ua->playback_tasklet);
		/* this packet's frames are no longer queued in hardware */
		ua->playback.substream->runtime->delay -=
				urb->urb.iso_frame_desc[0].length /
				ua->playback.frame_bytes;
		spin_unlock_irqrestore(&ua->lock, flags);
	}
}

/*
 * Completion handler for the very first playback URB: switch to the
 * normal handler, then record that the stream is known to be running.
 */
static void first_playback_urb_complete(struct urb *urb)
{
	struct ua101 *ua = urb->context;

	urb->complete = playback_urb_complete;
	playback_urb_complete(urb);

	set_bit(PLAYBACK_URB_COMPLETED, &ua->states);
	wake_up(&ua->alsa_playback_wait);
}

/* copy data from the ALSA ring buffer into the URB buffer */
static bool copy_playback_data(struct ua101_stream *stream, struct urb *urb,
			       unsigned int frames)
{
	struct snd_pcm_runtime *runtime;
	unsigned int frame_bytes, frames1;
	const u8 *source;

	runtime = stream->substream->runtime;
	frame_bytes = stream->frame_bytes;
	source = runtime->dma_area + stream->buffer_pos * frame_bytes;
	if (stream->buffer_pos + frames <= runtime->buffer_size) {
		memcpy(urb->transfer_buffer, source, frames * frame_bytes);
	} else {
		/* wrap around at end of ring buffer */
		frames1 = runtime->buffer_size - stream->buffer_pos;
		memcpy(urb->transfer_buffer, source, frames1 * frame_bytes);
		memcpy(urb->transfer_buffer + frames1 * frame_bytes,
		       runtime->dma_area, (frames - frames1) * frame_bytes);
	}

	stream->buffer_pos += frames;
	if (stream->buffer_pos >= runtime->buffer_size)
		stream->buffer_pos -= runtime->buffer_size;
	stream->period_pos += frames;
	if (stream->period_pos >= runtime->period_size) {
		stream->period_pos -= runtime->period_size;
		return true;	/* a full period has elapsed */
	}
	return false;
}

/* advance an index into the playback URB queue, wrapping at queue_length */
static inline void add_with_wraparound(struct ua101 *ua,
				       unsigned int *value, unsigned int add)
{
	*value += add;
	if (*value >= ua->playback.queue_length)
		*value -= ua->playback.queue_length;
}

/*
 * Bottom half that pairs ready playback URBs with captured packet sizes
 * and (re)submits them.
 */
static void playback_tasklet(unsigned long data)
{
	struct ua101 *ua = (void *)data;
	unsigned long flags;
	unsigned int frames;
	struct ua101_urb *urb;
	bool do_period_elapsed = false;
	int err;

	if (unlikely(!test_bit(USB_PLAYBACK_RUNNING, &ua->states)))
		return;

	/*
	 * Synchronizing the playback rate to the capture rate is done by using
	 * the same sequence of packet sizes for both streams.
	 * Submitting a playback URB therefore requires both a ready URB and
	 * the size of the corresponding capture packet, i.e., both playback
	 * and capture URBs must have been completed.  Since the USB core does
	 * not guarantee that playback and capture complete callbacks are
	 * called alternately, we use two FIFOs for packet sizes and read URBs;
	 * submitting playback URBs is possible as long as both FIFOs are
	 * nonempty.
	 */
	spin_lock_irqsave(&ua->lock, flags);
	while (ua->rate_feedback_count > 0 &&
	       !list_empty(&ua->ready_playback_urbs)) {
		/* take packet size out of FIFO */
		frames = ua->rate_feedback[ua->rate_feedback_start];
		add_with_wraparound(ua, &ua->rate_feedback_start, 1);
		ua->rate_feedback_count--;

		/* take URB out of FIFO */
		urb = list_first_entry(&ua->ready_playback_urbs,
				       struct ua101_urb, ready_list);
		list_del(&urb->ready_list);

		/* fill packet with data or silence */
		urb->urb.iso_frame_desc[0].length =
			frames * ua->playback.frame_bytes;
		if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states))
			do_period_elapsed |= copy_playback_data(&ua->playback,
								&urb->urb,
								frames);
		else
			memset(urb->urb.transfer_buffer, 0,
			       urb->urb.iso_frame_desc[0].length);

		/* and off you go ... */
		err = usb_submit_urb(&urb->urb, GFP_ATOMIC);
		if (unlikely(err < 0)) {
			spin_unlock_irqrestore(&ua->lock, flags);
			abort_usb_playback(ua);
			abort_alsa_playback(ua);
			dev_err(&ua->dev->dev, "USB request error %d: %s\n",
				err, usb_error_string(err));
			return;
		}
		ua->playback.substream->runtime->delay += frames;
	}
	spin_unlock_irqrestore(&ua->lock, flags);
	if (do_period_elapsed)
		snd_pcm_period_elapsed(ua->playback.substream);
}

/* copy data from the URB buffer into the ALSA ring buffer */
static bool copy_capture_data(struct ua101_stream *stream, struct urb *urb,
			      unsigned int frames)
{
	struct snd_pcm_runtime *runtime;
	unsigned int frame_bytes, frames1;
	u8 *dest;

	runtime = stream->substream->runtime;
	frame_bytes = stream->frame_bytes;
	dest = runtime->dma_area + stream->buffer_pos * frame_bytes;
	if (stream->buffer_pos + frames <= runtime->buffer_size) {
		memcpy(dest, urb->transfer_buffer, frames * frame_bytes);
	} else {
		/* wrap around at end of ring buffer */
		frames1 = runtime->buffer_size - stream->buffer_pos;
		memcpy(dest, urb->transfer_buffer, frames1 * frame_bytes);
		memcpy(runtime->dma_area,
		       urb->transfer_buffer + frames1 * frame_bytes,
		       (frames - frames1) * frame_bytes);
	}

	stream->buffer_pos += frames;
	if (stream->buffer_pos >= runtime->buffer_size)
		stream->buffer_pos -= runtime->buffer_size;
	stream->period_pos += frames;
	if (stream->period_pos >= runtime->period_size) {
		stream->period_pos -= runtime->period_size;
		return true;	/* a full period has elapsed */
	}
	return false;
}

/*
 * Completion handler for capture URBs: copies captured data into the
 * ALSA buffer, resubmits the URB, and records the packet size in the
 * rate-feedback FIFO for the playback stream.
 */
static void capture_urb_complete(struct urb *urb)
{
	struct ua101 *ua = urb->context;
	struct ua101_stream *stream = &ua->capture;
	unsigned long flags;
	unsigned int frames, write_ptr;
	bool do_period_elapsed;
	int err;

	if (unlikely(urb->status == -ENOENT ||		/* unlinked */
		     urb->status == -ENODEV ||		/* device removed */
		     urb->status == -ECONNRESET ||	/* unlinked */
		     urb->status == -ESHUTDOWN))	/* device disabled */
		goto stream_stopped;

	if (urb->status >= 0 && urb->iso_frame_desc[0].status >= 0)
		frames = urb->iso_frame_desc[0].actual_length /
stream->frame_bytes;
	else
		frames = 0;	/* errored packet: nothing usable captured */

	spin_lock_irqsave(&ua->lock, flags);

	if (frames > 0 && test_bit(ALSA_CAPTURE_RUNNING, &ua->states))
		do_period_elapsed = copy_capture_data(stream, urb, frames);
	else
		do_period_elapsed = false;

	if (test_bit(USB_CAPTURE_RUNNING, &ua->states)) {
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (unlikely(err < 0)) {
			spin_unlock_irqrestore(&ua->lock, flags);
			dev_err(&ua->dev->dev, "USB request error %d: %s\n",
				err, usb_error_string(err));
			goto stream_stopped;
		}

		/* append packet size to FIFO */
		write_ptr = ua->rate_feedback_start;
		add_with_wraparound(ua, &write_ptr, ua->rate_feedback_count);
		ua->rate_feedback[write_ptr] = frames;
		if (ua->rate_feedback_count < ua->playback.queue_length) {
			ua->rate_feedback_count++;
			if (ua->rate_feedback_count ==
						ua->playback.queue_length)
				wake_up(&ua->rate_feedback_wait);
		} else {
			/*
			 * Ring buffer overflow; this happens when the playback
			 * stream is not running.  Throw away the oldest entry,
			 * so that the playback stream, when it starts, sees
			 * the most recent packet sizes.
			 */
			add_with_wraparound(ua, &ua->rate_feedback_start, 1);
		}
		if (test_bit(USB_PLAYBACK_RUNNING, &ua->states) &&
		    !list_empty(&ua->ready_playback_urbs))
			tasklet_schedule(&ua->playback_tasklet);
	}

	spin_unlock_irqrestore(&ua->lock, flags);

	if (do_period_elapsed)
		snd_pcm_period_elapsed(stream->substream);

	return;

stream_stopped:
	abort_usb_playback(ua);
	abort_usb_capture(ua);
	abort_alsa_playback(ua);
	abort_alsa_capture(ua);
}

/*
 * Completion handler for the first capture URB; see
 * first_playback_urb_complete() for the pattern.
 */
static void first_capture_urb_complete(struct urb *urb)
{
	struct ua101 *ua = urb->context;

	urb->complete = capture_urb_complete;
	capture_urb_complete(urb);

	set_bit(CAPTURE_URB_COMPLETED, &ua->states);
	wake_up(&ua->alsa_capture_wait);
}

/* submit all URBs of a stream; returns 0 or a negative USB error code */
static int submit_stream_urbs(struct ua101 *ua, struct ua101_stream *stream)
{
	unsigned int i;

	for (i = 0; i < stream->queue_length; ++i) {
		int err = usb_submit_urb(&stream->urbs[i]->urb, GFP_KERNEL);

		if (err < 0) {
			dev_err(&ua->dev->dev, "USB request error %d: %s\n",
				err, usb_error_string(err));
			return err;
		}
	}
	return 0;
}

/* cancel all in-flight URBs of a stream (waits for their completion) */
static void kill_stream_urbs(struct ua101_stream *stream)
{
	unsigned int i;

	for (i = 0; i < stream->queue_length; ++i)
		if (stream->urbs[i])
			usb_kill_urb(&stream->urbs[i]->urb);
}

/* switch an interface to its data-bearing altsetting 1 */
static int enable_iso_interface(struct ua101 *ua, unsigned int intf_index)
{
	struct usb_host_interface *alts;

	alts = ua->intf[intf_index]->cur_altsetting;
	if (alts->desc.bAlternateSetting != 1) {
		int err = usb_set_interface(ua->dev,
					    alts->desc.bInterfaceNumber, 1);
		if (err < 0) {
			dev_err(&ua->dev->dev,
				"cannot initialize interface; error %d: %s\n",
				err, usb_error_string(err));
			return err;
		}
	}
	return 0;
}

/* switch an interface back to its zero-bandwidth altsetting 0 */
static void disable_iso_interface(struct ua101 *ua, unsigned int intf_index)
{
	struct usb_host_interface *alts;

	if (!ua->intf[intf_index])
		return;

	alts = ua->intf[intf_index]->cur_altsetting;
	if (alts->desc.bAlternateSetting != 0) {
		int err = usb_set_interface(ua->dev,
					    alts->desc.bInterfaceNumber, 0);
		/* expected to fail after disconnect; warn only otherwise */
		if (err < 0 && !test_bit(DISCONNECTED, &ua->states))
			dev_warn(&ua->dev->dev,
				 "interface reset failed; error %d: %s\n",
				 err, usb_error_string(err));
	}
}

/* stop USB capture: kill the URBs and reset the interface */
static void stop_usb_capture(struct ua101 *ua)
{
	clear_bit(USB_CAPTURE_RUNNING, &ua->states);

	kill_stream_urbs(&ua->capture);

	disable_iso_interface(ua, INTF_CAPTURE);
}

/* (re)start USB capture; a no-op if it is already running */
static int start_usb_capture(struct ua101 *ua)
{
	int err;

	if (test_bit(DISCONNECTED, &ua->states))
		return -ENODEV;

	if (test_bit(USB_CAPTURE_RUNNING, &ua->states))
		return 0;

	kill_stream_urbs(&ua->capture);

	err = enable_iso_interface(ua, INTF_CAPTURE);
	if (err < 0)
		return err;

	clear_bit(CAPTURE_URB_COMPLETED, &ua->states);
	ua->capture.urbs[0]->urb.complete = first_capture_urb_complete;
	ua->rate_feedback_start = 0;
	ua->rate_feedback_count = 0;

	set_bit(USB_CAPTURE_RUNNING, &ua->states);
	err = submit_stream_urbs(ua, &ua->capture);
	if (err < 0)
		stop_usb_capture(ua);
	return err;
}

/* stop USB playback: kill URBs and tasklet, reset the interface */
static void stop_usb_playback(struct ua101 *ua)
{
	clear_bit(USB_PLAYBACK_RUNNING, &ua->states);

	kill_stream_urbs(&ua->playback);

	tasklet_kill(&ua->playback_tasklet);

	disable_iso_interface(ua, INTF_PLAYBACK);
}

/*
 * (re)start USB playback; requires a running capture stream, because the
 * initial packet sizes are taken from the capture rate-feedback FIFO.
 */
static int start_usb_playback(struct ua101 *ua)
{
	unsigned int i, frames;
	struct urb *urb;
	int err = 0;

	if (test_bit(DISCONNECTED, &ua->states))
		return -ENODEV;

	if (test_bit(USB_PLAYBACK_RUNNING, &ua->states))
		return 0;

	kill_stream_urbs(&ua->playback);
	tasklet_kill(&ua->playback_tasklet);

	err = enable_iso_interface(ua, INTF_PLAYBACK);
	if (err < 0)
		return err;

	clear_bit(PLAYBACK_URB_COMPLETED, &ua->states);
	ua->playback.urbs[0]->urb.complete = first_playback_urb_complete;
	spin_lock_irq(&ua->lock);
	INIT_LIST_HEAD(&ua->ready_playback_urbs);
	spin_unlock_irq(&ua->lock);

	/*
	 * We submit the initial URBs all at once, so we have to wait for the
	 * packet size FIFO to be full.
	 */
	wait_event(ua->rate_feedback_wait,
		   ua->rate_feedback_count >= ua->playback.queue_length ||
		   !test_bit(USB_CAPTURE_RUNNING, &ua->states) ||
		   test_bit(DISCONNECTED, &ua->states));
	if (test_bit(DISCONNECTED, &ua->states)) {
		stop_usb_playback(ua);
		return -ENODEV;
	}
	if (!test_bit(USB_CAPTURE_RUNNING, &ua->states)) {
		stop_usb_playback(ua);
		return -EIO;
	}

	for (i = 0; i < ua->playback.queue_length; ++i) {
		/* all initial URBs contain silence */
		spin_lock_irq(&ua->lock);
		frames = ua->rate_feedback[ua->rate_feedback_start];
		add_with_wraparound(ua, &ua->rate_feedback_start, 1);
		ua->rate_feedback_count--;
		spin_unlock_irq(&ua->lock);
		urb = &ua->playback.urbs[i]->urb;
		urb->iso_frame_desc[0].length =
			frames * ua->playback.frame_bytes;
		memset(urb->transfer_buffer, 0,
		       urb->iso_frame_desc[0].length);
	}

	set_bit(USB_PLAYBACK_RUNNING, &ua->states);
	err = submit_stream_urbs(ua, &ua->playback);
	if (err < 0)
		stop_usb_playback(ua);
	return err;
}

/* signal an XRUN to ALSA if the capture substream is running */
static void abort_alsa_capture(struct ua101 *ua)
{
	if (test_bit(ALSA_CAPTURE_RUNNING, &ua->states))
		snd_pcm_stop(ua->capture.substream, SNDRV_PCM_STATE_XRUN);
}

/* signal an XRUN to ALSA if the playback substream is running */
static void abort_alsa_playback(struct ua101 *ua)
{
	if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states))
		snd_pcm_stop(ua->playback.substream, SNDRV_PCM_STATE_XRUN);
}

/* fill in the hardware parameter limits shared by both substreams */
static int set_stream_hw(struct ua101 *ua, struct snd_pcm_substream *substream,
			 unsigned int channels)
{
	int err;

	substream->runtime->hw.info =
			SNDRV_PCM_INFO_MMAP |
			SNDRV_PCM_INFO_MMAP_VALID |
			SNDRV_PCM_INFO_BATCH |
			SNDRV_PCM_INFO_INTERLEAVED |
			SNDRV_PCM_INFO_BLOCK_TRANSFER |
			SNDRV_PCM_INFO_FIFO_IN_FRAMES;
	substream->runtime->hw.formats = ua->format_bit;
	substream->runtime->hw.rates = snd_pcm_rate_to_rate_bit(ua->rate);
	substream->runtime->hw.rate_min = ua->rate;
	substream->runtime->hw.rate_max = ua->rate;
	substream->runtime->hw.channels_min = channels;
	substream->runtime->hw.channels_max = channels;
	substream->runtime->hw.buffer_bytes_max = 45000 * 1024;
	substream->runtime->hw.period_bytes_min = 1;
substream->runtime->hw.period_bytes_max = UINT_MAX;
	substream->runtime->hw.periods_min = 2;
	substream->runtime->hw.periods_max = UINT_MAX;
	/* a period must span at least 1.5 USB packets */
	err = snd_pcm_hw_constraint_minmax(substream->runtime,
					   SNDRV_PCM_HW_PARAM_PERIOD_TIME,
					   1500000 / ua->packets_per_second,
					   UINT_MAX);
	if (err < 0)
		return err;
	err = snd_pcm_hw_constraint_msbits(substream->runtime, 0, 32, 24);
	return err;
}

/* ALSA open callback for the capture substream; also starts USB capture */
static int capture_pcm_open(struct snd_pcm_substream *substream)
{
	struct ua101 *ua = substream->private_data;
	int err;

	ua->capture.substream = substream;
	err = set_stream_hw(ua, substream, ua->capture.channels);
	if (err < 0)
		return err;
	substream->runtime->hw.fifo_size =
		DIV_ROUND_CLOSEST(ua->rate, ua->packets_per_second);
	substream->runtime->delay = substream->runtime->hw.fifo_size;

	mutex_lock(&ua->mutex);
	err = start_usb_capture(ua);
	if (err >= 0)
		set_bit(ALSA_CAPTURE_OPEN, &ua->states);
	mutex_unlock(&ua->mutex);
	return err;
}

/*
 * ALSA open callback for the playback substream; starts both USB streams
 * because playback packet sizes are derived from capture.
 */
static int playback_pcm_open(struct snd_pcm_substream *substream)
{
	struct ua101 *ua = substream->private_data;
	int err;

	ua->playback.substream = substream;
	err = set_stream_hw(ua, substream, ua->playback.channels);
	if (err < 0)
		return err;
	substream->runtime->hw.fifo_size =
		DIV_ROUND_CLOSEST(ua->rate * ua->playback.queue_length,
				  ua->packets_per_second);

	mutex_lock(&ua->mutex);
	err = start_usb_capture(ua);
	if (err < 0)
		goto error;
	err = start_usb_playback(ua);
	if (err < 0) {
		if (!test_bit(ALSA_CAPTURE_OPEN, &ua->states))
			stop_usb_capture(ua);
		goto error;
	}
	set_bit(ALSA_PLAYBACK_OPEN, &ua->states);
error:
	mutex_unlock(&ua->mutex);
	return err;
}

/* ALSA close callback: stop USB capture unless playback still needs it */
static int capture_pcm_close(struct snd_pcm_substream *substream)
{
	struct ua101 *ua = substream->private_data;

	mutex_lock(&ua->mutex);
	clear_bit(ALSA_CAPTURE_OPEN, &ua->states);
	if (!test_bit(ALSA_PLAYBACK_OPEN, &ua->states))
		stop_usb_capture(ua);
	mutex_unlock(&ua->mutex);
	return 0;
}

/* ALSA close callback: stop playback, and capture too if no longer used */
static int playback_pcm_close(struct snd_pcm_substream *substream)
{
	struct ua101 *ua = substream->private_data;

	mutex_lock(&ua->mutex);
	stop_usb_playback(ua);
	clear_bit(ALSA_PLAYBACK_OPEN, &ua->states);
	if (!test_bit(ALSA_CAPTURE_OPEN, &ua->states))
		stop_usb_capture(ua);
	mutex_unlock(&ua->mutex);
	return 0;
}

/* hw_params callback: ensure capture runs, then allocate the buffer */
static int capture_pcm_hw_params(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *hw_params)
{
	struct ua101 *ua = substream->private_data;
	int err;

	mutex_lock(&ua->mutex);
	err = start_usb_capture(ua);
	mutex_unlock(&ua->mutex);
	if (err < 0)
		return err;
	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
						params_buffer_bytes(hw_params));
}

/* hw_params callback: ensure both streams run, then allocate the buffer */
static int playback_pcm_hw_params(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params *hw_params)
{
	struct ua101 *ua = substream->private_data;
	int err;

	mutex_lock(&ua->mutex);
	err = start_usb_capture(ua);
	if (err >= 0)
		err = start_usb_playback(ua);
	mutex_unlock(&ua->mutex);
	if (err < 0)
		return err;
	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
						params_buffer_bytes(hw_params));
}

/* hw_free callback shared by both substreams */
static int ua101_pcm_hw_free(struct snd_pcm_substream *substream)
{
	return snd_pcm_lib_free_vmalloc_buffer(substream);
}

/* prepare callback: wait until the capture stream actually runs */
static int capture_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct ua101 *ua = substream->private_data;
	int err;

	mutex_lock(&ua->mutex);
	err = start_usb_capture(ua);
	mutex_unlock(&ua->mutex);
	if (err < 0)
		return err;

	/*
	 * The EHCI driver schedules the first packet of an iso stream at 10 ms
	 * in the future, i.e., no data is actually captured for that long.
	 * Take the wait here so that the stream is known to be actually
	 * running when the start trigger has been called.
	 */
	wait_event(ua->alsa_capture_wait,
		   test_bit(CAPTURE_URB_COMPLETED, &ua->states) ||
		   !test_bit(USB_CAPTURE_RUNNING, &ua->states));
	if (test_bit(DISCONNECTED, &ua->states))
		return -ENODEV;
	if (!test_bit(USB_CAPTURE_RUNNING, &ua->states))
		return -EIO;

	ua->capture.period_pos = 0;
	ua->capture.buffer_pos = 0;
	return 0;
}

/* prepare callback: wait until the playback stream actually runs */
static int playback_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct ua101 *ua = substream->private_data;
	int err;

	mutex_lock(&ua->mutex);
	err = start_usb_capture(ua);
	if (err >= 0)
		err = start_usb_playback(ua);
	mutex_unlock(&ua->mutex);
	if (err < 0)
		return err;

	/* see the comment in capture_pcm_prepare() */
	wait_event(ua->alsa_playback_wait,
		   test_bit(PLAYBACK_URB_COMPLETED, &ua->states) ||
		   !test_bit(USB_PLAYBACK_RUNNING, &ua->states));
	if (test_bit(DISCONNECTED, &ua->states))
		return -ENODEV;
	if (!test_bit(USB_PLAYBACK_RUNNING, &ua->states))
		return -EIO;

	substream->runtime->delay = 0;
	ua->playback.period_pos = 0;
	ua->playback.buffer_pos = 0;
	return 0;
}

/* trigger callback: only toggles the ALSA_CAPTURE_RUNNING flag */
static int capture_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct ua101 *ua = substream->private_data;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		if (!test_bit(USB_CAPTURE_RUNNING, &ua->states))
			return -EIO;
		set_bit(ALSA_CAPTURE_RUNNING, &ua->states);
		return 0;
	case SNDRV_PCM_TRIGGER_STOP:
		clear_bit(ALSA_CAPTURE_RUNNING, &ua->states);
		return 0;
	default:
		return -EINVAL;
	}
}

/* trigger callback: only toggles the ALSA_PLAYBACK_RUNNING flag */
static int playback_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct ua101 *ua = substream->private_data;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		if (!test_bit(USB_PLAYBACK_RUNNING, &ua->states))
			return -EIO;
		set_bit(ALSA_PLAYBACK_RUNNING, &ua->states);
		return 0;
	case SNDRV_PCM_TRIGGER_STOP:
		clear_bit(ALSA_PLAYBACK_RUNNING, &ua->states);
		return 0;
	default:
		return -EINVAL;
	}
}

/* read a stream's current ring buffer position under the lock */
static inline snd_pcm_uframes_t ua101_pcm_pointer(struct ua101 *ua,
						  struct ua101_stream *stream)
{
	unsigned long flags;
	unsigned int pos;

	spin_lock_irqsave(&ua->lock, flags);
	pos = stream->buffer_pos;
	spin_unlock_irqrestore(&ua->lock, flags);
	return pos;
}

/* pointer callback for the capture substream */
static snd_pcm_uframes_t capture_pcm_pointer(struct snd_pcm_substream *subs)
{
	struct ua101 *ua = subs->private_data;

	return ua101_pcm_pointer(ua, &ua->capture);
}

/* pointer callback for the playback substream */
static snd_pcm_uframes_t playback_pcm_pointer(struct snd_pcm_substream *subs)
{
	struct ua101 *ua = subs->private_data;

	return ua101_pcm_pointer(ua, &ua->playback);
}

static struct snd_pcm_ops capture_pcm_ops = {
	.open = capture_pcm_open,
	.close = capture_pcm_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = capture_pcm_hw_params,
	.hw_free = ua101_pcm_hw_free,
	.prepare = capture_pcm_prepare,
	.trigger = capture_pcm_trigger,
	.pointer = capture_pcm_pointer,
	.page = snd_pcm_lib_get_vmalloc_page,
	.mmap = snd_pcm_lib_mmap_vmalloc,
};

static struct snd_pcm_ops playback_pcm_ops = {
	.open = playback_pcm_open,
	.close = playback_pcm_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = playback_pcm_hw_params,
	.hw_free = ua101_pcm_hw_free,
	.prepare = playback_pcm_prepare,
	.trigger = playback_pcm_trigger,
	.pointer = playback_pcm_pointer,
	.page = snd_pcm_lib_get_vmalloc_page,
	.mmap = snd_pcm_lib_mmap_vmalloc,
};

/*
 * Search an interface's class-specific descriptors for the UAC format
 * type I descriptor; also validates the altsetting/endpoint layout.
 */
static const struct uac_format_type_i_discrete_descriptor *
find_format_descriptor(struct usb_interface *interface)
{
	struct usb_host_interface *alt;
	u8 *extra;
	int extralen;

	if (interface->num_altsetting != 2) {
		dev_err(&interface->dev, "invalid num_altsetting\n");
		return NULL;
	}
	alt = &interface->altsetting[0];
	if (alt->desc.bNumEndpoints != 0) {
		dev_err(&interface->dev, "invalid bNumEndpoints\n");
		return NULL;
	}
	alt = &interface->altsetting[1];
	if (alt->desc.bNumEndpoints != 1) {
		dev_err(&interface->dev, "invalid bNumEndpoints\n");
		return NULL;
	}

	extra = alt->extra;
	extralen = alt->extralen;
	while (extralen >= sizeof(struct usb_descriptor_header)) {
		struct uac_format_type_i_discrete_descriptor *desc;

		desc = (struct uac_format_type_i_discrete_descriptor *)extra;
		if (desc->bLength > extralen) {
			dev_err(&interface->dev, "descriptor overflow\n");
			return NULL;
		}
		if
(desc->bLength == UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1) &&
		    desc->bDescriptorType == USB_DT_CS_INTERFACE &&
		    desc->bDescriptorSubtype == UAC_FORMAT_TYPE) {
			if (desc->bFormatType != UAC_FORMAT_TYPE_I_PCM ||
			    desc->bSamFreqType != 1) {
				dev_err(&interface->dev,
					"invalid format type\n");
				return NULL;
			}
			return desc;
		}
		extralen -= desc->bLength;
		extra += desc->bLength;
	}
	dev_err(&interface->dev, "sample format descriptor not found\n");
	return NULL;
}

/*
 * Read sample format, rate, channel counts, and endpoint addresses from
 * the USB descriptors and derive the driver's stream parameters.
 */
static int detect_usb_format(struct ua101 *ua)
{
	const struct uac_format_type_i_discrete_descriptor *fmt_capture;
	const struct uac_format_type_i_discrete_descriptor *fmt_playback;
	const struct usb_endpoint_descriptor *epd;
	unsigned int rate2;

	fmt_capture = find_format_descriptor(ua->intf[INTF_CAPTURE]);
	fmt_playback = find_format_descriptor(ua->intf[INTF_PLAYBACK]);
	if (!fmt_capture || !fmt_playback)
		return -ENXIO;

	switch (fmt_capture->bSubframeSize) {
	case 3:
		ua->format_bit = SNDRV_PCM_FMTBIT_S24_3LE;
		break;
	case 4:
		ua->format_bit = SNDRV_PCM_FMTBIT_S32_LE;
		break;
	default:
		dev_err(&ua->dev->dev, "sample width is not 24 or 32 bits\n");
		return -ENXIO;
	}
	if (fmt_capture->bSubframeSize != fmt_playback->bSubframeSize) {
		dev_err(&ua->dev->dev,
			"playback/capture sample widths do not match\n");
		return -ENXIO;
	}

	if (fmt_capture->bBitResolution != 24 ||
	    fmt_playback->bBitResolution != 24) {
		dev_err(&ua->dev->dev, "sample width is not 24 bits\n");
		return -ENXIO;
	}

	ua->rate = combine_triple(fmt_capture->tSamFreq[0]);
	rate2 = combine_triple(fmt_playback->tSamFreq[0]);
	if (ua->rate != rate2) {
		dev_err(&ua->dev->dev,
			"playback/capture rates do not match: %u/%u\n",
			rate2, ua->rate);
		return -ENXIO;
	}

	switch (ua->dev->speed) {
	case USB_SPEED_FULL:
		ua->packets_per_second = 1000;
		break;
	case USB_SPEED_HIGH:
		ua->packets_per_second = 8000;
		break;
	default:
		dev_err(&ua->dev->dev, "unknown device speed\n");
		return -ENXIO;
	}

	ua->capture.channels = fmt_capture->bNrChannels;
	ua->playback.channels = fmt_playback->bNrChannels;
	ua->capture.frame_bytes =
		fmt_capture->bSubframeSize * ua->capture.channels;
	ua->playback.frame_bytes =
		fmt_playback->bSubframeSize * ua->playback.channels;

	epd = &ua->intf[INTF_CAPTURE]->altsetting[1].endpoint[0].desc;
	if (!usb_endpoint_is_isoc_in(epd)) {
		dev_err(&ua->dev->dev, "invalid capture endpoint\n");
		return -ENXIO;
	}
	ua->capture.usb_pipe = usb_rcvisocpipe(ua->dev, usb_endpoint_num(epd));
	ua->capture.max_packet_bytes = le16_to_cpu(epd->wMaxPacketSize);

	epd = &ua->intf[INTF_PLAYBACK]->altsetting[1].endpoint[0].desc;
	if (!usb_endpoint_is_isoc_out(epd)) {
		dev_err(&ua->dev->dev, "invalid playback endpoint\n");
		return -ENXIO;
	}
	ua->playback.usb_pipe = usb_sndisocpipe(ua->dev, usb_endpoint_num(epd));
	ua->playback.max_packet_bytes = le16_to_cpu(epd->wMaxPacketSize);
	return 0;
}

/* allocate page-sized coherent DMA chunks that will hold the URB packets */
static int alloc_stream_buffers(struct ua101 *ua, struct ua101_stream *stream)
{
	unsigned int remaining_packets, packets, packets_per_page, i;
	size_t size;

	/* clamp the module parameter into the supported range */
	stream->queue_length = queue_length;
	stream->queue_length = max(stream->queue_length,
				   (unsigned int)MIN_QUEUE_LENGTH);
	stream->queue_length = min(stream->queue_length,
				   (unsigned int)MAX_QUEUE_LENGTH);

	/*
	 * The cache pool sizes used by usb_alloc_coherent() (128, 512, 2048) are
	 * quite bad when used with the packet sizes of this device (e.g. 280,
	 * 520, 624).  Therefore, we allocate and subdivide entire pages, using
	 * a smaller buffer only for the last chunk.
	 */
	remaining_packets = stream->queue_length;
	packets_per_page = PAGE_SIZE / stream->max_packet_bytes;
	for (i = 0; i < ARRAY_SIZE(stream->buffers); ++i) {
		packets = min(remaining_packets, packets_per_page);
		size = packets * stream->max_packet_bytes;
		stream->buffers[i].addr =
			usb_alloc_coherent(ua->dev, size, GFP_KERNEL,
					   &stream->buffers[i].dma);
		if (!stream->buffers[i].addr)
			return -ENOMEM;
		stream->buffers[i].size = size;
		remaining_packets -= packets;
		if (!remaining_packets)
			break;
	}
	if (remaining_packets) {
		dev_err(&ua->dev->dev, "too many packets\n");
		return -ENXIO;
	}
	return 0;
}

/* release the coherent DMA buffers of a stream */
static void free_stream_buffers(struct ua101 *ua, struct ua101_stream *stream)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(stream->buffers); ++i)
		usb_free_coherent(ua->dev,
				  stream->buffers[i].size,
				  stream->buffers[i].addr,
				  stream->buffers[i].dma);
}

/* carve the DMA buffers into per-packet URBs and initialize them */
static int alloc_stream_urbs(struct ua101 *ua, struct ua101_stream *stream,
			     void (*urb_complete)(struct urb *))
{
	unsigned max_packet_size = stream->max_packet_bytes;
	struct ua101_urb *urb;
	unsigned int b, u = 0;

	for (b = 0; b < ARRAY_SIZE(stream->buffers); ++b) {
		unsigned int size = stream->buffers[b].size;
		u8 *addr = stream->buffers[b].addr;
		dma_addr_t dma = stream->buffers[b].dma;

		while (size >= max_packet_size) {
			if (u >= stream->queue_length)
				goto bufsize_error;
			urb = kmalloc(sizeof(*urb), GFP_KERNEL);
			if (!urb)
				return -ENOMEM;
			usb_init_urb(&urb->urb);
			urb->urb.dev = ua->dev;
			urb->urb.pipe = stream->usb_pipe;
			urb->urb.transfer_flags = URB_ISO_ASAP |
					URB_NO_TRANSFER_DMA_MAP;
			urb->urb.transfer_buffer = addr;
			urb->urb.transfer_dma = dma;
			urb->urb.transfer_buffer_length = max_packet_size;
			urb->urb.number_of_packets = 1;
			urb->urb.interval = 1;
			urb->urb.context = ua;
			urb->urb.complete = urb_complete;
			urb->urb.iso_frame_desc[0].offset = 0;
			urb->urb.iso_frame_desc[0].length = max_packet_size;
			stream->urbs[u++] = urb;
			size -= max_packet_size;
			addr += max_packet_size;
			dma += max_packet_size;
		}
	}
	if (u == stream->queue_length)
		return 0;
bufsize_error:
	dev_err(&ua->dev->dev, "internal buffer size error\n");
	return -ENXIO;
}

/* free the URBs of a stream */
static void free_stream_urbs(struct ua101_stream *stream)
{
	unsigned int i;

	for (i = 0; i < stream->queue_length; ++i) {
		kfree(stream->urbs[i]);
		stream->urbs[i] = NULL;
	}
}

/*
 * Release URBs, DMA buffers, and claimed interfaces; used both on probe
 * failure and on disconnect.
 */
static void free_usb_related_resources(struct ua101 *ua,
				       struct usb_interface *interface)
{
	unsigned int i;
	struct usb_interface *intf;

	mutex_lock(&ua->mutex);
	free_stream_urbs(&ua->capture);
	free_stream_urbs(&ua->playback);
	mutex_unlock(&ua->mutex);
	free_stream_buffers(ua, &ua->capture);
	free_stream_buffers(ua, &ua->playback);

	for (i = 0; i < ARRAY_SIZE(ua->intf); ++i) {
		mutex_lock(&ua->mutex);
		intf = ua->intf[i];
		ua->intf[i] = NULL;
		mutex_unlock(&ua->mutex);
		if (intf) {
			usb_set_intfdata(intf, NULL);
			if (intf != interface)
				usb_driver_release_interface(&ua101_driver,
							     intf);
		}
	}
}

/* snd_card private_free callback */
static void ua101_card_free(struct snd_card *card)
{
	struct ua101 *ua = card->private_data;

	mutex_destroy(&ua->mutex);
}

/* probe: claim all interfaces, detect the format, create the ALSA card */
static int ua101_probe(struct usb_interface *interface,
		       const struct usb_device_id *usb_id)
{
	static const struct snd_usb_midi_endpoint_info midi_ep = {
		.out_cables = 0x0001,
		.in_cables = 0x0001
	};
	static const struct snd_usb_audio_quirk midi_quirk = {
		.type = QUIRK_MIDI_FIXED_ENDPOINT,
		.data = &midi_ep
	};
	static const int intf_numbers[2][3] = {
		{	/* UA-101 */
			[INTF_PLAYBACK] = 0,
			[INTF_CAPTURE] = 1,
			[INTF_MIDI] = 2,
		},
		{	/* UA-1000 */
			[INTF_CAPTURE] = 1,
			[INTF_PLAYBACK] = 2,
			[INTF_MIDI] = 3,
		},
	};
	struct snd_card *card;
	struct ua101 *ua;
	unsigned int card_index, i;
	int is_ua1000;
	const char *name;
	char usb_path[32];
	int err;

	is_ua1000 = usb_id->idProduct == 0x0044;

	/* bind only to the first (driving) interface of the device */
	if (interface->altsetting->desc.bInterfaceNumber !=
	    intf_numbers[is_ua1000][0])
		return -ENODEV;

	mutex_lock(&devices_mutex);

	for (card_index = 0; card_index < SNDRV_CARDS; ++card_index)
		if (enable[card_index] && !(devices_used & (1 << card_index)))
			break;
	if (card_index >= SNDRV_CARDS) {
		mutex_unlock(&devices_mutex);
		return -ENOENT;
	}
	err =
snd_card_create(index[card_index], id[card_index], THIS_MODULE,
			      sizeof(*ua), &card);
	if (err < 0) {
		mutex_unlock(&devices_mutex);
		return err;
	}
	card->private_free = ua101_card_free;
	ua = card->private_data;
	ua->dev = interface_to_usbdev(interface);
	ua->card = card;
	ua->card_index = card_index;
	INIT_LIST_HEAD(&ua->midi_list);
	spin_lock_init(&ua->lock);
	mutex_init(&ua->mutex);
	INIT_LIST_HEAD(&ua->ready_playback_urbs);
	tasklet_init(&ua->playback_tasklet,
		     playback_tasklet, (unsigned long)ua);
	init_waitqueue_head(&ua->alsa_capture_wait);
	init_waitqueue_head(&ua->rate_feedback_wait);
	init_waitqueue_head(&ua->alsa_playback_wait);

	/* the probed interface is slot 0; claim the remaining interfaces */
	ua->intf[0] = interface;
	for (i = 1; i < ARRAY_SIZE(ua->intf); ++i) {
		ua->intf[i] = usb_ifnum_to_if(ua->dev,
					      intf_numbers[is_ua1000][i]);
		if (!ua->intf[i]) {
			dev_err(&ua->dev->dev, "interface %u not found\n",
				intf_numbers[is_ua1000][i]);
			err = -ENXIO;
			goto probe_error;
		}
		err = usb_driver_claim_interface(&ua101_driver,
						 ua->intf[i], ua);
		if (err < 0) {
			ua->intf[i] = NULL;
			err = -EBUSY;
			goto probe_error;
		}
	}

	snd_card_set_dev(card, &interface->dev);

	err = detect_usb_format(ua);
	if (err < 0)
		goto probe_error;

	name = usb_id->idProduct == 0x0044 ? "UA-1000" : "UA-101";
	strcpy(card->driver, "UA-101");
	strcpy(card->shortname, name);
	usb_make_path(ua->dev, usb_path, sizeof(usb_path));
	snprintf(ua->card->longname, sizeof(ua->card->longname),
		 "EDIROL %s (serial %s), %u Hz at %s, %s speed", name,
		 ua->dev->serial ? ua->dev->serial : "?", ua->rate,
		 usb_path,
		 ua->dev->speed == USB_SPEED_HIGH ? "high" : "full");

	err = alloc_stream_buffers(ua, &ua->capture);
	if (err < 0)
		goto probe_error;
	err = alloc_stream_buffers(ua, &ua->playback);
	if (err < 0)
		goto probe_error;

	err = alloc_stream_urbs(ua, &ua->capture, capture_urb_complete);
	if (err < 0)
		goto probe_error;
	err = alloc_stream_urbs(ua, &ua->playback, playback_urb_complete);
	if (err < 0)
		goto probe_error;

	err = snd_pcm_new(card, name, 0, 1, 1, &ua->pcm);
	if (err < 0)
		goto probe_error;
	ua->pcm->private_data = ua;
	strcpy(ua->pcm->name, name);
	snd_pcm_set_ops(ua->pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_pcm_ops);
	snd_pcm_set_ops(ua->pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_pcm_ops);

	err = snd_usbmidi_create(card, ua->intf[INTF_MIDI],
				 &ua->midi_list, &midi_quirk);
	if (err < 0)
		goto probe_error;

	err = snd_card_register(card);
	if (err < 0)
		goto probe_error;

	usb_set_intfdata(interface, ua);
	devices_used |= 1 << card_index;

	mutex_unlock(&devices_mutex);
	return 0;

probe_error:
	free_usb_related_resources(ua, interface);
	snd_card_free(card);
	mutex_unlock(&devices_mutex);
	return err;
}

/* disconnect: stop everything, release resources, free the card later */
static void ua101_disconnect(struct usb_interface *interface)
{
	struct ua101 *ua = usb_get_intfdata(interface);
	struct list_head *midi;

	if (!ua)
		return;

	mutex_lock(&devices_mutex);

	set_bit(DISCONNECTED, &ua->states);
	wake_up(&ua->rate_feedback_wait);

	/* make sure that userspace cannot create new requests */
	snd_card_disconnect(ua->card);

	/* make sure that there are no pending USB requests */
	__list_for_each(midi, &ua->midi_list)
		snd_usbmidi_disconnect(midi);
	abort_alsa_playback(ua);
	abort_alsa_capture(ua);
	mutex_lock(&ua->mutex);
	stop_usb_playback(ua);
	stop_usb_capture(ua);
	mutex_unlock(&ua->mutex);

	free_usb_related_resources(ua, interface);

	devices_used &= ~(1 << ua->card_index);

	snd_card_free_when_closed(ua->card);

	mutex_unlock(&devices_mutex);
}

static struct usb_device_id ua101_ids[] = {
	{ USB_DEVICE(0x0582, 0x0044) }, /* UA-1000 high speed */
	{ USB_DEVICE(0x0582, 0x007d) }, /* UA-101 high speed */
	{ USB_DEVICE(0x0582, 0x008d) }, /* UA-101 full speed */
	{ }
};
MODULE_DEVICE_TABLE(usb, ua101_ids);

static struct usb_driver ua101_driver = {
	.name = "snd-ua101",
	.id_table = ua101_ids,
	.probe = ua101_probe,
	.disconnect = ua101_disconnect,
#if 0
	.suspend = ua101_suspend,
	.resume = ua101_resume,
#endif
};

module_usb_driver(ua101_driver);
gpl-2.0
eagleeyetom/android_kernel_mediatek
arch/powerpc/boot/treeboot-iss4xx.c
9366
2023
/* * Copyright 2010 Ben. Herrenschmidt, IBM Corporation. * * Based on earlier code: * Copyright (C) Paul Mackerras 1997. * * Matt Porter <mporter@kernel.crashing.org> * Copyright 2002-2005 MontaVista Software Inc. * * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> * Copyright (c) 2003, 2004 Zultys Technologies * * Copyright 2007 David Gibson, IBM Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <stdarg.h> #include <stddef.h> #include "types.h" #include "elf.h" #include "string.h" #include "stdio.h" #include "page.h" #include "ops.h" #include "reg.h" #include "io.h" #include "dcr.h" #include "4xx.h" #include "44x.h" #include "libfdt.h" BSS_STACK(4096); static u32 ibm4xx_memstart; static void iss_4xx_fixups(void) { void *memory; u32 reg[3]; memory = finddevice("/memory"); if (!memory) fatal("Can't find memory node\n"); /* This assumes #address-cells = 2, #size-cells =1 and that */ getprop(memory, "reg", reg, sizeof(reg)); if (reg[2]) /* If the device tree specifies the memory range, use it */ ibm4xx_memstart = reg[1]; else /* othersize, read it from the SDRAM controller */ ibm4xx_sdram_fixup_memsize(); } static void *iss_4xx_vmlinux_alloc(unsigned long size) { return (void *)ibm4xx_memstart; } #define SPRN_PIR 0x11E /* Processor Indentification Register */ void platform_init(void) { unsigned long end_of_ram = 0x08000000; unsigned long avail_ram = end_of_ram - (unsigned long)_end; u32 pir_reg; simple_alloc_init(_end, avail_ram, 128, 64); platform_ops.fixups = iss_4xx_fixups; platform_ops.vmlinux_alloc = iss_4xx_vmlinux_alloc; platform_ops.exit = ibm44x_dbcr_reset; pir_reg = mfspr(SPRN_PIR); fdt_set_boot_cpuid_phys(_dtb_start, pir_reg); fdt_init(_dtb_start); serial_console_init(); }
gpl-2.0
ErikAndren/linux
net/rose/rose_timer.c
9622
4999
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright (C) 2002 Ralf Baechle DO1GRB (ralf@gnu.org)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/rose.h>

static void rose_heartbeat_expiry(unsigned long);
static void rose_timer_expiry(unsigned long);
static void rose_idletimer_expiry(unsigned long);

/* (re)arm the per-socket heartbeat timer; it fires every 5 seconds */
void rose_start_heartbeat(struct sock *sk)
{
	del_timer(&sk->sk_timer);

	sk->sk_timer.data     = (unsigned long)sk;
	sk->sk_timer.function = &rose_heartbeat_expiry;
	sk->sk_timer.expires  = jiffies + 5 * HZ;

	add_timer(&sk->sk_timer);
}

/* (re)arm the shared protocol timer for the T1 interval */
void rose_start_t1timer(struct sock *sk)
{
	struct rose_sock *rose = rose_sk(sk);

	del_timer(&rose->timer);

	rose->timer.data     = (unsigned long)sk;
	rose->timer.function = &rose_timer_expiry;
	rose->timer.expires  = jiffies + rose->t1;

	add_timer(&rose->timer);
}

/* (re)arm the shared protocol timer for the T2 interval */
void rose_start_t2timer(struct sock *sk)
{
	struct rose_sock *rose = rose_sk(sk);

	del_timer(&rose->timer);

	rose->timer.data     = (unsigned long)sk;
	rose->timer.function = &rose_timer_expiry;
	rose->timer.expires  = jiffies + rose->t2;

	add_timer(&rose->timer);
}

/* (re)arm the shared protocol timer for the T3 interval */
void rose_start_t3timer(struct sock *sk)
{
	struct rose_sock *rose = rose_sk(sk);

	del_timer(&rose->timer);

	rose->timer.data     = (unsigned long)sk;
	rose->timer.function = &rose_timer_expiry;
	rose->timer.expires  = jiffies + rose->t3;

	add_timer(&rose->timer);
}

/* (re)arm the shared protocol timer for the hold-back (HB) interval */
void rose_start_hbtimer(struct sock *sk)
{
	struct rose_sock *rose = rose_sk(sk);

	del_timer(&rose->timer);

	rose->timer.data     = (unsigned long)sk;
	rose->timer.function = &rose_timer_expiry;
	rose->timer.expires  = jiffies + rose->hb;

	add_timer(&rose->timer);
}

/* (re)arm the idle timer, if an idle timeout is configured (> 0) */
void rose_start_idletimer(struct sock *sk)
{
	struct rose_sock *rose = rose_sk(sk);

	del_timer(&rose->idletimer);

	if (rose->idle > 0) {
		rose->idletimer.data     = (unsigned long)sk;
		rose->idletimer.function = &rose_idletimer_expiry;
		rose->idletimer.expires  = jiffies + rose->idle;

		add_timer(&rose->idletimer);
	}
}

/* cancel the heartbeat timer */
void rose_stop_heartbeat(struct sock *sk)
{
	del_timer(&sk->sk_timer);
}

/* cancel the shared protocol (T1/T2/T3/HB) timer */
void rose_stop_timer(struct sock *sk)
{
	del_timer(&rose_sk(sk)->timer);
}

/* cancel the idle timer */
void rose_stop_idletimer(struct sock *sk)
{
	del_timer(&rose_sk(sk)->idletimer);
}

/*
 * Heartbeat expiry: reap dead unaccepted sockets and, in the data
 * transfer state, clear an own-receiver-busy condition once buffer
 * space is available again.
 */
static void rose_heartbeat_expiry(unsigned long param)
{
	struct sock *sk = (struct sock *)param;
	struct rose_sock *rose = rose_sk(sk);

	bh_lock_sock(sk);
	switch (rose->state) {
	case ROSE_STATE_0:
		/* Magic here: If we listen() and a new link dies before it
		   is accepted() it isn't 'dead' so doesn't get removed. */
		if (sock_flag(sk, SOCK_DESTROY) ||
		    (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
			bh_unlock_sock(sk);
			rose_destroy_socket(sk);
			return;
		}
		break;

	case ROSE_STATE_3:
		/*
		 * Check for the state of the receive buffer.
		 */
		if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) &&
		    (rose->condition & ROSE_COND_OWN_RX_BUSY)) {
			rose->condition &= ~ROSE_COND_OWN_RX_BUSY;
			rose->condition &= ~ROSE_COND_ACK_PENDING;
			rose->vl         = rose->vr;
			rose_write_internal(sk, ROSE_RR);
			rose_stop_timer(sk);	/* HB */
			break;
		}
		break;
	}

	rose_start_heartbeat(sk);
	bh_unlock_sock(sk);
}

/*
 * Shared protocol timer expiry; the meaning (T1, T2, T3, or HB) depends
 * on the connection state the socket is in.
 */
static void rose_timer_expiry(unsigned long param)
{
	struct sock *sk = (struct sock *)param;
	struct rose_sock *rose = rose_sk(sk);

	bh_lock_sock(sk);
	switch (rose->state) {
	case ROSE_STATE_1:	/* T1 */
	case ROSE_STATE_4:	/* T2 */
		/* no answer: clear the call and wait for confirmation */
		rose_write_internal(sk, ROSE_CLEAR_REQUEST);
		rose->state = ROSE_STATE_2;
		rose_start_t3timer(sk);
		break;

	case ROSE_STATE_2:	/* T3 */
		/* clear confirmation never arrived: give up the neighbour */
		rose->neighbour->use--;
		rose_disconnect(sk, ETIMEDOUT, -1, -1);
		break;

	case ROSE_STATE_3:	/* HB */
		if (rose->condition & ROSE_COND_ACK_PENDING) {
			rose->condition &= ~ROSE_COND_ACK_PENDING;
			rose_enquiry_response(sk);
		}
		break;
	}
	bh_unlock_sock(sk);
}

/*
 * Idle timer expiry: the connection was unused for too long; clear the
 * call and shut the socket down for sending.
 */
static void rose_idletimer_expiry(unsigned long param)
{
	struct sock *sk = (struct sock *)param;

	bh_lock_sock(sk);
	rose_clear_queues(sk);

	rose_write_internal(sk, ROSE_CLEAR_REQUEST);
	rose_sk(sk)->state = ROSE_STATE_2;

	rose_start_t3timer(sk);

	sk->sk_state     = TCP_CLOSE;
	sk->sk_err       = 0;
	sk->sk_shutdown |= SEND_SHUTDOWN;

	if (!sock_flag(sk, SOCK_DEAD)) {
		sk->sk_state_change(sk);
		sock_set_flag(sk, SOCK_DEAD);
	}
	bh_unlock_sock(sk);
}
gpl-2.0
ochiman/e405-kernel
drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c
151
3593
/* msi-tvanywhere-plus.h - Keytable for msi_tvanywhere_plus Remote Controller * * keymap imported from ir-keymaps.c * * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <media/rc-map.h> /* Keycodes for remote on the MSI TV@nywhere Plus. The controller IC on the card is marked "KS003". The controller is I2C at address 0x30, but does not seem to respond to probes until a read is performed from a valid device. I don't know why... Note: This remote may be of similar or identical design to the Pixelview remote (?). The raw codes and duplicate button codes appear to be the same. Henry Wong <henry@stuffedcow.net> Some changes to formatting and keycodes by Mark Schultz <n9xmj@yahoo.com> */ static struct rc_map_table msi_tvanywhere_plus[] = { /* ---- Remote Button Layout ---- POWER SOURCE SCAN MUTE TV/FM 1 2 3 |> 4 5 6 <| 7 8 9 ^^UP 0 + RECALL vvDN RECORD STOP PLAY MINIMIZE ZOOM CH+ VOL- VOL+ CH- SNAPSHOT MTS << FUNC >> RESET */ { 0x01, KEY_1 }, /* 1 */ { 0x0b, KEY_2 }, /* 2 */ { 0x1b, KEY_3 }, /* 3 */ { 0x05, KEY_4 }, /* 4 */ { 0x09, KEY_5 }, /* 5 */ { 0x15, KEY_6 }, /* 6 */ { 0x06, KEY_7 }, /* 7 */ { 0x0a, KEY_8 }, /* 8 */ { 0x12, KEY_9 }, /* 9 */ { 0x02, KEY_0 }, /* 0 */ { 0x10, KEY_KPPLUS }, /* + */ { 0x13, KEY_AGAIN }, /* Recall */ { 0x1e, KEY_POWER }, /* Power */ { 0x07, KEY_TUNER }, /* Source */ { 0x1c, KEY_SEARCH }, /* Scan */ { 0x18, KEY_MUTE }, /* Mute */ { 0x03, KEY_RADIO }, /* TV/FM */ /* The next four keys are duplicates that appear to send the same IR code as Ch+, Ch-, >>, and << . The raw code assigned to them is the actual code + 0x20 - they will never be detected as such unless some way is discovered to distinguish these buttons from those that have the same code. 
*/ { 0x3f, KEY_RIGHT }, /* |> and Ch+ */ { 0x37, KEY_LEFT }, /* <| and Ch- */ { 0x2c, KEY_UP }, /* ^^Up and >> */ { 0x24, KEY_DOWN }, /* vvDn and << */ { 0x00, KEY_RECORD }, /* Record */ { 0x08, KEY_STOP }, /* Stop */ { 0x11, KEY_PLAY }, /* Play */ { 0x0f, KEY_CLOSE }, /* Minimize */ { 0x19, KEY_ZOOM }, /* Zoom */ { 0x1a, KEY_CAMERA }, /* Snapshot */ { 0x0d, KEY_LANGUAGE }, /* MTS */ { 0x14, KEY_VOLUMEDOWN }, /* Vol- */ { 0x16, KEY_VOLUMEUP }, /* Vol+ */ { 0x17, KEY_CHANNELDOWN }, /* Ch- */ { 0x1f, KEY_CHANNELUP }, /* Ch+ */ { 0x04, KEY_REWIND }, /* << */ { 0x0e, KEY_MENU }, /* Function */ { 0x0c, KEY_FASTFORWARD }, /* >> */ { 0x1d, KEY_RESTART }, /* Reset */ }; static struct rc_map_list msi_tvanywhere_plus_map = { .map = { .scan = msi_tvanywhere_plus, .size = ARRAY_SIZE(msi_tvanywhere_plus), .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ .name = RC_MAP_MSI_TVANYWHERE_PLUS, } }; static int __init init_rc_map_msi_tvanywhere_plus(void) { return rc_map_register(&msi_tvanywhere_plus_map); } static void __exit exit_rc_map_msi_tvanywhere_plus(void) { rc_map_unregister(&msi_tvanywhere_plus_map); } module_init(init_rc_map_msi_tvanywhere_plus) module_exit(exit_rc_map_msi_tvanywhere_plus) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
ninjablocks/u-boot-kern3.2
board/quad100hd/quad100hd.c
151
2445
/* * (C) Copyright 2008 * Gary Jennejohn, DENX Software Engineering GmbH, garyj@denx.de. * * Based in part on board/icecube/icecube.c from PPCBoot * (C) Copyright 2003 Intrinsyc Software * * See file CREDITS for list of people who contributed to this * project. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA */ #include <common.h> #include <command.h> #include <malloc.h> #include <environment.h> #include <logbuff.h> #include <post.h> #include <asm/processor.h> #include <asm/io.h> #include <asm/ppc4xx-gpio.h> DECLARE_GLOBAL_DATA_PTR; int board_early_init_f(void) { /* taken from PPCBoot */ mtdcr(UIC0SR, 0xFFFFFFFF); /* clear all ints */ mtdcr(UIC0ER, 0x00000000); /* disable all ints */ mtdcr(UIC0CR, 0x00000000); mtdcr(UIC0PR, 0xFFFF7FFE); /* set int polarities */ mtdcr(UIC0TR, 0x00000000); /* set int trigger levels */ mtdcr(UIC0SR, 0xFFFFFFFF); /* clear all ints */ mtdcr(UIC0VCR, 0x00000001); /* set vect base=0,INT0 highest priority */ mtdcr(CPC0_SRR, 0x00040000); /* Hold PCI bridge in reset */ return 0; } /* * Check Board Identity: */ int checkboard(void) { char buf[64]; int i = getenv_f("serial#", buf, sizeof(buf)); #ifdef DISPLAY_BOARD_INFO sys_info_t sysinfo; #endif puts("Board: Quad100hd"); if (i > 0) { puts(", serial# "); puts(buf); } putc('\n'); #ifdef DISPLAY_BOARD_INFO /* taken from ppcboot */ get_sys_info(&sysinfo); 
printf("\tVCO: %lu MHz\n", sysinfo.freqVCOMhz); printf("\tCPU: %lu MHz\n", sysinfo.freqProcessor / 1000000); printf("\tPLB: %lu MHz\n", sysinfo.freqPLB / 1000000); printf("\tOPB: %lu MHz\n", sysinfo.freqOPB / 1000000); printf("\tEPB: %lu MHz\n", sysinfo.freqPLB / (sysinfo.pllExtBusDiv * 1000000)); printf("\tPCI: %lu MHz\n", sysinfo.freqPCI / 1000000); #endif return 0; }
gpl-2.0
randomblame/android_kernel_acer_t20-common
net/sched/sch_api.c
151
41979
/* * net/sched/sch_api.c Packet scheduler API. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * * Fixes: * * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired. * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/kmod.h> #include <linux/list.h> #include <linux/hrtimer.h> #include <linux/lockdep.h> #include <linux/slab.h> #include <net/net_namespace.h> #include <net/sock.h> #include <net/netlink.h> #include <net/pkt_sched.h> static int qdisc_notify(struct net *net, struct sk_buff *oskb, struct nlmsghdr *n, u32 clid, struct Qdisc *old, struct Qdisc *new); static int tclass_notify(struct net *net, struct sk_buff *oskb, struct nlmsghdr *n, struct Qdisc *q, unsigned long cl, int event); /* Short review. ------------- This file consists of two interrelated parts: 1. queueing disciplines manager frontend. 2. traffic classes manager frontend. Generally, queueing discipline ("qdisc") is a black box, which is able to enqueue packets and to dequeue them (when device is ready to send something) in order and at times determined by algorithm hidden in it. qdisc's are divided to two categories: - "queues", which have no internal structure visible from outside. - "schedulers", which split all the packets to "traffic classes", using "packet classifiers" (look at cls_api.c) In turn, classes may have child qdiscs (as rule, queues) attached to them etc. etc. etc. 
The goal of the routines in this file is to translate information supplied by user in the form of handles to more intelligible for kernel form, to make some sanity checks and part of work, which is common to all qdiscs and to provide rtnetlink notifications. All real intelligent work is done inside qdisc modules. Every discipline has two major routines: enqueue and dequeue. ---dequeue dequeue usually returns a skb to send. It is allowed to return NULL, but it does not mean that queue is empty, it just means that discipline does not want to send anything this time. Queue is really empty if q->q.qlen == 0. For complicated disciplines with multiple queues q->q is not real packet queue, but however q->q.qlen must be valid. ---enqueue enqueue returns 0, if packet was enqueued successfully. If packet (this one or another one) was dropped, it returns not zero error code. NET_XMIT_DROP - this packet dropped Expected action: do not backoff, but wait until queue will clear. NET_XMIT_CN - probably this packet enqueued, but another one dropped. Expected action: backoff or ignore NET_XMIT_POLICED - dropped by police. Expected action: backoff or error to real-time apps. Auxiliary routines: ---peek like dequeue but without removing a packet from the queue ---reset returns qdisc to initial state: purge all buffers, clear all timers, counters (except for statistics) etc. ---init initializes newly created qdisc. ---destroy destroys resources allocated by init and during lifetime of qdisc. ---change changes qdisc parameters. */ /* Protects list of registered TC modules. It is pure SMP lock. */ static DEFINE_RWLOCK(qdisc_mod_lock); /************************************************ * Queueing disciplines manipulation. * ************************************************/ /* The list of all installed queueing disciplines. 
*/ static struct Qdisc_ops *qdisc_base; /* Register/uregister queueing discipline */ int register_qdisc(struct Qdisc_ops *qops) { struct Qdisc_ops *q, **qp; int rc = -EEXIST; write_lock(&qdisc_mod_lock); for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next) if (!strcmp(qops->id, q->id)) goto out; if (qops->enqueue == NULL) qops->enqueue = noop_qdisc_ops.enqueue; if (qops->peek == NULL) { if (qops->dequeue == NULL) qops->peek = noop_qdisc_ops.peek; else goto out_einval; } if (qops->dequeue == NULL) qops->dequeue = noop_qdisc_ops.dequeue; if (qops->cl_ops) { const struct Qdisc_class_ops *cops = qops->cl_ops; if (!(cops->get && cops->put && cops->walk && cops->leaf)) goto out_einval; if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf)) goto out_einval; } qops->next = NULL; *qp = qops; rc = 0; out: write_unlock(&qdisc_mod_lock); return rc; out_einval: rc = -EINVAL; goto out; } EXPORT_SYMBOL(register_qdisc); int unregister_qdisc(struct Qdisc_ops *qops) { struct Qdisc_ops *q, **qp; int err = -ENOENT; write_lock(&qdisc_mod_lock); for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next) if (q == qops) break; if (q) { *qp = q->next; q->next = NULL; err = 0; } write_unlock(&qdisc_mod_lock); return err; } EXPORT_SYMBOL(unregister_qdisc); /* We know handle. Find qdisc among all qdisc's attached to device (root qdisc, all its children, children of children etc.) 
*/ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle) { struct Qdisc *q; if (!(root->flags & TCQ_F_BUILTIN) && root->handle == handle) return root; list_for_each_entry(q, &root->list, list) { if (q->handle == handle) return q; } return NULL; } static void qdisc_list_add(struct Qdisc *q) { if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) list_add_tail(&q->list, &qdisc_dev(q)->qdisc->list); } void qdisc_list_del(struct Qdisc *q) { if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) list_del(&q->list); } EXPORT_SYMBOL(qdisc_list_del); struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) { struct Qdisc *q; q = qdisc_match_from_root(dev->qdisc, handle); if (q) goto out; if (dev_ingress_queue(dev)) q = qdisc_match_from_root( dev_ingress_queue(dev)->qdisc_sleeping, handle); out: return q; } static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid) { unsigned long cl; struct Qdisc *leaf; const struct Qdisc_class_ops *cops = p->ops->cl_ops; if (cops == NULL) return NULL; cl = cops->get(p, classid); if (cl == 0) return NULL; leaf = cops->leaf(p, cl); cops->put(p, cl); return leaf; } /* Find queueing discipline by name */ static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind) { struct Qdisc_ops *q = NULL; if (kind) { read_lock(&qdisc_mod_lock); for (q = qdisc_base; q; q = q->next) { if (nla_strcmp(kind, q->id) == 0) { if (!try_module_get(q->owner)) q = NULL; break; } } read_unlock(&qdisc_mod_lock); } return q; } static struct qdisc_rate_table *qdisc_rtab_list; struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab) { struct qdisc_rate_table *rtab; for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) { if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) { rtab->refcnt++; return rtab; } } if (tab == NULL || r->rate == 0 || r->cell_log == 0 || nla_len(tab) != TC_RTAB_SIZE) return NULL; rtab = kmalloc(sizeof(*rtab), GFP_KERNEL); if (rtab) { rtab->rate = *r; rtab->refcnt = 1; 
memcpy(rtab->data, nla_data(tab), 1024); rtab->next = qdisc_rtab_list; qdisc_rtab_list = rtab; } return rtab; } EXPORT_SYMBOL(qdisc_get_rtab); void qdisc_put_rtab(struct qdisc_rate_table *tab) { struct qdisc_rate_table *rtab, **rtabp; if (!tab || --tab->refcnt) return; for (rtabp = &qdisc_rtab_list; (rtab = *rtabp) != NULL; rtabp = &rtab->next) { if (rtab == tab) { *rtabp = rtab->next; kfree(rtab); return; } } } EXPORT_SYMBOL(qdisc_put_rtab); static LIST_HEAD(qdisc_stab_list); static DEFINE_SPINLOCK(qdisc_stab_lock); static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = { [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) }, [TCA_STAB_DATA] = { .type = NLA_BINARY }, }; static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt) { struct nlattr *tb[TCA_STAB_MAX + 1]; struct qdisc_size_table *stab; struct tc_sizespec *s; unsigned int tsize = 0; u16 *tab = NULL; int err; err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy); if (err < 0) return ERR_PTR(err); if (!tb[TCA_STAB_BASE]) return ERR_PTR(-EINVAL); s = nla_data(tb[TCA_STAB_BASE]); if (s->tsize > 0) { if (!tb[TCA_STAB_DATA]) return ERR_PTR(-EINVAL); tab = nla_data(tb[TCA_STAB_DATA]); tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16); } if (tsize != s->tsize || (!tab && tsize > 0)) return ERR_PTR(-EINVAL); spin_lock(&qdisc_stab_lock); list_for_each_entry(stab, &qdisc_stab_list, list) { if (memcmp(&stab->szopts, s, sizeof(*s))) continue; if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16))) continue; stab->refcnt++; spin_unlock(&qdisc_stab_lock); return stab; } spin_unlock(&qdisc_stab_lock); stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL); if (!stab) return ERR_PTR(-ENOMEM); stab->refcnt = 1; stab->szopts = *s; if (tsize > 0) memcpy(stab->data, tab, tsize * sizeof(u16)); spin_lock(&qdisc_stab_lock); list_add_tail(&stab->list, &qdisc_stab_list); spin_unlock(&qdisc_stab_lock); return stab; } static void stab_kfree_rcu(struct rcu_head *head) { kfree(container_of(head, 
struct qdisc_size_table, rcu)); } void qdisc_put_stab(struct qdisc_size_table *tab) { if (!tab) return; spin_lock(&qdisc_stab_lock); if (--tab->refcnt == 0) { list_del(&tab->list); call_rcu_bh(&tab->rcu, stab_kfree_rcu); } spin_unlock(&qdisc_stab_lock); } EXPORT_SYMBOL(qdisc_put_stab); static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab) { struct nlattr *nest; nest = nla_nest_start(skb, TCA_STAB); if (nest == NULL) goto nla_put_failure; NLA_PUT(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts); nla_nest_end(skb, nest); return skb->len; nla_put_failure: return -1; } void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab) { int pkt_len, slot; pkt_len = skb->len + stab->szopts.overhead; if (unlikely(!stab->szopts.tsize)) goto out; slot = pkt_len + stab->szopts.cell_align; if (unlikely(slot < 0)) slot = 0; slot >>= stab->szopts.cell_log; if (likely(slot < stab->szopts.tsize)) pkt_len = stab->data[slot]; else pkt_len = stab->data[stab->szopts.tsize - 1] * (slot / stab->szopts.tsize) + stab->data[slot % stab->szopts.tsize]; pkt_len <<= stab->szopts.size_log; out: if (unlikely(pkt_len < 1)) pkt_len = 1; qdisc_skb_cb(skb)->pkt_len = pkt_len; } EXPORT_SYMBOL(__qdisc_calculate_pkt_len); void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc) { if (!(qdisc->flags & TCQ_F_WARN_NONWC)) { pr_warn("%s: %s qdisc %X: is non-work-conserving?\n", txt, qdisc->ops->id, qdisc->handle >> 16); qdisc->flags |= TCQ_F_WARN_NONWC; } } EXPORT_SYMBOL(qdisc_warn_nonwc); static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer) { struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog, timer); qdisc_unthrottled(wd->qdisc); __netif_schedule(qdisc_root(wd->qdisc)); return HRTIMER_NORESTART; } void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc) { hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); wd->timer.function = qdisc_watchdog; wd->qdisc = qdisc; } 
EXPORT_SYMBOL(qdisc_watchdog_init); void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires) { ktime_t time; if (test_bit(__QDISC_STATE_DEACTIVATED, &qdisc_root_sleeping(wd->qdisc)->state)) return; qdisc_throttled(wd->qdisc); time = ktime_set(0, 0); time = ktime_add_ns(time, PSCHED_TICKS2NS(expires)); hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS); } EXPORT_SYMBOL(qdisc_watchdog_schedule); void qdisc_watchdog_cancel(struct qdisc_watchdog *wd) { hrtimer_cancel(&wd->timer); qdisc_unthrottled(wd->qdisc); } EXPORT_SYMBOL(qdisc_watchdog_cancel); static struct hlist_head *qdisc_class_hash_alloc(unsigned int n) { unsigned int size = n * sizeof(struct hlist_head), i; struct hlist_head *h; if (size <= PAGE_SIZE) h = kmalloc(size, GFP_KERNEL); else h = (struct hlist_head *) __get_free_pages(GFP_KERNEL, get_order(size)); if (h != NULL) { for (i = 0; i < n; i++) INIT_HLIST_HEAD(&h[i]); } return h; } static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n) { unsigned int size = n * sizeof(struct hlist_head); if (size <= PAGE_SIZE) kfree(h); else free_pages((unsigned long)h, get_order(size)); } void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash) { struct Qdisc_class_common *cl; struct hlist_node *n, *next; struct hlist_head *nhash, *ohash; unsigned int nsize, nmask, osize; unsigned int i, h; /* Rehash when load factor exceeds 0.75 */ if (clhash->hashelems * 4 <= clhash->hashsize * 3) return; nsize = clhash->hashsize * 2; nmask = nsize - 1; nhash = qdisc_class_hash_alloc(nsize); if (nhash == NULL) return; ohash = clhash->hash; osize = clhash->hashsize; sch_tree_lock(sch); for (i = 0; i < osize; i++) { hlist_for_each_entry_safe(cl, n, next, &ohash[i], hnode) { h = qdisc_class_hash(cl->classid, nmask); hlist_add_head(&cl->hnode, &nhash[h]); } } clhash->hash = nhash; clhash->hashsize = nsize; clhash->hashmask = nmask; sch_tree_unlock(sch); qdisc_class_hash_free(ohash, osize); } EXPORT_SYMBOL(qdisc_class_hash_grow); 
int qdisc_class_hash_init(struct Qdisc_class_hash *clhash) { unsigned int size = 4; clhash->hash = qdisc_class_hash_alloc(size); if (clhash->hash == NULL) return -ENOMEM; clhash->hashsize = size; clhash->hashmask = size - 1; clhash->hashelems = 0; return 0; } EXPORT_SYMBOL(qdisc_class_hash_init); void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash) { qdisc_class_hash_free(clhash->hash, clhash->hashsize); } EXPORT_SYMBOL(qdisc_class_hash_destroy); void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash, struct Qdisc_class_common *cl) { unsigned int h; INIT_HLIST_NODE(&cl->hnode); h = qdisc_class_hash(cl->classid, clhash->hashmask); hlist_add_head(&cl->hnode, &clhash->hash[h]); clhash->hashelems++; } EXPORT_SYMBOL(qdisc_class_hash_insert); void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash, struct Qdisc_class_common *cl) { hlist_del(&cl->hnode); clhash->hashelems--; } EXPORT_SYMBOL(qdisc_class_hash_remove); /* Allocate an unique handle from space managed by kernel */ static u32 qdisc_alloc_handle(struct net_device *dev) { int i = 0x10000; static u32 autohandle = TC_H_MAKE(0x80000000U, 0); do { autohandle += TC_H_MAKE(0x10000U, 0); if (autohandle == TC_H_MAKE(TC_H_ROOT, 0)) autohandle = TC_H_MAKE(0x80000000U, 0); } while (qdisc_lookup(dev, autohandle) && --i > 0); return i > 0 ? 
autohandle : 0; } void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) { const struct Qdisc_class_ops *cops; unsigned long cl; u32 parentid; if (n == 0) return; while ((parentid = sch->parent)) { if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS)) return; sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid)); if (sch == NULL) { WARN_ON(parentid != TC_H_ROOT); return; } cops = sch->ops->cl_ops; if (cops->qlen_notify) { cl = cops->get(sch, parentid); cops->qlen_notify(sch, cl); cops->put(sch, cl); } sch->q.qlen -= n; } } EXPORT_SYMBOL(qdisc_tree_decrease_qlen); static void notify_and_destroy(struct net *net, struct sk_buff *skb, struct nlmsghdr *n, u32 clid, struct Qdisc *old, struct Qdisc *new) { if (new || old) qdisc_notify(net, skb, n, clid, old, new); if (old) qdisc_destroy(old); } /* Graft qdisc "new" to class "classid" of qdisc "parent" or * to device "dev". * * When appropriate send a netlink notification using 'skb' * and "n". * * On success, destroy old qdisc. */ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, struct sk_buff *skb, struct nlmsghdr *n, u32 classid, struct Qdisc *new, struct Qdisc *old) { struct Qdisc *q = old; struct net *net = dev_net(dev); int err = 0; if (parent == NULL) { unsigned int i, num_q, ingress; ingress = 0; num_q = dev->num_tx_queues; if ((q && q->flags & TCQ_F_INGRESS) || (new && new->flags & TCQ_F_INGRESS)) { num_q = 1; ingress = 1; if (!dev_ingress_queue(dev)) return -ENOENT; } if (dev->flags & IFF_UP) dev_deactivate(dev); if (new && new->ops->attach) { new->ops->attach(new); num_q = 0; } for (i = 0; i < num_q; i++) { struct netdev_queue *dev_queue = dev_ingress_queue(dev); if (!ingress) dev_queue = netdev_get_tx_queue(dev, i); old = dev_graft_qdisc(dev_queue, new); if (new && i > 0) atomic_inc(&new->refcnt); if (!ingress) qdisc_destroy(old); } if (!ingress) { notify_and_destroy(net, skb, n, classid, dev->qdisc, new); if (new && !new->ops->attach) atomic_inc(&new->refcnt); dev->qdisc = new ? 
: &noop_qdisc; } else { notify_and_destroy(net, skb, n, classid, old, new); } if (dev->flags & IFF_UP) dev_activate(dev); } else { const struct Qdisc_class_ops *cops = parent->ops->cl_ops; err = -EOPNOTSUPP; if (cops && cops->graft) { unsigned long cl = cops->get(parent, classid); if (cl) { err = cops->graft(parent, cl, new, &old); cops->put(parent, cl); } else err = -ENOENT; } if (!err) notify_and_destroy(net, skb, n, classid, old, new); } return err; } /* lockdep annotation is needed for ingress; egress gets it only for name */ static struct lock_class_key qdisc_tx_lock; static struct lock_class_key qdisc_rx_lock; /* Allocate and initialize new qdisc. Parameters are passed via opt. */ static struct Qdisc * qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, struct Qdisc *p, u32 parent, u32 handle, struct nlattr **tca, int *errp) { int err; struct nlattr *kind = tca[TCA_KIND]; struct Qdisc *sch; struct Qdisc_ops *ops; struct qdisc_size_table *stab; ops = qdisc_lookup_ops(kind); #ifdef CONFIG_MODULES if (ops == NULL && kind != NULL) { char name[IFNAMSIZ]; if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) { /* We dropped the RTNL semaphore in order to * perform the module load. So, even if we * succeeded in loading the module we have to * tell the caller to replay the request. We * indicate this using -EAGAIN. * We replay the request because the device may * go away in the mean time. */ rtnl_unlock(); request_module("sch_%s", name); rtnl_lock(); ops = qdisc_lookup_ops(kind); if (ops != NULL) { /* We will try again qdisc_lookup_ops, * so don't keep a reference. 
*/ module_put(ops->owner); err = -EAGAIN; goto err_out; } } } #endif err = -ENOENT; if (ops == NULL) goto err_out; sch = qdisc_alloc(dev_queue, ops); if (IS_ERR(sch)) { err = PTR_ERR(sch); goto err_out2; } sch->parent = parent; if (handle == TC_H_INGRESS) { sch->flags |= TCQ_F_INGRESS; handle = TC_H_MAKE(TC_H_INGRESS, 0); lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock); } else { if (handle == 0) { handle = qdisc_alloc_handle(dev); err = -ENOMEM; if (handle == 0) goto err_out3; } lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock); } sch->handle = handle; if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) { if (tca[TCA_STAB]) { stab = qdisc_get_stab(tca[TCA_STAB]); if (IS_ERR(stab)) { err = PTR_ERR(stab); goto err_out4; } rcu_assign_pointer(sch->stab, stab); } if (tca[TCA_RATE]) { spinlock_t *root_lock; err = -EOPNOTSUPP; if (sch->flags & TCQ_F_MQROOT) goto err_out4; if ((sch->parent != TC_H_ROOT) && !(sch->flags & TCQ_F_INGRESS) && (!p || !(p->flags & TCQ_F_MQROOT))) root_lock = qdisc_root_sleeping_lock(sch); else root_lock = qdisc_lock(sch); err = gen_new_estimator(&sch->bstats, &sch->rate_est, root_lock, tca[TCA_RATE]); if (err) goto err_out4; } qdisc_list_add(sch); return sch; } err_out3: dev_put(dev); kfree((char *) sch - sch->padded); err_out2: module_put(ops->owner); err_out: *errp = err; return NULL; err_out4: /* * Any broken qdiscs that would require a ops->reset() here? * The qdisc was never in action so it shouldn't be necessary. 
*/ qdisc_put_stab(rtnl_dereference(sch->stab)); if (ops->destroy) ops->destroy(sch); goto err_out3; } static int qdisc_change(struct Qdisc *sch, struct nlattr **tca) { struct qdisc_size_table *ostab, *stab = NULL; int err = 0; if (tca[TCA_OPTIONS]) { if (sch->ops->change == NULL) return -EINVAL; err = sch->ops->change(sch, tca[TCA_OPTIONS]); if (err) return err; } if (tca[TCA_STAB]) { stab = qdisc_get_stab(tca[TCA_STAB]); if (IS_ERR(stab)) return PTR_ERR(stab); } ostab = rtnl_dereference(sch->stab); rcu_assign_pointer(sch->stab, stab); qdisc_put_stab(ostab); if (tca[TCA_RATE]) { /* NB: ignores errors from replace_estimator because change can't be undone. */ if (sch->flags & TCQ_F_MQROOT) goto out; gen_replace_estimator(&sch->bstats, &sch->rate_est, qdisc_root_sleeping_lock(sch), tca[TCA_RATE]); } out: return 0; } struct check_loop_arg { struct qdisc_walker w; struct Qdisc *p; int depth; }; static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w); static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth) { struct check_loop_arg arg; if (q->ops->cl_ops == NULL) return 0; arg.w.stop = arg.w.skip = arg.w.count = 0; arg.w.fn = check_loop_fn; arg.depth = depth; arg.p = p; q->ops->cl_ops->walk(q, &arg.w); return arg.w.stop ? -ELOOP : 0; } static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w) { struct Qdisc *leaf; const struct Qdisc_class_ops *cops = q->ops->cl_ops; struct check_loop_arg *arg = (struct check_loop_arg *)w; leaf = cops->leaf(q, cl); if (leaf) { if (leaf == arg->p || arg->depth > 7) return -ELOOP; return check_loop(leaf, arg->p, arg->depth + 1); } return 0; } /* * Delete/get qdisc. 
*/ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) { struct net *net = sock_net(skb->sk); struct tcmsg *tcm = NLMSG_DATA(n); struct nlattr *tca[TCA_MAX + 1]; struct net_device *dev; u32 clid = tcm->tcm_parent; struct Qdisc *q = NULL; struct Qdisc *p = NULL; int err; dev = __dev_get_by_index(net, tcm->tcm_ifindex); if (!dev) return -ENODEV; err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); if (err < 0) return err; if (clid) { if (clid != TC_H_ROOT) { if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) { p = qdisc_lookup(dev, TC_H_MAJ(clid)); if (!p) return -ENOENT; q = qdisc_leaf(p, clid); } else if (dev_ingress_queue(dev)) { q = dev_ingress_queue(dev)->qdisc_sleeping; } } else { q = dev->qdisc; } if (!q) return -ENOENT; if (tcm->tcm_handle && q->handle != tcm->tcm_handle) return -EINVAL; } else { q = qdisc_lookup(dev, tcm->tcm_handle); if (!q) return -ENOENT; } if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) return -EINVAL; if (n->nlmsg_type == RTM_DELQDISC) { if (!clid) return -EINVAL; if (q->handle == 0) return -ENOENT; err = qdisc_graft(dev, p, skb, n, clid, NULL, q); if (err != 0) return err; } else { qdisc_notify(net, skb, n, clid, NULL, q); } return 0; } /* * Create/change qdisc. */ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) { struct net *net = sock_net(skb->sk); struct tcmsg *tcm; struct nlattr *tca[TCA_MAX + 1]; struct net_device *dev; u32 clid; struct Qdisc *q, *p; int err; replay: /* Reinit, just in case something touches this. 
*/ tcm = NLMSG_DATA(n); clid = tcm->tcm_parent; q = p = NULL; dev = __dev_get_by_index(net, tcm->tcm_ifindex); if (!dev) return -ENODEV; err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); if (err < 0) return err; if (clid) { if (clid != TC_H_ROOT) { if (clid != TC_H_INGRESS) { p = qdisc_lookup(dev, TC_H_MAJ(clid)); if (!p) return -ENOENT; q = qdisc_leaf(p, clid); } else if (dev_ingress_queue_create(dev)) { q = dev_ingress_queue(dev)->qdisc_sleeping; } } else { q = dev->qdisc; } /* It may be default qdisc, ignore it */ if (q && q->handle == 0) q = NULL; if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) { if (tcm->tcm_handle) { if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) return -EEXIST; if (TC_H_MIN(tcm->tcm_handle)) return -EINVAL; q = qdisc_lookup(dev, tcm->tcm_handle); if (!q) goto create_n_graft; if (n->nlmsg_flags & NLM_F_EXCL) return -EEXIST; if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) return -EINVAL; if (q == p || (p && check_loop(q, p, 0))) return -ELOOP; atomic_inc(&q->refcnt); goto graft; } else { if (!q) goto create_n_graft; /* This magic test requires explanation. * * We know, that some child q is already * attached to this parent and have choice: * either to change it or to create/graft new one. * * 1. We are allowed to create/graft only * if CREATE and REPLACE flags are set. * * 2. If EXCL is set, requestor wanted to say, * that qdisc tcm_handle is not expected * to exist, so that we choose create/graft too. * * 3. The last case is when no flags are set. * Alas, it is sort of hole in API, we * cannot decide what to do unambiguously. * For now we select create/graft, if * user gave KIND, which does not match existing. 
*/ if ((n->nlmsg_flags & NLM_F_CREATE) && (n->nlmsg_flags & NLM_F_REPLACE) && ((n->nlmsg_flags & NLM_F_EXCL) || (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)))) goto create_n_graft; } } } else { if (!tcm->tcm_handle) return -EINVAL; q = qdisc_lookup(dev, tcm->tcm_handle); } /* Change qdisc parameters */ if (q == NULL) return -ENOENT; if (n->nlmsg_flags & NLM_F_EXCL) return -EEXIST; if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) return -EINVAL; err = qdisc_change(q, tca); if (err == 0) qdisc_notify(net, skb, n, clid, NULL, q); return err; create_n_graft: if (!(n->nlmsg_flags & NLM_F_CREATE)) return -ENOENT; if (clid == TC_H_INGRESS) { if (dev_ingress_queue(dev)) q = qdisc_create(dev, dev_ingress_queue(dev), p, tcm->tcm_parent, tcm->tcm_parent, tca, &err); else err = -ENOENT; } else { struct netdev_queue *dev_queue; if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue) dev_queue = p->ops->cl_ops->select_queue(p, tcm); else if (p) dev_queue = p->dev_queue; else dev_queue = netdev_get_tx_queue(dev, 0); q = qdisc_create(dev, dev_queue, p, tcm->tcm_parent, tcm->tcm_handle, tca, &err); } if (q == NULL) { if (err == -EAGAIN) goto replay; return err; } graft: err = qdisc_graft(dev, p, skb, n, clid, q, NULL); if (err) { if (q) qdisc_destroy(q); return err; } return 0; } static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, u32 pid, u32 seq, u16 flags, int event) { struct tcmsg *tcm; struct nlmsghdr *nlh; unsigned char *b = skb_tail_pointer(skb); struct gnet_dump d; struct qdisc_size_table *stab; nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags); tcm = NLMSG_DATA(nlh); tcm->tcm_family = AF_UNSPEC; tcm->tcm__pad1 = 0; tcm->tcm__pad2 = 0; tcm->tcm_ifindex = qdisc_dev(q)->ifindex; tcm->tcm_parent = clid; tcm->tcm_handle = q->handle; tcm->tcm_info = atomic_read(&q->refcnt); NLA_PUT_STRING(skb, TCA_KIND, q->ops->id); if (q->ops->dump && q->ops->dump(q, skb) < 0) goto nla_put_failure; q->qstats.qlen = q->q.qlen; stab = 
rtnl_dereference(q->stab); if (stab && qdisc_dump_stab(skb, stab) < 0) goto nla_put_failure; if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS, qdisc_root_sleeping_lock(q), &d) < 0) goto nla_put_failure; if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0) goto nla_put_failure; if (gnet_stats_copy_basic(&d, &q->bstats) < 0 || gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 || gnet_stats_copy_queue(&d, &q->qstats) < 0) goto nla_put_failure; if (gnet_stats_finish_copy(&d) < 0) goto nla_put_failure; nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; nlmsg_failure: nla_put_failure: nlmsg_trim(skb, b); return -1; } static bool tc_qdisc_dump_ignore(struct Qdisc *q) { return (q->flags & TCQ_F_BUILTIN) ? true : false; } static int qdisc_notify(struct net *net, struct sk_buff *oskb, struct nlmsghdr *n, u32 clid, struct Qdisc *old, struct Qdisc *new) { struct sk_buff *skb; u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) return -ENOBUFS; if (old && !tc_qdisc_dump_ignore(old)) { if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0) goto err_out; } if (new && !tc_qdisc_dump_ignore(new)) { if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? 
NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0) goto err_out; } if (skb->len) return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO); err_out: kfree_skb(skb); return -EINVAL; } static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb, struct netlink_callback *cb, int *q_idx_p, int s_q_idx) { int ret = 0, q_idx = *q_idx_p; struct Qdisc *q; if (!root) return 0; q = root; if (q_idx < s_q_idx) { q_idx++; } else { if (!tc_qdisc_dump_ignore(q) && tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) goto done; q_idx++; } list_for_each_entry(q, &root->list, list) { if (q_idx < s_q_idx) { q_idx++; continue; } if (!tc_qdisc_dump_ignore(q) && tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) goto done; q_idx++; } out: *q_idx_p = q_idx; return ret; done: ret = -1; goto out; } static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); int idx, q_idx; int s_idx, s_q_idx; struct net_device *dev; s_idx = cb->args[0]; s_q_idx = q_idx = cb->args[1]; rcu_read_lock(); idx = 0; for_each_netdev_rcu(net, dev) { struct netdev_queue *dev_queue; if (idx < s_idx) goto cont; if (idx > s_idx) s_q_idx = 0; q_idx = 0; if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0) goto done; dev_queue = dev_ingress_queue(dev); if (dev_queue && tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0) goto done; cont: idx++; } done: rcu_read_unlock(); cb->args[0] = idx; cb->args[1] = q_idx; return skb->len; } /************************************************ * Traffic classes manipulation. 
* ************************************************/ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) { struct net *net = sock_net(skb->sk); struct tcmsg *tcm = NLMSG_DATA(n); struct nlattr *tca[TCA_MAX + 1]; struct net_device *dev; struct Qdisc *q = NULL; const struct Qdisc_class_ops *cops; unsigned long cl = 0; unsigned long new_cl; u32 pid = tcm->tcm_parent; u32 clid = tcm->tcm_handle; u32 qid = TC_H_MAJ(clid); int err; dev = __dev_get_by_index(net, tcm->tcm_ifindex); if (!dev) return -ENODEV; err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); if (err < 0) return err; /* parent == TC_H_UNSPEC - unspecified parent. parent == TC_H_ROOT - class is root, which has no parent. parent == X:0 - parent is root class. parent == X:Y - parent is a node in hierarchy. parent == 0:Y - parent is X:Y, where X:0 is qdisc. handle == 0:0 - generate handle from kernel pool. handle == 0:Y - class is X:Y, where X:0 is qdisc. handle == X:Y - clear. handle == X:0 - root class. */ /* Step 1. Determine qdisc handle X:0 */ if (pid != TC_H_ROOT) { u32 qid1 = TC_H_MAJ(pid); if (qid && qid1) { /* If both majors are known, they must be identical. */ if (qid != qid1) return -EINVAL; } else if (qid1) { qid = qid1; } else if (qid == 0) qid = dev->qdisc->handle; /* Now qid is genuine qdisc handle consistent * both with parent and child. * * TC_H_MAJ(pid) still may be unspecified, complete it now. */ if (pid) pid = TC_H_MAKE(qid, pid); } else { if (qid == 0) qid = dev->qdisc->handle; } /* OK. 
Locate qdisc */ q = qdisc_lookup(dev, qid); if (!q) return -ENOENT; /* An check that it supports classes */ cops = q->ops->cl_ops; if (cops == NULL) return -EINVAL; /* Now try to get class */ if (clid == 0) { if (pid == TC_H_ROOT) clid = qid; } else clid = TC_H_MAKE(qid, clid); if (clid) cl = cops->get(q, clid); if (cl == 0) { err = -ENOENT; if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags & NLM_F_CREATE)) goto out; } else { switch (n->nlmsg_type) { case RTM_NEWTCLASS: err = -EEXIST; if (n->nlmsg_flags & NLM_F_EXCL) goto out; break; case RTM_DELTCLASS: err = -EOPNOTSUPP; if (cops->delete) err = cops->delete(q, cl); if (err == 0) tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS); goto out; case RTM_GETTCLASS: err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS); goto out; default: err = -EINVAL; goto out; } } new_cl = cl; err = -EOPNOTSUPP; if (cops->change) err = cops->change(q, clid, pid, tca, &new_cl); if (err == 0) tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS); out: if (cl) cops->put(q, cl); return err; } static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q, unsigned long cl, u32 pid, u32 seq, u16 flags, int event) { struct tcmsg *tcm; struct nlmsghdr *nlh; unsigned char *b = skb_tail_pointer(skb); struct gnet_dump d; const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops; nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags); tcm = NLMSG_DATA(nlh); tcm->tcm_family = AF_UNSPEC; tcm->tcm__pad1 = 0; tcm->tcm__pad2 = 0; tcm->tcm_ifindex = qdisc_dev(q)->ifindex; tcm->tcm_parent = q->handle; tcm->tcm_handle = q->handle; tcm->tcm_info = 0; NLA_PUT_STRING(skb, TCA_KIND, q->ops->id); if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0) goto nla_put_failure; if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS, qdisc_root_sleeping_lock(q), &d) < 0) goto nla_put_failure; if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0) goto nla_put_failure; if (gnet_stats_finish_copy(&d) < 0) goto nla_put_failure; nlh->nlmsg_len 
= skb_tail_pointer(skb) - b; return skb->len; nlmsg_failure: nla_put_failure: nlmsg_trim(skb, b); return -1; } static int tclass_notify(struct net *net, struct sk_buff *oskb, struct nlmsghdr *n, struct Qdisc *q, unsigned long cl, int event) { struct sk_buff *skb; u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) return -ENOBUFS; if (tc_fill_tclass(skb, q, cl, pid, n->nlmsg_seq, 0, event) < 0) { kfree_skb(skb); return -EINVAL; } return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO); } struct qdisc_dump_args { struct qdisc_walker w; struct sk_buff *skb; struct netlink_callback *cb; }; static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg) { struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg; return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).pid, a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS); } static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb, struct tcmsg *tcm, struct netlink_callback *cb, int *t_p, int s_t) { struct qdisc_dump_args arg; if (tc_qdisc_dump_ignore(q) || *t_p < s_t || !q->ops->cl_ops || (tcm->tcm_parent && TC_H_MAJ(tcm->tcm_parent) != q->handle)) { (*t_p)++; return 0; } if (*t_p > s_t) memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0])); arg.w.fn = qdisc_class_dump; arg.skb = skb; arg.cb = cb; arg.w.stop = 0; arg.w.skip = cb->args[1]; arg.w.count = 0; q->ops->cl_ops->walk(q, &arg.w); cb->args[1] = arg.w.count; if (arg.w.stop) return -1; (*t_p)++; return 0; } static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb, struct tcmsg *tcm, struct netlink_callback *cb, int *t_p, int s_t) { struct Qdisc *q; if (!root) return 0; if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0) return -1; list_for_each_entry(q, &root->list, list) { if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0) return -1; } return 0; } static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback 
*cb) { struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh); struct net *net = sock_net(skb->sk); struct netdev_queue *dev_queue; struct net_device *dev; int t, s_t; if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) return 0; dev = dev_get_by_index(net, tcm->tcm_ifindex); if (!dev) return 0; s_t = cb->args[0]; t = 0; if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0) goto done; dev_queue = dev_ingress_queue(dev); if (dev_queue && tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0) goto done; done: cb->args[0] = t; dev_put(dev); return skb->len; } /* Main classifier routine: scans classifier chain attached * to this qdisc, (optionally) tests for protocol and asks * specific classifiers. */ int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res) { __be16 protocol = skb->protocol; int err; for (; tp; tp = tp->next) { if (tp->protocol != protocol && tp->protocol != htons(ETH_P_ALL)) continue; err = tp->classify(skb, tp, res); if (err >= 0) { #ifdef CONFIG_NET_CLS_ACT if (err != TC_ACT_RECLASSIFY && skb->tc_verd) skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0); #endif return err; } } return -1; } EXPORT_SYMBOL(tc_classify_compat); int tc_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res) { int err = 0; #ifdef CONFIG_NET_CLS_ACT __be16 protocol; struct tcf_proto *otp = tp; reclassify: protocol = skb->protocol; #endif err = tc_classify_compat(skb, tp, res); #ifdef CONFIG_NET_CLS_ACT if (err == TC_ACT_RECLASSIFY) { u32 verd = G_TC_VERD(skb->tc_verd); tp = otp; if (verd++ >= MAX_REC_LOOP) { if (net_ratelimit()) pr_notice("%s: packet reclassify loop" " rule prio %u protocol %02x\n", tp->q->ops->id, tp->prio & 0xffff, ntohs(tp->protocol)); return TC_ACT_SHOT; } skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd); goto reclassify; } #endif return err; } EXPORT_SYMBOL(tc_classify); void tcf_destroy(struct tcf_proto *tp) { tp->ops->destroy(tp); module_put(tp->ops->owner); kfree(tp); } 
void tcf_destroy_chain(struct tcf_proto **fl) { struct tcf_proto *tp; while ((tp = *fl) != NULL) { *fl = tp->next; tcf_destroy(tp); } } EXPORT_SYMBOL(tcf_destroy_chain); #ifdef CONFIG_PROC_FS static int psched_show(struct seq_file *seq, void *v) { struct timespec ts; hrtimer_get_res(CLOCK_MONOTONIC, &ts); seq_printf(seq, "%08x %08x %08x %08x\n", (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1), 1000000, (u32)NSEC_PER_SEC/(u32)ktime_to_ns(timespec_to_ktime(ts))); return 0; } static int psched_open(struct inode *inode, struct file *file) { return single_open(file, psched_show, NULL); } static const struct file_operations psched_fops = { .owner = THIS_MODULE, .open = psched_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __net_init psched_net_init(struct net *net) { struct proc_dir_entry *e; e = proc_net_fops_create(net, "psched", 0, &psched_fops); if (e == NULL) return -ENOMEM; return 0; } static void __net_exit psched_net_exit(struct net *net) { proc_net_remove(net, "psched"); } #else static int __net_init psched_net_init(struct net *net) { return 0; } static void __net_exit psched_net_exit(struct net *net) { } #endif static struct pernet_operations psched_net_ops = { .init = psched_net_init, .exit = psched_net_exit, }; static int __init pktsched_init(void) { int err; err = register_pernet_subsys(&psched_net_ops); if (err) { pr_err("pktsched_init: " "cannot initialize per netns operations\n"); return err; } register_qdisc(&pfifo_qdisc_ops); register_qdisc(&bfifo_qdisc_ops); register_qdisc(&pfifo_head_drop_qdisc_ops); register_qdisc(&mq_qdisc_ops); rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL); rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL); rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc); rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL); rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL); rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass); 
return 0; } subsys_initcall(pktsched_init);
gpl-2.0
NinjahMeh/android_kernel_huawei_angler
drivers/staging/android/sync.c
151
24065
/* * drivers/base/sync.c * * Copyright (C) 2012 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/debugfs.h> #include <linux/export.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/anon_inodes.h> #include <linux/sync.h> #define CREATE_TRACE_POINTS #include "trace/sync.h" static void sync_fence_signal_pt(struct sync_pt *pt); static int _sync_pt_has_signaled(struct sync_pt *pt); static void sync_fence_free(struct kref *kref); static LIST_HEAD(sync_timeline_list_head); static DEFINE_SPINLOCK(sync_timeline_list_lock); static LIST_HEAD(sync_fence_list_head); static DEFINE_SPINLOCK(sync_fence_list_lock); struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, int size, const char *name) { struct sync_timeline *obj; unsigned long flags; if (size < sizeof(struct sync_timeline)) return NULL; obj = kzalloc(size, GFP_KERNEL); if (obj == NULL) return NULL; kref_init(&obj->kref); obj->ops = ops; strlcpy(obj->name, name, sizeof(obj->name)); INIT_LIST_HEAD(&obj->child_list_head); spin_lock_init(&obj->child_list_lock); INIT_LIST_HEAD(&obj->active_list_head); spin_lock_init(&obj->active_list_lock); spin_lock_irqsave(&sync_timeline_list_lock, flags); list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head); spin_unlock_irqrestore(&sync_timeline_list_lock, flags); return obj; } EXPORT_SYMBOL(sync_timeline_create); static void sync_timeline_free(struct kref 
*kref) { struct sync_timeline *obj = container_of(kref, struct sync_timeline, kref); unsigned long flags; spin_lock_irqsave(&sync_timeline_list_lock, flags); list_del(&obj->sync_timeline_list); spin_unlock_irqrestore(&sync_timeline_list_lock, flags); if (obj->ops->release_obj) obj->ops->release_obj(obj); kfree(obj); } void sync_timeline_destroy(struct sync_timeline *obj) { obj->destroyed = true; smp_wmb(); /* * signal any children that their parent is going away. */ sync_timeline_signal(obj); kref_put(&obj->kref, sync_timeline_free); } EXPORT_SYMBOL(sync_timeline_destroy); static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt) { unsigned long flags; pt->parent = obj; spin_lock_irqsave(&obj->child_list_lock, flags); list_add_tail(&pt->child_list, &obj->child_list_head); spin_unlock_irqrestore(&obj->child_list_lock, flags); } static void sync_timeline_remove_pt(struct sync_pt *pt) { struct sync_timeline *obj = pt->parent; unsigned long flags; spin_lock_irqsave(&obj->active_list_lock, flags); if (!list_empty(&pt->active_list)) list_del_init(&pt->active_list); spin_unlock_irqrestore(&obj->active_list_lock, flags); spin_lock_irqsave(&obj->child_list_lock, flags); if (!list_empty(&pt->child_list)) { list_del_init(&pt->child_list); } spin_unlock_irqrestore(&obj->child_list_lock, flags); } void sync_timeline_signal(struct sync_timeline *obj) { unsigned long flags; LIST_HEAD(signaled_pts); struct list_head *pos, *n; trace_sync_timeline(obj); spin_lock_irqsave(&obj->active_list_lock, flags); list_for_each_safe(pos, n, &obj->active_list_head) { struct sync_pt *pt = container_of(pos, struct sync_pt, active_list); if (_sync_pt_has_signaled(pt)) { list_del_init(pos); list_add(&pt->signaled_list, &signaled_pts); kref_get(&pt->fence->kref); } } spin_unlock_irqrestore(&obj->active_list_lock, flags); list_for_each_safe(pos, n, &signaled_pts) { struct sync_pt *pt = container_of(pos, struct sync_pt, signaled_list); list_del_init(pos); sync_fence_signal_pt(pt); 
kref_put(&pt->fence->kref, sync_fence_free); } } EXPORT_SYMBOL(sync_timeline_signal); struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size) { struct sync_pt *pt; if (size < sizeof(struct sync_pt)) return NULL; pt = kzalloc(size, GFP_KERNEL); if (pt == NULL) return NULL; INIT_LIST_HEAD(&pt->active_list); kref_get(&parent->kref); sync_timeline_add_pt(parent, pt); return pt; } EXPORT_SYMBOL(sync_pt_create); void sync_pt_free(struct sync_pt *pt) { if (pt->parent->ops->free_pt) pt->parent->ops->free_pt(pt); sync_timeline_remove_pt(pt); kref_put(&pt->parent->kref, sync_timeline_free); kfree(pt); } EXPORT_SYMBOL(sync_pt_free); /* call with pt->parent->active_list_lock held */ static int _sync_pt_has_signaled(struct sync_pt *pt) { int old_status = pt->status; if (!pt->status) pt->status = pt->parent->ops->has_signaled(pt); if (!pt->status && pt->parent->destroyed) pt->status = -ENOENT; if (pt->status != old_status) pt->timestamp = ktime_get(); return pt->status; } static struct sync_pt *sync_pt_dup(struct sync_pt *pt) { return pt->parent->ops->dup(pt); } /* Adds a sync pt to the active queue. 
Called when added to a fence */ static void sync_pt_activate(struct sync_pt *pt) { struct sync_timeline *obj = pt->parent; unsigned long flags; int err; spin_lock_irqsave(&obj->active_list_lock, flags); err = _sync_pt_has_signaled(pt); if (err != 0) goto out; list_add_tail(&pt->active_list, &obj->active_list_head); out: spin_unlock_irqrestore(&obj->active_list_lock, flags); } static int sync_fence_release(struct inode *inode, struct file *file); static unsigned int sync_fence_poll(struct file *file, poll_table *wait); static long sync_fence_ioctl(struct file *file, unsigned int cmd, unsigned long arg); static const struct file_operations sync_fence_fops = { .release = sync_fence_release, .poll = sync_fence_poll, .unlocked_ioctl = sync_fence_ioctl, .compat_ioctl = sync_fence_ioctl, }; static struct sync_fence *sync_fence_alloc(const char *name) { struct sync_fence *fence; unsigned long flags; fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL); if (fence == NULL) return NULL; fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops, fence, 0); if (IS_ERR(fence->file)) goto err; kref_init(&fence->kref); strlcpy(fence->name, name, sizeof(fence->name)); INIT_LIST_HEAD(&fence->pt_list_head); INIT_LIST_HEAD(&fence->waiter_list_head); spin_lock_init(&fence->waiter_list_lock); init_waitqueue_head(&fence->wq); spin_lock_irqsave(&sync_fence_list_lock, flags); list_add_tail(&fence->sync_fence_list, &sync_fence_list_head); spin_unlock_irqrestore(&sync_fence_list_lock, flags); return fence; err: kfree(fence); return NULL; } /* TODO: implement a create which takes more that one sync_pt */ struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt) { struct sync_fence *fence; if (pt->fence) return NULL; fence = sync_fence_alloc(name); if (fence == NULL) return NULL; pt->fence = fence; list_add(&pt->pt_list, &fence->pt_list_head); sync_pt_activate(pt); /* * signal the fence in case pt was activated before * sync_pt_activate(pt) was called */ 
sync_fence_signal_pt(pt); return fence; } EXPORT_SYMBOL(sync_fence_create); static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src) { struct list_head *pos; list_for_each(pos, &src->pt_list_head) { struct sync_pt *orig_pt = container_of(pos, struct sync_pt, pt_list); struct sync_pt *new_pt = sync_pt_dup(orig_pt); if (new_pt == NULL) return -ENOMEM; new_pt->fence = dst; list_add(&new_pt->pt_list, &dst->pt_list_head); } return 0; } static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src) { struct list_head *src_pos, *dst_pos, *n; list_for_each(src_pos, &src->pt_list_head) { struct sync_pt *src_pt = container_of(src_pos, struct sync_pt, pt_list); bool collapsed = false; list_for_each_safe(dst_pos, n, &dst->pt_list_head) { struct sync_pt *dst_pt = container_of(dst_pos, struct sync_pt, pt_list); /* collapse two sync_pts on the same timeline * to a single sync_pt that will signal at * the later of the two */ if (dst_pt->parent == src_pt->parent) { if (dst_pt->parent->ops->compare(dst_pt, src_pt) == -1) { struct sync_pt *new_pt = sync_pt_dup(src_pt); if (new_pt == NULL) return -ENOMEM; new_pt->fence = dst; list_replace(&dst_pt->pt_list, &new_pt->pt_list); sync_pt_free(dst_pt); } collapsed = true; break; } } if (!collapsed) { struct sync_pt *new_pt = sync_pt_dup(src_pt); if (new_pt == NULL) return -ENOMEM; new_pt->fence = dst; list_add(&new_pt->pt_list, &dst->pt_list_head); } } return 0; } static void sync_fence_detach_pts(struct sync_fence *fence) { struct list_head *pos, *n; list_for_each_safe(pos, n, &fence->pt_list_head) { struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); sync_timeline_remove_pt(pt); } } static void sync_fence_free_pts(struct sync_fence *fence) { struct list_head *pos, *n; list_for_each_safe(pos, n, &fence->pt_list_head) { struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); sync_pt_free(pt); } } struct sync_fence *sync_fence_fdget(int fd) { struct file *file = fget(fd); if (file 
== NULL) return NULL; if (file->f_op != &sync_fence_fops) goto err; return file->private_data; err: fput(file); return NULL; } EXPORT_SYMBOL(sync_fence_fdget); void sync_fence_put(struct sync_fence *fence) { fput(fence->file); } EXPORT_SYMBOL(sync_fence_put); void sync_fence_install(struct sync_fence *fence, int fd) { fd_install(fd, fence->file); } EXPORT_SYMBOL(sync_fence_install); static int sync_fence_get_status(struct sync_fence *fence) { struct list_head *pos; int status = 1; list_for_each(pos, &fence->pt_list_head) { struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); int pt_status = pt->status; if (pt_status < 0) { status = pt_status; break; } else if (status == 1) { status = pt_status; } } return status; } struct sync_fence *sync_fence_merge(const char *name, struct sync_fence *a, struct sync_fence *b) { struct sync_fence *fence; struct list_head *pos; int err; fence = sync_fence_alloc(name); if (fence == NULL) return NULL; err = sync_fence_copy_pts(fence, a); if (err < 0) goto err; err = sync_fence_merge_pts(fence, b); if (err < 0) goto err; list_for_each(pos, &fence->pt_list_head) { struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); sync_pt_activate(pt); } /* * signal the fence in case one of it's pts were activated before * they were activated */ sync_fence_signal_pt(list_first_entry(&fence->pt_list_head, struct sync_pt, pt_list)); return fence; err: sync_fence_free_pts(fence); kfree(fence); return NULL; } EXPORT_SYMBOL(sync_fence_merge); static void sync_fence_signal_pt(struct sync_pt *pt) { LIST_HEAD(signaled_waiters); struct sync_fence *fence = pt->fence; struct list_head *pos; struct list_head *n; unsigned long flags; int status; status = sync_fence_get_status(fence); spin_lock_irqsave(&fence->waiter_list_lock, flags); /* * this should protect against two threads racing on the signaled * false -> true transition */ if (status && !fence->status) { list_for_each_safe(pos, n, &fence->waiter_list_head) list_move(pos, 
&signaled_waiters); fence->status = status; } else { status = 0; } spin_unlock_irqrestore(&fence->waiter_list_lock, flags); if (status) { list_for_each_safe(pos, n, &signaled_waiters) { struct sync_fence_waiter *waiter = container_of(pos, struct sync_fence_waiter, waiter_list); list_del(pos); waiter->callback(fence, waiter); } wake_up(&fence->wq); } } int sync_fence_wait_async(struct sync_fence *fence, struct sync_fence_waiter *waiter) { unsigned long flags; int err = 0; spin_lock_irqsave(&fence->waiter_list_lock, flags); if (fence->status) { err = fence->status; goto out; } list_add_tail(&waiter->waiter_list, &fence->waiter_list_head); out: spin_unlock_irqrestore(&fence->waiter_list_lock, flags); return err; } EXPORT_SYMBOL(sync_fence_wait_async); int sync_fence_cancel_async(struct sync_fence *fence, struct sync_fence_waiter *waiter) { struct list_head *pos; struct list_head *n; unsigned long flags; int ret = -ENOENT; spin_lock_irqsave(&fence->waiter_list_lock, flags); /* * Make sure waiter is still in waiter_list because it is possible for * the waiter to be removed from the list while the callback is still * pending. 
*/ list_for_each_safe(pos, n, &fence->waiter_list_head) { struct sync_fence_waiter *list_waiter = container_of(pos, struct sync_fence_waiter, waiter_list); if (list_waiter == waiter) { list_del(pos); ret = 0; break; } } spin_unlock_irqrestore(&fence->waiter_list_lock, flags); return ret; } EXPORT_SYMBOL(sync_fence_cancel_async); static bool sync_fence_check(struct sync_fence *fence) { /* * Make sure that reads to fence->status are ordered with the * wait queue event triggering */ smp_rmb(); return fence->status != 0; } static const char *sync_status_str(int status) { if (status > 0) return "signaled"; else if (status == 0) return "active"; else return "error"; } static void sync_pt_log(struct sync_pt *pt, bool pt_callback) { int status = pt->status; pr_cont(" %s_pt %s", pt->parent->name, sync_status_str(status)); if (pt->status) { struct timeval tv = ktime_to_timeval(pt->timestamp); pr_cont("@%ld.%06ld", tv.tv_sec, tv.tv_usec); } if (pt->parent->ops->timeline_value_str && pt->parent->ops->pt_value_str) { char value[64]; pt->parent->ops->pt_value_str(pt, value, sizeof(value)); pr_cont(": %s", value); pt->parent->ops->timeline_value_str(pt->parent, value, sizeof(value)); pr_cont(" / %s", value); } pr_cont("\n"); /* Show additional details for active fences */ if (pt->status == 0 && pt->parent->ops->pt_log && pt_callback) pt->parent->ops->pt_log(pt); } void _sync_fence_log(struct sync_fence *fence, bool pt_callback) { struct list_head *pos; unsigned long flags; pr_info("[%p] %s: %s\n", fence, fence->name, sync_status_str(fence->status)); pr_info("waiters:\n"); spin_lock_irqsave(&fence->waiter_list_lock, flags); list_for_each(pos, &fence->waiter_list_head) { struct sync_fence_waiter *waiter = container_of(pos, struct sync_fence_waiter, waiter_list); pr_info(" %pF\n", waiter->callback); } spin_unlock_irqrestore(&fence->waiter_list_lock, flags); pr_info("syncpoints:\n"); list_for_each(pos, &fence->pt_list_head) { struct sync_pt *pt = container_of(pos, struct sync_pt, 
pt_list); sync_pt_log(pt, pt_callback); } } void sync_fence_log(struct sync_fence *fence) { _sync_fence_log(fence, false); } EXPORT_SYMBOL(sync_fence_log); int sync_fence_wait(struct sync_fence *fence, long timeout) { int err = 0; struct sync_pt *pt; trace_sync_wait(fence, 1); list_for_each_entry(pt, &fence->pt_list_head, pt_list) trace_sync_pt(pt); if (timeout > 0) { timeout = msecs_to_jiffies(timeout); err = wait_event_interruptible_timeout(fence->wq, sync_fence_check(fence), timeout); } else if (timeout < 0) { err = wait_event_interruptible(fence->wq, sync_fence_check(fence)); } trace_sync_wait(fence, 0); if (err < 0) return err; if (fence->status < 0) { pr_info("fence error %d on [%p]\n", fence->status, fence); _sync_fence_log(fence, true); return fence->status; } if (fence->status == 0) { if (timeout > 0) { pr_info("fence timeout on [%p] after %dms\n", fence, jiffies_to_msecs(timeout)); _sync_fence_log(fence, true); } return -ETIME; } return 0; } EXPORT_SYMBOL(sync_fence_wait); static void sync_fence_free(struct kref *kref) { struct sync_fence *fence = container_of(kref, struct sync_fence, kref); sync_fence_free_pts(fence); kfree(fence); } static int sync_fence_release(struct inode *inode, struct file *file) { struct sync_fence *fence = file->private_data; unsigned long flags; /* * We need to remove all ways to access this fence before droping * our ref. * * start with its membership in the global fence list */ spin_lock_irqsave(&sync_fence_list_lock, flags); list_del(&fence->sync_fence_list); spin_unlock_irqrestore(&sync_fence_list_lock, flags); /* * remove its pts from their parents so that sync_timeline_signal() * can't reference the fence. 
*/ sync_fence_detach_pts(fence); kref_put(&fence->kref, sync_fence_free); return 0; } static unsigned int sync_fence_poll(struct file *file, poll_table *wait) { struct sync_fence *fence = file->private_data; poll_wait(file, &fence->wq, wait); /* * Make sure that reads to fence->status are ordered with the * wait queue event triggering */ smp_rmb(); if (fence->status == 1) return POLLIN; else if (fence->status < 0) return POLLERR; else return 0; } static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg) { __s32 value; if (copy_from_user(&value, (void __user *)arg, sizeof(value))) return -EFAULT; return sync_fence_wait(fence, value); } static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg) { int fd = get_unused_fd(); int err; struct sync_fence *fence2, *fence3; struct sync_merge_data data; if (fd < 0) return fd; if (copy_from_user(&data, (void __user *)arg, sizeof(data))) { err = -EFAULT; goto err_put_fd; } fence2 = sync_fence_fdget(data.fd2); if (fence2 == NULL) { err = -ENOENT; goto err_put_fd; } data.name[sizeof(data.name) - 1] = '\0'; fence3 = sync_fence_merge(data.name, fence, fence2); if (fence3 == NULL) { err = -ENOMEM; goto err_put_fence2; } data.fence = fd; if (copy_to_user((void __user *)arg, &data, sizeof(data))) { err = -EFAULT; goto err_put_fence3; } sync_fence_install(fence3, fd); sync_fence_put(fence2); return 0; err_put_fence3: sync_fence_put(fence3); err_put_fence2: sync_fence_put(fence2); err_put_fd: put_unused_fd(fd); return err; } static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size) { struct sync_pt_info *info = data; int ret; if (size < sizeof(struct sync_pt_info)) return -ENOMEM; info->len = sizeof(struct sync_pt_info); if (pt->parent->ops->fill_driver_data) { ret = pt->parent->ops->fill_driver_data(pt, info->driver_data, size - sizeof(*info)); if (ret < 0) return ret; info->len += ret; } strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name)); strlcpy(info->driver_name, 
pt->parent->ops->driver_name, sizeof(info->driver_name)); info->status = pt->status; info->timestamp_ns = ktime_to_ns(pt->timestamp); return info->len; } static long sync_fence_ioctl_fence_info(struct sync_fence *fence, unsigned long arg) { struct sync_fence_info_data *data; struct list_head *pos; __u32 size; __u32 len = 0; int ret; if (copy_from_user(&size, (void __user *)arg, sizeof(size))) return -EFAULT; if (size < sizeof(struct sync_fence_info_data)) return -EINVAL; if (size > 4096) size = 4096; data = kzalloc(size, GFP_KERNEL); if (data == NULL) return -ENOMEM; strlcpy(data->name, fence->name, sizeof(data->name)); data->status = fence->status; len = sizeof(struct sync_fence_info_data); list_for_each(pos, &fence->pt_list_head) { struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len); if (ret < 0) goto out; len += ret; } data->len = len; if (copy_to_user((void __user *)arg, data, len)) ret = -EFAULT; else ret = 0; out: kfree(data); return ret; } static long sync_fence_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct sync_fence *fence = file->private_data; switch (cmd) { case SYNC_IOC_WAIT: return sync_fence_ioctl_wait(fence, arg); case SYNC_IOC_MERGE: return sync_fence_ioctl_merge(fence, arg); case SYNC_IOC_FENCE_INFO: return sync_fence_ioctl_fence_info(fence, arg); default: return -ENOTTY; } } #ifdef CONFIG_DEBUG_FS static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence) { int status = pt->status; seq_printf(s, " %s%spt %s", fence ? pt->parent->name : "", fence ? 
"_" : "", sync_status_str(status)); if (pt->status) { struct timeval tv = ktime_to_timeval(pt->timestamp); seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec); } if (pt->parent->ops->timeline_value_str && pt->parent->ops->pt_value_str) { char value[64]; pt->parent->ops->pt_value_str(pt, value, sizeof(value)); seq_printf(s, ": %s", value); if (fence) { pt->parent->ops->timeline_value_str(pt->parent, value, sizeof(value)); seq_printf(s, " / %s", value); } } else if (pt->parent->ops->print_pt) { seq_printf(s, ": "); pt->parent->ops->print_pt(s, pt); } seq_printf(s, "\n"); } static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj) { struct list_head *pos; unsigned long flags; seq_printf(s, "%s %s", obj->name, obj->ops->driver_name); if (obj->ops->timeline_value_str) { char value[64]; obj->ops->timeline_value_str(obj, value, sizeof(value)); seq_printf(s, ": %s", value); } else if (obj->ops->print_obj) { seq_printf(s, ": "); obj->ops->print_obj(s, obj); } seq_printf(s, "\n"); spin_lock_irqsave(&obj->child_list_lock, flags); list_for_each(pos, &obj->child_list_head) { struct sync_pt *pt = container_of(pos, struct sync_pt, child_list); sync_print_pt(s, pt, false); } spin_unlock_irqrestore(&obj->child_list_lock, flags); } static void sync_print_fence(struct seq_file *s, struct sync_fence *fence) { struct list_head *pos; unsigned long flags; seq_printf(s, "[%p] %s: %s\n", fence, fence->name, sync_status_str(fence->status)); list_for_each(pos, &fence->pt_list_head) { struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); sync_print_pt(s, pt, true); } spin_lock_irqsave(&fence->waiter_list_lock, flags); list_for_each(pos, &fence->waiter_list_head) { struct sync_fence_waiter *waiter = container_of(pos, struct sync_fence_waiter, waiter_list); seq_printf(s, "waiter %pF\n", waiter->callback); } spin_unlock_irqrestore(&fence->waiter_list_lock, flags); } static int sync_debugfs_show(struct seq_file *s, void *unused) { unsigned long flags; struct list_head 
*pos; seq_printf(s, "objs:\n--------------\n"); spin_lock_irqsave(&sync_timeline_list_lock, flags); list_for_each(pos, &sync_timeline_list_head) { struct sync_timeline *obj = container_of(pos, struct sync_timeline, sync_timeline_list); sync_print_obj(s, obj); seq_printf(s, "\n"); } spin_unlock_irqrestore(&sync_timeline_list_lock, flags); seq_printf(s, "fences:\n--------------\n"); spin_lock_irqsave(&sync_fence_list_lock, flags); list_for_each(pos, &sync_fence_list_head) { struct sync_fence *fence = container_of(pos, struct sync_fence, sync_fence_list); sync_print_fence(s, fence); seq_printf(s, "\n"); } spin_unlock_irqrestore(&sync_fence_list_lock, flags); return 0; } static int sync_debugfs_open(struct inode *inode, struct file *file) { return single_open(file, sync_debugfs_show, inode->i_private); } static const struct file_operations sync_debugfs_fops = { .open = sync_debugfs_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static __init int sync_debugfs_init(void) { debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops); return 0; } late_initcall(sync_debugfs_init); #endif
gpl-2.0
teemodk/android_kernel_htc_endeavoru
arch/arm/plat-samsung/dev-i2c0.c
407
1537
/* linux/arch/arm/plat-s3c/dev-i2c0.c * * Copyright 2008-2009 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * http://armlinux.simtec.co.uk/ * * S3C series device definition for i2c device 0 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/gfp.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/platform_device.h> #include <mach/irqs.h> #include <mach/map.h> #include <plat/regs-iic.h> #include <plat/iic.h> #include <plat/devs.h> #include <plat/cpu.h> static struct resource s3c_i2c_resource[] = { [0] = { .start = S3C_PA_IIC, .end = S3C_PA_IIC + SZ_4K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_IIC, .end = IRQ_IIC, .flags = IORESOURCE_IRQ, }, }; struct platform_device s3c_device_i2c0 = { .name = "s3c2410-i2c", #ifdef CONFIG_S3C_DEV_I2C1 .id = 0, #else .id = -1, #endif .num_resources = ARRAY_SIZE(s3c_i2c_resource), .resource = s3c_i2c_resource, }; struct s3c2410_platform_i2c default_i2c_data __initdata = { .flags = 0, .slave_addr = 0x10, .frequency = 100*1000, .sda_delay = 100, }; void __init s3c_i2c0_set_platdata(struct s3c2410_platform_i2c *pd) { struct s3c2410_platform_i2c *npd; if (!pd) pd = &default_i2c_data; npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c), &s3c_device_i2c0); if (!npd->cfg_gpio) npd->cfg_gpio = s3c_i2c0_cfg_gpio; }
gpl-2.0
GustavoRD78/fw_z3
fs/fhandle.c
1175
6696
#include <linux/syscalls.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/exportfs.h> #include <linux/fs_struct.h> #include <linux/fsnotify.h> #include <linux/personality.h> #include <asm/uaccess.h> #include "internal.h" #include "mount.h" static long do_sys_name_to_handle(struct path *path, struct file_handle __user *ufh, int __user *mnt_id) { long retval; struct file_handle f_handle; int handle_dwords, handle_bytes; struct file_handle *handle = NULL; /* * We need t make sure wether the file system * support decoding of the file handle */ if (!path->dentry->d_sb->s_export_op || !path->dentry->d_sb->s_export_op->fh_to_dentry) return -EOPNOTSUPP; if (copy_from_user(&f_handle, ufh, sizeof(struct file_handle))) return -EFAULT; if (f_handle.handle_bytes > MAX_HANDLE_SZ) return -EINVAL; handle = kmalloc(sizeof(struct file_handle) + f_handle.handle_bytes, GFP_KERNEL); if (!handle) return -ENOMEM; /* convert handle size to multiple of sizeof(u32) */ handle_dwords = f_handle.handle_bytes >> 2; /* we ask for a non connected handle */ retval = exportfs_encode_fh(path->dentry, (struct fid *)handle->f_handle, &handle_dwords, 0); handle->handle_type = retval; /* convert handle size to bytes */ handle_bytes = handle_dwords * sizeof(u32); handle->handle_bytes = handle_bytes; if ((handle->handle_bytes > f_handle.handle_bytes) || (retval == 255) || (retval == -ENOSPC)) { /* As per old exportfs_encode_fh documentation * we could return ENOSPC to indicate overflow * But file system returned 255 always. 
So handle * both the values */ /* * set the handle size to zero so we copy only * non variable part of the file_handle */ handle_bytes = 0; retval = -EOVERFLOW; } else retval = 0; /* copy the mount id */ if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id, sizeof(*mnt_id)) || copy_to_user(ufh, handle, sizeof(struct file_handle) + handle_bytes)) retval = -EFAULT; kfree(handle); return retval; } /** * sys_name_to_handle_at: convert name to handle * @dfd: directory relative to which name is interpreted if not absolute * @name: name that should be converted to handle. * @handle: resulting file handle * @mnt_id: mount id of the file system containing the file * @flag: flag value to indicate whether to follow symlink or not * * @handle->handle_size indicate the space available to store the * variable part of the file handle in bytes. If there is not * enough space, the field is updated to return the minimum * value required. */ SYSCALL_DEFINE5(name_to_handle_at, int, dfd, const char __user *, name, struct file_handle __user *, handle, int __user *, mnt_id, int, flag) { struct path path; int lookup_flags; int err; if ((flag & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0) return -EINVAL; lookup_flags = (flag & AT_SYMLINK_FOLLOW) ? 
LOOKUP_FOLLOW : 0; if (flag & AT_EMPTY_PATH) lookup_flags |= LOOKUP_EMPTY; err = user_path_at(dfd, name, lookup_flags, &path); if (!err) { err = do_sys_name_to_handle(&path, handle, mnt_id); path_put(&path); } return err; } static struct vfsmount *get_vfsmount_from_fd(int fd) { struct path path; if (fd == AT_FDCWD) { struct fs_struct *fs = current->fs; spin_lock(&fs->lock); path = fs->pwd; mntget(path.mnt); spin_unlock(&fs->lock); } else { int fput_needed; struct file *file = fget_light(fd, &fput_needed); if (!file) return ERR_PTR(-EBADF); path = file->f_path; mntget(path.mnt); fput_light(file, fput_needed); } return path.mnt; } static int vfs_dentry_acceptable(void *context, struct dentry *dentry) { return 1; } static int do_handle_to_path(int mountdirfd, struct file_handle *handle, struct path *path) { int retval = 0; int handle_dwords; path->mnt = get_vfsmount_from_fd(mountdirfd); if (IS_ERR(path->mnt)) { retval = PTR_ERR(path->mnt); goto out_err; } /* change the handle size to multiple of sizeof(u32) */ handle_dwords = handle->handle_bytes >> 2; path->dentry = exportfs_decode_fh(path->mnt, (struct fid *)handle->f_handle, handle_dwords, handle->handle_type, vfs_dentry_acceptable, NULL); if (IS_ERR(path->dentry)) { retval = PTR_ERR(path->dentry); goto out_mnt; } return 0; out_mnt: mntput(path->mnt); out_err: return retval; } static int handle_to_path(int mountdirfd, struct file_handle __user *ufh, struct path *path) { int retval = 0; struct file_handle f_handle; struct file_handle *handle = NULL; /* * With handle we don't look at the execute bit on the * the directory. Ideally we would like CAP_DAC_SEARCH. 
* But we don't have that */ if (!capable(CAP_DAC_READ_SEARCH)) { retval = -EPERM; goto out_err; } if (copy_from_user(&f_handle, ufh, sizeof(struct file_handle))) { retval = -EFAULT; goto out_err; } if ((f_handle.handle_bytes > MAX_HANDLE_SZ) || (f_handle.handle_bytes == 0)) { retval = -EINVAL; goto out_err; } handle = kmalloc(sizeof(struct file_handle) + f_handle.handle_bytes, GFP_KERNEL); if (!handle) { retval = -ENOMEM; goto out_err; } /* copy the full handle */ *handle = f_handle; if (copy_from_user(&handle->f_handle, &ufh->f_handle, f_handle.handle_bytes)) { retval = -EFAULT; goto out_handle; } retval = do_handle_to_path(mountdirfd, handle, path); out_handle: kfree(handle); out_err: return retval; } long do_handle_open(int mountdirfd, struct file_handle __user *ufh, int open_flag) { long retval = 0; struct path path; struct file *file; int fd; retval = handle_to_path(mountdirfd, ufh, &path); if (retval) return retval; fd = get_unused_fd_flags(open_flag); if (fd < 0) { path_put(&path); return fd; } file = file_open_root(path.dentry, path.mnt, "", open_flag); if (IS_ERR(file)) { put_unused_fd(fd); retval = PTR_ERR(file); } else { retval = fd; fsnotify_open(file); fd_install(fd, file); } path_put(&path); return retval; } /** * sys_open_by_handle_at: Open the file handle * @mountdirfd: directory file descriptor * @handle: file handle to be opened * @flag: open flags. * * @mountdirfd indicate the directory file descriptor * of the mount point. file handle is decoded relative * to the vfsmount pointed by the @mountdirfd. @flags * value is same as the open(2) flags. */ SYSCALL_DEFINE3(open_by_handle_at, int, mountdirfd, struct file_handle __user *, handle, int, flags) { long ret; if (force_o_largefile()) flags |= O_LARGEFILE; ret = do_handle_open(mountdirfd, handle, flags); return ret; }
gpl-2.0
0x00evil/linux
drivers/gpu/host1x/job.c
1431
13411
/* * Tegra host1x Job * * Copyright (c) 2010-2013, NVIDIA Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/host1x.h> #include <linux/kref.h> #include <linux/module.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <trace/events/host1x.h> #include "channel.h" #include "dev.h" #include "job.h" #include "syncpt.h" struct host1x_job *host1x_job_alloc(struct host1x_channel *ch, u32 num_cmdbufs, u32 num_relocs, u32 num_waitchks) { struct host1x_job *job = NULL; unsigned int num_unpins = num_cmdbufs + num_relocs; u64 total; void *mem; /* Check that we're not going to overflow */ total = sizeof(struct host1x_job) + (u64)num_relocs * sizeof(struct host1x_reloc) + (u64)num_unpins * sizeof(struct host1x_job_unpin_data) + (u64)num_waitchks * sizeof(struct host1x_waitchk) + (u64)num_cmdbufs * sizeof(struct host1x_job_gather) + (u64)num_unpins * sizeof(dma_addr_t) + (u64)num_unpins * sizeof(u32 *); if (total > ULONG_MAX) return NULL; mem = job = kzalloc(total, GFP_KERNEL); if (!job) return NULL; kref_init(&job->ref); job->channel = ch; /* Redistribute memory to the structs */ mem += sizeof(struct host1x_job); job->relocarray = num_relocs ? mem : NULL; mem += num_relocs * sizeof(struct host1x_reloc); job->unpins = num_unpins ? mem : NULL; mem += num_unpins * sizeof(struct host1x_job_unpin_data); job->waitchk = num_waitchks ? 
mem : NULL; mem += num_waitchks * sizeof(struct host1x_waitchk); job->gathers = num_cmdbufs ? mem : NULL; mem += num_cmdbufs * sizeof(struct host1x_job_gather); job->addr_phys = num_unpins ? mem : NULL; job->reloc_addr_phys = job->addr_phys; job->gather_addr_phys = &job->addr_phys[num_relocs]; return job; } EXPORT_SYMBOL(host1x_job_alloc); struct host1x_job *host1x_job_get(struct host1x_job *job) { kref_get(&job->ref); return job; } EXPORT_SYMBOL(host1x_job_get); static void job_free(struct kref *ref) { struct host1x_job *job = container_of(ref, struct host1x_job, ref); kfree(job); } void host1x_job_put(struct host1x_job *job) { kref_put(&job->ref, job_free); } EXPORT_SYMBOL(host1x_job_put); void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo, u32 words, u32 offset) { struct host1x_job_gather *cur_gather = &job->gathers[job->num_gathers]; cur_gather->words = words; cur_gather->bo = bo; cur_gather->offset = offset; job->num_gathers++; } EXPORT_SYMBOL(host1x_job_add_gather); /* * NULL an already satisfied WAIT_SYNCPT host method, by patching its * args in the command stream. The method data is changed to reference * a reserved (never given out or incr) HOST1X_SYNCPT_RESERVED syncpt * with a matching threshold value of 0, so is guaranteed to be popped * by the host HW. */ static void host1x_syncpt_patch_offset(struct host1x_syncpt *sp, struct host1x_bo *h, u32 offset) { void *patch_addr = NULL; /* patch the wait */ patch_addr = host1x_bo_kmap(h, offset >> PAGE_SHIFT); if (patch_addr) { host1x_syncpt_patch_wait(sp, patch_addr + (offset & ~PAGE_MASK)); host1x_bo_kunmap(h, offset >> PAGE_SHIFT, patch_addr); } else pr_err("Could not map cmdbuf for wait check\n"); } /* * Check driver supplied waitchk structs for syncpt thresholds * that have already been satisfied and NULL the comparison (to * avoid a wrap condition in the HW). 
*/ static int do_waitchks(struct host1x_job *job, struct host1x *host, struct host1x_bo *patch) { int i; /* compare syncpt vs wait threshold */ for (i = 0; i < job->num_waitchk; i++) { struct host1x_waitchk *wait = &job->waitchk[i]; struct host1x_syncpt *sp = host1x_syncpt_get(host, wait->syncpt_id); /* validate syncpt id */ if (wait->syncpt_id > host1x_syncpt_nb_pts(host)) continue; /* skip all other gathers */ if (patch != wait->bo) continue; trace_host1x_syncpt_wait_check(wait->bo, wait->offset, wait->syncpt_id, wait->thresh, host1x_syncpt_read_min(sp)); if (host1x_syncpt_is_expired(sp, wait->thresh)) { dev_dbg(host->dev, "drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n", wait->syncpt_id, sp->name, wait->thresh, host1x_syncpt_read_min(sp)); host1x_syncpt_patch_offset(sp, patch, wait->offset); } wait->bo = NULL; } return 0; } static unsigned int pin_job(struct host1x_job *job) { unsigned int i; job->num_unpins = 0; for (i = 0; i < job->num_relocs; i++) { struct host1x_reloc *reloc = &job->relocarray[i]; struct sg_table *sgt; dma_addr_t phys_addr; reloc->target.bo = host1x_bo_get(reloc->target.bo); if (!reloc->target.bo) goto unpin; phys_addr = host1x_bo_pin(reloc->target.bo, &sgt); if (!phys_addr) goto unpin; job->addr_phys[job->num_unpins] = phys_addr; job->unpins[job->num_unpins].bo = reloc->target.bo; job->unpins[job->num_unpins].sgt = sgt; job->num_unpins++; } for (i = 0; i < job->num_gathers; i++) { struct host1x_job_gather *g = &job->gathers[i]; struct sg_table *sgt; dma_addr_t phys_addr; g->bo = host1x_bo_get(g->bo); if (!g->bo) goto unpin; phys_addr = host1x_bo_pin(g->bo, &sgt); if (!phys_addr) goto unpin; job->addr_phys[job->num_unpins] = phys_addr; job->unpins[job->num_unpins].bo = g->bo; job->unpins[job->num_unpins].sgt = sgt; job->num_unpins++; } return job->num_unpins; unpin: host1x_job_unpin(job); return 0; } static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf) { int i = 0; u32 last_page = ~0; void *cmdbuf_page_addr = NULL; 
/* pin & patch the relocs for one gather */ for (i = 0; i < job->num_relocs; i++) { struct host1x_reloc *reloc = &job->relocarray[i]; u32 reloc_addr = (job->reloc_addr_phys[i] + reloc->target.offset) >> reloc->shift; u32 *target; /* skip all other gathers */ if (cmdbuf != reloc->cmdbuf.bo) continue; if (last_page != reloc->cmdbuf.offset >> PAGE_SHIFT) { if (cmdbuf_page_addr) host1x_bo_kunmap(cmdbuf, last_page, cmdbuf_page_addr); cmdbuf_page_addr = host1x_bo_kmap(cmdbuf, reloc->cmdbuf.offset >> PAGE_SHIFT); last_page = reloc->cmdbuf.offset >> PAGE_SHIFT; if (unlikely(!cmdbuf_page_addr)) { pr_err("Could not map cmdbuf for relocation\n"); return -ENOMEM; } } target = cmdbuf_page_addr + (reloc->cmdbuf.offset & ~PAGE_MASK); *target = reloc_addr; } if (cmdbuf_page_addr) host1x_bo_kunmap(cmdbuf, last_page, cmdbuf_page_addr); return 0; } static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf, unsigned int offset) { offset *= sizeof(u32); if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset) return false; return true; } struct host1x_firewall { struct host1x_job *job; struct device *dev; unsigned int num_relocs; struct host1x_reloc *reloc; struct host1x_bo *cmdbuf; unsigned int offset; u32 words; u32 class; u32 reg; u32 mask; u32 count; }; static int check_register(struct host1x_firewall *fw, unsigned long offset) { if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) { if (!fw->num_relocs) return -EINVAL; if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset)) return -EINVAL; fw->num_relocs--; fw->reloc++; } return 0; } static int check_mask(struct host1x_firewall *fw) { u32 mask = fw->mask; u32 reg = fw->reg; int ret; while (mask) { if (fw->words == 0) return -EINVAL; if (mask & 1) { ret = check_register(fw, reg); if (ret < 0) return ret; fw->words--; fw->offset++; } mask >>= 1; reg++; } return 0; } static int check_incr(struct host1x_firewall *fw) { u32 count = fw->count; u32 reg = fw->reg; int ret; while (count) { if (fw->words == 0) 
return -EINVAL; ret = check_register(fw, reg); if (ret < 0) return ret; reg++; fw->words--; fw->offset++; count--; } return 0; } static int check_nonincr(struct host1x_firewall *fw) { u32 count = fw->count; int ret; while (count) { if (fw->words == 0) return -EINVAL; ret = check_register(fw, fw->reg); if (ret < 0) return ret; fw->words--; fw->offset++; count--; } return 0; } static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g) { u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped + (g->offset / sizeof(u32)); int err = 0; if (!fw->job->is_addr_reg) return 0; fw->words = g->words; fw->cmdbuf = g->bo; fw->offset = 0; while (fw->words && !err) { u32 word = cmdbuf_base[fw->offset]; u32 opcode = (word & 0xf0000000) >> 28; fw->mask = 0; fw->reg = 0; fw->count = 0; fw->words--; fw->offset++; switch (opcode) { case 0: fw->class = word >> 6 & 0x3ff; fw->mask = word & 0x3f; fw->reg = word >> 16 & 0xfff; err = check_mask(fw); if (err) goto out; break; case 1: fw->reg = word >> 16 & 0xfff; fw->count = word & 0xffff; err = check_incr(fw); if (err) goto out; break; case 2: fw->reg = word >> 16 & 0xfff; fw->count = word & 0xffff; err = check_nonincr(fw); if (err) goto out; break; case 3: fw->mask = word & 0xffff; fw->reg = word >> 16 & 0xfff; err = check_mask(fw); if (err) goto out; break; case 4: case 5: case 14: break; default: err = -EINVAL; break; } } out: return err; } static inline int copy_gathers(struct host1x_job *job, struct device *dev) { struct host1x_firewall fw; size_t size = 0; size_t offset = 0; int i; fw.job = job; fw.dev = dev; fw.reloc = job->relocarray; fw.num_relocs = job->num_relocs; fw.class = 0; for (i = 0; i < job->num_gathers; i++) { struct host1x_job_gather *g = &job->gathers[i]; size += g->words * sizeof(u32); } job->gather_copy_mapped = dma_alloc_writecombine(dev, size, &job->gather_copy, GFP_KERNEL); if (!job->gather_copy_mapped) { job->gather_copy_mapped = NULL; return -ENOMEM; } job->gather_copy_size = size; for (i = 0; i < 
job->num_gathers; i++) { struct host1x_job_gather *g = &job->gathers[i]; void *gather; /* Copy the gather */ gather = host1x_bo_mmap(g->bo); memcpy(job->gather_copy_mapped + offset, gather + g->offset, g->words * sizeof(u32)); host1x_bo_munmap(g->bo, gather); /* Store the location in the buffer */ g->base = job->gather_copy; g->offset = offset; /* Validate the job */ if (validate(&fw, g)) return -EINVAL; offset += g->words * sizeof(u32); } /* No relocs should remain at this point */ if (fw.num_relocs) return -EINVAL; return 0; } int host1x_job_pin(struct host1x_job *job, struct device *dev) { int err; unsigned int i, j; struct host1x *host = dev_get_drvdata(dev->parent); DECLARE_BITMAP(waitchk_mask, host1x_syncpt_nb_pts(host)); bitmap_zero(waitchk_mask, host1x_syncpt_nb_pts(host)); for (i = 0; i < job->num_waitchk; i++) { u32 syncpt_id = job->waitchk[i].syncpt_id; if (syncpt_id < host1x_syncpt_nb_pts(host)) set_bit(syncpt_id, waitchk_mask); } /* get current syncpt values for waitchk */ for_each_set_bit(i, waitchk_mask, host1x_syncpt_nb_pts(host)) host1x_syncpt_load(host->syncpt + i); /* pin memory */ err = pin_job(job); if (!err) goto out; /* patch gathers */ for (i = 0; i < job->num_gathers; i++) { struct host1x_job_gather *g = &job->gathers[i]; /* process each gather mem only once */ if (g->handled) continue; g->base = job->gather_addr_phys[i]; for (j = i + 1; j < job->num_gathers; j++) if (job->gathers[j].bo == g->bo) job->gathers[j].handled = true; err = do_relocs(job, g->bo); if (err) break; err = do_waitchks(job, host, g->bo); if (err) break; } if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && !err) { err = copy_gathers(job, dev); if (err) { host1x_job_unpin(job); return err; } } out: wmb(); return err; } EXPORT_SYMBOL(host1x_job_pin); void host1x_job_unpin(struct host1x_job *job) { unsigned int i; for (i = 0; i < job->num_unpins; i++) { struct host1x_job_unpin_data *unpin = &job->unpins[i]; host1x_bo_unpin(unpin->bo, unpin->sgt); host1x_bo_put(unpin->bo); } 
job->num_unpins = 0; if (job->gather_copy_size) dma_free_writecombine(job->channel->dev, job->gather_copy_size, job->gather_copy_mapped, job->gather_copy); } EXPORT_SYMBOL(host1x_job_unpin); /* * Debug routine used to dump job entries */ void host1x_job_dump(struct device *dev, struct host1x_job *job) { dev_dbg(dev, " SYNCPT_ID %d\n", job->syncpt_id); dev_dbg(dev, " SYNCPT_VAL %d\n", job->syncpt_end); dev_dbg(dev, " FIRST_GET 0x%x\n", job->first_get); dev_dbg(dev, " TIMEOUT %d\n", job->timeout); dev_dbg(dev, " NUM_SLOTS %d\n", job->num_slots); dev_dbg(dev, " NUM_HANDLES %d\n", job->num_unpins); }
gpl-2.0
sandymanu/android_kernel_xiaomi_kenzo
arch/s390/kernel/module.c
1687
14273
/* * Kernel module help for s390. * * S390 version * Copyright IBM Corp. 2002, 2003 * Author(s): Arnd Bergmann (arndb@de.ibm.com) * Martin Schwidefsky (schwidefsky@de.ibm.com) * * based on i386 version * Copyright (C) 2001 Rusty Russell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/elf.h> #include <linux/vmalloc.h> #include <linux/fs.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/moduleloader.h> #include <linux/bug.h> #if 0 #define DEBUGP printk #else #define DEBUGP(fmt , ...) 
#endif

/* Size of one private PLT stub emitted by apply_rela() below */
#ifndef CONFIG_64BIT
#define PLT_ENTRY_SIZE 12
#else /* CONFIG_64BIT */
#define PLT_ENTRY_SIZE 20
#endif /* CONFIG_64BIT */

#ifdef CONFIG_64BIT
/*
 * Allocate module memory from the dedicated modules area so that
 * branches from the module reach the kernel (rejects oversized requests).
 */
void *module_alloc(unsigned long size)
{
	if (PAGE_ALIGN(size) > MODULES_LEN)
		return NULL;
	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
				    GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
				    __builtin_return_address(0));
}
#endif

/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
{
	/* also drop the per-symbol GOT/PLT bookkeeping */
	if (mod) {
		vfree(mod->arch.syminfo);
		mod->arch.syminfo = NULL;
	}
	vfree(module_region);
}

/*
 * First pass over one relocation: reserve a GOT and/or PLT slot for the
 * referenced symbol if this relocation type needs one.  Offsets of -1UL
 * mean "no slot assigned yet".
 */
static void check_rela(Elf_Rela *rela, struct module *me)
{
	struct mod_arch_syminfo *info;

	info = me->arch.syminfo + ELF_R_SYM (rela->r_info);
	switch (ELF_R_TYPE (rela->r_info)) {
	case R_390_GOT12:	/* 12 bit GOT offset.  */
	case R_390_GOT16:	/* 16 bit GOT offset.  */
	case R_390_GOT20:	/* 20 bit GOT offset.  */
	case R_390_GOT32:	/* 32 bit GOT offset.  */
	case R_390_GOT64:	/* 64 bit GOT offset.  */
	case R_390_GOTENT:	/* 32 bit PC rel. to GOT entry shifted by 1. */
	case R_390_GOTPLT12:	/* 12 bit offset to jump slot.  */
	case R_390_GOTPLT16:	/* 16 bit offset to jump slot.  */
	case R_390_GOTPLT20:	/* 20 bit offset to jump slot.  */
	case R_390_GOTPLT32:	/* 32 bit offset to jump slot.  */
	case R_390_GOTPLT64:	/* 64 bit offset to jump slot.  */
	case R_390_GOTPLTENT:	/* 32 bit rel. offset to jump slot >> 1. */
		if (info->got_offset == -1UL) {
			info->got_offset = me->arch.got_size;
			me->arch.got_size += sizeof(void*);
		}
		break;
	case R_390_PLT16DBL:	/* 16 bit PC rel. PLT shifted by 1.  */
	case R_390_PLT32DBL:	/* 32 bit PC rel. PLT shifted by 1.  */
	case R_390_PLT32:	/* 32 bit PC relative PLT address.  */
	case R_390_PLT64:	/* 64 bit PC relative PLT address.  */
	case R_390_PLTOFF16:	/* 16 bit offset from GOT to PLT. */
	case R_390_PLTOFF32:	/* 32 bit offset from GOT to PLT. */
	case R_390_PLTOFF64:	/* 64 bit offset from GOT to PLT (comment said 16 bit; type is 64). */
		if (info->plt_offset == -1UL) {
			info->plt_offset = me->arch.plt_size;
			me->arch.plt_size += PLT_ENTRY_SIZE;
		}
		break;
	case R_390_COPY:
	case R_390_GLOB_DAT:
	case R_390_JMP_SLOT:
	case R_390_RELATIVE:
		/* Only needed if we want to support loading of
		   modules linked with -shared. */
		break;
	}
}

/*
 * Account for GOT and PLT relocations. We can't add sections for
 * got and plt but we can increase the core module size.
 */
int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
			      char *secstrings, struct module *me)
{
	Elf_Shdr *symtab;
	Elf_Sym *symbols;
	Elf_Rela *rela;
	char *strings;
	int nrela, i, j;

	/* Find symbol table and string table. */
	symtab = NULL;
	for (i = 0; i < hdr->e_shnum; i++)
		switch (sechdrs[i].sh_type) {
		case SHT_SYMTAB:
			symtab = sechdrs + i;
			break;
		}
	if (!symtab) {
		printk(KERN_ERR "module %s: no symbol table\n", me->name);
		return -ENOEXEC;
	}

	/* Allocate one syminfo structure per symbol. */
	me->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym);
	me->arch.syminfo = vmalloc(me->arch.nsyms *
				   sizeof(struct mod_arch_syminfo));
	if (!me->arch.syminfo)
		return -ENOMEM;
	symbols = (void *) hdr + symtab->sh_offset;
	strings = (void *) hdr + sechdrs[symtab->sh_link].sh_offset;
	for (i = 0; i < me->arch.nsyms; i++) {
		if (symbols[i].st_shndx == SHN_UNDEF &&
		    strcmp(strings + symbols[i].st_name,
			   "_GLOBAL_OFFSET_TABLE_") == 0)
			/* "Define" it as absolute. */
			symbols[i].st_shndx = SHN_ABS;
		me->arch.syminfo[i].got_offset = -1UL;
		me->arch.syminfo[i].plt_offset = -1UL;
		me->arch.syminfo[i].got_initialized = 0;
		me->arch.syminfo[i].plt_initialized = 0;
	}

	/* Search for got/plt relocations. */
	me->arch.got_size = me->arch.plt_size = 0;
	for (i = 0; i < hdr->e_shnum; i++) {
		if (sechdrs[i].sh_type != SHT_RELA)
			continue;
		nrela = sechdrs[i].sh_size / sizeof(Elf_Rela);
		rela = (void *) hdr + sechdrs[i].sh_offset;
		for (j = 0; j < nrela; j++)
			check_rela(rela + j, me);
	}

	/* Increase core size by size of got & plt and set start
	   offsets for got and plt. */
	me->core_size = ALIGN(me->core_size, 4);
	me->arch.got_offset = me->core_size;
	me->core_size += me->arch.got_size;
	me->arch.plt_offset = me->core_size;
	me->core_size += me->arch.plt_size;
	return 0;
}

/*
 * Write `val` into the `bits`-wide field at `loc`, after shifting right
 * by `shift` (which must leave no remainder) and range-checking it as
 * signed (sign != 0) or unsigned.  Returns 0 or -ENOEXEC on overflow /
 * misalignment.  The 12- and 20-bit cases merge into existing bits of
 * the instruction at `loc`.
 */
static int apply_rela_bits(Elf_Addr loc, Elf_Addr val,
			   int sign, int bits, int shift)
{
	unsigned long umax;
	long min, max;

	if (val & ((1UL << shift) - 1))
		return -ENOEXEC;
	if (sign) {
		val = (Elf_Addr)(((long) val) >> shift);
		min = -(1L << (bits - 1));
		max = (1L << (bits - 1)) - 1;
		if ((long) val < min || (long) val > max)
			return -ENOEXEC;
	} else {
		val >>= shift;
		umax = ((1UL << (bits - 1)) << 1) - 1;
		if ((unsigned long) val > umax)
			return -ENOEXEC;
	}

	if (bits == 8)
		*(unsigned char *) loc = val;
	else if (bits == 12)
		*(unsigned short *) loc = (val & 0xfff) |
			(*(unsigned short *) loc & 0xf000);
	else if (bits == 16)
		*(unsigned short *) loc = val;
	else if (bits == 20)
		*(unsigned int *) loc = (val & 0xfff) << 16 |
			(val & 0xff000) >> 4 |
			(*(unsigned int *) loc & 0xf00000ff);
	else if (bits == 32)
		*(unsigned int *) loc = val;
	else if (bits == 64)
		*(unsigned long *) loc = val;
	return 0;
}

/*
 * Apply one RELA relocation.  GOT and PLT slots reserved by check_rela()
 * are lazily filled here on first use (got_initialized/plt_initialized).
 */
static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
		      const char *strtab, struct module *me)
{
	struct mod_arch_syminfo *info;
	Elf_Addr loc, val;
	int r_type, r_sym;
	int rc = -ENOEXEC;

	/* This is where to make the change */
	loc = base + rela->r_offset;
	/* This is the symbol it is referring to.  Note that all
	   undefined symbols have been resolved.  */
	r_sym = ELF_R_SYM(rela->r_info);
	r_type = ELF_R_TYPE(rela->r_info);
	info = me->arch.syminfo + r_sym;
	val = symtab[r_sym].st_value;

	switch (r_type) {
	case R_390_NONE:	/* No relocation.  */
		rc = 0;
		break;
	case R_390_8:		/* Direct 8 bit.  */
	case R_390_12:		/* Direct 12 bit.  */
	case R_390_16:		/* Direct 16 bit.  */
	case R_390_20:		/* Direct 20 bit.  */
	case R_390_32:		/* Direct 32 bit.  */
	case R_390_64:		/* Direct 64 bit.  */
		val += rela->r_addend;
		if (r_type == R_390_8)
			rc = apply_rela_bits(loc, val, 0, 8, 0);
		else if (r_type == R_390_12)
			rc = apply_rela_bits(loc, val, 0, 12, 0);
		else if (r_type == R_390_16)
			rc = apply_rela_bits(loc, val, 0, 16, 0);
		else if (r_type == R_390_20)
			rc = apply_rela_bits(loc, val, 1, 20, 0);
		else if (r_type == R_390_32)
			rc = apply_rela_bits(loc, val, 0, 32, 0);
		else if (r_type == R_390_64)
			rc = apply_rela_bits(loc, val, 0, 64, 0);
		break;
	case R_390_PC16:	/* PC relative 16 bit.  */
	case R_390_PC16DBL:	/* PC relative 16 bit shifted by 1.  */
	case R_390_PC32DBL:	/* PC relative 32 bit shifted by 1.  */
	case R_390_PC32:	/* PC relative 32 bit.  */
	case R_390_PC64:	/* PC relative 64 bit.  */
		val += rela->r_addend - loc;
		if (r_type == R_390_PC16)
			rc = apply_rela_bits(loc, val, 1, 16, 0);
		else if (r_type == R_390_PC16DBL)
			rc = apply_rela_bits(loc, val, 1, 16, 1);
		else if (r_type == R_390_PC32DBL)
			rc = apply_rela_bits(loc, val, 1, 32, 1);
		else if (r_type == R_390_PC32)
			rc = apply_rela_bits(loc, val, 1, 32, 0);
		else if (r_type == R_390_PC64)
			rc = apply_rela_bits(loc, val, 1, 64, 0);
		break;
	case R_390_GOT12:	/* 12 bit GOT offset.  */
	case R_390_GOT16:	/* 16 bit GOT offset.  */
	case R_390_GOT20:	/* 20 bit GOT offset.  */
	case R_390_GOT32:	/* 32 bit GOT offset.  */
	case R_390_GOT64:	/* 64 bit GOT offset.  */
	case R_390_GOTENT:	/* 32 bit PC rel. to GOT entry shifted by 1. */
	case R_390_GOTPLT12:	/* 12 bit offset to jump slot.  */
	case R_390_GOTPLT20:	/* 20 bit offset to jump slot.  */
	case R_390_GOTPLT16:	/* 16 bit offset to jump slot.  */
	case R_390_GOTPLT32:	/* 32 bit offset to jump slot.  */
	case R_390_GOTPLT64:	/* 64 bit offset to jump slot.  */
	case R_390_GOTPLTENT:	/* 32 bit rel. offset to jump slot >> 1. */
		/* fill the GOT slot on first use */
		if (info->got_initialized == 0) {
			Elf_Addr *gotent;

			gotent = me->module_core + me->arch.got_offset +
				info->got_offset;
			*gotent = val;
			info->got_initialized = 1;
		}
		val = info->got_offset + rela->r_addend;
		if (r_type == R_390_GOT12 ||
		    r_type == R_390_GOTPLT12)
			rc = apply_rela_bits(loc, val, 0, 12, 0);
		else if (r_type == R_390_GOT16 ||
			 r_type == R_390_GOTPLT16)
			rc = apply_rela_bits(loc, val, 0, 16, 0);
		else if (r_type == R_390_GOT20 ||
			 r_type == R_390_GOTPLT20)
			rc = apply_rela_bits(loc, val, 1, 20, 0);
		else if (r_type == R_390_GOT32 ||
			 r_type == R_390_GOTPLT32)
			rc = apply_rela_bits(loc, val, 0, 32, 0);
		else if (r_type == R_390_GOT64 ||
			 r_type == R_390_GOTPLT64)
			rc = apply_rela_bits(loc, val, 0, 64, 0);
		else if (r_type == R_390_GOTENT ||
			 r_type == R_390_GOTPLTENT) {
			val += (Elf_Addr) me->module_core - loc;
			rc = apply_rela_bits(loc, val, 1, 32, 1);
		}
		break;
	case R_390_PLT16DBL:	/* 16 bit PC rel. PLT shifted by 1.  */
	case R_390_PLT32DBL:	/* 32 bit PC rel. PLT shifted by 1.  */
	case R_390_PLT32:	/* 32 bit PC relative PLT address.  */
	case R_390_PLT64:	/* 64 bit PC relative PLT address.  */
	case R_390_PLTOFF16:	/* 16 bit offset from GOT to PLT. */
	case R_390_PLTOFF32:	/* 32 bit offset from GOT to PLT. */
	case R_390_PLTOFF64:	/* 64 bit offset from GOT to PLT (comment said 16 bit; type is 64). */
		/* emit the PLT stub (trampoline to val) on first use */
		if (info->plt_initialized == 0) {
			unsigned int *ip;
			ip = me->module_core + me->arch.plt_offset +
				info->plt_offset;
#ifndef CONFIG_64BIT
			ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
			ip[1] = 0x100607f1;
			ip[2] = val;
#else /* CONFIG_64BIT */
			ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
			ip[1] = 0x100a0004;
			ip[2] = 0x07f10000;
			ip[3] = (unsigned int) (val >> 32);
			ip[4] = (unsigned int) val;
#endif /* CONFIG_64BIT */
			info->plt_initialized = 1;
		}
		if (r_type == R_390_PLTOFF16 ||
		    r_type == R_390_PLTOFF32 ||
		    r_type == R_390_PLTOFF64)
			val = me->arch.plt_offset - me->arch.got_offset +
				info->plt_offset + rela->r_addend;
		else {
			/* branch directly to the target if it is within
			   reach of the instruction, else via the stub */
			if (!((r_type == R_390_PLT16DBL &&
			       val - loc + 0xffffUL < 0x1ffffeUL) ||
			      (r_type == R_390_PLT32DBL &&
			       val - loc + 0xffffffffULL < 0x1fffffffeULL)))
				val = (Elf_Addr) me->module_core +
					me->arch.plt_offset +
					info->plt_offset;
			val += rela->r_addend - loc;
		}
		if (r_type == R_390_PLT16DBL)
			rc = apply_rela_bits(loc, val, 1, 16, 1);
		else if (r_type == R_390_PLTOFF16)
			rc = apply_rela_bits(loc, val, 0, 16, 0);
		else if (r_type == R_390_PLT32DBL)
			rc = apply_rela_bits(loc, val, 1, 32, 1);
		else if (r_type == R_390_PLT32 ||
			 r_type == R_390_PLTOFF32)
			rc = apply_rela_bits(loc, val, 0, 32, 0);
		else if (r_type == R_390_PLT64 ||
			 r_type == R_390_PLTOFF64)
			rc = apply_rela_bits(loc, val, 0, 64, 0);
		break;
	case R_390_GOTOFF16:	/* 16 bit offset to GOT.  */
	case R_390_GOTOFF32:	/* 32 bit offset to GOT.  */
	case R_390_GOTOFF64:	/* 64 bit offset to GOT.  */
		val = val + rela->r_addend -
			((Elf_Addr) me->module_core + me->arch.got_offset);
		if (r_type == R_390_GOTOFF16)
			rc = apply_rela_bits(loc, val, 0, 16, 0);
		else if (r_type == R_390_GOTOFF32)
			rc = apply_rela_bits(loc, val, 0, 32, 0);
		else if (r_type == R_390_GOTOFF64)
			rc = apply_rela_bits(loc, val, 0, 64, 0);
		break;
	case R_390_GOTPC:	/* 32 bit PC relative offset to GOT. */
	case R_390_GOTPCDBL:	/* 32 bit PC rel. off. to GOT shifted by 1. */
		val = (Elf_Addr) me->module_core + me->arch.got_offset +
			rela->r_addend - loc;
		if (r_type == R_390_GOTPC)
			rc = apply_rela_bits(loc, val, 1, 32, 0);
		else if (r_type == R_390_GOTPCDBL)
			rc = apply_rela_bits(loc, val, 1, 32, 1);
		break;
	case R_390_COPY:
	case R_390_GLOB_DAT:	/* Create GOT entry.  */
	case R_390_JMP_SLOT:	/* Create PLT entry.  */
	case R_390_RELATIVE:	/* Adjust by program base.  */
		/* Only needed if we want to support loading of
		   modules linked with -shared. */
		return -ENOEXEC;
	default:
		printk(KERN_ERR "module %s: unknown relocation: %u\n",
		       me->name, r_type);
		return -ENOEXEC;
	}
	if (rc) {
		printk(KERN_ERR "module %s: relocation error for symbol %s "
		       "(r_type %i, value 0x%lx)\n",
		       me->name, strtab + symtab[r_sym].st_name,
		       r_type, (unsigned long) val);
		return rc;
	}
	return 0;
}

/*
 * Apply one RELA section to the section it targets; stops at the first
 * failing relocation.
 */
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
		       unsigned int symindex, unsigned int relsec,
		       struct module *me)
{
	Elf_Addr base;
	Elf_Sym *symtab;
	Elf_Rela *rela;
	unsigned long i, n;
	int rc;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	base = sechdrs[sechdrs[relsec].sh_info].sh_addr;
	symtab = (Elf_Sym *) sechdrs[symindex].sh_addr;
	rela = (Elf_Rela *) sechdrs[relsec].sh_addr;
	n = sechdrs[relsec].sh_size / sizeof(Elf_Rela);

	for (i = 0; i < n; i++, rela++) {
		rc = apply_rela(rela, base, symtab, strtab, me);
		if (rc)
			return rc;
	}
	return 0;
}

/* Relocation done: the per-symbol bookkeeping is no longer needed. */
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	vfree(me->arch.syminfo);
	me->arch.syminfo = NULL;
	return 0;
}
gpl-2.0
premaca/android_kernel_redmi2
arch/mips/bcm63xx/reset.c
1943
7412
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012 Jonas Gorski <jonas.gorski@gmail.com>
 */

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <bcm63xx_cpu.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_reset.h>

/*
 * Expand to a soft-reset-bit lookup table for one SoC, indexed by the
 * generic BCM63XX_RESET_* enum.  A value of 0 means "core not present
 * on this SoC" and makes __bcm63xx_core_set_reset() a no-op.
 */
#define __GEN_RESET_BITS_TABLE(__cpu)					\
	[BCM63XX_RESET_SPI]		= BCM## __cpu ##_RESET_SPI,	\
	[BCM63XX_RESET_ENET]		= BCM## __cpu ##_RESET_ENET,	\
	[BCM63XX_RESET_USBH]		= BCM## __cpu ##_RESET_USBH,	\
	[BCM63XX_RESET_USBD]		= BCM## __cpu ##_RESET_USBD,	\
	[BCM63XX_RESET_DSL]		= BCM## __cpu ##_RESET_DSL,	\
	[BCM63XX_RESET_SAR]		= BCM## __cpu ##_RESET_SAR,	\
	[BCM63XX_RESET_EPHY]		= BCM## __cpu ##_RESET_EPHY,	\
	[BCM63XX_RESET_ENETSW]		= BCM## __cpu ##_RESET_ENETSW,	\
	[BCM63XX_RESET_PCM]		= BCM## __cpu ##_RESET_PCM,	\
	[BCM63XX_RESET_MPI]		= BCM## __cpu ##_RESET_MPI,	\
	[BCM63XX_RESET_PCIE]		= BCM## __cpu ##_RESET_PCIE,	\
	[BCM63XX_RESET_PCIE_EXT]	= BCM## __cpu ##_RESET_PCIE_EXT,

/* Per-SoC soft-reset masks; 0 = core absent on that chip */
#define BCM6328_RESET_SPI	SOFTRESET_6328_SPI_MASK
#define BCM6328_RESET_ENET	0
#define BCM6328_RESET_USBH	SOFTRESET_6328_USBH_MASK
#define BCM6328_RESET_USBD	SOFTRESET_6328_USBS_MASK
#define BCM6328_RESET_DSL	0
#define BCM6328_RESET_SAR	SOFTRESET_6328_SAR_MASK
#define BCM6328_RESET_EPHY	SOFTRESET_6328_EPHY_MASK
#define BCM6328_RESET_ENETSW	SOFTRESET_6328_ENETSW_MASK
#define BCM6328_RESET_PCM	SOFTRESET_6328_PCM_MASK
#define BCM6328_RESET_MPI	0
#define BCM6328_RESET_PCIE	\
				(SOFTRESET_6328_PCIE_MASK |		\
				 SOFTRESET_6328_PCIE_CORE_MASK |	\
				 SOFTRESET_6328_PCIE_HARD_MASK)
#define BCM6328_RESET_PCIE_EXT	SOFTRESET_6328_PCIE_EXT_MASK

#define BCM6338_RESET_SPI	SOFTRESET_6338_SPI_MASK
#define BCM6338_RESET_ENET	SOFTRESET_6338_ENET_MASK
#define BCM6338_RESET_USBH	SOFTRESET_6338_USBH_MASK
#define BCM6338_RESET_USBD	SOFTRESET_6338_USBS_MASK
#define BCM6338_RESET_DSL	SOFTRESET_6338_ADSL_MASK
#define BCM6338_RESET_SAR	SOFTRESET_6338_SAR_MASK
#define BCM6338_RESET_EPHY	0
#define BCM6338_RESET_ENETSW	0
#define BCM6338_RESET_PCM	0
#define BCM6338_RESET_MPI	0
#define BCM6338_RESET_PCIE	0
#define BCM6338_RESET_PCIE_EXT	0

#define BCM6348_RESET_SPI	SOFTRESET_6348_SPI_MASK
#define BCM6348_RESET_ENET	SOFTRESET_6348_ENET_MASK
#define BCM6348_RESET_USBH	SOFTRESET_6348_USBH_MASK
#define BCM6348_RESET_USBD	SOFTRESET_6348_USBS_MASK
#define BCM6348_RESET_DSL	SOFTRESET_6348_ADSL_MASK
#define BCM6348_RESET_SAR	SOFTRESET_6348_SAR_MASK
#define BCM6348_RESET_EPHY	0
#define BCM6348_RESET_ENETSW	0
#define BCM6348_RESET_PCM	0
#define BCM6348_RESET_MPI	0
#define BCM6348_RESET_PCIE	0
#define BCM6348_RESET_PCIE_EXT	0

#define BCM6358_RESET_SPI	SOFTRESET_6358_SPI_MASK
#define BCM6358_RESET_ENET	SOFTRESET_6358_ENET_MASK
#define BCM6358_RESET_USBH	SOFTRESET_6358_USBH_MASK
#define BCM6358_RESET_USBD	0
#define BCM6358_RESET_DSL	SOFTRESET_6358_ADSL_MASK
#define BCM6358_RESET_SAR	SOFTRESET_6358_SAR_MASK
#define BCM6358_RESET_EPHY	SOFTRESET_6358_EPHY_MASK
#define BCM6358_RESET_ENETSW	0
#define BCM6358_RESET_PCM	SOFTRESET_6358_PCM_MASK
#define BCM6358_RESET_MPI	SOFTRESET_6358_MPI_MASK
#define BCM6358_RESET_PCIE	0
#define BCM6358_RESET_PCIE_EXT	0

#define BCM6362_RESET_SPI	SOFTRESET_6362_SPI_MASK
#define BCM6362_RESET_ENET	0
#define BCM6362_RESET_USBH	SOFTRESET_6362_USBH_MASK
#define BCM6362_RESET_USBD	SOFTRESET_6362_USBS_MASK
#define BCM6362_RESET_DSL	0
#define BCM6362_RESET_SAR	SOFTRESET_6362_SAR_MASK
#define BCM6362_RESET_EPHY	SOFTRESET_6362_EPHY_MASK
#define BCM6362_RESET_ENETSW	SOFTRESET_6362_ENETSW_MASK
#define BCM6362_RESET_PCM	SOFTRESET_6362_PCM_MASK
#define BCM6362_RESET_MPI	0
#define BCM6362_RESET_PCIE	(SOFTRESET_6362_PCIE_MASK | \
				 SOFTRESET_6362_PCIE_CORE_MASK)
#define BCM6362_RESET_PCIE_EXT	SOFTRESET_6362_PCIE_EXT_MASK

#define BCM6368_RESET_SPI	SOFTRESET_6368_SPI_MASK
#define BCM6368_RESET_ENET	0
#define BCM6368_RESET_USBH	SOFTRESET_6368_USBH_MASK
#define BCM6368_RESET_USBD	SOFTRESET_6368_USBS_MASK
#define BCM6368_RESET_DSL	0
#define BCM6368_RESET_SAR	SOFTRESET_6368_SAR_MASK
#define BCM6368_RESET_EPHY	SOFTRESET_6368_EPHY_MASK
#define BCM6368_RESET_ENETSW	0
#define BCM6368_RESET_PCM	SOFTRESET_6368_PCM_MASK
#define BCM6368_RESET_MPI	SOFTRESET_6368_MPI_MASK
#define BCM6368_RESET_PCIE	0
#define BCM6368_RESET_PCIE_EXT	0

#ifdef BCMCPU_RUNTIME_DETECT

/*
 * core reset bits
 */
static const u32 bcm6328_reset_bits[] = {
	__GEN_RESET_BITS_TABLE(6328)
};

static const u32 bcm6338_reset_bits[] = {
	__GEN_RESET_BITS_TABLE(6338)
};

static const u32 bcm6348_reset_bits[] = {
	__GEN_RESET_BITS_TABLE(6348)
};

static const u32 bcm6358_reset_bits[] = {
	__GEN_RESET_BITS_TABLE(6358)
};

static const u32 bcm6362_reset_bits[] = {
	__GEN_RESET_BITS_TABLE(6362)
};

static const u32 bcm6368_reset_bits[] = {
	__GEN_RESET_BITS_TABLE(6368)
};

/* Selected at boot by bcm63xx_reset_bits_init() below */
const u32 *bcm63xx_reset_bits;
static int reset_reg;

/*
 * Pick the soft-reset register address and bit table matching the SoC
 * we are actually running on.  Unknown SoCs leave both unset
 * (bcm63xx_reset_bits stays NULL).
 */
static int __init bcm63xx_reset_bits_init(void)
{
	if (BCMCPU_IS_6328()) {
		reset_reg = PERF_SOFTRESET_6328_REG;
		bcm63xx_reset_bits = bcm6328_reset_bits;
	} else if (BCMCPU_IS_6338()) {
		reset_reg = PERF_SOFTRESET_REG;
		bcm63xx_reset_bits = bcm6338_reset_bits;
	} else if (BCMCPU_IS_6348()) {
		reset_reg = PERF_SOFTRESET_REG;
		bcm63xx_reset_bits = bcm6348_reset_bits;
	} else if (BCMCPU_IS_6358()) {
		reset_reg = PERF_SOFTRESET_6358_REG;
		bcm63xx_reset_bits = bcm6358_reset_bits;
	} else if (BCMCPU_IS_6362()) {
		reset_reg = PERF_SOFTRESET_6362_REG;
		bcm63xx_reset_bits = bcm6362_reset_bits;
	} else if (BCMCPU_IS_6368()) {
		reset_reg = PERF_SOFTRESET_6368_REG;
		bcm63xx_reset_bits = bcm6368_reset_bits;
	}

	return 0;
}
#else

/* SoC known at compile time: table and register resolved statically */
#ifdef CONFIG_BCM63XX_CPU_6328
static const u32 bcm63xx_reset_bits[] = {
	__GEN_RESET_BITS_TABLE(6328)
};
#define reset_reg PERF_SOFTRESET_6328_REG
#endif

#ifdef CONFIG_BCM63XX_CPU_6338
static const u32 bcm63xx_reset_bits[] = {
	__GEN_RESET_BITS_TABLE(6338)
};
#define reset_reg PERF_SOFTRESET_REG
#endif

#ifdef CONFIG_BCM63XX_CPU_6345
/* 6345 has no soft-reset support at all */
static const u32 bcm63xx_reset_bits[] = { };
#define reset_reg 0
#endif

#ifdef CONFIG_BCM63XX_CPU_6348
static const u32 bcm63xx_reset_bits[] = {
	__GEN_RESET_BITS_TABLE(6348)
};
#define reset_reg PERF_SOFTRESET_REG
#endif

#ifdef CONFIG_BCM63XX_CPU_6358
static const u32 bcm63xx_reset_bits[] = {
	__GEN_RESET_BITS_TABLE(6358)
};
#define reset_reg PERF_SOFTRESET_6358_REG
#endif

#ifdef CONFIG_BCM63XX_CPU_6362
static const u32 bcm63xx_reset_bits[] = {
	__GEN_RESET_BITS_TABLE(6362)
};
#define reset_reg PERF_SOFTRESET_6362_REG
#endif

#ifdef CONFIG_BCM63XX_CPU_6368
static const u32 bcm63xx_reset_bits[] = {
	__GEN_RESET_BITS_TABLE(6368)
};
#define reset_reg PERF_SOFTRESET_6368_REG
#endif

static int __init bcm63xx_reset_bits_init(void) { return 0; }
#endif

/* protects read-modify-write of the soft-reset register
 * (named "mutex" for historical reasons; it is a spinlock) */
static DEFINE_SPINLOCK(reset_mutex);

/*
 * Assert (enable != 0) or de-assert a core's soft reset.  Reset lines
 * are active-low in the register, hence clear-to-assert.  A zero mask
 * (core absent) is a no-op.
 */
static void __bcm63xx_core_set_reset(u32 mask, int enable)
{
	unsigned long flags;
	u32 val;

	if (!mask)
		return;

	spin_lock_irqsave(&reset_mutex, flags);
	val = bcm_perf_readl(reset_reg);

	if (enable)
		val &= ~mask;
	else
		val |= mask;

	bcm_perf_writel(val, reset_reg);
	spin_unlock_irqrestore(&reset_mutex, flags);
}

/* Public entry point: put `core` into or take it out of reset. */
void bcm63xx_core_set_reset(enum bcm63xx_core_reset core, int reset)
{
	__bcm63xx_core_set_reset(bcm63xx_reset_bits[core], reset);
}
EXPORT_SYMBOL(bcm63xx_core_set_reset);

postcore_initcall(bcm63xx_reset_bits_init);
gpl-2.0
CyanogenMod/samsung-kernel-galaxys
arch/arm/mach-at91/at91x40.c
1943
1914
/* * arch/arm/mach-at91/at91x40.c * * (C) Copyright 2007, Greg Ungerer <gerg@snapgear.com> * Copyright (C) 2005 SAN People * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/irq.h> #include <asm/mach/arch.h> #include <mach/at91x40.h> #include <mach/at91_st.h> #include <mach/timex.h> #include "generic.h" /* * Export the clock functions for the AT91X40. Some external code common * to all AT91 family parts relys on this, like the gpio and serial support. */ int clk_enable(struct clk *clk) { return 0; } void clk_disable(struct clk *clk) { } unsigned long clk_get_rate(struct clk *clk) { return AT91X40_MASTER_CLOCK; } struct clk *clk_get(struct device *dev, const char *id) { return NULL; } void __init at91x40_initialize(unsigned long main_clock) { at91_extern_irq = (1 << AT91X40_ID_IRQ0) | (1 << AT91X40_ID_IRQ1) | (1 << AT91X40_ID_IRQ2); } /* * The default interrupt priority levels (0 = lowest, 7 = highest). */ static unsigned int at91x40_default_irq_priority[NR_AIC_IRQS] __initdata = { 7, /* Advanced Interrupt Controller (FIQ) */ 0, /* System Peripherals */ 0, /* USART 0 */ 0, /* USART 1 */ 2, /* Timer Counter 0 */ 2, /* Timer Counter 1 */ 2, /* Timer Counter 2 */ 0, /* Watchdog timer */ 0, /* Parallel IO Controller A */ 0, /* Reserved */ 0, /* Reserved */ 0, /* Reserved */ 0, /* Reserved */ 0, /* Reserved */ 0, /* Reserved */ 0, /* Reserved */ 0, /* External IRQ0 */ 0, /* External IRQ1 */ 0, /* External IRQ2 */ }; void __init at91x40_init_interrupts(unsigned int priority[NR_AIC_IRQS]) { if (!priority) priority = at91x40_default_irq_priority; at91_aic_init(priority); }
gpl-2.0
Keith-N/android_kernel_nvidia_ardbeg
arch/mips/bcm63xx/dev-flash.c
1943
3298
/*
 * Broadcom BCM63xx flash registration
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 * Copyright (C) 2008 Florian Fainelli <florian@openwrt.org>
 * Copyright (C) 2012 Jonas Gorski <jonas.gorski@gmail.com>
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>

#include <bcm63xx_cpu.h>
#include <bcm63xx_dev_flash.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>

/* Only the CFE bootloader partition is static; the rest is discovered
 * by the "bcm63xxpart" parser below. */
static struct mtd_partition mtd_partitions[] = {
	{
		.name		= "cfe",
		.offset		= 0x0,
		.size		= 0x40000,
	}
};

static const char *bcm63xx_part_types[] = { "bcm63xxpart", NULL };

static struct physmap_flash_data flash_data = {
	.width			= 2,	/* 16-bit parallel flash */
	.parts			= mtd_partitions,
	.part_probe_types	= bcm63xx_part_types,
};

static struct resource mtd_resources[] = {
	{
		.start		= 0,	/* filled at runtime */
		.end		= 0,	/* filled at runtime */
		.flags		= IORESOURCE_MEM,
	}
};

static struct platform_device mtd_dev = {
	.name		= "physmap-flash",
	.resource	= mtd_resources,
	.num_resources	= ARRAY_SIZE(mtd_resources),
	.dev		= {
		.platform_data	= &flash_data,
	},
};

/*
 * Determine the boot flash type (parallel / serial / NAND) from the
 * SoC's boot strap register, or return -EINVAL for unknown SoCs.
 */
static int __init bcm63xx_detect_flash_type(void)
{
	u32 val;

	switch (bcm63xx_get_cpu_id()) {
	case BCM6328_CPU_ID:
		val = bcm_misc_readl(MISC_STRAPBUS_6328_REG);
		if (val & STRAPBUS_6328_BOOT_SEL_SERIAL)
			return BCM63XX_FLASH_TYPE_SERIAL;
		else
			return BCM63XX_FLASH_TYPE_NAND;
	case BCM6338_CPU_ID:
	case BCM6345_CPU_ID:
	case BCM6348_CPU_ID:
		/* no way to auto detect so assume parallel */
		return BCM63XX_FLASH_TYPE_PARALLEL;
	case BCM6358_CPU_ID:
		val = bcm_gpio_readl(GPIO_STRAPBUS_REG);
		if (val & STRAPBUS_6358_BOOT_SEL_PARALLEL)
			return BCM63XX_FLASH_TYPE_PARALLEL;
		else
			return BCM63XX_FLASH_TYPE_SERIAL;
	case BCM6362_CPU_ID:
		val = bcm_misc_readl(MISC_STRAPBUS_6362_REG);
		if (val & STRAPBUS_6362_BOOT_SEL_SERIAL)
			return BCM63XX_FLASH_TYPE_SERIAL;
		else
			return BCM63XX_FLASH_TYPE_NAND;
	case BCM6368_CPU_ID:
		val = bcm_gpio_readl(GPIO_STRAPBUS_REG);
		switch (val & STRAPBUS_6368_BOOT_SEL_MASK) {
		case STRAPBUS_6368_BOOT_SEL_NAND:
			return BCM63XX_FLASH_TYPE_NAND;
		case STRAPBUS_6368_BOOT_SEL_SERIAL:
			return BCM63XX_FLASH_TYPE_SERIAL;
		case STRAPBUS_6368_BOOT_SEL_PARALLEL:
			return BCM63XX_FLASH_TYPE_PARALLEL;
		}
		/* fallthrough: unknown 6368 strap value -> -EINVAL */
	default:
		return -EINVAL;
	}
}

/*
 * Register the boot flash as a platform device.  Only parallel flash is
 * supported here; serial and NAND are detected but rejected with
 * -ENODEV.
 */
int __init bcm63xx_flash_register(void)
{
	int flash_type;
	u32 val;

	flash_type = bcm63xx_detect_flash_type();

	switch (flash_type) {
	case BCM63XX_FLASH_TYPE_PARALLEL:
		/* read base address of boot chip select (0) */
		val = bcm_mpi_readl(MPI_CSBASE_REG(0));
		val &= MPI_CSBASE_BASE_MASK;
		mtd_resources[0].start = val;
		mtd_resources[0].end = 0x1FFFFFFF;
		return platform_device_register(&mtd_dev);
	case BCM63XX_FLASH_TYPE_SERIAL:
		pr_warn("unsupported serial flash detected\n");
		return -ENODEV;
	case BCM63XX_FLASH_TYPE_NAND:
		pr_warn("unsupported NAND flash detected\n");
		return -ENODEV;
	default:
		pr_err("flash detection failed for BCM%x: %d\n",
		       bcm63xx_get_cpu_id(), flash_type);
		return -ENODEV;
	}
}
gpl-2.0
treznorx/android-tegra-2.6.36-gtablet
drivers/serial/nwpserial.c
3223
11584
/*
 * Serial Port driver for a NWP uart device
 *
 * Copyright (C) 2008 IBM Corp., Benjamin Krill <ben@codiert.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
#include <linux/init.h>
#include <linux/console.h>
#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/serial_core.h>
#include <linux/tty.h>
#include <linux/irqreturn.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
#include <linux/nwpserial.h>
#include <asm/prom.h>
#include <asm/dcr.h>

#define NWPSERIAL_NR		2

#define NWPSERIAL_STATUS_RXVALID 0x1
#define NWPSERIAL_STATUS_TXFULL  0x2

/* One port instance: uart_port plus the DCR mapping used to reach the
 * device registers, and cached IER/MCR values. */
struct nwpserial_port {
	struct uart_port port;
	dcr_host_t dcr_host;
	unsigned int ier;
	unsigned int mcr;
};

/* serializes port registration/unregistration */
static DEFINE_MUTEX(nwpserial_mutex);
static struct nwpserial_port nwpserial_ports[NWPSERIAL_NR];

/*
 * Busy-wait (up to ~10ms) until all `bits` are set in the LSR.  Gives
 * up silently on timeout.
 */
static void wait_for_bits(struct nwpserial_port *up, int bits)
{
	unsigned int status, tmout = 10000;

	/* Wait up to 10ms for the character(s) to be sent. */
	do {
		status = dcr_read(up->dcr_host, UART_LSR);
		if (--tmout == 0)
			break;
		udelay(1);
	} while ((status & bits) != bits);
}

#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE
/* Emit one character for the console, blocking until the TX FIFO has room. */
static void nwpserial_console_putchar(struct uart_port *port, int c)
{
	struct nwpserial_port *up;
	up = container_of(port, struct nwpserial_port, port);
	/* check if tx buffer is full */
	wait_for_bits(up, UART_LSR_THRE);
	dcr_write(up->dcr_host, UART_TX, c);
	up->port.icount.tx++;
}

/*
 * Console write: masks the RX interrupt for the duration, and only
 * try-locks the port lock during an oops so a dying CPU cannot
 * deadlock against itself.
 */
static void
nwpserial_console_write(struct console *co, const char *s, unsigned int count)
{
	struct nwpserial_port *up = &nwpserial_ports[co->index];
	unsigned long flags;
	int locked = 1;

	if (oops_in_progress)
		locked = spin_trylock_irqsave(&up->port.lock, flags);
	else
		spin_lock_irqsave(&up->port.lock, flags);

	/* save and disable interrupt */
	up->ier = dcr_read(up->dcr_host, UART_IER);
	dcr_write(up->dcr_host, UART_IER, up->ier & ~UART_IER_RDI);

	uart_console_write(&up->port, s, count, nwpserial_console_putchar);

	/* wait for transmitter to become empty */
	while ((dcr_read(up->dcr_host, UART_LSR) & UART_LSR_THRE) == 0)
		cpu_relax();

	/* restore interrupt state */
	dcr_write(up->dcr_host, UART_IER, up->ier);

	if (locked)
		spin_unlock_irqrestore(&up->port.lock, flags);
}

static struct uart_driver nwpserial_reg;
static struct console nwpserial_console = {
	.name		= "ttySQ",
	.write		= nwpserial_console_write,
	.device		= uart_console_device,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &nwpserial_reg,
};
#define NWPSERIAL_CONSOLE	(&nwpserial_console)
#else
#define NWPSERIAL_CONSOLE	NULL
#endif /* CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE */

/**************************************************************************/

/* No resources to claim: registers are reached via DCRs mapped at
 * registration time. */
static int nwpserial_request_port(struct uart_port *port)
{
	return 0;
}

static void nwpserial_release_port(struct uart_port *port)
{
	/* N/A */
}

static void nwpserial_config_port(struct uart_port *port, int flags)
{
	port->type = PORT_NWPSERIAL;
}

/*
 * RX interrupt handler: drain the receive FIFO into the tty layer,
 * dropping characters while reception is disabled (stop_rx/shutdown set
 * ignore_status_mask to NWPSERIAL_STATUS_RXVALID).
 */
static irqreturn_t nwpserial_interrupt(int irq, void *dev_id)
{
	struct nwpserial_port *up = dev_id;
	struct tty_struct *tty = up->port.state->port.tty;
	irqreturn_t ret;
	unsigned int iir;
	unsigned char ch;

	spin_lock(&up->port.lock);

	/* check if the uart was the interrupt source. */
	iir = dcr_read(up->dcr_host, UART_IIR);
	if (!iir) {
		ret = IRQ_NONE;
		goto out;
	}

	do {
		up->port.icount.rx++;
		ch = dcr_read(up->dcr_host, UART_RX);
		if (up->port.ignore_status_mask != NWPSERIAL_STATUS_RXVALID)
			tty_insert_flip_char(tty, ch, TTY_NORMAL);
	} while (dcr_read(up->dcr_host, UART_LSR) & UART_LSR_DR);

	tty_flip_buffer_push(tty);
	ret = IRQ_HANDLED;

	/* clear interrupt */
	dcr_write(up->dcr_host, UART_IIR, 1);
out:
	spin_unlock(&up->port.lock);
	return ret;
}

/* Open: disable auto flow control, hook the IRQ, enable RX interrupts. */
static int nwpserial_startup(struct uart_port *port)
{
	struct nwpserial_port *up;
	int err;

	up = container_of(port, struct nwpserial_port, port);

	/* disable flow control by default */
	up->mcr = dcr_read(up->dcr_host, UART_MCR) & ~UART_MCR_AFE;
	dcr_write(up->dcr_host, UART_MCR, up->mcr);

	/* register interrupt handler */
	err = request_irq(up->port.irq, nwpserial_interrupt,
		IRQF_SHARED, "nwpserial", up);
	if (err)
		return err;

	/* enable interrupts */
	up->ier = UART_IER_RDI;
	dcr_write(up->dcr_host, UART_IER, up->ier);

	/* enable receiving */
	up->port.ignore_status_mask &= ~NWPSERIAL_STATUS_RXVALID;

	return 0;
}

/* Close: stop reception, mask interrupts, release the IRQ. */
static void nwpserial_shutdown(struct uart_port *port)
{
	struct nwpserial_port *up;

	up = container_of(port, struct nwpserial_port, port);

	/* disable receiving */
	up->port.ignore_status_mask |= NWPSERIAL_STATUS_RXVALID;

	/* disable interrupts from this port */
	up->ier = 0;
	dcr_write(up->dcr_host, UART_IER, up->ier);

	/* free irq */
	free_irq(up->port.irq, port);
}

/* User-driven port reconfiguration is not supported. */
static int nwpserial_verify_port(struct uart_port *port,
			struct serial_struct *ser)
{
	return -EINVAL;
}

static const char *nwpserial_type(struct uart_port *port)
{
	return port->type == PORT_NWPSERIAL ? "nwpserial" : NULL;
}

/*
 * The hardware line parameters are fixed; only honor CREAD (by dropping
 * received characters when it is clear) and report the old settings
 * back to the tty layer.
 */
static void nwpserial_set_termios(struct uart_port *port,
			struct ktermios *termios, struct ktermios *old)
{
	struct nwpserial_port *up;

	up = container_of(port, struct nwpserial_port, port);

	up->port.read_status_mask = NWPSERIAL_STATUS_RXVALID
				| NWPSERIAL_STATUS_TXFULL;

	up->port.ignore_status_mask = 0;
	/* ignore all characters if CREAD is not set */
	if ((termios->c_cflag & CREAD) == 0)
		up->port.ignore_status_mask |= NWPSERIAL_STATUS_RXVALID;

	/* Copy back the old hardware settings */
	if (old)
		tty_termios_copy_hw(termios, old);
}

static void nwpserial_break_ctl(struct uart_port *port, int ctl)
{
	/* N/A */
}

static void nwpserial_enable_ms(struct uart_port *port)
{
	/* N/A */
}

static void nwpserial_stop_rx(struct uart_port *port)
{
	struct nwpserial_port *up;

	up = container_of(port, struct nwpserial_port, port);

	/* don't forward any more data (like !CREAD) */
	up->port.ignore_status_mask = NWPSERIAL_STATUS_RXVALID;
}

/* Transmit one character, blocking until the TX FIFO has room. */
static void nwpserial_putchar(struct nwpserial_port *up, unsigned char c)
{
	/* check if tx buffer is full */
	wait_for_bits(up, UART_LSR_THRE);
	dcr_write(up->dcr_host, UART_TX, c);
	up->port.icount.tx++;
}

/*
 * Drain the whole transmit circular buffer synchronously (polled TX).
 * NOTE(review): no uart_write_wakeup() is issued after draining —
 * confirm whether writers can stall waiting for buffer space.
 */
static void nwpserial_start_tx(struct uart_port *port)
{
	struct nwpserial_port *up;
	struct circ_buf *xmit;

	up = container_of(port, struct nwpserial_port, port);
	xmit  = &up->port.state->xmit;

	if (port->x_char) {
		nwpserial_putchar(up, up->port.x_char);
		port->x_char = 0;
	}

	while (!(uart_circ_empty(xmit) || uart_tx_stopped(&up->port))) {
		nwpserial_putchar(up, xmit->buf[xmit->tail]);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE-1);
	}
}

/* No modem control lines. */
static unsigned int nwpserial_get_mctrl(struct uart_port *port)
{
	return 0;
}

static void nwpserial_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	/* N/A */
}

static void nwpserial_stop_tx(struct uart_port *port)
{
	/* N/A */
}

static unsigned int nwpserial_tx_empty(struct uart_port *port)
{
	struct nwpserial_port *up;
	unsigned long flags;
	int ret;

	up = container_of(port, struct nwpserial_port, port);
	spin_lock_irqsave(&up->port.lock, flags);
	ret = dcr_read(up->dcr_host, UART_LSR);
	spin_unlock_irqrestore(&up->port.lock, flags);

	return ret & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
}

static struct uart_ops nwpserial_pops = {
	.tx_empty     = nwpserial_tx_empty,
	.set_mctrl    = nwpserial_set_mctrl,
	.get_mctrl    = nwpserial_get_mctrl,
	.stop_tx      = nwpserial_stop_tx,
	.start_tx     = nwpserial_start_tx,
	.stop_rx      = nwpserial_stop_rx,
	.enable_ms    = nwpserial_enable_ms,
	.break_ctl    = nwpserial_break_ctl,
	.startup      = nwpserial_startup,
	.shutdown     = nwpserial_shutdown,
	.set_termios  = nwpserial_set_termios,
	.type         = nwpserial_type,
	.release_port = nwpserial_release_port,
	.request_port = nwpserial_request_port,
	.config_port  = nwpserial_config_port,
	.verify_port  = nwpserial_verify_port,
};

static struct uart_driver nwpserial_reg = {
	.owner       = THIS_MODULE,
	.driver_name = "nwpserial",
	.dev_name    = "ttySQ",
	.major       = TTY_MAJOR,
	.minor       = 68,
	.nr          = NWPSERIAL_NR,
	.cons        = NWPSERIAL_CONSOLE,
};

/*
 * Register one NWP UART described by `port` (its device node supplies
 * the DCR range).  Ports are keyed by DCR base: a port already known
 * (e.g. set up early for the console) is reused, otherwise a free slot
 * is claimed and its DCRs mapped.  Returns the uart line number or a
 * negative errno.
 */
int nwpserial_register_port(struct uart_port *port)
{
	struct nwpserial_port *up = NULL;
	int ret = -1;
	int i;
	static int first = 1;
	int dcr_len;
	int dcr_base;
	struct device_node *dn;

	mutex_lock(&nwpserial_mutex);

	dn = port->dev->of_node;
	if (dn == NULL)
		goto out;

	/* get dcr base. */
	dcr_base = dcr_resource_start(dn, 0);

	/* find matching entry */
	for (i = 0; i < NWPSERIAL_NR; i++)
		if (nwpserial_ports[i].port.iobase == dcr_base) {
			up = &nwpserial_ports[i];
			break;
		}

	/* we didn't find a matching entry, search for a free port */
	if (up == NULL)
		for (i = 0; i < NWPSERIAL_NR; i++)
			if (nwpserial_ports[i].port.type == PORT_UNKNOWN &&
				nwpserial_ports[i].port.iobase == 0) {
				up = &nwpserial_ports[i];
				break;
			}

	if (up == NULL) {
		ret = -EBUSY;
		goto out;
	}

	/* lazily register the uart driver on first use */
	if (first)
		uart_register_driver(&nwpserial_reg);
	first = 0;

	up->port.membase      = port->membase;
	up->port.irq          = port->irq;
	up->port.uartclk      = port->uartclk;
	up->port.fifosize     = port->fifosize;
	up->port.regshift     = port->regshift;
	up->port.iotype       = port->iotype;
	up->port.flags        = port->flags;
	up->port.mapbase      = port->mapbase;
	up->port.private_data = port->private_data;

	if (port->dev)
		up->port.dev = port->dev;

	/* fresh slot: finish its one-time setup and map the DCRs */
	if (up->port.iobase != dcr_base) {
		up->port.ops          = &nwpserial_pops;
		up->port.fifosize     = 16;

		spin_lock_init(&up->port.lock);

		up->port.iobase = dcr_base;
		dcr_len = dcr_resource_len(dn, 0);

		up->dcr_host = dcr_map(dn, dcr_base, dcr_len);
		if (!DCR_MAP_OK(up->dcr_host)) {
			printk(KERN_ERR "Cannot map DCR resources for NWPSERIAL");
			goto out;
		}
	}

	ret = uart_add_one_port(&nwpserial_reg, &up->port);
	if (ret == 0)
		ret = up->port.line;

out:
	mutex_unlock(&nwpserial_mutex);

	return ret;
}
EXPORT_SYMBOL(nwpserial_register_port);

/* Remove a previously registered port; the slot becomes reusable. */
void nwpserial_unregister_port(int line)
{
	struct nwpserial_port *up = &nwpserial_ports[line];
	mutex_lock(&nwpserial_mutex);
	uart_remove_one_port(&nwpserial_reg, &up->port);

	up->port.type = PORT_UNKNOWN;

	mutex_unlock(&nwpserial_mutex);
}
EXPORT_SYMBOL(nwpserial_unregister_port);

#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE
static int __init nwpserial_console_init(void)
{
	struct nwpserial_port *up = NULL;
	struct device_node *dn;
	const char *name;
	int dcr_base;
	int dcr_len;
	int i;

	/* search for a free port */
	for (i = 0; i < NWPSERIAL_NR; i++)
		if (nwpserial_ports[i].port.type == PORT_UNKNOWN) {
up = &nwpserial_ports[i]; break; } if (up == NULL) return -1; name = of_get_property(of_chosen, "linux,stdout-path", NULL); if (name == NULL) return -1; dn = of_find_node_by_path(name); if (!dn) return -1; spin_lock_init(&up->port.lock); up->port.ops = &nwpserial_pops; up->port.type = PORT_NWPSERIAL; up->port.fifosize = 16; dcr_base = dcr_resource_start(dn, 0); dcr_len = dcr_resource_len(dn, 0); up->port.iobase = dcr_base; up->dcr_host = dcr_map(dn, dcr_base, dcr_len); if (!DCR_MAP_OK(up->dcr_host)) { printk("Cannot map DCR resources for SERIAL"); return -1; } register_console(&nwpserial_console); return 0; } console_initcall(nwpserial_console_init); #endif /* CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE */
gpl-2.0
sinoory/linux3.19
fs/dcookies.c
3735
7049
/* * dcookies.c * * Copyright 2002 John Levon <levon@movementarian.org> * * Persistent cookie-path mappings. These are used by * profilers to convert a per-task EIP value into something * non-transitory that can be processed at a later date. * This is done by locking the dentry/vfsmnt pair in the * kernel until released by the tasks needing the persistent * objects. The tag is simply an unsigned long that refers * to the pair and can be looked up from userspace. */ #include <linux/syscalls.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/mount.h> #include <linux/capability.h> #include <linux/dcache.h> #include <linux/mm.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/dcookies.h> #include <linux/mutex.h> #include <linux/path.h> #include <linux/compat.h> #include <asm/uaccess.h> /* The dcookies are allocated from a kmem_cache and * hashed onto a small number of lists. None of the * code here is particularly performance critical */ struct dcookie_struct { struct path path; struct list_head hash_list; }; static LIST_HEAD(dcookie_users); static DEFINE_MUTEX(dcookie_mutex); static struct kmem_cache *dcookie_cache __read_mostly; static struct list_head *dcookie_hashtable __read_mostly; static size_t hash_size __read_mostly; static inline int is_live(void) { return !(list_empty(&dcookie_users)); } /* The dentry is locked, its address will do for the cookie */ static inline unsigned long dcookie_value(struct dcookie_struct * dcs) { return (unsigned long)dcs->path.dentry; } static size_t dcookie_hash(unsigned long dcookie) { return (dcookie >> L1_CACHE_SHIFT) & (hash_size - 1); } static struct dcookie_struct * find_dcookie(unsigned long dcookie) { struct dcookie_struct *found = NULL; struct dcookie_struct * dcs; struct list_head * pos; struct list_head * list; list = dcookie_hashtable + dcookie_hash(dcookie); list_for_each(pos, list) { dcs = list_entry(pos, struct dcookie_struct, hash_list); if 
(dcookie_value(dcs) == dcookie) { found = dcs; break; } } return found; } static void hash_dcookie(struct dcookie_struct * dcs) { struct list_head * list = dcookie_hashtable + dcookie_hash(dcookie_value(dcs)); list_add(&dcs->hash_list, list); } static struct dcookie_struct *alloc_dcookie(struct path *path) { struct dcookie_struct *dcs = kmem_cache_alloc(dcookie_cache, GFP_KERNEL); struct dentry *d; if (!dcs) return NULL; d = path->dentry; spin_lock(&d->d_lock); d->d_flags |= DCACHE_COOKIE; spin_unlock(&d->d_lock); dcs->path = *path; path_get(path); hash_dcookie(dcs); return dcs; } /* This is the main kernel-side routine that retrieves the cookie * value for a dentry/vfsmnt pair. */ int get_dcookie(struct path *path, unsigned long *cookie) { int err = 0; struct dcookie_struct * dcs; mutex_lock(&dcookie_mutex); if (!is_live()) { err = -EINVAL; goto out; } if (path->dentry->d_flags & DCACHE_COOKIE) { dcs = find_dcookie((unsigned long)path->dentry); } else { dcs = alloc_dcookie(path); if (!dcs) { err = -ENOMEM; goto out; } } *cookie = dcookie_value(dcs); out: mutex_unlock(&dcookie_mutex); return err; } /* And here is where the userspace process can look up the cookie value * to retrieve the path. */ SYSCALL_DEFINE3(lookup_dcookie, u64, cookie64, char __user *, buf, size_t, len) { unsigned long cookie = (unsigned long)cookie64; int err = -EINVAL; char * kbuf; char * path; size_t pathlen; struct dcookie_struct * dcs; /* we could leak path information to users * without dir read permission without this */ if (!capable(CAP_SYS_ADMIN)) return -EPERM; mutex_lock(&dcookie_mutex); if (!is_live()) { err = -EINVAL; goto out; } if (!(dcs = find_dcookie(cookie))) goto out; err = -ENOMEM; kbuf = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!kbuf) goto out; /* FIXME: (deleted) ? 
*/ path = d_path(&dcs->path, kbuf, PAGE_SIZE); mutex_unlock(&dcookie_mutex); if (IS_ERR(path)) { err = PTR_ERR(path); goto out_free; } err = -ERANGE; pathlen = kbuf + PAGE_SIZE - path; if (pathlen <= len) { err = pathlen; if (copy_to_user(buf, path, pathlen)) err = -EFAULT; } out_free: kfree(kbuf); return err; out: mutex_unlock(&dcookie_mutex); return err; } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE4(lookup_dcookie, u32, w0, u32, w1, char __user *, buf, compat_size_t, len) { #ifdef __BIG_ENDIAN return sys_lookup_dcookie(((u64)w0 << 32) | w1, buf, len); #else return sys_lookup_dcookie(((u64)w1 << 32) | w0, buf, len); #endif } #endif static int dcookie_init(void) { struct list_head * d; unsigned int i, hash_bits; int err = -ENOMEM; dcookie_cache = kmem_cache_create("dcookie_cache", sizeof(struct dcookie_struct), 0, 0, NULL); if (!dcookie_cache) goto out; dcookie_hashtable = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!dcookie_hashtable) goto out_kmem; err = 0; /* * Find the power-of-two list-heads that can fit into the allocation.. * We don't guarantee that "sizeof(struct list_head)" is necessarily * a power-of-two. */ hash_size = PAGE_SIZE / sizeof(struct list_head); hash_bits = 0; do { hash_bits++; } while ((hash_size >> hash_bits) != 0); hash_bits--; /* * Re-calculate the actual number of entries and the mask * from the number of bits we can fit. 
*/ hash_size = 1UL << hash_bits; /* And initialize the newly allocated array */ d = dcookie_hashtable; i = hash_size; do { INIT_LIST_HEAD(d); d++; i--; } while (i); out: return err; out_kmem: kmem_cache_destroy(dcookie_cache); goto out; } static void free_dcookie(struct dcookie_struct * dcs) { struct dentry *d = dcs->path.dentry; spin_lock(&d->d_lock); d->d_flags &= ~DCACHE_COOKIE; spin_unlock(&d->d_lock); path_put(&dcs->path); kmem_cache_free(dcookie_cache, dcs); } static void dcookie_exit(void) { struct list_head * list; struct list_head * pos; struct list_head * pos2; struct dcookie_struct * dcs; size_t i; for (i = 0; i < hash_size; ++i) { list = dcookie_hashtable + i; list_for_each_safe(pos, pos2, list) { dcs = list_entry(pos, struct dcookie_struct, hash_list); list_del(&dcs->hash_list); free_dcookie(dcs); } } kfree(dcookie_hashtable); kmem_cache_destroy(dcookie_cache); } struct dcookie_user { struct list_head next; }; struct dcookie_user * dcookie_register(void) { struct dcookie_user * user; mutex_lock(&dcookie_mutex); user = kmalloc(sizeof(struct dcookie_user), GFP_KERNEL); if (!user) goto out; if (!is_live() && dcookie_init()) goto out_free; list_add(&user->next, &dcookie_users); out: mutex_unlock(&dcookie_mutex); return user; out_free: kfree(user); user = NULL; goto out; } void dcookie_unregister(struct dcookie_user * user) { mutex_lock(&dcookie_mutex); list_del(&user->next); kfree(user); if (!is_live()) dcookie_exit(); mutex_unlock(&dcookie_mutex); } EXPORT_SYMBOL_GPL(dcookie_register); EXPORT_SYMBOL_GPL(dcookie_unregister); EXPORT_SYMBOL_GPL(get_dcookie);
gpl-2.0
SOKP/kernel_xiaomi_cancro
arch/sh/boards/board-magicpanelr2.c
4503
10719
/* * linux/arch/sh/boards/magicpanel/setup.c * * Copyright (C) 2007 Markus Brunner, Mark Jonas * * Magic Panel Release 2 board setup * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/irq.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/smsc911x.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/mtd/map.h> #include <mach/magicpanelr2.h> #include <asm/heartbeat.h> #include <cpu/sh7720.h> #define LAN9115_READY (__raw_readl(0xA8000084UL) & 0x00000001UL) /* Wait until reset finished. Timeout is 100ms. */ static int __init ethernet_reset_finished(void) { int i; if (LAN9115_READY) return 1; for (i = 0; i < 10; ++i) { mdelay(10); if (LAN9115_READY) return 1; } return 0; } static void __init reset_ethernet(void) { /* PMDR: LAN_RESET=on */ CLRBITS_OUTB(0x10, PORT_PMDR); udelay(200); /* PMDR: LAN_RESET=off */ SETBITS_OUTB(0x10, PORT_PMDR); } static void __init setup_chip_select(void) { /* CS2: LAN (0x08000000 - 0x0bffffff) */ /* no idle cycles, normal space, 8 bit data bus */ __raw_writel(0x36db0400, CS2BCR); /* (SW:1.5 WR:3 HW:1.5), ext. wait */ __raw_writel(0x000003c0, CS2WCR); /* CS4: CAN1 (0xb0000000 - 0xb3ffffff) */ /* no idle cycles, normal space, 8 bit data bus */ __raw_writel(0x00000200, CS4BCR); /* (SW:1.5 WR:3 HW:1.5), ext. wait */ __raw_writel(0x00100981, CS4WCR); /* CS5a: CAN2 (0xb4000000 - 0xb5ffffff) */ /* no idle cycles, normal space, 8 bit data bus */ __raw_writel(0x00000200, CS5ABCR); /* (SW:1.5 WR:3 HW:1.5), ext. wait */ __raw_writel(0x00100981, CS5AWCR); /* CS5b: CAN3 (0xb6000000 - 0xb7ffffff) */ /* no idle cycles, normal space, 8 bit data bus */ __raw_writel(0x00000200, CS5BBCR); /* (SW:1.5 WR:3 HW:1.5), ext. 
wait */ __raw_writel(0x00100981, CS5BWCR); /* CS6a: Rotary (0xb8000000 - 0xb9ffffff) */ /* no idle cycles, normal space, 8 bit data bus */ __raw_writel(0x00000200, CS6ABCR); /* (SW:1.5 WR:3 HW:1.5), no ext. wait */ __raw_writel(0x001009C1, CS6AWCR); } static void __init setup_port_multiplexing(void) { /* A7 GPO(LED8); A6 GPO(LED7); A5 GPO(LED6); A4 GPO(LED5); * A3 GPO(LED4); A2 GPO(LED3); A1 GPO(LED2); A0 GPO(LED1); */ __raw_writew(0x5555, PORT_PACR); /* 01 01 01 01 01 01 01 01 */ /* B7 GPO(RST4); B6 GPO(RST3); B5 GPO(RST2); B4 GPO(RST1); * B3 GPO(PB3); B2 GPO(PB2); B1 GPO(PB1); B0 GPO(PB0); */ __raw_writew(0x5555, PORT_PBCR); /* 01 01 01 01 01 01 01 01 */ /* C7 GPO(PC7); C6 GPO(PC6); C5 GPO(PC5); C4 GPO(PC4); * C3 LCD_DATA3; C2 LCD_DATA2; C1 LCD_DATA1; C0 LCD_DATA0; */ __raw_writew(0x5500, PORT_PCCR); /* 01 01 01 01 00 00 00 00 */ /* D7 GPO(PD7); D6 GPO(PD6); D5 GPO(PD5); D4 GPO(PD4); * D3 GPO(PD3); D2 GPO(PD2); D1 GPO(PD1); D0 GPO(PD0); */ __raw_writew(0x5555, PORT_PDCR); /* 01 01 01 01 01 01 01 01 */ /* E7 (x); E6 GPI(nu); E5 GPI(nu); E4 LCD_M_DISP; * E3 LCD_CL1; E2 LCD_CL2; E1 LCD_DON; E0 LCD_FLM; */ __raw_writew(0x3C00, PORT_PECR); /* 00 11 11 00 00 00 00 00 */ /* F7 (x); F6 DA1(VLCD); F5 DA0(nc); F4 AN3; * F3 AN2(MID_AD); F2 AN1(EARTH_AD); F1 AN0(TEMP); F0 GPI+(nc); */ __raw_writew(0x0002, PORT_PFCR); /* 00 00 00 00 00 00 00 10 */ /* G7 (x); G6 IRQ5(TOUCH_BUSY); G5 IRQ4(TOUCH_IRQ); G4 GPI(KEY2); * G3 GPI(KEY1); G2 GPO(LED11); G1 GPO(LED10); G0 GPO(LED9); */ __raw_writew(0x03D5, PORT_PGCR); /* 00 00 00 11 11 01 01 01 */ /* H7 (x); H6 /RAS(BRAS); H5 /CAS(BCAS); H4 CKE(BCKE); * H3 GPO(EARTH_OFF); H2 GPO(EARTH_TEST); H1 USB2_PWR; H0 USB1_PWR; */ __raw_writew(0x0050, PORT_PHCR); /* 00 00 00 00 01 01 00 00 */ /* J7 (x); J6 AUDCK; J5 ASEBRKAK; J4 AUDATA3; * J3 AUDATA2; J2 AUDATA1; J1 AUDATA0; J0 AUDSYNC; */ __raw_writew(0x0000, PORT_PJCR); /* 00 00 00 00 00 00 00 00 */ /* K7 (x); K6 (x); K5 (x); K4 (x); * K3 PINT7(/PWR2); K2 PINT6(/PWR1); K1 PINT5(nu); K0 
PINT4(FLASH_READY) */ __raw_writew(0x00FF, PORT_PKCR); /* 00 00 00 00 11 11 11 11 */ /* L7 TRST; L6 TMS; L5 TDO; L4 TDI; * L3 TCK; L2 (x); L1 (x); L0 (x); */ __raw_writew(0x0000, PORT_PLCR); /* 00 00 00 00 00 00 00 00 */ /* M7 GPO(CURRENT_SINK); M6 GPO(PWR_SWITCH); M5 GPO(LAN_SPEED); * M4 GPO(LAN_RESET); M3 GPO(BUZZER); M2 GPO(LCD_BL); * M1 CS5B(CAN3_CS); M0 GPI+(nc); */ __raw_writew(0x5552, PORT_PMCR); /* 01 01 01 01 01 01 00 10 */ /* CURRENT_SINK=off, PWR_SWITCH=off, LAN_SPEED=100MBit, * LAN_RESET=off, BUZZER=off, LCD_BL=off */ #if CONFIG_SH_MAGIC_PANEL_R2_VERSION == 2 __raw_writeb(0x30, PORT_PMDR); #elif CONFIG_SH_MAGIC_PANEL_R2_VERSION == 3 __raw_writeb(0xF0, PORT_PMDR); #else #error Unknown revision of PLATFORM_MP_R2 #endif /* P7 (x); P6 (x); P5 (x); * P4 GPO(nu); P3 IRQ3(LAN_IRQ); P2 IRQ2(CAN3_IRQ); * P1 IRQ1(CAN2_IRQ); P0 IRQ0(CAN1_IRQ) */ __raw_writew(0x0100, PORT_PPCR); /* 00 00 00 01 00 00 00 00 */ __raw_writeb(0x10, PORT_PPDR); /* R7 A25; R6 A24; R5 A23; R4 A22; * R3 A21; R2 A20; R1 A19; R0 A0; */ gpio_request(GPIO_FN_A25, NULL); gpio_request(GPIO_FN_A24, NULL); gpio_request(GPIO_FN_A23, NULL); gpio_request(GPIO_FN_A22, NULL); gpio_request(GPIO_FN_A21, NULL); gpio_request(GPIO_FN_A20, NULL); gpio_request(GPIO_FN_A19, NULL); gpio_request(GPIO_FN_A0, NULL); /* S7 (x); S6 (x); S5 (x); S4 GPO(EEPROM_CS2); * S3 GPO(EEPROM_CS1); S2 SIOF0_TXD; S1 SIOF0_RXD; S0 SIOF0_SCK; */ __raw_writew(0x0140, PORT_PSCR); /* 00 00 00 01 01 00 00 00 */ /* T7 (x); T6 (x); T5 (x); T4 COM1_CTS; * T3 COM1_RTS; T2 COM1_TXD; T1 COM1_RXD; T0 GPO(WDOG) */ __raw_writew(0x0001, PORT_PTCR); /* 00 00 00 00 00 00 00 01 */ /* U7 (x); U6 (x); U5 (x); U4 GPI+(/AC_FAULT); * U3 GPO(TOUCH_CS); U2 TOUCH_TXD; U1 TOUCH_RXD; U0 TOUCH_SCK; */ __raw_writew(0x0240, PORT_PUCR); /* 00 00 00 10 01 00 00 00 */ /* V7 (x); V6 (x); V5 (x); V4 GPO(MID2); * V3 GPO(MID1); V2 CARD_TxD; V1 CARD_RxD; V0 GPI+(/BAT_FAULT); */ __raw_writew(0x0142, PORT_PVCR); /* 00 00 00 01 01 00 00 10 */ } static void __init 
mpr2_setup(char **cmdline_p) { /* set Pin Select Register A: * /PCC_CD1, /PCC_CD2, PCC_BVD1, PCC_BVD2, * /IOIS16, IRQ4, IRQ5, USB1d_SUSPEND */ __raw_writew(0xAABC, PORT_PSELA); /* set Pin Select Register B: * /SCIF0_RTS, /SCIF0_CTS, LCD_VCPWC, * LCD_VEPWC, IIC_SDA, IIC_SCL, Reserved */ __raw_writew(0x3C00, PORT_PSELB); /* set Pin Select Register C: * SIOF1_SCK, SIOF1_RxD, SCIF1_RxD, SCIF1_TxD, Reserved */ __raw_writew(0x0000, PORT_PSELC); /* set Pin Select Register D: Reserved, SIOF1_TxD, Reserved, SIOF1_MCLK, * Reserved, SIOF1_SYNC, Reserved, SCIF1_SCK, Reserved */ __raw_writew(0x0000, PORT_PSELD); /* set USB TxRx Control: Reserved, DRV, Reserved, USB_TRANS, USB_SEL */ __raw_writew(0x0101, PORT_UTRCTL); /* set USB Clock Control: USSCS, USSTB, Reserved (HighByte always A5) */ __raw_writew(0xA5C0, PORT_UCLKCR_W); setup_chip_select(); setup_port_multiplexing(); reset_ethernet(); printk(KERN_INFO "Magic Panel Release 2 A.%i\n", CONFIG_SH_MAGIC_PANEL_R2_VERSION); if (ethernet_reset_finished() == 0) printk(KERN_WARNING "Ethernet not ready\n"); } static struct resource smsc911x_resources[] = { [0] = { .start = 0xa8000000, .end = 0xabffffff, .flags = IORESOURCE_MEM, }, [1] = { .start = 35, .end = 35, .flags = IORESOURCE_IRQ, }, }; static struct smsc911x_platform_config smsc911x_config = { .phy_interface = PHY_INTERFACE_MODE_MII, .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, .flags = SMSC911X_USE_32BIT, }; static struct platform_device smsc911x_device = { .name = "smsc911x", .id = -1, .num_resources = ARRAY_SIZE(smsc911x_resources), .resource = smsc911x_resources, .dev = { .platform_data = &smsc911x_config, }, }; static struct resource heartbeat_resources[] = { [0] = { .start = PA_LED, .end = PA_LED, .flags = IORESOURCE_MEM, }, }; static struct heartbeat_data heartbeat_data = { .flags = HEARTBEAT_INVERTED, }; static struct platform_device heartbeat_device = { .name = "heartbeat", .id = -1, .dev = { .platform_data = 
&heartbeat_data, }, .num_resources = ARRAY_SIZE(heartbeat_resources), .resource = heartbeat_resources, }; static struct mtd_partition mpr2_partitions[] = { /* Reserved for bootloader, read-only */ { .name = "Bootloader", .offset = 0x00000000UL, .size = MPR2_MTD_BOOTLOADER_SIZE, .mask_flags = MTD_WRITEABLE, }, /* Reserved for kernel image */ { .name = "Kernel", .offset = MTDPART_OFS_NXTBLK, .size = MPR2_MTD_KERNEL_SIZE, }, /* Rest is used for Flash FS */ { .name = "Flash_FS", .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL, } }; static struct physmap_flash_data flash_data = { .parts = mpr2_partitions, .nr_parts = ARRAY_SIZE(mpr2_partitions), .width = 2, }; static struct resource flash_resource = { .start = 0x00000000, .end = 0x2000000UL, .flags = IORESOURCE_MEM, }; static struct platform_device flash_device = { .name = "physmap-flash", .id = -1, .resource = &flash_resource, .num_resources = 1, .dev = { .platform_data = &flash_data, }, }; /* * Add all resources to the platform_device */ static struct platform_device *mpr2_devices[] __initdata = { &heartbeat_device, &smsc911x_device, &flash_device, }; static int __init mpr2_devices_setup(void) { return platform_add_devices(mpr2_devices, ARRAY_SIZE(mpr2_devices)); } device_initcall(mpr2_devices_setup); /* * Initialize IRQ setting */ static void __init init_mpr2_IRQ(void) { plat_irq_setup_pins(IRQ_MODE_IRQ); /* install handlers for IRQ0-5 */ irq_set_irq_type(32, IRQ_TYPE_LEVEL_LOW); /* IRQ0 CAN1 */ irq_set_irq_type(33, IRQ_TYPE_LEVEL_LOW); /* IRQ1 CAN2 */ irq_set_irq_type(34, IRQ_TYPE_LEVEL_LOW); /* IRQ2 CAN3 */ irq_set_irq_type(35, IRQ_TYPE_LEVEL_LOW); /* IRQ3 SMSC9115 */ irq_set_irq_type(36, IRQ_TYPE_EDGE_RISING); /* IRQ4 touchscreen */ irq_set_irq_type(37, IRQ_TYPE_EDGE_FALLING); /* IRQ5 touchscreen */ intc_set_priority(32, 13); /* IRQ0 CAN1 */ intc_set_priority(33, 13); /* IRQ0 CAN2 */ intc_set_priority(34, 13); /* IRQ0 CAN3 */ intc_set_priority(35, 6); /* IRQ3 SMSC9115 */ } /* * The Machine Vector */ static 
struct sh_machine_vector mv_mpr2 __initmv = { .mv_name = "mpr2", .mv_setup = mpr2_setup, .mv_init_irq = init_mpr2_IRQ, };
gpl-2.0
DecimalMan/dkp
arch/m68k/platform/5272/config.c
4503
2982
/***************************************************************************/ /* * linux/arch/m68knommu/platform/5272/config.c * * Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com) * Copyright (C) 2001-2002, SnapGear Inc. (www.snapgear.com) */ /***************************************************************************/ #include <linux/kernel.h> #include <linux/param.h> #include <linux/init.h> #include <linux/io.h> #include <linux/phy.h> #include <linux/phy_fixed.h> #include <asm/machdep.h> #include <asm/coldfire.h> #include <asm/mcfsim.h> #include <asm/mcfuart.h> /***************************************************************************/ /* * Some platforms need software versions of the GPIO data registers. */ unsigned short ppdata; unsigned char ledbank = 0xff; /***************************************************************************/ static void __init m5272_uarts_init(void) { u32 v; /* Enable the output lines for the serial ports */ v = readl(MCF_MBAR + MCFSIM_PBCNT); v = (v & ~0x000000ff) | 0x00000055; writel(v, MCF_MBAR + MCFSIM_PBCNT); v = readl(MCF_MBAR + MCFSIM_PDCNT); v = (v & ~0x000003fc) | 0x000002a8; writel(v, MCF_MBAR + MCFSIM_PDCNT); } /***************************************************************************/ static void m5272_cpu_reset(void) { local_irq_disable(); /* Set watchdog to reset, and enabled */ __raw_writew(0, MCF_MBAR + MCFSIM_WIRR); __raw_writew(1, MCF_MBAR + MCFSIM_WRRR); __raw_writew(0, MCF_MBAR + MCFSIM_WCR); for (;;) /* wait for watchdog to timeout */; } /***************************************************************************/ void __init config_BSP(char *commandp, int size) { #if defined (CONFIG_MOD5272) volatile unsigned char *pivrp; /* Set base of device vectors to be 64 */ pivrp = (volatile unsigned char *) (MCF_MBAR + MCFSIM_PIVR); *pivrp = 0x40; #endif #if defined(CONFIG_NETtel) || defined(CONFIG_SCALES) /* Copy command line from FLASH to local buffer... 
*/ memcpy(commandp, (char *) 0xf0004000, size); commandp[size-1] = 0; #elif defined(CONFIG_CANCam) /* Copy command line from FLASH to local buffer... */ memcpy(commandp, (char *) 0xf0010000, size); commandp[size-1] = 0; #endif mach_reset = m5272_cpu_reset; mach_sched_init = hw_timer_init; } /***************************************************************************/ /* * Some 5272 based boards have the FEC ethernet diectly connected to * an ethernet switch. In this case we need to use the fixed phy type, * and we need to declare it early in boot. */ static struct fixed_phy_status nettel_fixed_phy_status __initdata = { .link = 1, .speed = 100, .duplex = 0, }; /***************************************************************************/ static int __init init_BSP(void) { m5272_uarts_init(); fixed_phy_add(PHY_POLL, 0, &nettel_fixed_phy_status); return 0; } arch_initcall(init_BSP); /***************************************************************************/
gpl-2.0
pinkflozd/L5_Kernel_3.4
drivers/net/tokenring/madgemc.c
4759
21053
/* * madgemc.c: Driver for the Madge Smart 16/4 MC16 MCA token ring card. * * Written 2000 by Adam Fritzler * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * This driver module supports the following cards: * - Madge Smart 16/4 Ringnode MC16 * - Madge Smart 16/4 Ringnode MC32 (??) * * Maintainer(s): * AF Adam Fritzler * * Modification History: * 16-Jan-00 AF Created * */ static const char version[] = "madgemc.c: v0.91 23/01/2000 by Adam Fritzler\n"; #include <linux/module.h> #include <linux/mca.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/trdevice.h> #include <asm/io.h> #include <asm/irq.h> #include "tms380tr.h" #include "madgemc.h" /* Madge-specific constants */ #define MADGEMC_IO_EXTENT 32 #define MADGEMC_SIF_OFFSET 0x08 struct card_info { /* * These are read from the BIA ROM. */ unsigned int manid; unsigned int cardtype; unsigned int cardrev; unsigned int ramsize; /* * These are read from the MCA POS registers. 
*/ unsigned int burstmode:2; unsigned int fairness:1; /* 0 = Fair, 1 = Unfair */ unsigned int arblevel:4; unsigned int ringspeed:2; /* 0 = 4mb, 1 = 16, 2 = Auto/none */ unsigned int cabletype:1; /* 0 = RJ45, 1 = DB9 */ }; static int madgemc_open(struct net_device *dev); static int madgemc_close(struct net_device *dev); static int madgemc_chipset_init(struct net_device *dev); static void madgemc_read_rom(struct net_device *dev, struct card_info *card); static unsigned short madgemc_setnselout_pins(struct net_device *dev); static void madgemc_setcabletype(struct net_device *dev, int type); static int madgemc_mcaproc(char *buf, int slot, void *d); static void madgemc_setregpage(struct net_device *dev, int page); static void madgemc_setsifsel(struct net_device *dev, int val); static void madgemc_setint(struct net_device *dev, int val); static irqreturn_t madgemc_interrupt(int irq, void *dev_id); /* * These work around paging, however they don't guarantee you're on the * right page. */ #define SIFREADB(reg) (inb(dev->base_addr + ((reg<0x8)?reg:reg-0x8))) #define SIFWRITEB(val, reg) (outb(val, dev->base_addr + ((reg<0x8)?reg:reg-0x8))) #define SIFREADW(reg) (inw(dev->base_addr + ((reg<0x8)?reg:reg-0x8))) #define SIFWRITEW(val, reg) (outw(val, dev->base_addr + ((reg<0x8)?reg:reg-0x8))) /* * Read a byte-length value from the register. */ static unsigned short madgemc_sifreadb(struct net_device *dev, unsigned short reg) { unsigned short ret; if (reg<0x8) ret = SIFREADB(reg); else { madgemc_setregpage(dev, 1); ret = SIFREADB(reg); madgemc_setregpage(dev, 0); } return ret; } /* * Write a byte-length value to a register. 
*/ static void madgemc_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg) { if (reg<0x8) SIFWRITEB(val, reg); else { madgemc_setregpage(dev, 1); SIFWRITEB(val, reg); madgemc_setregpage(dev, 0); } } /* * Read a word-length value from a register */ static unsigned short madgemc_sifreadw(struct net_device *dev, unsigned short reg) { unsigned short ret; if (reg<0x8) ret = SIFREADW(reg); else { madgemc_setregpage(dev, 1); ret = SIFREADW(reg); madgemc_setregpage(dev, 0); } return ret; } /* * Write a word-length value to a register. */ static void madgemc_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg) { if (reg<0x8) SIFWRITEW(val, reg); else { madgemc_setregpage(dev, 1); SIFWRITEW(val, reg); madgemc_setregpage(dev, 0); } } static struct net_device_ops madgemc_netdev_ops __read_mostly; static int __devinit madgemc_probe(struct device *device) { static int versionprinted; struct net_device *dev; struct net_local *tp; struct card_info *card; struct mca_device *mdev = to_mca_device(device); int ret = 0; if (versionprinted++ == 0) printk("%s", version); if(mca_device_claimed(mdev)) return -EBUSY; mca_device_set_claim(mdev, 1); dev = alloc_trdev(sizeof(struct net_local)); if (!dev) { printk("madgemc: unable to allocate dev space\n"); mca_device_set_claim(mdev, 0); ret = -ENOMEM; goto getout; } dev->netdev_ops = &madgemc_netdev_ops; card = kmalloc(sizeof(struct card_info), GFP_KERNEL); if (card==NULL) { ret = -ENOMEM; goto getout1; } /* * Parse configuration information. This all comes * directly from the publicly available @002d.ADF. * Get it from Madge or your local ADF library. 
*/ /* * Base address */ dev->base_addr = 0x0a20 + ((mdev->pos[2] & MC16_POS2_ADDR2)?0x0400:0) + ((mdev->pos[0] & MC16_POS0_ADDR1)?0x1000:0) + ((mdev->pos[3] & MC16_POS3_ADDR3)?0x2000:0); /* * Interrupt line */ switch(mdev->pos[0] >> 6) { /* upper two bits */ case 0x1: dev->irq = 3; break; case 0x2: dev->irq = 9; break; /* IRQ 2 = IRQ 9 */ case 0x3: dev->irq = 10; break; default: dev->irq = 0; break; } if (dev->irq == 0) { printk("%s: invalid IRQ\n", dev->name); ret = -EBUSY; goto getout2; } if (!request_region(dev->base_addr, MADGEMC_IO_EXTENT, "madgemc")) { printk(KERN_INFO "madgemc: unable to setup Smart MC in slot %d because of I/O base conflict at 0x%04lx\n", mdev->slot, dev->base_addr); dev->base_addr += MADGEMC_SIF_OFFSET; ret = -EBUSY; goto getout2; } dev->base_addr += MADGEMC_SIF_OFFSET; /* * Arbitration Level */ card->arblevel = ((mdev->pos[0] >> 1) & 0x7) + 8; /* * Burst mode and Fairness */ card->burstmode = ((mdev->pos[2] >> 6) & 0x3); card->fairness = ((mdev->pos[2] >> 4) & 0x1); /* * Ring Speed */ if ((mdev->pos[1] >> 2)&0x1) card->ringspeed = 2; /* not selected */ else if ((mdev->pos[2] >> 5) & 0x1) card->ringspeed = 1; /* 16Mb */ else card->ringspeed = 0; /* 4Mb */ /* * Cable type */ if ((mdev->pos[1] >> 6)&0x1) card->cabletype = 1; /* STP/DB9 */ else card->cabletype = 0; /* UTP/RJ-45 */ /* * ROM Info. This requires us to actually twiddle * bits on the card, so we must ensure above that * the base address is free of conflict (request_region above). 
*/ madgemc_read_rom(dev, card); if (card->manid != 0x4d) { /* something went wrong */ printk(KERN_INFO "%s: Madge MC ROM read failed (unknown manufacturer ID %02x)\n", dev->name, card->manid); goto getout3; } if ((card->cardtype != 0x08) && (card->cardtype != 0x0d)) { printk(KERN_INFO "%s: Madge MC ROM read failed (unknown card ID %02x)\n", dev->name, card->cardtype); ret = -EIO; goto getout3; } /* All cards except Rev 0 and 1 MC16's have 256kb of RAM */ if ((card->cardtype == 0x08) && (card->cardrev <= 0x01)) card->ramsize = 128; else card->ramsize = 256; printk("%s: %s Rev %d at 0x%04lx IRQ %d\n", dev->name, (card->cardtype == 0x08)?MADGEMC16_CARDNAME: MADGEMC32_CARDNAME, card->cardrev, dev->base_addr, dev->irq); if (card->cardtype == 0x0d) printk("%s: Warning: MC32 support is experimental and highly untested\n", dev->name); if (card->ringspeed==2) { /* Unknown */ printk("%s: Warning: Ring speed not set in POS -- Please run the reference disk and set it!\n", dev->name); card->ringspeed = 1; /* default to 16mb */ } printk("%s: RAM Size: %dKB\n", dev->name, card->ramsize); printk("%s: Ring Speed: %dMb/sec on %s\n", dev->name, (card->ringspeed)?16:4, card->cabletype?"STP/DB9":"UTP/RJ-45"); printk("%s: Arbitration Level: %d\n", dev->name, card->arblevel); printk("%s: Burst Mode: ", dev->name); switch(card->burstmode) { case 0: printk("Cycle steal"); break; case 1: printk("Limited burst"); break; case 2: printk("Delayed release"); break; case 3: printk("Immediate release"); break; } printk(" (%s)\n", (card->fairness)?"Unfair":"Fair"); /* * Enable SIF before we assign the interrupt handler, * just in case we get spurious interrupts that need * handling. */ outb(0, dev->base_addr + MC_CONTROL_REG0); /* sanity */ madgemc_setsifsel(dev, 1); if (request_irq(dev->irq, madgemc_interrupt, IRQF_SHARED, "madgemc", dev)) { ret = -EBUSY; goto getout3; } madgemc_chipset_init(dev); /* enables interrupts! 
*/ madgemc_setcabletype(dev, card->cabletype); /* Setup MCA structures */ mca_device_set_name(mdev, (card->cardtype == 0x08)?MADGEMC16_CARDNAME:MADGEMC32_CARDNAME); mca_set_adapter_procfn(mdev->slot, madgemc_mcaproc, dev); printk("%s: Ring Station Address: %pM\n", dev->name, dev->dev_addr); if (tmsdev_init(dev, device)) { printk("%s: unable to get memory for dev->priv.\n", dev->name); ret = -ENOMEM; goto getout4; } tp = netdev_priv(dev); /* * The MC16 is physically a 32bit card. However, Madge * insists on calling it 16bit, so I'll assume here that * they know what they're talking about. Cut off DMA * at 16mb. */ tp->setnselout = madgemc_setnselout_pins; tp->sifwriteb = madgemc_sifwriteb; tp->sifreadb = madgemc_sifreadb; tp->sifwritew = madgemc_sifwritew; tp->sifreadw = madgemc_sifreadw; tp->DataRate = (card->ringspeed)?SPEED_16:SPEED_4; memcpy(tp->ProductID, "Madge MCA 16/4 ", PROD_ID_SIZE + 1); tp->tmspriv = card; dev_set_drvdata(device, dev); if (register_netdev(dev) == 0) return 0; dev_set_drvdata(device, NULL); ret = -ENOMEM; getout4: free_irq(dev->irq, dev); getout3: release_region(dev->base_addr-MADGEMC_SIF_OFFSET, MADGEMC_IO_EXTENT); getout2: kfree(card); getout1: free_netdev(dev); getout: mca_device_set_claim(mdev, 0); return ret; } /* * Handle interrupts generated by the card * * The MicroChannel Madge cards need slightly more handling * after an interrupt than other TMS380 cards do. * * First we must make sure it was this card that generated the * interrupt (since interrupt sharing is allowed). Then, * because we're using level-triggered interrupts (as is * standard on MCA), we must toggle the interrupt line * on the card in order to claim and acknowledge the interrupt. * Once that is done, the interrupt should be handlable in * the normal tms380tr_interrupt() routine. * * There's two ways we can check to see if the interrupt is ours, * both with their own disadvantages... * * 1) Read in the SIFSTS register from the TMS controller. 
This * is guaranteed to be accurate, however, there's a fairly * large performance penalty for doing so: the Madge chips * must request the register from the Eagle, the Eagle must * read them from its internal bus, and then take the route * back out again, for a 16bit read. * * 2) Use the MC_CONTROL_REG0_SINTR bit from the Madge ASICs. * The major disadvantage here is that the accuracy of the * bit is in question. However, it cuts out the extra read * cycles it takes to read the Eagle's SIF, as its only an * 8bit read, and theoretically the Madge bit is directly * connected to the interrupt latch coming out of the Eagle * hardware (that statement is not verified). * * I can't determine which of these methods has the best win. For now, * we make a compromise. Use the Madge way for the first interrupt, * which should be the fast-path, and then once we hit the first * interrupt, keep on trying using the SIF method until we've * exhausted all contiguous interrupts. * */ static irqreturn_t madgemc_interrupt(int irq, void *dev_id) { int pending,reg1; struct net_device *dev; if (!dev_id) { printk("madgemc_interrupt: was not passed a dev_id!\n"); return IRQ_NONE; } dev = dev_id; /* Make sure its really us. -- the Madge way */ pending = inb(dev->base_addr + MC_CONTROL_REG0); if (!(pending & MC_CONTROL_REG0_SINTR)) return IRQ_NONE; /* not our interrupt */ /* * Since we're level-triggered, we may miss the rising edge * of the next interrupt while we're off handling this one, * so keep checking until the SIF verifies that it has nothing * left for us to do. 
*/ pending = STS_SYSTEM_IRQ; do { if (pending & STS_SYSTEM_IRQ) { /* Toggle the interrupt to reset the latch on card */ reg1 = inb(dev->base_addr + MC_CONTROL_REG1); outb(reg1 ^ MC_CONTROL_REG1_SINTEN, dev->base_addr + MC_CONTROL_REG1); outb(reg1, dev->base_addr + MC_CONTROL_REG1); /* Continue handling as normal */ tms380tr_interrupt(irq, dev_id); pending = SIFREADW(SIFSTS); /* restart - the SIF way */ } else return IRQ_HANDLED; } while (1); return IRQ_HANDLED; /* not reachable */ } /* * Set the card to the preferred ring speed. * * Unlike newer cards, the MC16/32 have their speed selection * circuit connected to the Madge ASICs and not to the TMS380 * NSELOUT pins. Set the ASIC bits correctly here, and return * zero to leave the TMS NSELOUT bits unaffected. * */ static unsigned short madgemc_setnselout_pins(struct net_device *dev) { unsigned char reg1; struct net_local *tp = netdev_priv(dev); reg1 = inb(dev->base_addr + MC_CONTROL_REG1); if(tp->DataRate == SPEED_16) reg1 |= MC_CONTROL_REG1_SPEED_SEL; /* add for 16mb */ else if (reg1 & MC_CONTROL_REG1_SPEED_SEL) reg1 ^= MC_CONTROL_REG1_SPEED_SEL; /* remove for 4mb */ outb(reg1, dev->base_addr + MC_CONTROL_REG1); return 0; /* no change */ } /* * Set the register page. This equates to the SRSX line * on the TMS380Cx6. * * Register selection is normally done via three contiguous * bits. However, some boards (such as the MC16/32) use only * two bits, plus a separate bit in the glue chip. This * sets the SRSX bit (the top bit). See page 4-17 in the * Yellow Book for which registers are affected. 
* */ static void madgemc_setregpage(struct net_device *dev, int page) { static int reg1; reg1 = inb(dev->base_addr + MC_CONTROL_REG1); if ((page == 0) && (reg1 & MC_CONTROL_REG1_SRSX)) { outb(reg1 ^ MC_CONTROL_REG1_SRSX, dev->base_addr + MC_CONTROL_REG1); } else if (page == 1) { outb(reg1 | MC_CONTROL_REG1_SRSX, dev->base_addr + MC_CONTROL_REG1); } reg1 = inb(dev->base_addr + MC_CONTROL_REG1); } /* * The SIF registers are not mapped into register space by default * Set this to 1 to map them, 0 to map the BIA ROM. * */ static void madgemc_setsifsel(struct net_device *dev, int val) { unsigned int reg0; reg0 = inb(dev->base_addr + MC_CONTROL_REG0); if ((val == 0) && (reg0 & MC_CONTROL_REG0_SIFSEL)) { outb(reg0 ^ MC_CONTROL_REG0_SIFSEL, dev->base_addr + MC_CONTROL_REG0); } else if (val == 1) { outb(reg0 | MC_CONTROL_REG0_SIFSEL, dev->base_addr + MC_CONTROL_REG0); } reg0 = inb(dev->base_addr + MC_CONTROL_REG0); } /* * Enable SIF interrupts * * This does not enable interrupts in the SIF, but rather * enables SIF interrupts to be passed onto the host. * */ static void madgemc_setint(struct net_device *dev, int val) { unsigned int reg1; reg1 = inb(dev->base_addr + MC_CONTROL_REG1); if ((val == 0) && (reg1 & MC_CONTROL_REG1_SINTEN)) { outb(reg1 ^ MC_CONTROL_REG1_SINTEN, dev->base_addr + MC_CONTROL_REG1); } else if (val == 1) { outb(reg1 | MC_CONTROL_REG1_SINTEN, dev->base_addr + MC_CONTROL_REG1); } } /* * Cable type is set via control register 7. Bit zero high * for UTP, low for STP. */ static void madgemc_setcabletype(struct net_device *dev, int type) { outb((type==0)?MC_CONTROL_REG7_CABLEUTP:MC_CONTROL_REG7_CABLESTP, dev->base_addr + MC_CONTROL_REG7); } /* * Enable the functions of the Madge chipset needed for * full working order. 
*/ static int madgemc_chipset_init(struct net_device *dev) { outb(0, dev->base_addr + MC_CONTROL_REG1); /* pull SRESET low */ tms380tr_wait(100); /* wait for card to reset */ /* bring back into normal operating mode */ outb(MC_CONTROL_REG1_NSRESET, dev->base_addr + MC_CONTROL_REG1); /* map SIF registers */ madgemc_setsifsel(dev, 1); /* enable SIF interrupts */ madgemc_setint(dev, 1); return 0; } /* * Disable the board, and put back into power-up state. */ static void madgemc_chipset_close(struct net_device *dev) { /* disable interrupts */ madgemc_setint(dev, 0); /* unmap SIF registers */ madgemc_setsifsel(dev, 0); } /* * Read the card type (MC16 or MC32) from the card. * * The configuration registers are stored in two separate * pages. Pages are flipped by clearing bit 3 of CONTROL_REG0 (PAGE) * for page zero, or setting bit 3 for page one. * * Page zero contains the following data: * Byte 0: Manufacturer ID (0x4D -- ASCII "M") * Byte 1: Card type: * 0x08 for MC16 * 0x0D for MC32 * Byte 2: Card revision * Byte 3: Mirror of POS config register 0 * Byte 4: Mirror of POS 1 * Byte 5: Mirror of POS 2 * * Page one contains the following data: * Byte 0: Unused * Byte 1-6: BIA, MSB to LSB. * * Note that to read the BIA, we must unmap the SIF registers * by clearing bit 2 of CONTROL_REG0 (SIFSEL), as the data * will reside in the same logical location. For this reason, * _never_ read the BIA while the Eagle processor is running! * The SIF will be completely inaccessible until the BIA operation * is complete. 
 *
 */
static void madgemc_read_rom(struct net_device *dev, struct card_info *card)
{
	unsigned long ioaddr;
	unsigned char reg0, reg1, tmpreg0, i;

	ioaddr = dev->base_addr;

	/* Save the current control register state so it can be restored */
	reg0 = inb(ioaddr + MC_CONTROL_REG0);
	reg1 = inb(ioaddr + MC_CONTROL_REG1);

	/* Switch to page zero and unmap SIF */
	tmpreg0 = reg0 & ~(MC_CONTROL_REG0_PAGE + MC_CONTROL_REG0_SIFSEL);
	outb(tmpreg0, ioaddr + MC_CONTROL_REG0);

	/* Page zero: manufacturer ID, card type, and revision bytes */
	card->manid = inb(ioaddr + MC_ROM_MANUFACTURERID);
	card->cardtype = inb(ioaddr + MC_ROM_ADAPTERID);
	card->cardrev = inb(ioaddr + MC_ROM_REVISION);

	/* Switch to rom page one */
	outb(tmpreg0 | MC_CONTROL_REG0_PAGE, ioaddr + MC_CONTROL_REG0);

	/* Read BIA (burned-in address, 6 bytes) into the netdev */
	dev->addr_len = 6;
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = inb(ioaddr + MC_ROM_BIA_START + i);

	/* Restore original register values */
	outb(reg0, ioaddr + MC_CONTROL_REG0);
	outb(reg1, ioaddr + MC_CONTROL_REG1);
}

static int madgemc_open(struct net_device *dev)
{
	/*
	 * Go ahead and reinitialize the chipset again, just to
	 * make sure we didn't get left in a bad state.
*/ madgemc_chipset_init(dev); tms380tr_open(dev); return 0; } static int madgemc_close(struct net_device *dev) { tms380tr_close(dev); madgemc_chipset_close(dev); return 0; } /* * Give some details available from /proc/mca/slotX */ static int madgemc_mcaproc(char *buf, int slot, void *d) { struct net_device *dev = (struct net_device *)d; struct net_local *tp = netdev_priv(dev); struct card_info *curcard = tp->tmspriv; int len = 0; len += sprintf(buf+len, "-------\n"); if (curcard) { len += sprintf(buf+len, "Card Revision: %d\n", curcard->cardrev); len += sprintf(buf+len, "RAM Size: %dkb\n", curcard->ramsize); len += sprintf(buf+len, "Cable type: %s\n", (curcard->cabletype)?"STP/DB9":"UTP/RJ-45"); len += sprintf(buf+len, "Configured ring speed: %dMb/sec\n", (curcard->ringspeed)?16:4); len += sprintf(buf+len, "Running ring speed: %dMb/sec\n", (tp->DataRate==SPEED_16)?16:4); len += sprintf(buf+len, "Device: %s\n", dev->name); len += sprintf(buf+len, "IO Port: 0x%04lx\n", dev->base_addr); len += sprintf(buf+len, "IRQ: %d\n", dev->irq); len += sprintf(buf+len, "Arbitration Level: %d\n", curcard->arblevel); len += sprintf(buf+len, "Burst Mode: "); switch(curcard->burstmode) { case 0: len += sprintf(buf+len, "Cycle steal"); break; case 1: len += sprintf(buf+len, "Limited burst"); break; case 2: len += sprintf(buf+len, "Delayed release"); break; case 3: len += sprintf(buf+len, "Immediate release"); break; } len += sprintf(buf+len, " (%s)\n", (curcard->fairness)?"Unfair":"Fair"); len += sprintf(buf+len, "Ring Station Address: %pM\n", dev->dev_addr); } else len += sprintf(buf+len, "Card not configured\n"); return len; } static int __devexit madgemc_remove(struct device *device) { struct net_device *dev = dev_get_drvdata(device); struct net_local *tp; struct card_info *card; BUG_ON(!dev); tp = netdev_priv(dev); card = tp->tmspriv; kfree(card); tp->tmspriv = NULL; unregister_netdev(dev); release_region(dev->base_addr-MADGEMC_SIF_OFFSET, MADGEMC_IO_EXTENT); free_irq(dev->irq, 
dev); tmsdev_term(dev); free_netdev(dev); dev_set_drvdata(device, NULL); return 0; } static short madgemc_adapter_ids[] __initdata = { 0x002d, 0x0000 }; static struct mca_driver madgemc_driver = { .id_table = madgemc_adapter_ids, .driver = { .name = "madgemc", .bus = &mca_bus_type, .probe = madgemc_probe, .remove = __devexit_p(madgemc_remove), }, }; static int __init madgemc_init (void) { madgemc_netdev_ops = tms380tr_netdev_ops; madgemc_netdev_ops.ndo_open = madgemc_open; madgemc_netdev_ops.ndo_stop = madgemc_close; return mca_register_driver (&madgemc_driver); } static void __exit madgemc_exit (void) { mca_unregister_driver (&madgemc_driver); } module_init(madgemc_init); module_exit(madgemc_exit); MODULE_LICENSE("GPL");
gpl-2.0
DirtyUnicorns-Ports/android_kernel_samsung_jf
block/blk-flush.c
5271
13401
/* * Functions to sequence FLUSH and FUA writes. * * Copyright (C) 2011 Max Planck Institute for Gravitational Physics * Copyright (C) 2011 Tejun Heo <tj@kernel.org> * * This file is released under the GPLv2. * * REQ_{FLUSH|FUA} requests are decomposed to sequences consisted of three * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request * properties and hardware capability. * * If a request doesn't have data, only REQ_FLUSH makes sense, which * indicates a simple flush request. If there is data, REQ_FLUSH indicates * that the device cache should be flushed before the data is executed, and * REQ_FUA means that the data must be on non-volatile media on request * completion. * * If the device doesn't have writeback cache, FLUSH and FUA don't make any * difference. The requests are either completed immediately if there's no * data or executed as normal requests otherwise. * * If the device has writeback cache and supports FUA, REQ_FLUSH is * translated to PREFLUSH but REQ_FUA is passed down directly with DATA. * * If the device has writeback cache and doesn't support FUA, REQ_FLUSH is * translated to PREFLUSH and REQ_FUA to POSTFLUSH. * * The actual execution of flush is double buffered. Whenever a request * needs to execute PRE or POSTFLUSH, it queues at * q->flush_queue[q->flush_pending_idx]. Once certain criteria are met, a * flush is issued and the pending_idx is toggled. When the flush * completes, all the requests which were pending are proceeded to the next * step. This allows arbitrary merging of different types of FLUSH/FUA * requests. * * Currently, the following conditions are used to determine when to issue * flush. * * C1. At any given time, only one flush shall be in progress. This makes * double buffering sufficient. * * C2. Flush is deferred if any request is executing DATA of its sequence. * This avoids issuing separate POSTFLUSHes for requests which shared * PREFLUSH. * * C3. 
The second condition is ignored if there is a request which has * waited longer than FLUSH_PENDING_TIMEOUT. This is to avoid * starvation in the unlikely case where there are continuous stream of * FUA (without FLUSH) requests. * * For devices which support FUA, it isn't clear whether C2 (and thus C3) * is beneficial. * * Note that a sequenced FLUSH/FUA request with DATA is completed twice. * Once while executing DATA and again after the whole sequence is * complete. The first completion updates the contained bio but doesn't * finish it so that the bio submitter is notified only after the whole * sequence is complete. This is implemented by testing REQ_FLUSH_SEQ in * req_bio_endio(). * * The above peculiarity requires that each FLUSH/FUA request has only one * bio attached to it, which is guaranteed as they aren't allowed to be * merged in the usual way. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/gfp.h> #include "blk.h" /* FLUSH/FUA sequences */ enum { REQ_FSEQ_PREFLUSH = (1 << 0), /* pre-flushing in progress */ REQ_FSEQ_DATA = (1 << 1), /* data write in progress */ REQ_FSEQ_POSTFLUSH = (1 << 2), /* post-flushing in progress */ REQ_FSEQ_DONE = (1 << 3), REQ_FSEQ_ACTIONS = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH, /* * If flush has been pending longer than the following timeout, * it's issued even if flush_data requests are still in flight. 
*/ FLUSH_PENDING_TIMEOUT = 5 * HZ, }; static bool blk_kick_flush(struct request_queue *q); static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq) { unsigned int policy = 0; if (blk_rq_sectors(rq)) policy |= REQ_FSEQ_DATA; if (fflags & REQ_FLUSH) { if (rq->cmd_flags & REQ_FLUSH) policy |= REQ_FSEQ_PREFLUSH; if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA)) policy |= REQ_FSEQ_POSTFLUSH; } return policy; } static unsigned int blk_flush_cur_seq(struct request *rq) { return 1 << ffz(rq->flush.seq); } static void blk_flush_restore_request(struct request *rq) { /* * After flush data completion, @rq->bio is %NULL but we need to * complete the bio again. @rq->biotail is guaranteed to equal the * original @rq->bio. Restore it. */ rq->bio = rq->biotail; /* make @rq a normal request */ rq->cmd_flags &= ~REQ_FLUSH_SEQ; rq->end_io = rq->flush.saved_end_io; } /** * blk_flush_complete_seq - complete flush sequence * @rq: FLUSH/FUA request being sequenced * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero) * @error: whether an error occurred * * @rq just completed @seq part of its flush sequence, record the * completion and trigger the next step. * * CONTEXT: * spin_lock_irq(q->queue_lock) * * RETURNS: * %true if requests were added to the dispatch queue, %false otherwise. 
*/ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq, int error) { struct request_queue *q = rq->q; struct list_head *pending = &q->flush_queue[q->flush_pending_idx]; bool queued = false; BUG_ON(rq->flush.seq & seq); rq->flush.seq |= seq; if (likely(!error)) seq = blk_flush_cur_seq(rq); else seq = REQ_FSEQ_DONE; switch (seq) { case REQ_FSEQ_PREFLUSH: case REQ_FSEQ_POSTFLUSH: /* queue for flush */ if (list_empty(pending)) q->flush_pending_since = jiffies; list_move_tail(&rq->flush.list, pending); break; case REQ_FSEQ_DATA: list_move_tail(&rq->flush.list, &q->flush_data_in_flight); list_add(&rq->queuelist, &q->queue_head); queued = true; break; case REQ_FSEQ_DONE: /* * @rq was previously adjusted by blk_flush_issue() for * flush sequencing and may already have gone through the * flush data request completion path. Restore @rq for * normal completion and end it. */ BUG_ON(!list_empty(&rq->queuelist)); list_del_init(&rq->flush.list); blk_flush_restore_request(rq); __blk_end_request_all(rq, error); break; default: BUG(); } return blk_kick_flush(q) | queued; } static void flush_end_io(struct request *flush_rq, int error) { struct request_queue *q = flush_rq->q; struct list_head *running = &q->flush_queue[q->flush_running_idx]; bool queued = false; struct request *rq, *n; BUG_ON(q->flush_pending_idx == q->flush_running_idx); /* account completion of the flush request */ q->flush_running_idx ^= 1; elv_completed_request(q, flush_rq); /* and push the waiting requests to the next stage */ list_for_each_entry_safe(rq, n, running, flush.list) { unsigned int seq = blk_flush_cur_seq(rq); BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH); queued |= blk_flush_complete_seq(rq, seq, error); } /* * Kick the queue to avoid stall for two cases: * 1. Moving a request silently to empty queue_head may stall the * queue. * 2. When flush request is running in non-queueable queue, the * queue is hold. 
Restart the queue after flush request is finished * to avoid stall. * This function is called from request completion path and calling * directly into request_fn may confuse the driver. Always use * kblockd. */ if (queued || q->flush_queue_delayed) blk_run_queue_async(q); q->flush_queue_delayed = 0; } /** * blk_kick_flush - consider issuing flush request * @q: request_queue being kicked * * Flush related states of @q have changed, consider issuing flush request. * Please read the comment at the top of this file for more info. * * CONTEXT: * spin_lock_irq(q->queue_lock) * * RETURNS: * %true if flush was issued, %false otherwise. */ static bool blk_kick_flush(struct request_queue *q) { struct list_head *pending = &q->flush_queue[q->flush_pending_idx]; struct request *first_rq = list_first_entry(pending, struct request, flush.list); /* C1 described at the top of this file */ if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending)) return false; /* C2 and C3 */ if (!list_empty(&q->flush_data_in_flight) && time_before(jiffies, q->flush_pending_since + FLUSH_PENDING_TIMEOUT)) return false; /* * Issue flush and toggle pending_idx. This makes pending_idx * different from running_idx, which means flush is in flight. */ blk_rq_init(q, &q->flush_rq); q->flush_rq.cmd_type = REQ_TYPE_FS; q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ; q->flush_rq.rq_disk = first_rq->rq_disk; q->flush_rq.end_io = flush_end_io; q->flush_pending_idx ^= 1; list_add_tail(&q->flush_rq.queuelist, &q->queue_head); return true; } static void flush_data_end_io(struct request *rq, int error) { struct request_queue *q = rq->q; /* * After populating an empty queue, kick it to avoid stall. Read * the comment in flush_end_io(). */ if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error)) blk_run_queue_async(q); } /** * blk_insert_flush - insert a new FLUSH/FUA request * @rq: request to insert * * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions. 
* @rq is being submitted. Analyze what needs to be done and put it on the * right queue. * * CONTEXT: * spin_lock_irq(q->queue_lock) */ void blk_insert_flush(struct request *rq) { struct request_queue *q = rq->q; unsigned int fflags = q->flush_flags; /* may change, cache */ unsigned int policy = blk_flush_policy(fflags, rq); /* * @policy now records what operations need to be done. Adjust * REQ_FLUSH and FUA for the driver. */ rq->cmd_flags &= ~REQ_FLUSH; if (!(fflags & REQ_FUA)) rq->cmd_flags &= ~REQ_FUA; /* * An empty flush handed down from a stacking driver may * translate into nothing if the underlying device does not * advertise a write-back cache. In this case, simply * complete the request. */ if (!policy) { __blk_end_bidi_request(rq, 0, 0, 0); return; } BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */ /* * If there's data but flush is not necessary, the request can be * processed directly without going through flush machinery. Queue * for normal execution. */ if ((policy & REQ_FSEQ_DATA) && !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) { list_add_tail(&rq->queuelist, &q->queue_head); return; } /* * @rq should go through flush machinery. Mark it part of flush * sequence and submit for further processing. */ memset(&rq->flush, 0, sizeof(rq->flush)); INIT_LIST_HEAD(&rq->flush.list); rq->cmd_flags |= REQ_FLUSH_SEQ; rq->flush.saved_end_io = rq->end_io; /* Usually NULL */ rq->end_io = flush_data_end_io; blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0); } /** * blk_abort_flushes - @q is being aborted, abort flush requests * @q: request_queue being aborted * * To be called from elv_abort_queue(). @q is being aborted. Prepare all * FLUSH/FUA requests for abortion. * * CONTEXT: * spin_lock_irq(q->queue_lock) */ void blk_abort_flushes(struct request_queue *q) { struct request *rq, *n; int i; /* * Requests in flight for data are already owned by the dispatch * queue or the device driver. Just restore for normal completion. 
*/ list_for_each_entry_safe(rq, n, &q->flush_data_in_flight, flush.list) { list_del_init(&rq->flush.list); blk_flush_restore_request(rq); } /* * We need to give away requests on flush queues. Restore for * normal completion and put them on the dispatch queue. */ for (i = 0; i < ARRAY_SIZE(q->flush_queue); i++) { list_for_each_entry_safe(rq, n, &q->flush_queue[i], flush.list) { list_del_init(&rq->flush.list); blk_flush_restore_request(rq); list_add_tail(&rq->queuelist, &q->queue_head); } } } static void bio_end_flush(struct bio *bio, int err) { if (err) clear_bit(BIO_UPTODATE, &bio->bi_flags); if (bio->bi_private) complete(bio->bi_private); bio_put(bio); } /** * blkdev_issue_flush - queue a flush * @bdev: blockdev to issue flush for * @gfp_mask: memory allocation flags (for bio_alloc) * @error_sector: error sector * * Description: * Issue a flush for the block device in question. Caller can supply * room for storing the error offset in case of a flush error, if they * wish to. If WAIT flag is not passed then caller may check only what * request was pushed in some internal queue for later handling. */ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, sector_t *error_sector) { DECLARE_COMPLETION_ONSTACK(wait); struct request_queue *q; struct bio *bio; int ret = 0; if (bdev->bd_disk == NULL) return -ENXIO; q = bdev_get_queue(bdev); if (!q) return -ENXIO; /* * some block devices may not have their queue correctly set up here * (e.g. loop device without a backing file) and so issuing a flush * here will panic. Ensure there is a request function before issuing * the flush. */ if (!q->make_request_fn) return -ENXIO; bio = bio_alloc(gfp_mask, 0); bio->bi_end_io = bio_end_flush; bio->bi_bdev = bdev; bio->bi_private = &wait; bio_get(bio); submit_bio(WRITE_FLUSH, bio); wait_for_completion(&wait); /* * The driver must store the error location in ->bi_sector, if * it supports it. For non-stacked drivers, this should be * copied from blk_rq_pos(rq). 
*/ if (error_sector) *error_sector = bio->bi_sector; if (!bio_flagged(bio, BIO_UPTODATE)) ret = -EIO; bio_put(bio); return ret; } EXPORT_SYMBOL(blkdev_issue_flush);
gpl-2.0
mrlambchop/imx23-kernel
arch/sparc/kernel/auxio_64.c
7575
3195
/* auxio.c: Probing for the Sparc AUXIO register at boot time. * * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) * * Refactoring for unified NCR/PCIO support 2002 Eric Brower (ebrower@usa.net) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/of_device.h> #include <asm/prom.h> #include <asm/io.h> #include <asm/auxio.h> void __iomem *auxio_register = NULL; EXPORT_SYMBOL(auxio_register); enum auxio_type { AUXIO_TYPE_NODEV, AUXIO_TYPE_SBUS, AUXIO_TYPE_EBUS }; static enum auxio_type auxio_devtype = AUXIO_TYPE_NODEV; static DEFINE_SPINLOCK(auxio_lock); static void __auxio_rmw(u8 bits_on, u8 bits_off, int ebus) { if (auxio_register) { unsigned long flags; u8 regval, newval; spin_lock_irqsave(&auxio_lock, flags); regval = (ebus ? (u8) readl(auxio_register) : sbus_readb(auxio_register)); newval = regval | bits_on; newval &= ~bits_off; if (!ebus) newval &= ~AUXIO_AUX1_MASK; if (ebus) writel((u32) newval, auxio_register); else sbus_writeb(newval, auxio_register); spin_unlock_irqrestore(&auxio_lock, flags); } } static void __auxio_set_bit(u8 bit, int on, int ebus) { u8 bits_on = (ebus ? AUXIO_PCIO_LED : AUXIO_AUX1_LED); u8 bits_off = 0; if (!on) { u8 tmp = bits_off; bits_off = bits_on; bits_on = tmp; } __auxio_rmw(bits_on, bits_off, ebus); } void auxio_set_led(int on) { int ebus = auxio_devtype == AUXIO_TYPE_EBUS; u8 bit; bit = (ebus ? 
AUXIO_PCIO_LED : AUXIO_AUX1_LED); __auxio_set_bit(bit, on, ebus); } EXPORT_SYMBOL(auxio_set_led); static void __auxio_sbus_set_lte(int on) { __auxio_set_bit(AUXIO_AUX1_LTE, on, 0); } void auxio_set_lte(int on) { switch(auxio_devtype) { case AUXIO_TYPE_SBUS: __auxio_sbus_set_lte(on); break; case AUXIO_TYPE_EBUS: /* FALL-THROUGH */ default: break; } } EXPORT_SYMBOL(auxio_set_lte); static const struct of_device_id auxio_match[] = { { .name = "auxio", }, {}, }; MODULE_DEVICE_TABLE(of, auxio_match); static int __devinit auxio_probe(struct platform_device *dev) { struct device_node *dp = dev->dev.of_node; unsigned long size; if (!strcmp(dp->parent->name, "ebus")) { auxio_devtype = AUXIO_TYPE_EBUS; size = sizeof(u32); } else if (!strcmp(dp->parent->name, "sbus")) { auxio_devtype = AUXIO_TYPE_SBUS; size = 1; } else { printk("auxio: Unknown parent bus type [%s]\n", dp->parent->name); return -ENODEV; } auxio_register = of_ioremap(&dev->resource[0], 0, size, "auxio"); if (!auxio_register) return -ENODEV; printk(KERN_INFO "AUXIO: Found device at %s\n", dp->full_name); if (auxio_devtype == AUXIO_TYPE_EBUS) auxio_set_led(AUXIO_LED_ON); return 0; } static struct platform_driver auxio_driver = { .probe = auxio_probe, .driver = { .name = "auxio", .owner = THIS_MODULE, .of_match_table = auxio_match, }, }; static int __init auxio_init(void) { return platform_driver_register(&auxio_driver); } /* Must be after subsys_initcall() so that busses are probed. Must * be before device_initcall() because things like the floppy driver * need to use the AUXIO register. */ fs_initcall(auxio_init);
gpl-2.0
Constellation/linux-3.13-rc2
drivers/s390/crypto/ap_bus.c
152
52853
/* * Copyright IBM Corp. 2006, 2012 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Ralph Wuerthner <rwuerthn@de.ibm.com> * Felix Beck <felix.beck@de.ibm.com> * Holger Dengler <hd@linux.vnet.ibm.com> * * Adjunct processor bus. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define KMSG_COMPONENT "ap" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/kernel_stat.h> #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/workqueue.h> #include <linux/slab.h> #include <linux/notifier.h> #include <linux/kthread.h> #include <linux/mutex.h> #include <asm/reset.h> #include <asm/airq.h> #include <linux/atomic.h> #include <asm/isc.h> #include <linux/hrtimer.h> #include <linux/ktime.h> #include <asm/facility.h> #include "ap_bus.h" /* Some prototypes. 
*/ static void ap_scan_bus(struct work_struct *); static void ap_poll_all(unsigned long); static enum hrtimer_restart ap_poll_timeout(struct hrtimer *); static int ap_poll_thread_start(void); static void ap_poll_thread_stop(void); static void ap_request_timeout(unsigned long); static inline void ap_schedule_poll_timer(void); static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags); static int ap_device_remove(struct device *dev); static int ap_device_probe(struct device *dev); static void ap_interrupt_handler(struct airq_struct *airq); static void ap_reset(struct ap_device *ap_dev); static void ap_config_timeout(unsigned long ptr); static int ap_select_domain(void); static void ap_query_configuration(void); /* * Module description. */ MODULE_AUTHOR("IBM Corporation"); MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \ "Copyright IBM Corp. 2006, 2012"); MODULE_LICENSE("GPL"); MODULE_ALIAS("z90crypt"); /* * Module parameter */ int ap_domain_index = -1; /* Adjunct Processor Domain Index */ module_param_named(domain, ap_domain_index, int, 0000); MODULE_PARM_DESC(domain, "domain index for ap devices"); EXPORT_SYMBOL(ap_domain_index); static int ap_thread_flag = 0; module_param_named(poll_thread, ap_thread_flag, int, 0000); MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off)."); static struct device *ap_root_device = NULL; static struct ap_config_info *ap_configuration; static DEFINE_SPINLOCK(ap_device_list_lock); static LIST_HEAD(ap_device_list); /* * Workqueue & timer for bus rescan. 
*/ static struct workqueue_struct *ap_work_queue; static struct timer_list ap_config_timer; static int ap_config_time = AP_CONFIG_TIME; static DECLARE_WORK(ap_config_work, ap_scan_bus); /* * Tasklet & timer for AP request polling and interrupts */ static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0); static atomic_t ap_poll_requests = ATOMIC_INIT(0); static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait); static struct task_struct *ap_poll_kthread = NULL; static DEFINE_MUTEX(ap_poll_thread_mutex); static DEFINE_SPINLOCK(ap_poll_timer_lock); static struct hrtimer ap_poll_timer; /* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds. * If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/ static unsigned long long poll_timeout = 250000; /* Suspend flag */ static int ap_suspend_flag; /* Flag to check if domain was set through module parameter domain=. This is * important when supsend and resume is done in a z/VM environment where the * domain might change. */ static int user_set_domain = 0; static struct bus_type ap_bus_type; /* Adapter interrupt definitions */ static int ap_airq_flag; static struct airq_struct ap_airq = { .handler = ap_interrupt_handler, .isc = AP_ISC, }; /** * ap_using_interrupts() - Returns non-zero if interrupt support is * available. */ static inline int ap_using_interrupts(void) { return ap_airq_flag; } /** * ap_intructions_available() - Test if AP instructions are available. * * Returns 0 if the AP instructions are installed. */ static inline int ap_instructions_available(void) { register unsigned long reg0 asm ("0") = AP_MKQID(0,0); register unsigned long reg1 asm ("1") = -ENODEV; register unsigned long reg2 asm ("2") = 0UL; asm volatile( " .long 0xb2af0000\n" /* PQAP(TAPQ) */ "0: la %1,0\n" "1:\n" EX_TABLE(0b, 1b) : "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" ); return reg1; } /** * ap_interrupts_available(): Test if AP interrupts are available. * * Returns 1 if AP interrupts are available. 
*/ static int ap_interrupts_available(void) { return test_facility(2) && test_facility(65); } /** * ap_configuration_available(): Test if AP configuration * information is available. * * Returns 1 if AP configuration information is available. */ #ifdef CONFIG_64BIT static int ap_configuration_available(void) { return test_facility(2) && test_facility(12); } #endif /** * ap_test_queue(): Test adjunct processor queue. * @qid: The AP queue number * @queue_depth: Pointer to queue depth value * @device_type: Pointer to device type value * * Returns AP queue status structure. */ static inline struct ap_queue_status ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type) { register unsigned long reg0 asm ("0") = qid; register struct ap_queue_status reg1 asm ("1"); register unsigned long reg2 asm ("2") = 0UL; asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */ : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc"); *device_type = (int) (reg2 >> 24); *queue_depth = (int) (reg2 & 0xff); return reg1; } /** * ap_reset_queue(): Reset adjunct processor queue. * @qid: The AP queue number * * Returns AP queue status structure. */ static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid) { register unsigned long reg0 asm ("0") = qid | 0x01000000UL; register struct ap_queue_status reg1 asm ("1"); register unsigned long reg2 asm ("2") = 0UL; asm volatile( ".long 0xb2af0000" /* PQAP(RAPQ) */ : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc"); return reg1; } #ifdef CONFIG_64BIT /** * ap_queue_interruption_control(): Enable interruption for a specific AP. * @qid: The AP queue number * @ind: The notification indicator byte * * Returns AP queue status. 
 */
static inline struct ap_queue_status
ap_queue_interruption_control(ap_qid_t qid, void *ind)
{
	register unsigned long reg0 asm ("0") = qid | 0x03000000UL;
	/* Interruption-request bit plus the isc to be used. */
	register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC;
	register struct ap_queue_status reg1_out asm ("1");
	register void *reg2 asm ("2") = ind;
	asm volatile(
		".long 0xb2af0000"		/* PQAP(AQIC) */
		: "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
		:
		: "cc" );
	return reg1_out;
}
#endif

#ifdef CONFIG_64BIT
/*
 * __ap_query_functions(): Issue PQAP(TAPQ) with the T bit set to also
 * retrieve the facility bits of the queue. On an exception the status
 * stays AP_QUEUE_STATUS_INVALID.
 */
static inline struct ap_queue_status
__ap_query_functions(ap_qid_t qid, unsigned int *functions)
{
	register unsigned long reg0 asm ("0") = 0UL | qid | (1UL << 23);
	register struct ap_queue_status reg1 asm ("1") = AP_QUEUE_STATUS_INVALID;
	register unsigned long reg2 asm ("2");

	asm volatile(
		".long 0xb2af0000\n"		/* PQAP(TAPQ) */
		"0:\n"
		EX_TABLE(0b, 0b)
		: "+d" (reg0), "+d" (reg1), "=d" (reg2)
		:
		: "cc");

	/* The facility bits are in the upper word of GR2. */
	*functions = (unsigned int)(reg2 >> 32);
	return reg1;
}
#endif

#ifdef CONFIG_64BIT
/*
 * __ap_query_configuration(): Issue PQAP(QCI) to fill the 128 byte
 * config info block. Returns 0 on success, -EINVAL if QCI traps
 * (not supported).
 */
static inline int __ap_query_configuration(struct ap_config_info *config)
{
	register unsigned long reg0 asm ("0") = 0x04000000UL;
	register unsigned long reg1 asm ("1") = -EINVAL;
	register unsigned char *reg2 asm ("2") = (unsigned char *)config;

	asm volatile(
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: la %1,0\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (reg0), "+d" (reg1), "+d" (reg2)
		:
		: "cc");

	return reg1;
}
#endif

/**
 * ap_query_functions(): Query supported functions.
 * @qid: The AP queue number
 * @functions: Pointer to functions field.
 *
 * Returns
 * 0 on success.
 * -ENODEV if queue not valid.
 * -EBUSY if device busy.
 * -EINVAL if query function is not supported
 */
static int ap_query_functions(ap_qid_t qid, unsigned int *functions)
{
#ifdef CONFIG_64BIT
	struct ap_queue_status status;
	int i;
	status = __ap_query_functions(qid, functions);

	/* Retry busy/transient responses up to AP_MAX_RESET times,
	 * with a 5 usec delay between attempts. */
	for (i = 0; i < AP_MAX_RESET; i++) {
		if (ap_queue_status_invalid_test(&status))
			return -ENODEV;

		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			return 0;
		case AP_RESPONSE_RESET_IN_PROGRESS:
		case AP_RESPONSE_BUSY:
			/* transient - retry below */
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		case AP_RESPONSE_INVALID_ADDRESS:
			return -ENODEV;
		case AP_RESPONSE_OTHERWISE_CHANGED:
			break;
		default:
			break;
		}
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			status = __ap_query_functions(qid, functions);
		}
	}
	return -EBUSY;
#else
	/* PQAP(TAPQ) with the T bit requires 64 bit support. */
	return -EINVAL;
#endif
}

/**
 * ap_queue_enable_interruption(): Enable interruption on an AP.
 * @qid: The AP queue number
 * @ind: the notification indicator byte
 *
 * Enables interruption on AP queue via ap_queue_interruption_control(). Based
 * on the return value it waits a while and tests the AP queue if interrupts
 * have been switched on using ap_test_queue().
 */
static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
{
#ifdef CONFIG_64BIT
	struct ap_queue_status status;
	int t_depth, t_device_type, rc, i;

	rc = -EBUSY;
	status = ap_queue_interruption_control(qid, ind);

	for (i = 0; i < AP_MAX_RESET; i++) {
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			if (status.int_enabled)
				return 0;
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
		case AP_RESPONSE_BUSY:
			/* Retry the AQIC itself while the queue is busy. */
			if (i < AP_MAX_RESET - 1) {
				udelay(5);
				status = ap_queue_interruption_control(qid,
								       ind);
				continue;
			}
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		case AP_RESPONSE_INVALID_ADDRESS:
			return -ENODEV;
		case AP_RESPONSE_OTHERWISE_CHANGED:
			if (status.int_enabled)
				return 0;
			break;
		default:
			break;
		}
		/* AQIC accepted but int_enabled not yet seen: poll the
		 * queue status via TAPQ until interrupts show as enabled. */
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			status = ap_test_queue(qid, &t_depth, &t_device_type);
		}
	}
	return rc;
#else
	return -EINVAL;
#endif
}

/**
 * __ap_send(): Send message to adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @length: The message length
 * @special: Special Bit
 *
 * Returns AP queue status structure.
 * Condition code 1 on NQAP can't happen because the L bit is 1.
 * Condition code 2 on NQAP also means the send is incomplete,
 * because a segment boundary was reached. The NQAP is repeated.
 */
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
	  unsigned int special)
{
	/* Variable-length struct typedef so the whole message buffer can be
	 * named as a single "m" memory operand for the asm below. */
	typedef struct { char _[length]; } msgblock;
	register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = (unsigned long) msg;
	register unsigned long reg3 asm ("3") = (unsigned long) length;
	register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
	register unsigned long reg5 asm ("5") = psmid & 0xffffffff;

	if (special == 1)
		reg0 |= 0x400000UL;

	asm volatile (
		"0: .long 0xb2ad0042\n"		/* NQAP */
		" brc 2,0b"			/* retry on cc 2 */
		: "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
		: "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
		: "cc" );
	return reg1;
}

/*
 * ap_send(): Map the NQAP status of __ap_send() onto errno values
 * (0 / -EBUSY / -EINVAL / -ENODEV).
 */
int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	status = __ap_send(qid, psmid, msg, length, 0);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_Q_FULL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return -EINVAL;
	default:	/* Device is gone. */
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_send);

/**
 * __ap_recv(): Receive message from adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: Pointer to program supplied message identifier
 * @msg: The message text
 * @length: The message length
 *
 * Returns AP queue status structure.
 * Condition code 1 on DQAP means the receive has taken place
 * but only partially. The response is incomplete, hence the
 * DQAP is repeated.
 * Condition code 2 on DQAP also means the receive is incomplete,
 * this time because a segment boundary was reached. Again, the
 * DQAP is repeated.
 * Note that gpr2 is used by the DQAP instruction to keep track of
 * any 'residual' length, in case the instruction gets interrupted.
 * Hence it gets zeroed before the instruction.
 */
static inline struct ap_queue_status
__ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
	/* Variable-length struct typedef so the whole reply buffer can be
	 * named as a single "=m" memory operand for the asm below. */
	typedef struct { char _[length]; } msgblock;
	register unsigned long reg0 asm("0") = qid | 0x80000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm("2") = 0UL;
	register unsigned long reg4 asm("4") = (unsigned long) msg;
	register unsigned long reg5 asm("5") = (unsigned long) length;
	/* DQAP returns the psmid halves in GR6/GR7. */
	register unsigned long reg6 asm("6") = 0UL;
	register unsigned long reg7 asm("7") = 0UL;

	asm volatile(
		"0: .long 0xb2ae0064\n"		/* DQAP */
		" brc 6,0b\n"			/* retry on cc 1 or 2 */
		: "+d" (reg0), "=d" (reg1), "+d" (reg2),
		  "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
		  "=m" (*(msgblock *) msg) : : "cc" );
	*psmid = (((unsigned long long) reg6) << 32) + reg7;
	return reg1;
}

/*
 * ap_recv(): Map the DQAP status of __ap_recv() onto errno values
 * (0 / -ENOENT / -EBUSY / -ENODEV).
 */
int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	status = __ap_recv(qid, psmid, msg, length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty)
			return -ENOENT;
		return -EBUSY;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	default:
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_recv);

/**
 * ap_query_queue(): Check if an AP queue is available.
 * @qid: The AP queue number
 * @queue_depth: Pointer to queue depth value
 * @device_type: Pointer to device type value
 *
 * The test is repeated for AP_MAX_RESET times.
 */
static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
{
	struct ap_queue_status status;
	int t_depth, t_device_type, rc, i;

	rc = -EBUSY;
	for (i = 0; i < AP_MAX_RESET; i++) {
		status = ap_test_queue(qid, &t_depth, &t_device_type);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			/* TAPQ reports depth - 1; export the real depth. */
			*queue_depth = t_depth + 1;
			*device_type = t_device_type;
			rc = 0;
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
			break;		/* transient - retry */
		case AP_RESPONSE_DECONFIGURED:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_CHECKSTOPPED:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_INVALID_ADDRESS:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_OTHERWISE_CHANGED:
			break;		/* transient - retry */
		case AP_RESPONSE_BUSY:
			break;		/* transient - retry */
		default:
			BUG();
		}
		if (rc != -EBUSY)
			break;
		if (i < AP_MAX_RESET - 1)
			udelay(5);
	}
	return rc;
}

/**
 * ap_init_queue(): Reset an AP queue.
 * @qid: The AP queue number
 *
 * Reset an AP queue and wait for it to become available again.
 */
static int ap_init_queue(ap_qid_t qid)
{
	struct ap_queue_status status;
	int rc, dummy, i;

	rc = -ENODEV;
	status = ap_reset_queue(qid);
	for (i = 0; i < AP_MAX_RESET; i++) {
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			if (status.queue_empty)
				rc = 0;
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
			i = AP_MAX_RESET;	/* return with -ENODEV */
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
			rc = -EBUSY;
			/* fallthrough - keep polling like BUSY */
		case AP_RESPONSE_BUSY:
		default:
			break;
		}
		if (rc != -ENODEV && rc != -EBUSY)
			break;
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			status = ap_test_queue(qid, &dummy, &dummy);
		}
	}
	if (rc == 0 && ap_using_interrupts()) {
		rc = ap_queue_enable_interruption(qid, ap_airq.lsi_ptr);
		/* If interruption mode is supported by the machine,
		 * but an AP can not be enabled for interruption then
		 * the AP will be discarded. */
		if (rc)
			pr_err("Registering adapter interrupts for "
			       "AP %d failed\n", AP_QID_DEVICE(qid));
	}
	return rc;
}

/**
 * ap_increase_queue_count(): Arm request timeout.
* @ap_dev: Pointer to an AP device. * * Arm request timeout if an AP device was idle and a new request is submitted. */ static void ap_increase_queue_count(struct ap_device *ap_dev) { int timeout = ap_dev->drv->request_timeout; ap_dev->queue_count++; if (ap_dev->queue_count == 1) { mod_timer(&ap_dev->timeout, jiffies + timeout); ap_dev->reset = AP_RESET_ARMED; } } /** * ap_decrease_queue_count(): Decrease queue count. * @ap_dev: Pointer to an AP device. * * If AP device is still alive, re-schedule request timeout if there are still * pending requests. */ static void ap_decrease_queue_count(struct ap_device *ap_dev) { int timeout = ap_dev->drv->request_timeout; ap_dev->queue_count--; if (ap_dev->queue_count > 0) mod_timer(&ap_dev->timeout, jiffies + timeout); else /* * The timeout timer should to be disabled now - since * del_timer_sync() is very expensive, we just tell via the * reset flag to ignore the pending timeout timer. */ ap_dev->reset = AP_RESET_IGNORE; } /* * AP device related attributes. 
 */
static ssize_t ap_hwtype_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
}

static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);

static ssize_t ap_depth_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
}

static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);

/* The request/queue counters are updated under ap_dev->lock, so take it
 * here for a consistent snapshot. */
static ssize_t ap_request_count_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int rc;

	spin_lock_bh(&ap_dev->lock);
	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
	spin_unlock_bh(&ap_dev->lock);
	return rc;
}

static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);

static ssize_t ap_requestq_count_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int rc;

	spin_lock_bh(&ap_dev->lock);
	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->requestq_count);
	spin_unlock_bh(&ap_dev->lock);
	return rc;
}

static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);

static ssize_t ap_pendingq_count_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int rc;

	spin_lock_bh(&ap_dev->lock);
	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->pendingq_count);
	spin_unlock_bh(&ap_dev->lock);
	return rc;
}

static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);

static ssize_t ap_modalias_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type);
}

static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);

static ssize_t ap_functions_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	return snprintf(buf, PAGE_SIZE, "0x%08X\n", ap_dev->functions);
}

static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);

static struct attribute *ap_dev_attrs[] = {
	&dev_attr_hwtype.attr,
	&dev_attr_depth.attr,
	&dev_attr_request_count.attr,
	&dev_attr_requestq_count.attr,
	&dev_attr_pendingq_count.attr,
	&dev_attr_modalias.attr,
	&dev_attr_ap_functions.attr,
	NULL
};
static struct attribute_group ap_dev_attr_group = {
	.attrs = ap_dev_attrs
};

/**
 * ap_bus_match()
 * @dev: Pointer to device
 * @drv: Pointer to device_driver
 *
 * AP bus driver registration/unregistration.
 */
static int ap_bus_match(struct device *dev, struct device_driver *drv)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = to_ap_drv(drv);
	struct ap_device_id *id;

	/*
	 * Compare device type of the device with the list of
	 * supported types of the device_driver.
	 */
	for (id = ap_drv->ids; id->match_flags; id++) {
		if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
		    (id->dev_type != ap_dev->device_type))
			continue;
		return 1;
	}
	return 0;
}

/**
 * ap_uevent(): Uevent function for AP devices.
 * @dev: Pointer to device
 * @env: Pointer to kobj_uevent_env
 *
 * It sets up a single environment variable DEV_TYPE which contains the
 * hardware device type.
 */
static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int retval = 0;

	if (!ap_dev)
		return -ENODEV;

	/* Set up DEV_TYPE environment variable. */
	retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
	if (retval)
		return retval;

	/* Add MODALIAS= */
	retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);

	return retval;
}

static int ap_bus_suspend(struct device *dev, pm_message_t state)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	unsigned long flags;

	if (!ap_suspend_flag) {
		/* First device to be suspended: tear down the bus-wide
		 * machinery (rescan timer, workqueue, poll tasklet). */
		ap_suspend_flag = 1;
		/* Disable scanning for devices, thus we do not want to scan
		 * for them after removing.
		 */
		del_timer_sync(&ap_config_timer);
		if (ap_work_queue != NULL) {
			destroy_workqueue(ap_work_queue);
			ap_work_queue = NULL;
		}
		tasklet_disable(&ap_tasklet);
	}
	/* Poll on the device until all requests are finished. */
	do {
		flags = 0;
		spin_lock_bh(&ap_dev->lock);
		__ap_poll_device(ap_dev, &flags);
		spin_unlock_bh(&ap_dev->lock);
	} while ((flags & 1) || (flags & 2));

	spin_lock_bh(&ap_dev->lock);
	ap_dev->unregistered = 1;
	spin_unlock_bh(&ap_dev->lock);

	return 0;
}

static int ap_bus_resume(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int rc;

	if (ap_suspend_flag) {
		/* First device to be resumed: re-probe interrupt support
		 * (it may have changed across suspend, e.g. under z/VM),
		 * re-select the domain and restart the bus machinery. */
		ap_suspend_flag = 0;
		if (ap_interrupts_available()) {
			if (!ap_using_interrupts()) {
				rc = register_adapter_interrupt(&ap_airq);
				ap_airq_flag = (rc == 0);
			}
		} else {
			if (ap_using_interrupts()) {
				unregister_adapter_interrupt(&ap_airq);
				ap_airq_flag = 0;
			}
		}
		ap_query_configuration();
		if (!user_set_domain) {
			ap_domain_index = -1;
			ap_select_domain();
		}
		init_timer(&ap_config_timer);
		ap_config_timer.function = ap_config_timeout;
		ap_config_timer.data = 0;
		ap_config_timer.expires = jiffies + ap_config_time * HZ;
		add_timer(&ap_config_timer);
		ap_work_queue = create_singlethread_workqueue("kapwork");
		if (!ap_work_queue)
			return -ENOMEM;
		tasklet_enable(&ap_tasklet);
		if (!ap_using_interrupts())
			ap_schedule_poll_timer();
		else
			tasklet_schedule(&ap_tasklet);
		if (ap_thread_flag)
			rc = ap_poll_thread_start();
		else
			rc = 0;
	} else
		rc = 0;
	if (AP_QID_QUEUE(ap_dev->qid) != ap_domain_index) {
		/* The selected domain changed across suspend/resume -
		 * rewrite the device's qid accordingly. */
		spin_lock_bh(&ap_dev->lock);
		ap_dev->qid = AP_MKQID(AP_QID_DEVICE(ap_dev->qid),
				       ap_domain_index);
		spin_unlock_bh(&ap_dev->lock);
	}
	queue_work(ap_work_queue, &ap_config_work);

	return rc;
}

static struct bus_type ap_bus_type = {
	.name = "ap",
	.match = &ap_bus_match,
	.uevent = &ap_uevent,
	.suspend = ap_bus_suspend,
	.resume = ap_bus_resume
};

static int ap_device_probe(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = to_ap_drv(dev->driver);
	int rc;

	ap_dev->drv = ap_drv;
	rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
	if (!rc) {
		/* Only successfully probed devices enter the global list. */
		spin_lock_bh(&ap_device_list_lock);
		list_add(&ap_dev->list, &ap_device_list);
		spin_unlock_bh(&ap_device_list_lock);
	}
	return rc;
}

/**
 * __ap_flush_queue(): Flush requests.
 * @ap_dev: Pointer to the AP device
 *
 * Flush all requests from the request/pending queue of an AP device.
 * Caller must hold ap_dev->lock. Each flushed message's receive
 * callback is completed with -ENODEV.
 */
static void __ap_flush_queue(struct ap_device *ap_dev)
{
	struct ap_message *ap_msg, *next;

	list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
		list_del_init(&ap_msg->list);
		ap_dev->pendingq_count--;
		ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
	}
	list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
		list_del_init(&ap_msg->list);
		ap_dev->requestq_count--;
		ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
	}
}

void ap_flush_queue(struct ap_device *ap_dev)
{
	spin_lock_bh(&ap_dev->lock);
	__ap_flush_queue(ap_dev);
	spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_flush_queue);

static int ap_device_remove(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = ap_dev->drv;

	ap_flush_queue(ap_dev);
	del_timer_sync(&ap_dev->timeout);
	spin_lock_bh(&ap_device_list_lock);
	list_del_init(&ap_dev->list);
	spin_unlock_bh(&ap_device_list_lock);
	if (ap_drv->remove)
		ap_drv->remove(ap_dev);
	spin_lock_bh(&ap_dev->lock);
	/* Account the device's in-flight requests off the global counter. */
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	spin_unlock_bh(&ap_dev->lock);
	return 0;
}

int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
		       char *name)
{
	struct device_driver *drv = &ap_drv->driver;

	drv->bus = &ap_bus_type;
	drv->probe = ap_device_probe;
	drv->remove = ap_device_remove;
	drv->owner = owner;
	drv->name = name;
	return driver_register(drv);
}
EXPORT_SYMBOL(ap_driver_register);

void ap_driver_unregister(struct ap_driver *ap_drv)
{
	driver_unregister(&ap_drv->driver);
}
EXPORT_SYMBOL(ap_driver_unregister);

void ap_bus_force_rescan(void)
{
	/* reconfigure the AP bus rescan timer. */
	mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
	/* processing a asynchronous bus rescan */
	queue_work(ap_work_queue, &ap_config_work);
	/* Wait here until the rescan has completed. */
	flush_work(&ap_config_work);
}
EXPORT_SYMBOL(ap_bus_force_rescan);

/*
 * AP bus attributes.
 */
static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
}

static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);

static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
}

static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
			ap_using_interrupts() ? 1 : 0);
}

static BUS_ATTR(ap_interrupts, 0444, ap_interrupts_show, NULL);

/* Rescan interval is clamped to 5..120 seconds. */
static ssize_t ap_config_time_store(struct bus_type *bus,
				    const char *buf, size_t count)
{
	int time;

	if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
		return -EINVAL;
	ap_config_time = time;
	if (!timer_pending(&ap_config_timer) ||
	    !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) {
		ap_config_timer.expires = jiffies + ap_config_time * HZ;
		add_timer(&ap_config_timer);
	}
	return count;
}

static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store);

static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
}

static ssize_t ap_poll_thread_store(struct bus_type *bus,
				    const char *buf, size_t count)
{
	int flag, rc;

	if (sscanf(buf, "%d\n", &flag) != 1)
		return -EINVAL;
	if (flag) {
		rc = ap_poll_thread_start();
		if (rc)
			return rc;
	}
	else
		ap_poll_thread_stop();
	return count;
}

static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);

static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
}

static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
				  size_t count)
{
	unsigned long long time;
	ktime_t hr_time;

	/* 120 seconds = maximum poll interval */
	if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 ||
	    time > 120000000000ULL)
		return -EINVAL;
	poll_timeout = time;
	hr_time = ktime_set(0, poll_timeout);

	if (!hrtimer_is_queued(&ap_poll_timer) ||
	    !hrtimer_forward(&ap_poll_timer,
			     hrtimer_get_expires(&ap_poll_timer), hr_time)) {
		hrtimer_set_expires(&ap_poll_timer, hr_time);
		hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
	}
	return count;
}

static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);

static struct bus_attribute *const ap_bus_attrs[] = {
	&bus_attr_ap_domain,
	&bus_attr_config_time,
	&bus_attr_poll_thread,
	&bus_attr_ap_interrupts,
	&bus_attr_poll_timeout,
	NULL,
};

/* Test bit nr of the QCI bit field (32-bit words, MSB-first numbering);
 * ids above 255 are never configured. */
static inline int ap_test_config(unsigned int *field, unsigned int nr)
{
	if (nr > 0xFFu)
		return 0;
	return ap_test_bit((field + (nr >> 5)), (nr & 0x1f));
}

/*
 * ap_test_config_card_id(): Test, whether an AP card ID is configured.
 * @id AP card ID
 *
 * Returns 0 if the card is not configured
 *	   1 if the card is configured or
 *	     if the configuration information is not available
 */
static inline int ap_test_config_card_id(unsigned int id)
{
	if (!ap_configuration)
		return 1;
	return ap_test_config(ap_configuration->apm, id);
}

/*
 * ap_test_config_domain(): Test, whether an AP usage domain is configured.
 * @domain AP usage domain ID
 *
 * Returns 0 if the usage domain is not configured
 *	   1 if the usage domain is configured or
 *	     if the configuration information is not available
 */
static inline int ap_test_config_domain(unsigned int domain)
{
	if (!ap_configuration)
		return 1;
	return ap_test_config(ap_configuration->aqm, domain);
}

/**
 * ap_query_configuration(): Query AP configuration information.
 *
 * Query information of installed cards and configured domains from AP.
 * On success ap_configuration points to a filled-in config info block;
 * it is left/reset to NULL when QCI is not available.
 */
static void ap_query_configuration(void)
{
#ifdef CONFIG_64BIT
	if (ap_configuration_available()) {
		if (!ap_configuration)
			/* Allocated once, reused on subsequent queries. */
			ap_configuration =
				kzalloc(sizeof(struct ap_config_info),
					GFP_KERNEL);
		if (ap_configuration)
			__ap_query_configuration(ap_configuration);
	} else
		ap_configuration = NULL;
#else
	ap_configuration = NULL;
#endif
}

/**
 * ap_select_domain(): Select an AP domain.
 *
 * Pick one of the 16 AP domains.
 */
static int ap_select_domain(void)
{
	int queue_depth, device_type, count, max_count, best_domain;
	ap_qid_t qid;
	int rc, i, j;

	/*
	 * We want to use a single domain. Either the one specified with
	 * the "domain=" parameter or the domain with the maximum number
	 * of devices.
	 */
	if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS)
		/* Domain has already been selected. */
		return 0;
	best_domain = -1;
	max_count = 0;
	for (i = 0; i < AP_DOMAINS; i++) {
		if (!ap_test_config_domain(i))
			continue;
		/* Count the responding devices in this domain. */
		count = 0;
		for (j = 0; j < AP_DEVICES; j++) {
			if (!ap_test_config_card_id(j))
				continue;
			qid = AP_MKQID(j, i);
			rc = ap_query_queue(qid, &queue_depth, &device_type);
			if (rc)
				continue;
			count++;
		}
		if (count > max_count) {
			max_count = count;
			best_domain = i;
		}
	}
	if (best_domain >= 0) {
		ap_domain_index = best_domain;
		return 0;
	}
	return -ENODEV;
}

/**
 * ap_probe_device_type(): Find the device type of an AP.
 * @ap_dev: pointer to the AP device.
 *
 * Find the device type if query queue returned a device type of 0.
 */
static int ap_probe_device_type(struct ap_device *ap_dev)
{
	/* Canned CCA-APPL test request; the reply distinguishes PCICC
	 * from PCICA cards. Opaque protocol data - do not modify. */
	static unsigned char msg[] = {
		0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50,
		0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,
		0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00,
		0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
		0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20,
		0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,
		0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22,
		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
		0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,
		0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
		0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
		0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,
		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
		0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,
		0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00,
		0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,
		0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01,
		0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,
		0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68,
		0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,
		0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0,
		0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,
		0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04,
		0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,
		0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d,
	};
	struct ap_queue_status status;
	unsigned long long psmid;
	char *reply;
	int rc, i;

	reply = (void *) get_zeroed_page(GFP_KERNEL);
	if (!reply) {
		rc = -ENOMEM;
		goto out;
	}

	status = __ap_send(ap_dev->qid, 0x0102030405060708ULL, msg,
			   sizeof(msg), 0);
	if (status.response_code != AP_RESPONSE_NORMAL) {
		rc = -ENODEV;
		goto out_free;
	}

	/* Wait for the test message to complete. */
	for (i = 0; i < 6; i++) {
		mdelay(300);
		status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
		if (status.response_code == AP_RESPONSE_NORMAL &&
		    psmid == 0x0102030405060708ULL)
			break;
	}
	if (i < 6) {
		/* Got an answer. A 0x00/0x86 reply header means PCICC. */
		if (reply[0] == 0x00 && reply[1] == 0x86)
			ap_dev->device_type = AP_DEVICE_TYPE_PCICC;
		else
			ap_dev->device_type = AP_DEVICE_TYPE_PCICA;
		rc = 0;
	} else
		rc = -ENODEV;

out_free:
	free_page((unsigned long) reply);
out:
	return rc;
}

/* Adapter interrupt handler: just kick the polling tasklet. */
static void ap_interrupt_handler(struct airq_struct *airq)
{
	inc_irq_stat(IRQIO_APB);
	tasklet_schedule(&ap_tasklet);
}

/**
 * __ap_scan_bus(): Scan the AP bus.
 * @dev: Pointer to device
 * @data: Pointer to data
 *
 * Scan the AP bus for new devices.
*/
static int __ap_scan_bus(struct device *dev, void *data)
{
	/* Match helper for bus_find_device(): compare the device's qid. */
	return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
}

/* Device-model release callback: frees the ap_device structure. */
static void ap_device_release(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	kfree(ap_dev);
}

/*
 * ap_scan_bus(): Workqueue function that (re)scans all AP card ids in the
 * selected domain, unregistering vanished devices and registering new ones.
 * @unused: unused work_struct pointer.
 */
static void ap_scan_bus(struct work_struct *unused)
{
	struct ap_device *ap_dev;
	struct device *dev;
	ap_qid_t qid;
	int queue_depth, device_type;
	unsigned int device_functions;
	int rc, i;

	ap_query_configuration();
	if (ap_select_domain() != 0) {
		return;
	}
	for (i = 0; i < AP_DEVICES; i++) {
		qid = AP_MKQID(i, ap_domain_index);
		/* Look for an already registered device for this qid. */
		dev = bus_find_device(&ap_bus_type, NULL,
				      (void *)(unsigned long)qid,
				      __ap_scan_bus);
		if (ap_test_config_card_id(i))
			rc = ap_query_queue(qid, &queue_depth, &device_type);
		else
			rc = -ENODEV;
		if (dev) {
			if (rc == -EBUSY) {
				/* Queue busy; wait a bit and retry the query. */
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(AP_RESET_TIMEOUT);
				rc = ap_query_queue(qid, &queue_depth,
						    &device_type);
			}
			ap_dev = to_ap_dev(dev);
			spin_lock_bh(&ap_dev->lock);
			if (rc || ap_dev->unregistered) {
				spin_unlock_bh(&ap_dev->lock);
				if (ap_dev->unregistered)
					i--; /* retry this card id after unregister */
				device_unregister(dev);
				put_device(dev);
				continue;
			}
			spin_unlock_bh(&ap_dev->lock);
			put_device(dev);
			continue;
		}
		if (rc)
			continue;
		rc = ap_init_queue(qid);
		if (rc)
			continue;
		ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
		if (!ap_dev)
			break;
		ap_dev->qid = qid;
		ap_dev->queue_depth = queue_depth;
		ap_dev->unregistered = 1;
		spin_lock_init(&ap_dev->lock);
		INIT_LIST_HEAD(&ap_dev->pendingq);
		INIT_LIST_HEAD(&ap_dev->requestq);
		INIT_LIST_HEAD(&ap_dev->list);
		setup_timer(&ap_dev->timeout, ap_request_timeout,
			    (unsigned long) ap_dev);
		switch (device_type) {
		case 0:
			/* device type probing for old cards */
			if (ap_probe_device_type(ap_dev)) {
				kfree(ap_dev);
				continue;
			}
			break;
		default:
			ap_dev->device_type = device_type;
		}
		rc = ap_query_functions(qid, &device_functions);
		if (!rc)
			ap_dev->functions = device_functions;
		else
			ap_dev->functions = 0u;
		ap_dev->device.bus = &ap_bus_type;
		ap_dev->device.parent = ap_root_device;
		if (dev_set_name(&ap_dev->device, "card%02x",
				 AP_QID_DEVICE(ap_dev->qid))) {
			kfree(ap_dev);
			continue;
		}
		ap_dev->device.release = ap_device_release;
		rc = device_register(&ap_dev->device);
		if (rc) {
			/* device_register took a ref even on failure. */
			put_device(&ap_dev->device);
			continue;
		}
		/* Add device attributes. */
		rc = sysfs_create_group(&ap_dev->device.kobj,
					&ap_dev_attr_group);
		if (!rc) {
			spin_lock_bh(&ap_dev->lock);
			ap_dev->unregistered = 0;
			spin_unlock_bh(&ap_dev->lock);
		} else
			device_unregister(&ap_dev->device);
	}
}

/*
 * ap_config_timeout(): Config timer expiry; queue a bus rescan and re-arm
 * the timer for the next ap_config_time interval.
 * @ptr: unused timer argument.
 */
static void ap_config_timeout(unsigned long ptr)
{
	queue_work(ap_work_queue, &ap_config_work);
	ap_config_timer.expires = jiffies + ap_config_time * HZ;
	add_timer(&ap_config_timer);
}

/**
 * __ap_schedule_poll_timer(): Schedule poll timer.
 *
 * Set up the timer to run the poll tasklet
 */
static inline void __ap_schedule_poll_timer(void)
{
	ktime_t hr_time;

	spin_lock_bh(&ap_poll_timer_lock);
	/* Nothing to do if already queued or the bus is suspended. */
	if (hrtimer_is_queued(&ap_poll_timer) || ap_suspend_flag)
		goto out;
	if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
		hr_time = ktime_set(0, poll_timeout);
		hrtimer_forward_now(&ap_poll_timer, hr_time);
		hrtimer_restart(&ap_poll_timer);
	}
out:
	spin_unlock_bh(&ap_poll_timer_lock);
}

/**
 * ap_schedule_poll_timer(): Schedule poll timer.
 *
 * Set up the timer to run the poll tasklet, unless interrupts are in
 * use (then the interrupt drives polling instead).
 */
static inline void ap_schedule_poll_timer(void)
{
	if (ap_using_interrupts())
		return;
	__ap_schedule_poll_timer();
}

/**
 * ap_poll_read(): Receive pending reply messages from an AP device.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 *
 * Returns 0 if the device is still present, -ENODEV if not.
*/
static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	if (ap_dev->queue_count <= 0)
		return 0;
	status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
			   ap_dev->reply->message, ap_dev->reply->length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		atomic_dec(&ap_poll_requests);
		ap_decrease_queue_count(ap_dev);
		/* Match the reply to its pending request via the psmid. */
		list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
			if (ap_msg->psmid != ap_dev->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			ap_dev->pendingq_count--;
			ap_msg->receive(ap_dev, ap_msg, ap_dev->reply);
			break;
		}
		if (ap_dev->queue_count > 0)
			*flags |= 1; /* more replies may be pending */
		break;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty) {
			/* The card shouldn't forget requests but who knows. */
			atomic_sub(ap_dev->queue_count, &ap_poll_requests);
			ap_dev->queue_count = 0;
			list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
			ap_dev->requestq_count += ap_dev->pendingq_count;
			ap_dev->pendingq_count = 0;
		} else
			*flags |= 2; /* replies still in flight; arm timer */
		break;
	default:
		return -ENODEV;
	}
	return 0;
}

/**
 * ap_poll_write(): Send messages from the request queue to an AP device.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 *
 * Returns 0 if the device is still present, -ENODEV if not.
 */
static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	/* Nothing to send, or the hardware queue is already full. */
	if (ap_dev->requestq_count <= 0 ||
	    ap_dev->queue_count >= ap_dev->queue_depth)
		return 0;
	/* Start the next request on the queue. */
	ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
	status = __ap_send(ap_dev->qid, ap_msg->psmid,
			   ap_msg->message, ap_msg->length, ap_msg->special);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		atomic_inc(&ap_poll_requests);
		ap_increase_queue_count(ap_dev);
		list_move_tail(&ap_msg->list, &ap_dev->pendingq);
		ap_dev->requestq_count--;
		ap_dev->pendingq_count++;
		if (ap_dev->queue_count < ap_dev->queue_depth &&
		    ap_dev->requestq_count > 0)
			*flags |= 1; /* room for more; poll again */
		*flags |= 2;
		break;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		__ap_schedule_poll_timer();
		/* fallthrough */
	case AP_RESPONSE_Q_FULL:
		*flags |= 2; /* retry later via the poll timer */
		break;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return -EINVAL;
	default:
		return -ENODEV;
	}
	return 0;
}

/**
 * ap_poll_queue(): Poll AP device for pending replies and send new messages.
 * @ap_dev: pointer to the bus device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 *
 * Poll AP device for pending replies and send new messages. If either
 * ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
 * Returns 0.
 */
static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
{
	int rc;

	rc = ap_poll_read(ap_dev, flags);
	if (rc)
		return rc;
	return ap_poll_write(ap_dev, flags);
}

/**
 * __ap_queue_message(): Queue a message to a device.
 * @ap_dev: pointer to the AP device
 * @ap_msg: the message to be queued
 *
 * Queue a message to a device. Returns 0 if successful.
*/
static int __ap_queue_message(struct ap_device *ap_dev,
			      struct ap_message *ap_msg)
{
	struct ap_queue_status status;

	/* Only submit directly if nothing is waiting and there is room. */
	if (list_empty(&ap_dev->requestq) &&
	    ap_dev->queue_count < ap_dev->queue_depth) {
		status = __ap_send(ap_dev->qid, ap_msg->psmid,
				   ap_msg->message, ap_msg->length,
				   ap_msg->special);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			list_add_tail(&ap_msg->list, &ap_dev->pendingq);
			atomic_inc(&ap_poll_requests);
			ap_dev->pendingq_count++;
			ap_increase_queue_count(ap_dev);
			ap_dev->total_request_count++;
			break;
		case AP_RESPONSE_Q_FULL:
		case AP_RESPONSE_RESET_IN_PROGRESS:
			/* Park the message on the request queue for later. */
			list_add_tail(&ap_msg->list, &ap_dev->requestq);
			ap_dev->requestq_count++;
			ap_dev->total_request_count++;
			return -EBUSY;
		case AP_RESPONSE_REQ_FAC_NOT_INST:
		case AP_RESPONSE_MESSAGE_TOO_BIG:
			ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
			return -EINVAL;
		default:	/* Device is gone. */
			ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
			return -ENODEV;
		}
	} else {
		list_add_tail(&ap_msg->list, &ap_dev->requestq);
		ap_dev->requestq_count++;
		ap_dev->total_request_count++;
		return -EBUSY;
	}
	ap_schedule_poll_timer();
	return 0;
}

/*
 * ap_queue_message(): Public entry point to queue a message to a device.
 * @ap_dev: pointer to the AP device
 * @ap_msg: the message to be queued; must have a receive callback set
 *
 * Unregisters the device if it turns out to be gone (-ENODEV).
 */
void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	unsigned long flags;
	int rc;

	/* For asynchronous message handling a valid receive-callback
	 * is required. */
	BUG_ON(!ap_msg->receive);

	spin_lock_bh(&ap_dev->lock);
	if (!ap_dev->unregistered) {
		/* Make room on the queue by polling for finished requests. */
		rc = ap_poll_queue(ap_dev, &flags);
		if (!rc)
			rc = __ap_queue_message(ap_dev, ap_msg);
		if (!rc)
			wake_up(&ap_poll_wait);
		if (rc == -ENODEV)
			ap_dev->unregistered = 1;
	} else {
		ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
		rc = -ENODEV;
	}
	spin_unlock_bh(&ap_dev->lock);
	if (rc == -ENODEV)
		device_unregister(&ap_dev->device);
}
EXPORT_SYMBOL(ap_queue_message);

/**
 * ap_cancel_message(): Cancel a crypto request.
 * @ap_dev: The AP device that has the message queued
 * @ap_msg: The message that is to be removed
 *
 * Cancel a crypto request. This is done by removing the request
 * from the device pending or request queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found.
 */
void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&ap_dev->lock);
	if (!list_empty(&ap_msg->list)) {
		/* Message is on pendingq iff its psmid is found there;
		 * otherwise it must be on requestq. */
		list_for_each_entry(tmp, &ap_dev->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				ap_dev->pendingq_count--;
				goto found;
			}
		ap_dev->requestq_count--;
found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_cancel_message);

/**
 * ap_poll_timeout(): AP receive polling for finished AP requests.
 * @unused: Unused pointer.
 *
 * Schedules the AP tasklet using a high resolution timer.
 */
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
{
	tasklet_schedule(&ap_tasklet);
	return HRTIMER_NORESTART;
}

/**
 * ap_reset(): Reset a not responding AP device.
 * @ap_dev: Pointer to the AP device
 *
 * Reset a not responding AP device and move all requests from the
 * pending queue to the request queue.
 */
static void ap_reset(struct ap_device *ap_dev)
{
	int rc;

	ap_dev->reset = AP_RESET_IGNORE;
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	ap_dev->queue_count = 0;
	list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
	ap_dev->requestq_count += ap_dev->pendingq_count;
	ap_dev->pendingq_count = 0;
	rc = ap_init_queue(ap_dev->qid);
	if (rc == -ENODEV)
		ap_dev->unregistered = 1;
	else
		__ap_schedule_poll_timer();
}

/*
 * __ap_poll_device(): Poll one device and reset it if a reset is pending.
 * Caller must hold ap_dev->lock. Always returns 0.
 */
static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
{
	if (!ap_dev->unregistered) {
		if (ap_poll_queue(ap_dev, flags))
			ap_dev->unregistered = 1;
		if (ap_dev->reset == AP_RESET_DO)
			ap_reset(ap_dev);
	}
	return 0;
}

/**
 * ap_poll_all(): Poll all AP devices.
 * @dummy: Unused variable
 *
 * Poll all AP devices on the bus in a round robin fashion. Continue
 * polling until bit 2^0 of the control flags is not set.
If bit 2^1 * of the control flags has been set arm the poll timer. */ static void ap_poll_all(unsigned long dummy) { unsigned long flags; struct ap_device *ap_dev; /* Reset the indicator if interrupts are used. Thus new interrupts can * be received. Doing it in the beginning of the tasklet is therefor * important that no requests on any AP get lost. */ if (ap_using_interrupts()) xchg(ap_airq.lsi_ptr, 0); do { flags = 0; spin_lock(&ap_device_list_lock); list_for_each_entry(ap_dev, &ap_device_list, list) { spin_lock(&ap_dev->lock); __ap_poll_device(ap_dev, &flags); spin_unlock(&ap_dev->lock); } spin_unlock(&ap_device_list_lock); } while (flags & 1); if (flags & 2) ap_schedule_poll_timer(); } /** * ap_poll_thread(): Thread that polls for finished requests. * @data: Unused pointer * * AP bus poll thread. The purpose of this thread is to poll for * finished requests in a loop if there is a "free" cpu - that is * a cpu that doesn't have anything better to do. The polling stops * as soon as there is another task or if all messages have been * delivered. 
*/ static int ap_poll_thread(void *data) { DECLARE_WAITQUEUE(wait, current); unsigned long flags; int requests; struct ap_device *ap_dev; set_user_nice(current, 19); while (1) { if (ap_suspend_flag) return 0; if (need_resched()) { schedule(); continue; } add_wait_queue(&ap_poll_wait, &wait); set_current_state(TASK_INTERRUPTIBLE); if (kthread_should_stop()) break; requests = atomic_read(&ap_poll_requests); if (requests <= 0) schedule(); set_current_state(TASK_RUNNING); remove_wait_queue(&ap_poll_wait, &wait); flags = 0; spin_lock_bh(&ap_device_list_lock); list_for_each_entry(ap_dev, &ap_device_list, list) { spin_lock(&ap_dev->lock); __ap_poll_device(ap_dev, &flags); spin_unlock(&ap_dev->lock); } spin_unlock_bh(&ap_device_list_lock); } set_current_state(TASK_RUNNING); remove_wait_queue(&ap_poll_wait, &wait); return 0; } static int ap_poll_thread_start(void) { int rc; if (ap_using_interrupts() || ap_suspend_flag) return 0; mutex_lock(&ap_poll_thread_mutex); if (!ap_poll_kthread) { ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll"); rc = PTR_RET(ap_poll_kthread); if (rc) ap_poll_kthread = NULL; } else rc = 0; mutex_unlock(&ap_poll_thread_mutex); return rc; } static void ap_poll_thread_stop(void) { mutex_lock(&ap_poll_thread_mutex); if (ap_poll_kthread) { kthread_stop(ap_poll_kthread); ap_poll_kthread = NULL; } mutex_unlock(&ap_poll_thread_mutex); } /** * ap_request_timeout(): Handling of request timeouts * @data: Holds the AP device. * * Handles request timeouts. 
*/
static void ap_request_timeout(unsigned long data)
{
	struct ap_device *ap_dev = (struct ap_device *) data;

	/* Escalate an armed reset to "do it"; the tasklet performs it. */
	if (ap_dev->reset == AP_RESET_ARMED) {
		ap_dev->reset = AP_RESET_DO;

		if (ap_using_interrupts())
			tasklet_schedule(&ap_tasklet);
	}
}

/* Reset all queues of the currently selected domain. */
static void ap_reset_domain(void)
{
	int i;

	if (ap_domain_index != -1)
		for (i = 0; i < AP_DEVICES; i++)
			ap_reset_queue(AP_MKQID(i, ap_domain_index));
}

/* Reset every queue in every domain (used as a machine reset call). */
static void ap_reset_all(void)
{
	int i, j;

	for (i = 0; i < AP_DOMAINS; i++)
		for (j = 0; j < AP_DEVICES; j++)
			ap_reset_queue(AP_MKQID(j, i));
}

static struct reset_call ap_reset_call = {
	.fn = ap_reset_all,
};

/**
 * ap_module_init(): The module initialization code.
 *
 * Initializes the module.
 */
int __init ap_module_init(void)
{
	int rc, i;

	if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) {
		pr_warning("%d is not a valid cryptographic domain\n",
			   ap_domain_index);
		return -EINVAL;
	}
	/* In resume callback we need to know if the user had set the domain.
	 * If so, we can not just reset it. */
	if (ap_domain_index >= 0)
		user_set_domain = 1;

	if (ap_instructions_available() != 0) {
		pr_warning("The hardware system does not support "
			   "AP instructions\n");
		return -ENODEV;
	}
	if (ap_interrupts_available()) {
		rc = register_adapter_interrupt(&ap_airq);
		ap_airq_flag = (rc == 0);
	}

	register_reset_call(&ap_reset_call);

	/* Create /sys/bus/ap. */
	rc = bus_register(&ap_bus_type);
	if (rc)
		goto out;
	for (i = 0; ap_bus_attrs[i]; i++) {
		rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
		if (rc)
			goto out_bus;
	}

	/* Create /sys/devices/ap. */
	ap_root_device = root_device_register("ap");
	rc = PTR_RET(ap_root_device);
	if (rc)
		goto out_bus;

	ap_work_queue = create_singlethread_workqueue("kapwork");
	if (!ap_work_queue) {
		rc = -ENOMEM;
		goto out_root;
	}

	ap_query_configuration();
	if (ap_select_domain() == 0)
		ap_scan_bus(NULL);

	/* Setup the AP bus rescan timer. */
	init_timer(&ap_config_timer);
	ap_config_timer.function = ap_config_timeout;
	ap_config_timer.data = 0;
	ap_config_timer.expires = jiffies + ap_config_time * HZ;
	add_timer(&ap_config_timer);

	/* Setup the high resolution poll timer.
	 * If we are running under z/VM adjust polling to z/VM polling rate. */
	if (MACHINE_IS_VM)
		poll_timeout = 1500000;
	spin_lock_init(&ap_poll_timer_lock);
	hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ap_poll_timer.function = ap_poll_timeout;

	/* Start the low priority AP bus poll thread. */
	if (ap_thread_flag) {
		rc = ap_poll_thread_start();
		if (rc)
			goto out_work;
	}

	return 0;

out_work:
	del_timer_sync(&ap_config_timer);
	hrtimer_cancel(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
out_root:
	root_device_unregister(ap_root_device);
out_bus:
	while (i--)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	bus_unregister(&ap_bus_type);
out:
	unregister_reset_call(&ap_reset_call);
	if (ap_using_interrupts())
		unregister_adapter_interrupt(&ap_airq);
	return rc;
}

/* Match helper for bus_find_device(): matches every device. */
static int __ap_match_all(struct device *dev, void *data)
{
	return 1;
}

/**
 * ap_modules_exit(): The module termination code
 *
 * Terminates the module.
 */
void ap_module_exit(void)
{
	int i;
	struct device *dev;

	ap_reset_domain();
	ap_poll_thread_stop();
	del_timer_sync(&ap_config_timer);
	hrtimer_cancel(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
	tasklet_kill(&ap_tasklet);
	root_device_unregister(ap_root_device);
	/* Unregister every device still on the bus. */
	while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
		    __ap_match_all)))
	{
		device_unregister(dev);
		put_device(dev);
	}
	for (i = 0; ap_bus_attrs[i]; i++)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	bus_unregister(&ap_bus_type);
	unregister_reset_call(&ap_reset_call);
	if (ap_using_interrupts())
		unregister_adapter_interrupt(&ap_airq);
}

module_init(ap_module_init);
module_exit(ap_module_exit);
gpl-2.0
Xxskyl3rxX/linux
drivers/clk/bcm/clk-iproc-armpll.c
664
7665
/*
 * Copyright (C) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clkdev.h>
#include <linux/of_address.h>

/* Clock policy registers/fields. */
#define IPROC_CLK_MAX_FREQ_POLICY                    0x3
#define IPROC_CLK_POLICY_FREQ_OFFSET                 0x008
#define IPROC_CLK_POLICY_FREQ_POLICY_FREQ_SHIFT      8
#define IPROC_CLK_POLICY_FREQ_POLICY_FREQ_MASK       0x7

/* ARM PLL control register A: lock status, pre-divider, integer ndiv. */
#define IPROC_CLK_PLLARMA_OFFSET                     0xc00
#define IPROC_CLK_PLLARMA_LOCK_SHIFT                 28
#define IPROC_CLK_PLLARMA_PDIV_SHIFT                 24
#define IPROC_CLK_PLLARMA_PDIV_MASK                  0xf
#define IPROC_CLK_PLLARMA_NDIV_INT_SHIFT             8
#define IPROC_CLK_PLLARMA_NDIV_INT_MASK              0x3ff

/* ARM PLL control register B: fractional ndiv. */
#define IPROC_CLK_PLLARMB_OFFSET                     0xc04
#define IPROC_CLK_PLLARMB_NDIV_FRAC_MASK             0xfffff

/* ARM PLL control register C: bypass enable and channel 0 post-divider. */
#define IPROC_CLK_PLLARMC_OFFSET                     0xc08
#define IPROC_CLK_PLLARMC_BYPCLK_EN_SHIFT            8
#define IPROC_CLK_PLLARMC_MDIV_MASK                  0xff

/* ARM PLL control register 5: channel 1 (fast clock) post-divider. */
#define IPROC_CLK_PLLARMCTL5_OFFSET                  0xc20
#define IPROC_CLK_PLLARMCTL5_H_MDIV_MASK             0xff

/* PLLARM OFFSET register: software-controlled ndiv override. */
#define IPROC_CLK_PLLARM_OFFSET_OFFSET               0xc24
#define IPROC_CLK_PLLARM_SW_CTL_SHIFT                29
#define IPROC_CLK_PLLARM_NDIV_INT_OFFSET_SHIFT       20
#define IPROC_CLK_PLLARM_NDIV_INT_OFFSET_MASK        0xff
#define IPROC_CLK_PLLARM_NDIV_FRAC_OFFSET_MASK       0xfffff

/* ARM clock divider: active PLL/policy selection. */
#define IPROC_CLK_ARM_DIV_OFFSET                     0xe00
#define IPROC_CLK_ARM_DIV_PLL_SELECT_OVERRIDE_SHIFT  4
#define IPROC_CLK_ARM_DIV_ARM_PLL_SELECT_MASK        0xf

/* Policy debug register: currently active frequency ID. */
#define IPROC_CLK_POLICY_DBG_OFFSET                  0xec0
#define IPROC_CLK_POLICY_DBG_ACT_FREQ_SHIFT          12
#define IPROC_CLK_POLICY_DBG_ACT_FREQ_MASK           0x7

/* Frequency IDs: which source drives the ARM clock. */
enum iproc_arm_pll_fid {
	ARM_PLL_FID_CRYSTAL_CLK   = 0,
	ARM_PLL_FID_SYS_CLK       = 2,
	ARM_PLL_FID_CH0_SLOW_CLK  = 6,
	ARM_PLL_FID_CH1_FAST_CLK  = 7
};

/*
 * struct iproc_arm_pll - driver state for one ARM PLL instance.
 * @hw:   common clock framework handle
 * @base: mapped base of the clock control register block
 * @rate: last computed output rate, in Hz
 */
struct iproc_arm_pll {
	struct clk_hw hw;
	void __iomem *base;
	unsigned long rate;
};

#define to_iproc_arm_pll(hw) container_of(hw, struct iproc_arm_pll, hw)

/*
 * __get_fid() - read the active frequency ID for the ARM clock.
 * @pll: the ARM PLL instance
 *
 * Reads the selected policy, looks up its frequency ID, and cross-checks
 * it against the active frequency ID in the debug register (the debug
 * value wins on mismatch).
 */
static unsigned int __get_fid(struct iproc_arm_pll *pll)
{
	u32 val;
	unsigned int policy, fid, active_fid;

	val = readl(pll->base + IPROC_CLK_ARM_DIV_OFFSET);
	if (val & (1 << IPROC_CLK_ARM_DIV_PLL_SELECT_OVERRIDE_SHIFT))
		policy = val & IPROC_CLK_ARM_DIV_ARM_PLL_SELECT_MASK;
	else
		policy = 0;

	/* something is seriously wrong */
	BUG_ON(policy > IPROC_CLK_MAX_FREQ_POLICY);

	val = readl(pll->base + IPROC_CLK_POLICY_FREQ_OFFSET);
	fid = (val >> (IPROC_CLK_POLICY_FREQ_POLICY_FREQ_SHIFT * policy)) &
		IPROC_CLK_POLICY_FREQ_POLICY_FREQ_MASK;

	val = readl(pll->base + IPROC_CLK_POLICY_DBG_OFFSET);
	active_fid = IPROC_CLK_POLICY_DBG_ACT_FREQ_MASK &
		(val >> IPROC_CLK_POLICY_DBG_ACT_FREQ_SHIFT);
	if (fid != active_fid) {
		pr_debug("%s: fid override %u->%u\n", __func__,	fid,
				active_fid);
		fid = active_fid;
	}

	pr_debug("%s: active fid: %u\n", __func__, fid);

	return fid;
}

/*
 * Determine the mdiv (post divider) based on the frequency ID being used.
 * There are 4 sources that can be used to derive the output clock rate:
 * - 25 MHz Crystal
 * - System clock
 * - PLL channel 0 (slow clock)
 * - PLL channel 1 (fast clock)
 *
 * Returns the divider value, or -EFAULT for an unrecognized frequency ID.
 */
static int __get_mdiv(struct iproc_arm_pll *pll)
{
	unsigned int fid;
	int mdiv;
	u32 val;

	fid = __get_fid(pll);

	switch (fid) {
	case ARM_PLL_FID_CRYSTAL_CLK:
	case ARM_PLL_FID_SYS_CLK:
		mdiv = 1;
		break;

	case ARM_PLL_FID_CH0_SLOW_CLK:
		val = readl(pll->base + IPROC_CLK_PLLARMC_OFFSET);
		mdiv = val & IPROC_CLK_PLLARMC_MDIV_MASK;
		if (mdiv == 0)
			mdiv = 256; /* hardware encodes 256 as 0 */
		break;

	case ARM_PLL_FID_CH1_FAST_CLK:
		val = readl(pll->base +	IPROC_CLK_PLLARMCTL5_OFFSET);
		mdiv = val & IPROC_CLK_PLLARMCTL5_H_MDIV_MASK;
		if (mdiv == 0)
			mdiv = 256; /* hardware encodes 256 as 0 */
		break;

	default:
		mdiv = -EFAULT;
	}

	return mdiv;
}

/*
 * __get_ndiv() - read the PLL feedback multiplier as a 12.20 fixed-point
 * value (integer part in the upper bits, 20-bit fraction in the lower).
 * @pll: the ARM PLL instance
 */
static unsigned int __get_ndiv(struct iproc_arm_pll *pll)
{
	u32 val;
	unsigned int ndiv_int, ndiv_frac, ndiv;

	val = readl(pll->base + IPROC_CLK_PLLARM_OFFSET_OFFSET);
	if (val & (1 << IPROC_CLK_PLLARM_SW_CTL_SHIFT)) {
		/*
		 * offset mode is active. Read the ndiv from the PLLARM OFFSET
		 * register
		 */
		ndiv_int = (val >> IPROC_CLK_PLLARM_NDIV_INT_OFFSET_SHIFT) &
			IPROC_CLK_PLLARM_NDIV_INT_OFFSET_MASK;
		if (ndiv_int == 0)
			ndiv_int = 256; /* 0 encodes the maximum value */

		ndiv_frac = val & IPROC_CLK_PLLARM_NDIV_FRAC_OFFSET_MASK;
	} else {
		/* offset mode not active */
		val = readl(pll->base + IPROC_CLK_PLLARMA_OFFSET);
		ndiv_int = (val >> IPROC_CLK_PLLARMA_NDIV_INT_SHIFT) &
			IPROC_CLK_PLLARMA_NDIV_INT_MASK;
		if (ndiv_int == 0)
			ndiv_int = 1024; /* 0 encodes the maximum value */

		val = readl(pll->base + IPROC_CLK_PLLARMB_OFFSET);
		ndiv_frac = val & IPROC_CLK_PLLARMB_NDIV_FRAC_MASK;
	}

	ndiv = (ndiv_int << 20) | ndiv_frac;

	return ndiv;
}

/*
 * The output frequency of the ARM PLL is calculated based on the ARM PLL
 * divider values:
 *   pdiv = ARM PLL pre-divider
 *   ndiv = ARM PLL multiplier
 *   mdiv = ARM PLL post divider
 *
 * The frequency is calculated by:
 *   ((ndiv * parent clock rate) / pdiv) / mdiv
 */
static unsigned long iproc_arm_pll_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct iproc_arm_pll *pll = to_iproc_arm_pll(hw);
	u32 val;
	int mdiv;
	u64 ndiv;
	unsigned int pdiv;

	/* in bypass mode, use parent rate */
	val = readl(pll->base + IPROC_CLK_PLLARMC_OFFSET);
	if (val & (1 << IPROC_CLK_PLLARMC_BYPCLK_EN_SHIFT)) {
		pll->rate = parent_rate;
		return pll->rate;
	}

	/* PLL needs to be locked */
	val = readl(pll->base + IPROC_CLK_PLLARMA_OFFSET);
	if (!(val & (1 << IPROC_CLK_PLLARMA_LOCK_SHIFT))) {
		pll->rate = 0;
		return 0;
	}

	pdiv = (val >> IPROC_CLK_PLLARMA_PDIV_SHIFT) &
		IPROC_CLK_PLLARMA_PDIV_MASK;
	if (pdiv == 0)
		pdiv = 16; /* 0 encodes the maximum pre-divider */

	ndiv = __get_ndiv(pll);
	mdiv = __get_mdiv(pll);
	if (mdiv <= 0) {
		pll->rate = 0;
		return 0;
	}
	/* ndiv is 12.20 fixed point; >> 20 drops the fractional scaling. */
	pll->rate = (ndiv * parent_rate) >> 20;
	pll->rate = (pll->rate / pdiv) / mdiv;

	pr_debug("%s: ARM PLL rate: %lu. parent rate: %lu\n", __func__,
			pll->rate, parent_rate);
	pr_debug("%s: ndiv_int: %u, pdiv: %u, mdiv: %d\n", __func__,
			(unsigned int)(ndiv >> 20), pdiv, mdiv);

	return pll->rate;
}

static const struct clk_ops iproc_arm_pll_ops = {
	.recalc_rate = iproc_arm_pll_recalc_rate,
};

/*
 * iproc_armpll_setup() - map and register the ARM PLL clock from DT.
 * @node: device tree node describing the PLL
 *
 * On any failure the partially constructed state is torn down and the
 * function returns silently (with a WARN).
 */
void __init iproc_armpll_setup(struct device_node *node)
{
	int ret;
	struct clk *clk;
	struct iproc_arm_pll *pll;
	struct clk_init_data init;
	const char *parent_name;

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (WARN_ON(!pll))
		return;

	pll->base = of_iomap(node, 0);
	if (WARN_ON(!pll->base))
		goto err_free_pll;

	init.name = node->name;
	init.ops = &iproc_arm_pll_ops;
	init.flags = 0;
	parent_name = of_clk_get_parent_name(node, 0);
	init.parent_names = (parent_name ? &parent_name : NULL);
	init.num_parents = (parent_name ? 1 : 0);
	pll->hw.init = &init;

	clk = clk_register(NULL, &pll->hw);
	if (WARN_ON(IS_ERR(clk)))
		goto err_iounmap;

	ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
	if (WARN_ON(ret))
		goto err_clk_unregister;

	return;

err_clk_unregister:
	clk_unregister(clk);
err_iounmap:
	iounmap(pll->base);
err_free_pll:
	kfree(pll);
}
gpl-2.0