repo_name
string
path
string
copies
string
size
string
content
string
license
string
TheSSJ/android_kernel_asus_moorefield
fs/xfs/xfs_quotaops.c
2290
3190
/*
 * Copyright (c) 2008, Christoph Hellwig
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_sb.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_qm.h"
#include <linux/quota.h>

/*
 * Translate a VFS quota type into the XFS dquot type.  Anything that is
 * neither a user nor a group quota is treated as a project quota.
 */
STATIC int
xfs_quota_type(int type)
{
	if (type == USRQUOTA)
		return XFS_DQ_USER;
	if (type == GRPQUOTA)
		return XFS_DQ_GROUP;
	return XFS_DQ_PROJ;
}

/* Report filesystem quota state to the generic quotactl layer. */
STATIC int
xfs_fs_get_xstate(
	struct super_block	*sb,
	struct fs_quota_stat	*fqs)
{
	struct xfs_mount	*mp = XFS_M(sb);

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return -ENOSYS;
	return -xfs_qm_scall_getqstat(mp, fqs);
}

/*
 * Turn quota accounting/enforcement on or off, or truncate the quota
 * files (Q_XQUOTARM).  The generic FS_QUOTA_* flags are translated into
 * their XFS-internal equivalents before being handed to the scall layer.
 */
STATIC int
xfs_fs_set_xstate(
	struct super_block	*sb,
	unsigned int		uflags,
	int			op)
{
	struct xfs_mount	*mp = XFS_M(sb);
	unsigned int		flags = 0;

	if (sb->s_flags & MS_RDONLY)
		return -EROFS;
	/* Q_XQUOTARM is allowed while quotas are not running. */
	if (op != Q_XQUOTARM && !XFS_IS_QUOTA_RUNNING(mp))
		return -ENOSYS;

	if (uflags & FS_QUOTA_UDQ_ACCT)
		flags |= XFS_UQUOTA_ACCT;
	if (uflags & FS_QUOTA_PDQ_ACCT)
		flags |= XFS_PQUOTA_ACCT;
	if (uflags & FS_QUOTA_GDQ_ACCT)
		flags |= XFS_GQUOTA_ACCT;
	if (uflags & FS_QUOTA_UDQ_ENFD)
		flags |= XFS_UQUOTA_ENFD;
	/* group and project enforcement share the "other" enforcement bit */
	if (uflags & (FS_QUOTA_PDQ_ENFD|FS_QUOTA_GDQ_ENFD))
		flags |= XFS_OQUOTA_ENFD;

	switch (op) {
	case Q_XQUOTAON:
		return -xfs_qm_scall_quotaon(mp, flags);
	case Q_XQUOTAOFF:
		if (!XFS_IS_QUOTA_ON(mp))
			return -EINVAL;
		return -xfs_qm_scall_quotaoff(mp, flags);
	case Q_XQUOTARM:
		/* truncating the quota files requires quota to be off */
		if (XFS_IS_QUOTA_ON(mp))
			return -EINVAL;
		return -xfs_qm_scall_trunc_qfiles(mp, flags);
	}

	return -EINVAL;
}

/* Fetch the disk quota limits/usage for a single id. */
STATIC int
xfs_fs_get_dqblk(
	struct super_block	*sb,
	struct kqid		qid,
	struct fs_disk_quota	*fdq)
{
	struct xfs_mount	*mp = XFS_M(sb);

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return -ENOSYS;
	if (!XFS_IS_QUOTA_ON(mp))
		return -ESRCH;

	return -xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid),
				      xfs_quota_type(qid.type), fdq);
}

/* Set the disk quota limits for a single id. */
STATIC int
xfs_fs_set_dqblk(
	struct super_block	*sb,
	struct kqid		qid,
	struct fs_disk_quota	*fdq)
{
	struct xfs_mount	*mp = XFS_M(sb);

	if (sb->s_flags & MS_RDONLY)
		return -EROFS;
	if (!XFS_IS_QUOTA_RUNNING(mp))
		return -ENOSYS;
	if (!XFS_IS_QUOTA_ON(mp))
		return -ESRCH;

	return -xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid),
				     xfs_quota_type(qid.type), fdq);
}

const struct quotactl_ops xfs_quotactl_operations = {
	.get_xstate		= xfs_fs_get_xstate,
	.set_xstate		= xfs_fs_set_xstate,
	.get_dqblk		= xfs_fs_get_dqblk,
	.set_dqblk		= xfs_fs_set_dqblk,
};
gpl-2.0
zyrgit/linux-yocto-3.10-work
drivers/gpu/drm/nouveau/core/engine/disp/vga.c
2546
6452
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <core/subdev.h>
#include <core/device.h>

#include <subdev/vga.h>

/*
 * Read a legacy VGA I/O port through the MMIO aliases.  NV50+ exposes a
 * single flat alias at 0x601000; earlier chips split the ports between a
 * per-head CRTC alias (0x601000 + head * 0x2000) and the PRMVIO alias at
 * 0x0c0000 (where, pre-NV40, CR44 selects the head instead of the offset).
 */
u8
nv_rdport(void *obj, int head, u16 port)
{
	struct nouveau_device *device = nv_device(obj);

	if (device->card_type >= NV_50)
		return nv_rd08(obj, 0x601000 + port);

	switch (port) {
	case 0x03c0: case 0x03c1:	/* AR */
	case 0x03c2: case 0x03da:	/* INP0 */
	case 0x03d4: case 0x03d5:	/* CR */
		return nv_rd08(obj, 0x601000 + (head * 0x2000) + port);
	case 0x03cc:			/* MISC */
	case 0x03c4: case 0x03c5:	/* SR */
	case 0x03ce: case 0x03cf:	/* GR */
		if (device->card_type < NV_40)
			head = 0; /* CR44 selects head */
		return nv_rd08(obj, 0x0c0000 + (head * 0x2000) + port);
	default:
		nv_error(obj, "unknown vga port 0x%04x\n", port);
		return 0x00;
	}
}

/* Write a legacy VGA I/O port through the MMIO aliases (see nv_rdport). */
void
nv_wrport(void *obj, int head, u16 port, u8 data)
{
	struct nouveau_device *device = nv_device(obj);

	if (device->card_type >= NV_50) {
		nv_wr08(obj, 0x601000 + port, data);
		return;
	}

	switch (port) {
	case 0x03c0: case 0x03c1:	/* AR */
	case 0x03c2: case 0x03da:	/* INP0 */
	case 0x03d4: case 0x03d5:	/* CR */
		nv_wr08(obj, 0x601000 + (head * 0x2000) + port, data);
		break;
	case 0x03cc:			/* MISC */
	case 0x03c4: case 0x03c5:	/* SR */
	case 0x03ce: case 0x03cf:	/* GR */
		if (device->card_type < NV_40)
			head = 0; /* CR44 selects head */
		nv_wr08(obj, 0x0c0000 + (head * 0x2000) + port, data);
		break;
	default:
		nv_error(obj, "unknown vga port 0x%04x\n", port);
		break;
	}
}

/* Indexed sequencer (SR) register read. */
u8
nv_rdvgas(void *obj, int head, u8 index)
{
	nv_wrport(obj, head, 0x03c4, index);
	return nv_rdport(obj, head, 0x03c5);
}

/* Indexed sequencer (SR) register write. */
void
nv_wrvgas(void *obj, int head, u8 index, u8 value)
{
	nv_wrport(obj, head, 0x03c4, index);
	nv_wrport(obj, head, 0x03c5, value);
}

/* Indexed graphics controller (GR) register read. */
u8
nv_rdvgag(void *obj, int head, u8 index)
{
	nv_wrport(obj, head, 0x03ce, index);
	return nv_rdport(obj, head, 0x03cf);
}

/* Indexed graphics controller (GR) register write. */
void
nv_wrvgag(void *obj, int head, u8 index, u8 value)
{
	nv_wrport(obj, head, 0x03ce, index);
	nv_wrport(obj, head, 0x03cf, value);
}

/* Indexed CRTC controller (CR) register read. */
u8
nv_rdvgac(void *obj, int head, u8 index)
{
	nv_wrport(obj, head, 0x03d4, index);
	return nv_rdport(obj, head, 0x03d5);
}

/* Indexed CRTC controller (CR) register write. */
void
nv_wrvgac(void *obj, int head, u8 index, u8 value)
{
	nv_wrport(obj, head, 0x03d4, index);
	nv_wrport(obj, head, 0x03d5, value);
}

/* Dispatch an indexed read to the SR/GR/CR helper for the given base port. */
u8
nv_rdvgai(void *obj, int head, u16 port, u8 index)
{
	switch (port) {
	case 0x03c4:
		return nv_rdvgas(obj, head, index);
	case 0x03ce:
		return nv_rdvgag(obj, head, index);
	case 0x03d4:
		return nv_rdvgac(obj, head, index);
	default:
		nv_error(obj, "unknown indexed vga port 0x%04x\n", port);
		return 0x00;
	}
}

/* Dispatch an indexed write to the SR/GR/CR helper for the given base port. */
void
nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value)
{
	switch (port) {
	case 0x03c4:
		nv_wrvgas(obj, head, index, value);
		break;
	case 0x03ce:
		nv_wrvgag(obj, head, index, value);
		break;
	case 0x03d4:
		nv_wrvgac(obj, head, index, value);
		break;
	default:
		nv_error(obj, "unknown indexed vga port 0x%04x\n", port);
		break;
	}
}

/*
 * Lock or unlock the extended CRTC registers via CR1F, returning whether
 * they were locked beforehand.  On NV11 with untied heads, head B has its
 * own lock and is updated as well.
 */
bool
nv_lockvgac(void *obj, bool lock)
{
	bool locked = !nv_rdvgac(obj, 0, 0x1f);
	u8 data = lock ? 0x99 : 0x57;

	nv_wrvgac(obj, 0, 0x1f, data);
	if (nv_device(obj)->chipset == 0x11) {
		if (!(nv_rd32(obj, 0x001084) & 0x10000000))
			nv_wrvgac(obj, 1, 0x1f, data);
	}
	return locked;
}

/* CR44 takes values 0 (head A), 3 (head B) and 4 (heads tied)
 * it affects only the 8 bit vga io regs, which we access using mmio at
 * 0xc{0,2}3c*, 0x60{1,3}3*, and 0x68{1,3}3d*
 * in general, the set value of cr44 does not matter: reg access works as
 * expected and values can be set for the appropriate head by using a 0x2000
 * offset as required
 * however:
 * a) pre nv40, the head B range of PRMVIO regs at 0xc23c* was not exposed and
 *    cr44 must be set to 0 or 3 for accessing values on the correct head
 *    through the common 0xc03c* addresses
 * b) in tied mode (4) head B is programmed to the values set on head A, and
 *    access using the head B addresses can have strange results, ergo we leave
 *    tied mode in init once we know to what cr44 should be restored on exit
 *
 * the owner parameter is slightly abused:
 * 0 and 1 are treated as head values and so the set value is (owner * 3)
 * other values are treated as literal values to set
 */
u8
nv_rdvgaowner(void *obj)
{
	if (nv_device(obj)->card_type < NV_50) {
		if (nv_device(obj)->chipset == 0x11) {
			u32 tied = nv_rd32(obj, 0x001084) & 0x10000000;
			if (tied == 0) {
				/* infer ownership from slave/TV state of each head */
				u8 slA = nv_rdvgac(obj, 0, 0x28) & 0x80;
				u8 tvA = nv_rdvgac(obj, 0, 0x33) & 0x01;
				u8 slB = nv_rdvgac(obj, 1, 0x28) & 0x80;
				u8 tvB = nv_rdvgac(obj, 1, 0x33) & 0x01;
				if (slA && !tvA)
					return 0x00;
				if (slB && !tvB)
					return 0x03;
				if (slA)
					return 0x00;
				if (slB)
					return 0x03;
				return 0x00;
			}
			return 0x04;
		}

		return nv_rdvgac(obj, 0, 0x44);
	}

	nv_error(obj, "rdvgaowner after nv4x\n");
	return 0x00;
}

/* Set CR44 ownership; owner 1 maps to head B's literal value 3. */
void
nv_wrvgaowner(void *obj, u8 select)
{
	if (nv_device(obj)->card_type < NV_50) {
		u8 owner = (select == 1) ? 3 : select;
		if (nv_device(obj)->chipset == 0x11) {
			/* workaround hw lockup bug */
			nv_rdvgac(obj, 0, 0x1f);
			nv_rdvgac(obj, 1, 0x1f);
		}

		nv_wrvgac(obj, 0, 0x44, owner);

		if (nv_device(obj)->chipset == 0x11) {
			nv_wrvgac(obj, 0, 0x2e, owner);
			nv_wrvgac(obj, 0, 0x2e, owner);
		}
	} else
		nv_error(obj, "wrvgaowner after nv4x\n");
}
gpl-2.0
Motorhead1991/samsung_att_kernel_source-msm7x30
drivers/staging/msm/mddi_toshiba.c
3058
63018
/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ #include "msm_fb.h" #include "mddihost.h" #include "mddihosti.h" #include "mddi_toshiba.h" #define TM_GET_DID(id) ((id) & 0xff) #define TM_GET_PID(id) (((id) & 0xff00)>>8) #define MDDI_CLIENT_CORE_BASE 0x108000 #define LCD_CONTROL_BLOCK_BASE 0x110000 #define SPI_BLOCK_BASE 0x120000 #define PWM_BLOCK_BASE 0x140000 #define SYSTEM_BLOCK1_BASE 0x160000 #define TTBUSSEL (MDDI_CLIENT_CORE_BASE|0x18) #define DPSET0 (MDDI_CLIENT_CORE_BASE|0x1C) #define DPSET1 (MDDI_CLIENT_CORE_BASE|0x20) #define DPSUS (MDDI_CLIENT_CORE_BASE|0x24) #define DPRUN (MDDI_CLIENT_CORE_BASE|0x28) #define SYSCKENA (MDDI_CLIENT_CORE_BASE|0x2C) #define BITMAP0 (MDDI_CLIENT_CORE_BASE|0x44) #define BITMAP1 (MDDI_CLIENT_CORE_BASE|0x48) #define BITMAP2 (MDDI_CLIENT_CORE_BASE|0x4C) #define BITMAP3 (MDDI_CLIENT_CORE_BASE|0x50) #define BITMAP4 (MDDI_CLIENT_CORE_BASE|0x54) #define SRST (LCD_CONTROL_BLOCK_BASE|0x00) #define PORT_ENB (LCD_CONTROL_BLOCK_BASE|0x04) #define START (LCD_CONTROL_BLOCK_BASE|0x08) #define PORT (LCD_CONTROL_BLOCK_BASE|0x0C) #define INTFLG (LCD_CONTROL_BLOCK_BASE|0x18) #define INTMSK (LCD_CONTROL_BLOCK_BASE|0x1C) #define MPLFBUF (LCD_CONTROL_BLOCK_BASE|0x20) #define PXL (LCD_CONTROL_BLOCK_BASE|0x30) #define HCYCLE (LCD_CONTROL_BLOCK_BASE|0x34) #define HSW 
(LCD_CONTROL_BLOCK_BASE|0x38) #define HDE_START (LCD_CONTROL_BLOCK_BASE|0x3C) #define HDE_SIZE (LCD_CONTROL_BLOCK_BASE|0x40) #define VCYCLE (LCD_CONTROL_BLOCK_BASE|0x44) #define VSW (LCD_CONTROL_BLOCK_BASE|0x48) #define VDE_START (LCD_CONTROL_BLOCK_BASE|0x4C) #define VDE_SIZE (LCD_CONTROL_BLOCK_BASE|0x50) #define WAKEUP (LCD_CONTROL_BLOCK_BASE|0x54) #define REGENB (LCD_CONTROL_BLOCK_BASE|0x5C) #define VSYNIF (LCD_CONTROL_BLOCK_BASE|0x60) #define WRSTB (LCD_CONTROL_BLOCK_BASE|0x64) #define RDSTB (LCD_CONTROL_BLOCK_BASE|0x68) #define ASY_DATA (LCD_CONTROL_BLOCK_BASE|0x6C) #define ASY_DATB (LCD_CONTROL_BLOCK_BASE|0x70) #define ASY_DATC (LCD_CONTROL_BLOCK_BASE|0x74) #define ASY_DATD (LCD_CONTROL_BLOCK_BASE|0x78) #define ASY_DATE (LCD_CONTROL_BLOCK_BASE|0x7C) #define ASY_DATF (LCD_CONTROL_BLOCK_BASE|0x80) #define ASY_DATG (LCD_CONTROL_BLOCK_BASE|0x84) #define ASY_DATH (LCD_CONTROL_BLOCK_BASE|0x88) #define ASY_CMDSET (LCD_CONTROL_BLOCK_BASE|0x8C) #define MONI (LCD_CONTROL_BLOCK_BASE|0xB0) #define VPOS (LCD_CONTROL_BLOCK_BASE|0xC0) #define SSICTL (SPI_BLOCK_BASE|0x00) #define SSITIME (SPI_BLOCK_BASE|0x04) #define SSITX (SPI_BLOCK_BASE|0x08) #define SSIINTS (SPI_BLOCK_BASE|0x14) #define TIMER0LOAD (PWM_BLOCK_BASE|0x00) #define TIMER0CTRL (PWM_BLOCK_BASE|0x08) #define PWM0OFF (PWM_BLOCK_BASE|0x1C) #define TIMER1LOAD (PWM_BLOCK_BASE|0x20) #define TIMER1CTRL (PWM_BLOCK_BASE|0x28) #define PWM1OFF (PWM_BLOCK_BASE|0x3C) #define TIMER2LOAD (PWM_BLOCK_BASE|0x40) #define TIMER2CTRL (PWM_BLOCK_BASE|0x48) #define PWM2OFF (PWM_BLOCK_BASE|0x5C) #define PWMCR (PWM_BLOCK_BASE|0x68) #define GPIOIS (GPIO_BLOCK_BASE|0x08) #define GPIOIEV (GPIO_BLOCK_BASE|0x10) #define GPIOIC (GPIO_BLOCK_BASE|0x20) #define WKREQ (SYSTEM_BLOCK1_BASE|0x00) #define CLKENB (SYSTEM_BLOCK1_BASE|0x04) #define DRAMPWR (SYSTEM_BLOCK1_BASE|0x08) #define INTMASK (SYSTEM_BLOCK1_BASE|0x0C) #define CNT_DIS (SYSTEM_BLOCK1_BASE|0x10) typedef enum { TOSHIBA_STATE_OFF, TOSHIBA_STATE_PRIM_SEC_STANDBY, 
TOSHIBA_STATE_PRIM_SEC_READY, TOSHIBA_STATE_PRIM_NORMAL_MODE, TOSHIBA_STATE_SEC_NORMAL_MODE } mddi_toshiba_state_t; static uint32 mddi_toshiba_curr_vpos; static boolean mddi_toshiba_monitor_refresh_value = FALSE; static boolean mddi_toshiba_report_refresh_measurements = FALSE; boolean mddi_toshiba_61Hz_refresh = TRUE; /* Modifications to timing to increase refresh rate to > 60Hz. * 20MHz dot clock. * 646 total rows. * 506 total columns. * refresh rate = 61.19Hz */ static uint32 mddi_toshiba_rows_per_second = 39526; static uint32 mddi_toshiba_usecs_per_refresh = 16344; static uint32 mddi_toshiba_rows_per_refresh = 646; extern boolean mddi_vsync_detect_enabled; static msm_fb_vsync_handler_type mddi_toshiba_vsync_handler; static void *mddi_toshiba_vsync_handler_arg; static uint16 mddi_toshiba_vsync_attempts; static mddi_toshiba_state_t toshiba_state = TOSHIBA_STATE_OFF; static struct msm_panel_common_pdata *mddi_toshiba_pdata; static int mddi_toshiba_lcd_on(struct platform_device *pdev); static int mddi_toshiba_lcd_off(struct platform_device *pdev); static void mddi_toshiba_state_transition(mddi_toshiba_state_t a, mddi_toshiba_state_t b) { if (toshiba_state != a) { MDDI_MSG_ERR("toshiba state trans. 
(%d->%d) found %d\n", a, b, toshiba_state); } toshiba_state = b; } #define GORDON_REG_IMGCTL1 0x10 /* Image interface control 1 */ #define GORDON_REG_IMGCTL2 0x11 /* Image interface control 2 */ #define GORDON_REG_IMGSET1 0x12 /* Image interface settings 1 */ #define GORDON_REG_IMGSET2 0x13 /* Image interface settings 2 */ #define GORDON_REG_IVBP1 0x14 /* DM0: Vert back porch */ #define GORDON_REG_IHBP1 0x15 /* DM0: Horiz back porch */ #define GORDON_REG_IVNUM1 0x16 /* DM0: Num of vert lines */ #define GORDON_REG_IHNUM1 0x17 /* DM0: Num of pixels per line */ #define GORDON_REG_IVBP2 0x18 /* DM1: Vert back porch */ #define GORDON_REG_IHBP2 0x19 /* DM1: Horiz back porch */ #define GORDON_REG_IVNUM2 0x1A /* DM1: Num of vert lines */ #define GORDON_REG_IHNUM2 0x1B /* DM1: Num of pixels per line */ #define GORDON_REG_LCDIFCTL1 0x30 /* LCD interface control 1 */ #define GORDON_REG_VALTRAN 0x31 /* LCD IF ctl: VALTRAN sync flag */ #define GORDON_REG_AVCTL 0x33 #define GORDON_REG_LCDIFCTL2 0x34 /* LCD interface control 2 */ #define GORDON_REG_LCDIFCTL3 0x35 /* LCD interface control 3 */ #define GORDON_REG_LCDIFSET1 0x36 /* LCD interface settings 1 */ #define GORDON_REG_PCCTL 0x3C #define GORDON_REG_TPARAM1 0x40 #define GORDON_REG_TLCDIF1 0x41 #define GORDON_REG_TSSPB_ST1 0x42 #define GORDON_REG_TSSPB_ED1 0x43 #define GORDON_REG_TSCK_ST1 0x44 #define GORDON_REG_TSCK_WD1 0x45 #define GORDON_REG_TGSPB_VST1 0x46 #define GORDON_REG_TGSPB_VED1 0x47 #define GORDON_REG_TGSPB_CH1 0x48 #define GORDON_REG_TGCK_ST1 0x49 #define GORDON_REG_TGCK_ED1 0x4A #define GORDON_REG_TPCTL_ST1 0x4B #define GORDON_REG_TPCTL_ED1 0x4C #define GORDON_REG_TPCHG_ED1 0x4D #define GORDON_REG_TCOM_CH1 0x4E #define GORDON_REG_THBP1 0x4F #define GORDON_REG_TPHCTL1 0x50 #define GORDON_REG_EVPH1 0x51 #define GORDON_REG_EVPL1 0x52 #define GORDON_REG_EVNH1 0x53 #define GORDON_REG_EVNL1 0x54 #define GORDON_REG_TBIAS1 0x55 #define GORDON_REG_TPARAM2 0x56 #define GORDON_REG_TLCDIF2 0x57 #define GORDON_REG_TSSPB_ST2 
0x58 #define GORDON_REG_TSSPB_ED2 0x59 #define GORDON_REG_TSCK_ST2 0x5A #define GORDON_REG_TSCK_WD2 0x5B #define GORDON_REG_TGSPB_VST2 0x5C #define GORDON_REG_TGSPB_VED2 0x5D #define GORDON_REG_TGSPB_CH2 0x5E #define GORDON_REG_TGCK_ST2 0x5F #define GORDON_REG_TGCK_ED2 0x60 #define GORDON_REG_TPCTL_ST2 0x61 #define GORDON_REG_TPCTL_ED2 0x62 #define GORDON_REG_TPCHG_ED2 0x63 #define GORDON_REG_TCOM_CH2 0x64 #define GORDON_REG_THBP2 0x65 #define GORDON_REG_TPHCTL2 0x66 #define GORDON_REG_EVPH2 0x67 #define GORDON_REG_EVPL2 0x68 #define GORDON_REG_EVNH2 0x69 #define GORDON_REG_EVNL2 0x6A #define GORDON_REG_TBIAS2 0x6B #define GORDON_REG_POWCTL 0x80 #define GORDON_REG_POWOSC1 0x81 #define GORDON_REG_POWOSC2 0x82 #define GORDON_REG_POWSET 0x83 #define GORDON_REG_POWTRM1 0x85 #define GORDON_REG_POWTRM2 0x86 #define GORDON_REG_POWTRM3 0x87 #define GORDON_REG_POWTRMSEL 0x88 #define GORDON_REG_POWHIZ 0x89 void serigo(uint16 reg, uint8 data) { uint32 mddi_val = 0; mddi_queue_register_read(SSIINTS, &mddi_val, TRUE, 0); if (mddi_val & (1 << 8)) mddi_wait(1); /* No De-assert of CS and send 2 bytes */ mddi_val = 0x90000 | ((0x00FF & reg) << 8) | data; mddi_queue_register_write(SSITX, mddi_val, TRUE, 0); } void gordon_init(void) { /* Image interface settings ***/ serigo(GORDON_REG_IMGCTL2, 0x00); serigo(GORDON_REG_IMGSET1, 0x01); /* Exchange the RGB signal for J510(Softbank mobile) */ serigo(GORDON_REG_IMGSET2, 0x12); serigo(GORDON_REG_LCDIFSET1, 0x00); mddi_wait(2); /* Pre-charge settings */ serigo(GORDON_REG_PCCTL, 0x09); serigo(GORDON_REG_LCDIFCTL2, 0x1B); mddi_wait(1); } void gordon_disp_on(void) { /*gordon_dispmode setting */ /*VGA settings */ serigo(GORDON_REG_TPARAM1, 0x30); serigo(GORDON_REG_TLCDIF1, 0x00); serigo(GORDON_REG_TSSPB_ST1, 0x8B); serigo(GORDON_REG_TSSPB_ED1, 0x93); mddi_wait(2); serigo(GORDON_REG_TSCK_ST1, 0x88); serigo(GORDON_REG_TSCK_WD1, 0x00); serigo(GORDON_REG_TGSPB_VST1, 0x01); serigo(GORDON_REG_TGSPB_VED1, 0x02); mddi_wait(2); 
serigo(GORDON_REG_TGSPB_CH1, 0x5E); serigo(GORDON_REG_TGCK_ST1, 0x80); serigo(GORDON_REG_TGCK_ED1, 0x3C); serigo(GORDON_REG_TPCTL_ST1, 0x50); mddi_wait(2); serigo(GORDON_REG_TPCTL_ED1, 0x74); serigo(GORDON_REG_TPCHG_ED1, 0x78); serigo(GORDON_REG_TCOM_CH1, 0x50); serigo(GORDON_REG_THBP1, 0x84); mddi_wait(2); serigo(GORDON_REG_TPHCTL1, 0x00); serigo(GORDON_REG_EVPH1, 0x70); serigo(GORDON_REG_EVPL1, 0x64); serigo(GORDON_REG_EVNH1, 0x56); mddi_wait(2); serigo(GORDON_REG_EVNL1, 0x48); serigo(GORDON_REG_TBIAS1, 0x88); mddi_wait(2); serigo(GORDON_REG_TPARAM2, 0x28); serigo(GORDON_REG_TLCDIF2, 0x14); serigo(GORDON_REG_TSSPB_ST2, 0x49); serigo(GORDON_REG_TSSPB_ED2, 0x4B); mddi_wait(2); serigo(GORDON_REG_TSCK_ST2, 0x4A); serigo(GORDON_REG_TSCK_WD2, 0x02); serigo(GORDON_REG_TGSPB_VST2, 0x02); serigo(GORDON_REG_TGSPB_VED2, 0x03); mddi_wait(2); serigo(GORDON_REG_TGSPB_CH2, 0x2F); serigo(GORDON_REG_TGCK_ST2, 0x40); serigo(GORDON_REG_TGCK_ED2, 0x1E); serigo(GORDON_REG_TPCTL_ST2, 0x2C); mddi_wait(2); serigo(GORDON_REG_TPCTL_ED2, 0x3A); serigo(GORDON_REG_TPCHG_ED2, 0x3C); serigo(GORDON_REG_TCOM_CH2, 0x28); serigo(GORDON_REG_THBP2, 0x4D); mddi_wait(2); serigo(GORDON_REG_TPHCTL2, 0x1A); mddi_wait(2); serigo(GORDON_REG_IVBP1, 0x02); serigo(GORDON_REG_IHBP1, 0x90); serigo(GORDON_REG_IVNUM1, 0xA0); serigo(GORDON_REG_IHNUM1, 0x78); mddi_wait(2); serigo(GORDON_REG_IVBP2, 0x02); serigo(GORDON_REG_IHBP2, 0x48); serigo(GORDON_REG_IVNUM2, 0x50); serigo(GORDON_REG_IHNUM2, 0x3C); mddi_wait(2); serigo(GORDON_REG_POWCTL, 0x03); mddi_wait(15); serigo(GORDON_REG_POWCTL, 0x07); mddi_wait(15); serigo(GORDON_REG_POWCTL, 0x0F); mddi_wait(15); serigo(GORDON_REG_AVCTL, 0x03); mddi_wait(15); serigo(GORDON_REG_POWCTL, 0x1F); mddi_wait(15); serigo(GORDON_REG_POWCTL, 0x5F); mddi_wait(15); serigo(GORDON_REG_POWCTL, 0x7F); mddi_wait(15); serigo(GORDON_REG_LCDIFCTL1, 0x02); mddi_wait(15); serigo(GORDON_REG_IMGCTL1, 0x00); mddi_wait(15); serigo(GORDON_REG_LCDIFCTL3, 0x00); mddi_wait(15); 
serigo(GORDON_REG_VALTRAN, 0x01); mddi_wait(15); serigo(GORDON_REG_LCDIFCTL1, 0x03); serigo(GORDON_REG_LCDIFCTL1, 0x03); mddi_wait(1); } void gordon_disp_off(void) { serigo(GORDON_REG_LCDIFCTL2, 0x7B); serigo(GORDON_REG_VALTRAN, 0x01); serigo(GORDON_REG_LCDIFCTL1, 0x02); serigo(GORDON_REG_LCDIFCTL3, 0x01); mddi_wait(20); serigo(GORDON_REG_VALTRAN, 0x01); serigo(GORDON_REG_IMGCTL1, 0x01); serigo(GORDON_REG_LCDIFCTL1, 0x00); mddi_wait(20); serigo(GORDON_REG_POWCTL, 0x1F); mddi_wait(40); serigo(GORDON_REG_POWCTL, 0x07); mddi_wait(40); serigo(GORDON_REG_POWCTL, 0x03); mddi_wait(40); serigo(GORDON_REG_POWCTL, 0x00); mddi_wait(40); } void gordon_disp_init(void) { gordon_init(); mddi_wait(20); gordon_disp_on(); } static void toshiba_common_initial_setup(struct msm_fb_data_type *mfd) { if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT) { write_client_reg(DPSET0 , 0x4bec0066, TRUE); write_client_reg(DPSET1 , 0x00000113, TRUE); write_client_reg(DPSUS , 0x00000000, TRUE); write_client_reg(DPRUN , 0x00000001, TRUE); mddi_wait(5); write_client_reg(SYSCKENA , 0x00000001, TRUE); write_client_reg(CLKENB , 0x0000a0e9, TRUE); write_client_reg(GPIODATA , 0x03FF0000, TRUE); write_client_reg(GPIODIR , 0x0000024D, TRUE); write_client_reg(GPIOSEL , 0x00000173, TRUE); write_client_reg(GPIOPC , 0x03C300C0, TRUE); write_client_reg(WKREQ , 0x00000000, TRUE); write_client_reg(GPIOIS , 0x00000000, TRUE); write_client_reg(GPIOIEV , 0x00000001, TRUE); write_client_reg(GPIOIC , 0x000003FF, TRUE); write_client_reg(GPIODATA , 0x00040004, TRUE); write_client_reg(GPIODATA , 0x00080008, TRUE); write_client_reg(DRAMPWR , 0x00000001, TRUE); write_client_reg(CLKENB , 0x0000a0eb, TRUE); write_client_reg(PWMCR , 0x00000000, TRUE); mddi_wait(1); write_client_reg(SSICTL , 0x00060399, TRUE); write_client_reg(SSITIME , 0x00000100, TRUE); write_client_reg(CNT_DIS , 0x00000002, TRUE); write_client_reg(SSICTL , 0x0006039b, TRUE); write_client_reg(SSITX , 0x00000000, TRUE); mddi_wait(7); 
write_client_reg(SSITX , 0x00000000, TRUE); mddi_wait(7); write_client_reg(SSITX , 0x00000000, TRUE); mddi_wait(7); write_client_reg(SSITX , 0x000800BA, TRUE); write_client_reg(SSITX , 0x00000111, TRUE); write_client_reg(SSITX , 0x00080036, TRUE); write_client_reg(SSITX , 0x00000100, TRUE); mddi_wait(1); write_client_reg(SSITX , 0x0008003A, TRUE); write_client_reg(SSITX , 0x00000160, TRUE); write_client_reg(SSITX , 0x000800B1, TRUE); write_client_reg(SSITX , 0x0000015D, TRUE); mddi_wait(1); write_client_reg(SSITX , 0x000800B2, TRUE); write_client_reg(SSITX , 0x00000133, TRUE); write_client_reg(SSITX , 0x000800B3, TRUE); write_client_reg(SSITX , 0x00000122, TRUE); mddi_wait(1); write_client_reg(SSITX , 0x000800B4, TRUE); write_client_reg(SSITX , 0x00000102, TRUE); write_client_reg(SSITX , 0x000800B5, TRUE); write_client_reg(SSITX , 0x0000011E, TRUE); mddi_wait(1); write_client_reg(SSITX , 0x000800B6, TRUE); write_client_reg(SSITX , 0x00000127, TRUE); write_client_reg(SSITX , 0x000800B7, TRUE); write_client_reg(SSITX , 0x00000103, TRUE); mddi_wait(1); write_client_reg(SSITX , 0x000800B9, TRUE); write_client_reg(SSITX , 0x00000124, TRUE); write_client_reg(SSITX , 0x000800BD, TRUE); write_client_reg(SSITX , 0x000001A1, TRUE); mddi_wait(1); write_client_reg(SSITX , 0x000800BB, TRUE); write_client_reg(SSITX , 0x00000100, TRUE); write_client_reg(SSITX , 0x000800BF, TRUE); write_client_reg(SSITX , 0x00000101, TRUE); mddi_wait(1); write_client_reg(SSITX , 0x000800BE, TRUE); write_client_reg(SSITX , 0x00000100, TRUE); write_client_reg(SSITX , 0x000800C0, TRUE); write_client_reg(SSITX , 0x00000111, TRUE); mddi_wait(1); write_client_reg(SSITX , 0x000800C1, TRUE); write_client_reg(SSITX , 0x00000111, TRUE); write_client_reg(SSITX , 0x000800C2, TRUE); write_client_reg(SSITX , 0x00000111, TRUE); mddi_wait(1); write_client_reg(SSITX , 0x000800C3, TRUE); write_client_reg(SSITX , 0x00080132, TRUE); write_client_reg(SSITX , 0x00000132, TRUE); mddi_wait(1); write_client_reg(SSITX , 
0x000800C4, TRUE); write_client_reg(SSITX , 0x00080132, TRUE); write_client_reg(SSITX , 0x00000132, TRUE); mddi_wait(1); write_client_reg(SSITX , 0x000800C5, TRUE); write_client_reg(SSITX , 0x00080132, TRUE); write_client_reg(SSITX , 0x00000132, TRUE); mddi_wait(1); write_client_reg(SSITX , 0x000800C6, TRUE); write_client_reg(SSITX , 0x00080132, TRUE); write_client_reg(SSITX , 0x00000132, TRUE); mddi_wait(1); write_client_reg(SSITX , 0x000800C7, TRUE); write_client_reg(SSITX , 0x00080164, TRUE); write_client_reg(SSITX , 0x00000145, TRUE); mddi_wait(1); write_client_reg(SSITX , 0x000800C8, TRUE); write_client_reg(SSITX , 0x00000144, TRUE); write_client_reg(SSITX , 0x000800C9, TRUE); write_client_reg(SSITX , 0x00000152, TRUE); mddi_wait(1); write_client_reg(SSITX , 0x000800CA, TRUE); write_client_reg(SSITX , 0x00000100, TRUE); mddi_wait(1); write_client_reg(SSITX , 0x000800EC, TRUE); write_client_reg(SSITX , 0x00080101, TRUE); write_client_reg(SSITX , 0x000001FC, TRUE); mddi_wait(1); write_client_reg(SSITX , 0x000800CF, TRUE); write_client_reg(SSITX , 0x00000101, TRUE); mddi_wait(1); write_client_reg(SSITX , 0x000800D0, TRUE); write_client_reg(SSITX , 0x00080110, TRUE); write_client_reg(SSITX , 0x00000104, TRUE); mddi_wait(1); write_client_reg(SSITX , 0x000800D1, TRUE); write_client_reg(SSITX , 0x00000101, TRUE); mddi_wait(1); write_client_reg(SSITX , 0x000800D2, TRUE); write_client_reg(SSITX , 0x00080100, TRUE); write_client_reg(SSITX , 0x00000128, TRUE); mddi_wait(1); write_client_reg(SSITX , 0x000800D3, TRUE); write_client_reg(SSITX , 0x00080100, TRUE); write_client_reg(SSITX , 0x00000128, TRUE); mddi_wait(1); write_client_reg(SSITX , 0x000800D4, TRUE); write_client_reg(SSITX , 0x00080126, TRUE); write_client_reg(SSITX , 0x000001A4, TRUE); mddi_wait(1); write_client_reg(SSITX , 0x000800D5, TRUE); write_client_reg(SSITX , 0x00000120, TRUE); mddi_wait(1); write_client_reg(SSITX , 0x000800EF, TRUE); write_client_reg(SSITX , 0x00080132, TRUE); write_client_reg(SSITX , 
0x00000100, TRUE); mddi_wait(1); write_client_reg(BITMAP0 , 0x032001E0, TRUE); write_client_reg(BITMAP1 , 0x032001E0, TRUE); write_client_reg(BITMAP2 , 0x014000F0, TRUE); write_client_reg(BITMAP3 , 0x014000F0, TRUE); write_client_reg(BITMAP4 , 0x014000F0, TRUE); write_client_reg(CLKENB , 0x0000A1EB, TRUE); write_client_reg(PORT_ENB , 0x00000001, TRUE); write_client_reg(PORT , 0x00000004, TRUE); write_client_reg(PXL , 0x00000002, TRUE); write_client_reg(MPLFBUF , 0x00000000, TRUE); write_client_reg(HCYCLE , 0x000000FD, TRUE); write_client_reg(HSW , 0x00000003, TRUE); write_client_reg(HDE_START , 0x00000007, TRUE); write_client_reg(HDE_SIZE , 0x000000EF, TRUE); write_client_reg(VCYCLE , 0x00000325, TRUE); write_client_reg(VSW , 0x00000001, TRUE); write_client_reg(VDE_START , 0x00000003, TRUE); write_client_reg(VDE_SIZE , 0x0000031F, TRUE); write_client_reg(START , 0x00000001, TRUE); mddi_wait(32); write_client_reg(SSITX , 0x000800BC, TRUE); write_client_reg(SSITX , 0x00000180, TRUE); write_client_reg(SSITX , 0x0008003B, TRUE); write_client_reg(SSITX , 0x00000100, TRUE); mddi_wait(1); write_client_reg(SSITX , 0x000800B0, TRUE); write_client_reg(SSITX , 0x00000116, TRUE); mddi_wait(1); write_client_reg(SSITX , 0x000800B8, TRUE); write_client_reg(SSITX , 0x000801FF, TRUE); write_client_reg(SSITX , 0x000001F5, TRUE); mddi_wait(1); write_client_reg(SSITX , 0x00000011, TRUE); mddi_wait(5); write_client_reg(SSITX , 0x00000029, TRUE); return; } if (TM_GET_PID(mfd->panel.id) == LCD_SHARP_2P4_VGA) { write_client_reg(DPSET0, 0x4BEC0066, TRUE); write_client_reg(DPSET1, 0x00000113, TRUE); write_client_reg(DPSUS, 0x00000000, TRUE); write_client_reg(DPRUN, 0x00000001, TRUE); mddi_wait(14); write_client_reg(SYSCKENA, 0x00000001, TRUE); write_client_reg(CLKENB, 0x000000EF, TRUE); write_client_reg(GPIO_BLOCK_BASE, 0x03FF0000, TRUE); write_client_reg(GPIODIR, 0x0000024D, TRUE); write_client_reg(SYSTEM_BLOCK2_BASE, 0x00000173, TRUE); write_client_reg(GPIOPC, 0x03C300C0, TRUE); 
write_client_reg(SYSTEM_BLOCK1_BASE, 0x00000000, TRUE); write_client_reg(GPIOIS, 0x00000000, TRUE); write_client_reg(GPIOIEV, 0x00000001, TRUE); write_client_reg(GPIOIC, 0x000003FF, TRUE); write_client_reg(GPIO_BLOCK_BASE, 0x00060006, TRUE); write_client_reg(GPIO_BLOCK_BASE, 0x00080008, TRUE); write_client_reg(GPIO_BLOCK_BASE, 0x02000200, TRUE); write_client_reg(DRAMPWR, 0x00000001, TRUE); write_client_reg(TIMER0CTRL, 0x00000060, TRUE); write_client_reg(PWM_BLOCK_BASE, 0x00001388, TRUE); write_client_reg(PWM0OFF, 0x00001387, TRUE); write_client_reg(TIMER1CTRL, 0x00000060, TRUE); write_client_reg(TIMER1LOAD, 0x00001388, TRUE); write_client_reg(PWM1OFF, 0x00001387, TRUE); write_client_reg(TIMER0CTRL, 0x000000E0, TRUE); write_client_reg(TIMER1CTRL, 0x000000E0, TRUE); write_client_reg(PWMCR, 0x00000003, TRUE); mddi_wait(1); write_client_reg(SPI_BLOCK_BASE, 0x00063111, TRUE); write_client_reg(SSITIME, 0x00000100, TRUE); write_client_reg(SPI_BLOCK_BASE, 0x00063113, TRUE); mddi_wait(1); write_client_reg(SSITX, 0x00000000, TRUE); mddi_wait(1); write_client_reg(SSITX, 0x00000000, TRUE); mddi_wait(1); write_client_reg(SSITX, 0x00000000, TRUE); mddi_wait(1); write_client_reg(CLKENB, 0x0000A1EF, TRUE); write_client_reg(START, 0x00000000, TRUE); write_client_reg(WRSTB, 0x0000003F, TRUE); write_client_reg(RDSTB, 0x00000432, TRUE); write_client_reg(PORT_ENB, 0x00000002, TRUE); write_client_reg(VSYNIF, 0x00000000, TRUE); write_client_reg(ASY_DATA, 0x80000000, TRUE); write_client_reg(ASY_DATB, 0x00000001, TRUE); write_client_reg(ASY_CMDSET, 0x00000005, TRUE); write_client_reg(ASY_CMDSET, 0x00000004, TRUE); mddi_wait(10); write_client_reg(ASY_DATA, 0x80000000, TRUE); write_client_reg(ASY_DATB, 0x80000000, TRUE); write_client_reg(ASY_DATC, 0x80000000, TRUE); write_client_reg(ASY_DATD, 0x80000000, TRUE); write_client_reg(ASY_CMDSET, 0x00000009, TRUE); write_client_reg(ASY_CMDSET, 0x00000008, TRUE); write_client_reg(ASY_DATA, 0x80000007, TRUE); write_client_reg(ASY_DATB, 0x00004005, 
TRUE); write_client_reg(ASY_CMDSET, 0x00000005, TRUE); write_client_reg(ASY_CMDSET, 0x00000004, TRUE); mddi_wait(20); write_client_reg(ASY_DATA, 0x80000059, TRUE); write_client_reg(ASY_DATB, 0x00000000, TRUE); write_client_reg(ASY_CMDSET, 0x00000005, TRUE); write_client_reg(ASY_CMDSET, 0x00000004, TRUE); write_client_reg(VSYNIF, 0x00000001, TRUE); write_client_reg(PORT_ENB, 0x00000001, TRUE); } else { write_client_reg(DPSET0, 0x4BEC0066, TRUE); write_client_reg(DPSET1, 0x00000113, TRUE); write_client_reg(DPSUS, 0x00000000, TRUE); write_client_reg(DPRUN, 0x00000001, TRUE); mddi_wait(14); write_client_reg(SYSCKENA, 0x00000001, TRUE); write_client_reg(CLKENB, 0x000000EF, TRUE); write_client_reg(GPIODATA, 0x03FF0000, TRUE); write_client_reg(GPIODIR, 0x0000024D, TRUE); write_client_reg(GPIOSEL, 0x00000173, TRUE); write_client_reg(GPIOPC, 0x03C300C0, TRUE); write_client_reg(WKREQ, 0x00000000, TRUE); write_client_reg(GPIOIS, 0x00000000, TRUE); write_client_reg(GPIOIEV, 0x00000001, TRUE); write_client_reg(GPIOIC, 0x000003FF, TRUE); write_client_reg(GPIODATA, 0x00060006, TRUE); write_client_reg(GPIODATA, 0x00080008, TRUE); write_client_reg(GPIODATA, 0x02000200, TRUE); if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA) { mddi_wait(400); write_client_reg(DRAMPWR, 0x00000001, TRUE); write_client_reg(CNT_DIS, 0x00000002, TRUE); write_client_reg(BITMAP0, 0x01E00320, TRUE); write_client_reg(PORT_ENB, 0x00000001, TRUE); write_client_reg(PORT, 0x00000004, TRUE); write_client_reg(PXL, 0x0000003A, TRUE); write_client_reg(MPLFBUF, 0x00000000, TRUE); write_client_reg(HCYCLE, 0x00000253, TRUE); write_client_reg(HSW, 0x00000003, TRUE); write_client_reg(HDE_START, 0x00000017, TRUE); write_client_reg(HDE_SIZE, 0x0000018F, TRUE); write_client_reg(VCYCLE, 0x000001FF, TRUE); write_client_reg(VSW, 0x00000001, TRUE); write_client_reg(VDE_START, 0x00000003, TRUE); write_client_reg(VDE_SIZE, 0x000001DF, TRUE); write_client_reg(START, 0x00000001, TRUE); mddi_wait(1); 
write_client_reg(TIMER0CTRL, 0x00000060, TRUE); write_client_reg(TIMER0LOAD, 0x00001388, TRUE); write_client_reg(TIMER1CTRL, 0x00000060, TRUE); write_client_reg(TIMER1LOAD, 0x00001388, TRUE); write_client_reg(PWM1OFF, 0x00000087, TRUE); } else { write_client_reg(DRAMPWR, 0x00000001, TRUE); write_client_reg(TIMER0CTRL, 0x00000060, TRUE); write_client_reg(TIMER0LOAD, 0x00001388, TRUE); write_client_reg(TIMER1CTRL, 0x00000060, TRUE); write_client_reg(TIMER1LOAD, 0x00001388, TRUE); write_client_reg(PWM1OFF, 0x00001387, TRUE); } write_client_reg(TIMER0CTRL, 0x000000E0, TRUE); write_client_reg(TIMER1CTRL, 0x000000E0, TRUE); write_client_reg(PWMCR, 0x00000003, TRUE); mddi_wait(1); write_client_reg(SSICTL, 0x00000799, TRUE); write_client_reg(SSITIME, 0x00000100, TRUE); write_client_reg(SSICTL, 0x0000079b, TRUE); write_client_reg(SSITX, 0x00000000, TRUE); mddi_wait(1); write_client_reg(SSITX, 0x00000000, TRUE); mddi_wait(1); write_client_reg(SSITX, 0x00000000, TRUE); mddi_wait(1); write_client_reg(SSITX, 0x000800BA, TRUE); write_client_reg(SSITX, 0x00000111, TRUE); write_client_reg(SSITX, 0x00080036, TRUE); write_client_reg(SSITX, 0x00000100, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800BB, TRUE); write_client_reg(SSITX, 0x00000100, TRUE); write_client_reg(SSITX, 0x0008003A, TRUE); write_client_reg(SSITX, 0x00000160, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800BF, TRUE); write_client_reg(SSITX, 0x00000100, TRUE); write_client_reg(SSITX, 0x000800B1, TRUE); write_client_reg(SSITX, 0x0000015D, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800B2, TRUE); write_client_reg(SSITX, 0x00000133, TRUE); write_client_reg(SSITX, 0x000800B3, TRUE); write_client_reg(SSITX, 0x00000122, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800B4, TRUE); write_client_reg(SSITX, 0x00000102, TRUE); write_client_reg(SSITX, 0x000800B5, TRUE); write_client_reg(SSITX, 0x0000011F, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800B6, TRUE); write_client_reg(SSITX, 0x00000128, 
TRUE); write_client_reg(SSITX, 0x000800B7, TRUE); write_client_reg(SSITX, 0x00000103, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800B9, TRUE); write_client_reg(SSITX, 0x00000120, TRUE); write_client_reg(SSITX, 0x000800BD, TRUE); write_client_reg(SSITX, 0x00000102, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800BE, TRUE); write_client_reg(SSITX, 0x00000100, TRUE); write_client_reg(SSITX, 0x000800C0, TRUE); write_client_reg(SSITX, 0x00000111, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800C1, TRUE); write_client_reg(SSITX, 0x00000111, TRUE); write_client_reg(SSITX, 0x000800C2, TRUE); write_client_reg(SSITX, 0x00000111, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800C3, TRUE); write_client_reg(SSITX, 0x0008010A, TRUE); write_client_reg(SSITX, 0x0000010A, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800C4, TRUE); write_client_reg(SSITX, 0x00080160, TRUE); write_client_reg(SSITX, 0x00000160, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800C5, TRUE); write_client_reg(SSITX, 0x00080160, TRUE); write_client_reg(SSITX, 0x00000160, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800C6, TRUE); write_client_reg(SSITX, 0x00080160, TRUE); write_client_reg(SSITX, 0x00000160, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800C7, TRUE); write_client_reg(SSITX, 0x00080133, TRUE); write_client_reg(SSITX, 0x00000143, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800C8, TRUE); write_client_reg(SSITX, 0x00000144, TRUE); write_client_reg(SSITX, 0x000800C9, TRUE); write_client_reg(SSITX, 0x00000133, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800CA, TRUE); write_client_reg(SSITX, 0x00000100, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800EC, TRUE); write_client_reg(SSITX, 0x00080102, TRUE); write_client_reg(SSITX, 0x00000118, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800CF, TRUE); write_client_reg(SSITX, 0x00000101, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800D0, TRUE); write_client_reg(SSITX, 0x00080110, TRUE); 
write_client_reg(SSITX, 0x00000104, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800D1, TRUE); write_client_reg(SSITX, 0x00000101, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800D2, TRUE); write_client_reg(SSITX, 0x00080100, TRUE); write_client_reg(SSITX, 0x0000013A, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800D3, TRUE); write_client_reg(SSITX, 0x00080100, TRUE); write_client_reg(SSITX, 0x0000013A, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800D4, TRUE); write_client_reg(SSITX, 0x00080124, TRUE); write_client_reg(SSITX, 0x0000016E, TRUE); mddi_wait(1); write_client_reg(SSITX, 0x000800D5, TRUE); write_client_reg(SSITX, 0x00000124, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800ED, TRUE); write_client_reg(SSITX, 0x00080101, TRUE); write_client_reg(SSITX, 0x0000010A, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800D6, TRUE); write_client_reg(SSITX, 0x00000101, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800D7, TRUE); write_client_reg(SSITX, 0x00080110, TRUE); write_client_reg(SSITX, 0x0000010A, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800D8, TRUE); write_client_reg(SSITX, 0x00000101, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800D9, TRUE); write_client_reg(SSITX, 0x00080100, TRUE); write_client_reg(SSITX, 0x00000114, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800DE, TRUE); write_client_reg(SSITX, 0x00080100, TRUE); write_client_reg(SSITX, 0x00000114, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800DF, TRUE); write_client_reg(SSITX, 0x00080112, TRUE); write_client_reg(SSITX, 0x0000013F, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800E0, TRUE); write_client_reg(SSITX, 0x0000010B, TRUE); write_client_reg(SSITX, 0x000800E2, TRUE); write_client_reg(SSITX, 0x00000101, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800E3, TRUE); write_client_reg(SSITX, 0x00000136, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800E4, TRUE); write_client_reg(SSITX, 0x00080100, TRUE); write_client_reg(SSITX, 
0x00000103, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800E5, TRUE); write_client_reg(SSITX, 0x00080102, TRUE); write_client_reg(SSITX, 0x00000104, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800E6, TRUE); write_client_reg(SSITX, 0x00000103, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800E7, TRUE); write_client_reg(SSITX, 0x00080104, TRUE); write_client_reg(SSITX, 0x0000010A, TRUE); mddi_wait(2); write_client_reg(SSITX, 0x000800E8, TRUE); write_client_reg(SSITX, 0x00000104, TRUE); write_client_reg(CLKENB, 0x000001EF, TRUE); write_client_reg(START, 0x00000000, TRUE); write_client_reg(WRSTB, 0x0000003F, TRUE); write_client_reg(RDSTB, 0x00000432, TRUE); write_client_reg(PORT_ENB, 0x00000002, TRUE); write_client_reg(VSYNIF, 0x00000000, TRUE); write_client_reg(ASY_DATA, 0x80000000, TRUE); write_client_reg(ASY_DATB, 0x00000001, TRUE); write_client_reg(ASY_CMDSET, 0x00000005, TRUE); write_client_reg(ASY_CMDSET, 0x00000004, TRUE); mddi_wait(10); write_client_reg(ASY_DATA, 0x80000000, TRUE); write_client_reg(ASY_DATB, 0x80000000, TRUE); write_client_reg(ASY_DATC, 0x80000000, TRUE); write_client_reg(ASY_DATD, 0x80000000, TRUE); write_client_reg(ASY_CMDSET, 0x00000009, TRUE); write_client_reg(ASY_CMDSET, 0x00000008, TRUE); write_client_reg(ASY_DATA, 0x80000007, TRUE); write_client_reg(ASY_DATB, 0x00004005, TRUE); write_client_reg(ASY_CMDSET, 0x00000005, TRUE); write_client_reg(ASY_CMDSET, 0x00000004, TRUE); mddi_wait(20); write_client_reg(ASY_DATA, 0x80000059, TRUE); write_client_reg(ASY_DATB, 0x00000000, TRUE); write_client_reg(ASY_CMDSET, 0x00000005, TRUE); write_client_reg(ASY_CMDSET, 0x00000004, TRUE); write_client_reg(VSYNIF, 0x00000001, TRUE); write_client_reg(PORT_ENB, 0x00000001, TRUE); } mddi_toshiba_state_transition(TOSHIBA_STATE_PRIM_SEC_STANDBY, TOSHIBA_STATE_PRIM_SEC_READY); } static void toshiba_prim_start(struct msm_fb_data_type *mfd) { if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT) return; if (TM_GET_PID(mfd->panel.id) == 
LCD_SHARP_2P4_VGA) {
		/*
		 * Sharp 2.4" VGA panel: program frame-buffer windows,
		 * display timing and start the pixel clock.  Values are
		 * panel-specific magic from the vendor init sequence —
		 * do not reorder.
		 */
		write_client_reg(BITMAP1, 0x01E000F0, TRUE);
		write_client_reg(BITMAP2, 0x01E000F0, TRUE);
		write_client_reg(BITMAP3, 0x01E000F0, TRUE);
		write_client_reg(BITMAP4, 0x00DC00B0, TRUE);
		write_client_reg(CLKENB, 0x000001EF, TRUE);
		write_client_reg(PORT_ENB, 0x00000001, TRUE);
		write_client_reg(PORT, 0x00000016, TRUE);
		write_client_reg(PXL, 0x00000002, TRUE);
		write_client_reg(MPLFBUF, 0x00000000, TRUE);
		write_client_reg(HCYCLE, 0x00000185, TRUE);
		write_client_reg(HSW, 0x00000018, TRUE);
		write_client_reg(HDE_START, 0x0000004A, TRUE);
		write_client_reg(HDE_SIZE, 0x000000EF, TRUE);
		write_client_reg(VCYCLE, 0x0000028E, TRUE);
		write_client_reg(VSW, 0x00000004, TRUE);
		write_client_reg(VDE_START, 0x00000009, TRUE);
		write_client_reg(VDE_SIZE, 0x0000027F, TRUE);
		write_client_reg(START, 0x00000001, TRUE);
		write_client_reg(SYSTEM_BLOCK1_BASE, 0x00000002, TRUE);
	} else {
		/* Toshiba panel: enable vsync interface and primary port. */
		write_client_reg(VSYNIF, 0x00000001, TRUE);
		write_client_reg(PORT_ENB, 0x00000001, TRUE);
		write_client_reg(BITMAP1, 0x01E000F0, TRUE);
		write_client_reg(BITMAP2, 0x01E000F0, TRUE);
		write_client_reg(BITMAP3, 0x01E000F0, TRUE);
		write_client_reg(BITMAP4, 0x00DC00B0, TRUE);
		write_client_reg(CLKENB, 0x000001EF, TRUE);
		write_client_reg(PORT_ENB, 0x00000001, TRUE);
		write_client_reg(PORT, 0x00000004, TRUE);
		write_client_reg(PXL, 0x00000002, TRUE);
		write_client_reg(MPLFBUF, 0x00000000, TRUE);
		/*
		 * HCYCLE selects the refresh rate; the cached
		 * rows/usecs-per-refresh figures must stay consistent with
		 * it — they feed the vsync measurement code in
		 * mddi_toshiba_lcd_vsync_detected().
		 */
		if (mddi_toshiba_61Hz_refresh) {
			write_client_reg(HCYCLE, 0x000000FC, TRUE);
			mddi_toshiba_rows_per_second = 39526;
			mddi_toshiba_rows_per_refresh = 646;
			mddi_toshiba_usecs_per_refresh = 16344;
		} else {
			write_client_reg(HCYCLE, 0x0000010b, TRUE);
			mddi_toshiba_rows_per_second = 37313;
			mddi_toshiba_rows_per_refresh = 646;
			mddi_toshiba_usecs_per_refresh = 17313;
		}
		write_client_reg(HSW, 0x00000003, TRUE);
		write_client_reg(HDE_START, 0x00000007, TRUE);
		write_client_reg(HDE_SIZE, 0x000000EF, TRUE);
		write_client_reg(VCYCLE, 0x00000285, TRUE);
		write_client_reg(VSW, 0x00000001, TRUE);
		write_client_reg(VDE_START, 0x00000003, TRUE);
		write_client_reg(VDE_SIZE, 0x0000027F, TRUE);
		write_client_reg(START, 0x00000001, TRUE);
		mddi_wait(10);
		/* Panel configuration over the SSI (SPI-like) channel. */
		write_client_reg(SSITX, 0x000800BC, TRUE);
		write_client_reg(SSITX, 0x00000180, TRUE);
		write_client_reg(SSITX, 0x0008003B, TRUE);
		write_client_reg(SSITX, 0x00000100, TRUE);
		mddi_wait(1);
		write_client_reg(SSITX, 0x000800B0, TRUE);
		write_client_reg(SSITX, 0x00000116, TRUE);
		mddi_wait(1);
		write_client_reg(SSITX, 0x000800B8, TRUE);
		write_client_reg(SSITX, 0x000801FF, TRUE);
		write_client_reg(SSITX, 0x000001F5, TRUE);
		mddi_wait(1);
		write_client_reg(SSITX, 0x00000011, TRUE);
		write_client_reg(SSITX, 0x00000029, TRUE);
		/* Arm client-initiated wakeup for vsync reporting. */
		write_client_reg(WKREQ, 0x00000000, TRUE);
		write_client_reg(WAKEUP, 0x00000000, TRUE);
		write_client_reg(INTMSK, 0x00000001, TRUE);
	}
	mddi_toshiba_state_transition(TOSHIBA_STATE_PRIM_SEC_READY,
				      TOSHIBA_STATE_PRIM_NORMAL_MODE);
}

/*
 * Bring the secondary (sub) panel out of the ready state into normal
 * display mode.  Skipped entirely on the WVGA point-to-point panel,
 * which has no sub display.
 */
static void toshiba_sec_start(struct msm_fb_data_type *mfd)
{
	if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
		return;

	/* Select the secondary port and program its display window. */
	write_client_reg(VSYNIF, 0x00000000, TRUE);
	write_client_reg(PORT_ENB, 0x00000002, TRUE);
	write_client_reg(CLKENB, 0x000011EF, TRUE);
	write_client_reg(BITMAP0, 0x028001E0, TRUE);
	write_client_reg(BITMAP1, 0x00000000, TRUE);
	write_client_reg(BITMAP2, 0x00000000, TRUE);
	write_client_reg(BITMAP3, 0x00000000, TRUE);
	write_client_reg(BITMAP4, 0x00DC00B0, TRUE);
	write_client_reg(PORT, 0x00000000, TRUE);
	write_client_reg(PXL, 0x00000000, TRUE);
	write_client_reg(MPLFBUF, 0x00000004, TRUE);
	write_client_reg(HCYCLE, 0x0000006B, TRUE);
	write_client_reg(HSW, 0x00000003, TRUE);
	write_client_reg(HDE_START, 0x00000007, TRUE);
	write_client_reg(HDE_SIZE, 0x00000057, TRUE);
	write_client_reg(VCYCLE, 0x000000E6, TRUE);
	write_client_reg(VSW, 0x00000001, TRUE);
	write_client_reg(VDE_START, 0x00000003, TRUE);
	write_client_reg(VDE_SIZE, 0x000000DB, TRUE);

	/*
	 * Sub-LCD controller setup via the async command interface:
	 * ASY_DATA/ASY_DATB pairs carry index/value, ASY_CMDSET pulses
	 * the transfer.  Values are the vendor init table — do not
	 * reorder.
	 */
	write_client_reg(ASY_DATA, 0x80000001, TRUE);
	write_client_reg(ASY_DATB, 0x0000011B, TRUE);
	write_client_reg(ASY_DATC, 0x80000002, TRUE);
	write_client_reg(ASY_DATD, 0x00000700, TRUE);
	write_client_reg(ASY_DATE, 0x80000003, TRUE);
	write_client_reg(ASY_DATF, 0x00000230, TRUE);
	write_client_reg(ASY_DATG, 0x80000008, TRUE);
	write_client_reg(ASY_DATH, 0x00000402, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
	write_client_reg(ASY_DATA, 0x80000009, TRUE);
	write_client_reg(ASY_DATB, 0x00000000, TRUE);
	write_client_reg(ASY_DATC, 0x8000000B, TRUE);
	write_client_reg(ASY_DATD, 0x00000000, TRUE);
	write_client_reg(ASY_DATE, 0x8000000C, TRUE);
	write_client_reg(ASY_DATF, 0x00000000, TRUE);
	write_client_reg(ASY_DATG, 0x8000000D, TRUE);
	write_client_reg(ASY_DATH, 0x00000409, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
	write_client_reg(ASY_DATA, 0x8000000E, TRUE);
	write_client_reg(ASY_DATB, 0x00000409, TRUE);
	write_client_reg(ASY_DATC, 0x80000030, TRUE);
	write_client_reg(ASY_DATD, 0x00000000, TRUE);
	write_client_reg(ASY_DATE, 0x80000031, TRUE);
	write_client_reg(ASY_DATF, 0x00000100, TRUE);
	write_client_reg(ASY_DATG, 0x80000032, TRUE);
	write_client_reg(ASY_DATH, 0x00000104, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
	write_client_reg(ASY_DATA, 0x80000033, TRUE);
	write_client_reg(ASY_DATB, 0x00000400, TRUE);
	write_client_reg(ASY_DATC, 0x80000034, TRUE);
	write_client_reg(ASY_DATD, 0x00000306, TRUE);
	write_client_reg(ASY_DATE, 0x80000035, TRUE);
	write_client_reg(ASY_DATF, 0x00000706, TRUE);
	write_client_reg(ASY_DATG, 0x80000036, TRUE);
	write_client_reg(ASY_DATH, 0x00000707, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
	write_client_reg(ASY_DATA, 0x80000037, TRUE);
	write_client_reg(ASY_DATB, 0x00000004, TRUE);
	write_client_reg(ASY_DATC, 0x80000038, TRUE);
	write_client_reg(ASY_DATD, 0x00000000, TRUE);
	write_client_reg(ASY_DATE, 0x80000039, TRUE);
	write_client_reg(ASY_DATF, 0x00000000, TRUE);
	write_client_reg(ASY_DATG, 0x8000003A, TRUE);
	write_client_reg(ASY_DATH, 0x00000001, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
	/*
	 * NOTE(review): 0x08000042 below breaks the 0x80000045/0x80000021
	 * pattern of its neighbours (0x8... index bit); looks like a typo
	 * inherited from the vendor table — confirm against the panel
	 * datasheet before "fixing".
	 */
	write_client_reg(ASY_DATA, 0x80000044, TRUE);
	write_client_reg(ASY_DATB, 0x0000AF00, TRUE);
	write_client_reg(ASY_DATC, 0x80000045, TRUE);
	write_client_reg(ASY_DATD, 0x0000DB00, TRUE);
	write_client_reg(ASY_DATE, 0x08000042, TRUE);
	write_client_reg(ASY_DATF, 0x0000DB00, TRUE);
	write_client_reg(ASY_DATG, 0x80000021, TRUE);
	write_client_reg(ASY_DATH, 0x00000000, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000000, TRUE);

	/* One dummy frame to latch the configuration. */
	write_client_reg(PXL, 0x0000000C, TRUE);
	write_client_reg(VSYNIF, 0x00000001, TRUE);
	write_client_reg(ASY_DATA, 0x80000022, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000003, TRUE);
	write_client_reg(START, 0x00000001, TRUE);
	mddi_wait(60);
	write_client_reg(PXL, 0x00000000, TRUE);
	write_client_reg(VSYNIF, 0x00000000, TRUE);
	write_client_reg(START, 0x00000000, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000000, TRUE);

	write_client_reg(ASY_DATA, 0x80000050, TRUE);
	write_client_reg(ASY_DATB, 0x00000000, TRUE);
	write_client_reg(ASY_DATC, 0x80000051, TRUE);
	write_client_reg(ASY_DATD, 0x00000E00, TRUE);
	write_client_reg(ASY_DATE, 0x80000052, TRUE);
	write_client_reg(ASY_DATF, 0x00000D01, TRUE);
	write_client_reg(ASY_DATG, 0x80000053, TRUE);
	write_client_reg(ASY_DATH, 0x00000000, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
	write_client_reg(ASY_DATA, 0x80000058, TRUE);
	write_client_reg(ASY_DATB, 0x00000000, TRUE);
	write_client_reg(ASY_DATC, 0x8000005A, TRUE);
	write_client_reg(ASY_DATD, 0x00000E01, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000009, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000008, TRUE);

	/* Display-on / power ramp sequence. */
	write_client_reg(ASY_DATA, 0x80000011, TRUE);
	write_client_reg(ASY_DATB, 0x00000812, TRUE);
	write_client_reg(ASY_DATC, 0x80000012, TRUE);
	write_client_reg(ASY_DATD, 0x00000003, TRUE);
	write_client_reg(ASY_DATE, 0x80000013, TRUE);
	write_client_reg(ASY_DATF, 0x00000909, TRUE);
	write_client_reg(ASY_DATG, 0x80000010, TRUE);
	write_client_reg(ASY_DATH, 0x00000040, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
	mddi_wait(40);
	write_client_reg(ASY_DATA, 0x80000010, TRUE);
	write_client_reg(ASY_DATB, 0x00000340, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
	mddi_wait(60);
	write_client_reg(ASY_DATA, 0x80000010, TRUE);
	write_client_reg(ASY_DATB, 0x00003340, TRUE);
	write_client_reg(ASY_DATC, 0x80000007, TRUE);
	write_client_reg(ASY_DATD, 0x00004007, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000009, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000008, TRUE);
	mddi_wait(1);
	write_client_reg(ASY_DATA, 0x80000007, TRUE);
	write_client_reg(ASY_DATB, 0x00004017, TRUE);
	write_client_reg(ASY_DATC, 0x8000005B, TRUE);
	write_client_reg(ASY_DATD, 0x00000000, TRUE);
	write_client_reg(ASY_DATE, 0x80000059, TRUE);
	write_client_reg(ASY_DATF, 0x00000011, TRUE);
	write_client_reg(ASY_CMDSET, 0x0000000D, TRUE);
	write_client_reg(ASY_CMDSET, 0x0000000C, TRUE);
	mddi_wait(20);
	/* Index setting of SUB LCDD */
	write_client_reg(ASY_DATA, 0x80000059, TRUE);
	/* LTPS I/F control */
	write_client_reg(ASY_DATB, 0x00000019, TRUE);
	/* Direct cmd transfer enable */
	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
	/* Direct cmd transfer disable */
	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
	mddi_wait(20);
	/* Index setting of SUB LCDD */
	write_client_reg(ASY_DATA, 0x80000059, TRUE);
	/* LTPS I/F control */
	write_client_reg(ASY_DATB, 0x00000079, TRUE);
	/* Direct cmd transfer enable */
	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
	/* Direct cmd transfer disable */
	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
	mddi_wait(20);
	/* Index setting of SUB LCDD */
	write_client_reg(ASY_DATA, 0x80000059, TRUE);
	/* LTPS I/F control */
	write_client_reg(ASY_DATB, 0x000003FD, TRUE);
	/* Direct cmd transfer enable */
	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
	/* Direct cmd transfer disable */
	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
	mddi_wait(20);

	mddi_toshiba_state_transition(TOSHIBA_STATE_PRIM_SEC_READY,
				      TOSHIBA_STATE_SEC_NORMAL_MODE);
}

/*
 * Power the primary panel down into prim/sec standby.  The Sharp panel
 * is shut off through its dedicated gordon helper; the Toshiba panels
 * get the deep-standby SSI sequence.
 */
static void toshiba_prim_lcd_off(struct msm_fb_data_type *mfd)
{
	if (TM_GET_PID(mfd->panel.id) == LCD_SHARP_2P4_VGA) {
		gordon_disp_off();
	} else {
		/* Main panel power off (Deep standby in) */
		write_client_reg(SSITX, 0x000800BC, TRUE);
		write_client_reg(SSITX, 0x00000100, TRUE);
		write_client_reg(SSITX, 0x00000028, TRUE);
		mddi_wait(1);
		write_client_reg(SSITX, 0x000800B8, TRUE);
		write_client_reg(SSITX, 0x00000180, TRUE);
		write_client_reg(SSITX, 0x00000102, TRUE);
		write_client_reg(SSITX, 0x00000010, TRUE);
	}
	/* Stop the pixel pipeline on the client. */
	write_client_reg(PORT, 0x00000003, TRUE);
	write_client_reg(REGENB, 0x00000001, TRUE);
	mddi_wait(1);
	write_client_reg(PXL, 0x00000000, TRUE);
	write_client_reg(START, 0x00000000, TRUE);
	write_client_reg(REGENB, 0x00000001, TRUE);
	mddi_wait(3);
	if (TM_GET_PID(mfd->panel.id) != LCD_SHARP_2P4_VGA) {
		write_client_reg(SSITX, 0x000800B0, TRUE);
		write_client_reg(SSITX, 0x00000100, TRUE);
	}
	mddi_toshiba_state_transition(TOSHIBA_STATE_PRIM_NORMAL_MODE,
				      TOSHIBA_STATE_PRIM_SEC_STANDBY);
}

/*
 * Power the secondary (sub) panel down.  No-op on the WVGA
 * point-to-point panel, which has no sub display.
 */
static void toshiba_sec_lcd_off(struct msm_fb_data_type *mfd)
{
	if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
		return;

	write_client_reg(VSYNIF, 0x00000000, TRUE);
	write_client_reg(PORT_ENB, 0x00000002, TRUE);
	/* Staged LTPS power-down; each step needs its settle delay. */
	write_client_reg(ASY_DATA, 0x80000007, TRUE);
	write_client_reg(ASY_DATB, 0x00004016, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
	mddi_wait(2);
	write_client_reg(ASY_DATA, 0x80000059, TRUE);
	write_client_reg(ASY_DATB, 0x00000019, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
	mddi_wait(2);
	write_client_reg(ASY_DATA, 0x80000059, TRUE);
	write_client_reg(ASY_DATB, 0x0000000B, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
	mddi_wait(2);
write_client_reg(ASY_DATA, 0x80000059, TRUE);
	write_client_reg(ASY_DATB, 0x00000002, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
	mddi_wait(4);
	write_client_reg(ASY_DATA, 0x80000010, TRUE);
	write_client_reg(ASY_DATB, 0x00000300, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
	mddi_wait(4);
	write_client_reg(ASY_DATA, 0x80000059, TRUE);
	write_client_reg(ASY_DATB, 0x00000000, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
	mddi_wait(2);
	write_client_reg(ASY_DATA, 0x80000007, TRUE);
	write_client_reg(ASY_DATB, 0x00004004, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
	mddi_wait(2);
	/* Stop the secondary port and hand control back to the primary. */
	write_client_reg(PORT, 0x00000000, TRUE);
	write_client_reg(PXL, 0x00000000, TRUE);
	write_client_reg(START, 0x00000000, TRUE);
	write_client_reg(VSYNIF, 0x00000001, TRUE);
	write_client_reg(PORT_ENB, 0x00000001, TRUE);
	write_client_reg(REGENB, 0x00000001, TRUE);

	mddi_toshiba_state_transition(TOSHIBA_STATE_SEC_NORMAL_MODE,
				      TOSHIBA_STATE_PRIM_SEC_STANDBY);
}

/*
 * Start continuous (self-refresh) update of the secondary panel from
 * frame buffer 4.  No-op on the WVGA point-to-point panel.
 */
static void toshiba_sec_cont_update_start(struct msm_fb_data_type *mfd)
{
	if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
		return;

	write_client_reg(VSYNIF, 0x00000000, TRUE);
	write_client_reg(PORT_ENB, 0x00000002, TRUE);
	/*
	 * NOTE(review): every other site in this driver writes INTMSK;
	 * INTMASK here is either a distinct register or a typo inherited
	 * from the vendor code — verify against the client register map.
	 */
	write_client_reg(INTMASK, 0x00000001, TRUE);
	write_client_reg(TTBUSSEL, 0x0000000B, TRUE);
	write_client_reg(MONI, 0x00000008, TRUE);
	/* Clocks are enabled in three steps (core, then sub-LCD paths). */
	write_client_reg(CLKENB, 0x000000EF, TRUE);
	write_client_reg(CLKENB, 0x000010EF, TRUE);
	write_client_reg(CLKENB, 0x000011EF, TRUE);
	write_client_reg(BITMAP4, 0x00DC00B0, TRUE);
	write_client_reg(HCYCLE, 0x0000006B, TRUE);
	write_client_reg(HSW, 0x00000003, TRUE);
	write_client_reg(HDE_START, 0x00000002, TRUE);
	write_client_reg(HDE_SIZE, 0x00000057, TRUE);
	write_client_reg(VCYCLE, 0x000000E6, TRUE);
	write_client_reg(VSW, 0x00000001, TRUE);
	write_client_reg(VDE_START, 0x00000003, TRUE);
	write_client_reg(VDE_SIZE, 0x000000DB, TRUE);
	write_client_reg(WRSTB, 0x00000015, TRUE);
	write_client_reg(MPLFBUF, 0x00000004, TRUE);
	write_client_reg(ASY_DATA, 0x80000021, TRUE);
	write_client_reg(ASY_DATB, 0x00000000, TRUE);
	write_client_reg(ASY_DATC, 0x80000022, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000007, TRUE);
	write_client_reg(PXL, 0x00000089, TRUE);
	write_client_reg(VSYNIF, 0x00000001, TRUE);
	mddi_wait(2);
}

/*
 * Stop continuous update of the secondary panel and soft-reset the
 * update path.  No-op on the WVGA point-to-point panel.
 */
static void toshiba_sec_cont_update_stop(struct msm_fb_data_type *mfd)
{
	if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
		return;

	write_client_reg(PXL, 0x00000000, TRUE);
	write_client_reg(VSYNIF, 0x00000000, TRUE);
	write_client_reg(START, 0x00000000, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
	mddi_wait(3);
	/* Pulse soft reset (assert, then release). */
	write_client_reg(SRST, 0x00000002, TRUE);
	mddi_wait(3);
	write_client_reg(SRST, 0x00000003, TRUE);
}

/*
 * Turn on the secondary panel backlight via the client's PWM timers
 * (PWM0 near-zero duty, PWM1 near-full).  No-op on the WVGA
 * point-to-point panel.
 */
static void toshiba_sec_backlight_on(struct msm_fb_data_type *mfd)
{
	if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
		return;

	write_client_reg(TIMER0CTRL, 0x00000060, TRUE);
	write_client_reg(TIMER0LOAD, 0x00001388, TRUE);
	write_client_reg(PWM0OFF, 0x00000001, TRUE);
	write_client_reg(TIMER1CTRL, 0x00000060, TRUE);
	write_client_reg(TIMER1LOAD, 0x00001388, TRUE);
	write_client_reg(PWM1OFF, 0x00001387, TRUE);
	write_client_reg(TIMER0CTRL, 0x000000E0, TRUE);
	write_client_reg(TIMER1CTRL, 0x000000E0, TRUE);
	write_client_reg(PWMCR, 0x00000003, TRUE);
}

/*
 * Put the secondary panel into sleep mode: run the LTPS power-down
 * ladder, stop the port, then issue the sleep-in command.  No-op on
 * the WVGA point-to-point panel.
 */
static void toshiba_sec_sleep_in(struct msm_fb_data_type *mfd)
{
	if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
		return;

	write_client_reg(VSYNIF, 0x00000000, TRUE);
	write_client_reg(PORT_ENB, 0x00000002, TRUE);
	write_client_reg(ASY_DATA, 0x80000007, TRUE);
	write_client_reg(ASY_DATB, 0x00004016, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
	mddi_wait(2);
	write_client_reg(ASY_DATA, 0x80000059, TRUE);
	write_client_reg(ASY_DATB, 0x00000019, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
	mddi_wait(2);
	write_client_reg(ASY_DATA, 0x80000059, TRUE);
	write_client_reg(ASY_DATB, 0x0000000B, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
	mddi_wait(2);
	write_client_reg(ASY_DATA, 0x80000059, TRUE);
	write_client_reg(ASY_DATB, 0x00000002, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
	mddi_wait(4);
	write_client_reg(ASY_DATA, 0x80000010, TRUE);
	write_client_reg(ASY_DATB, 0x00000300, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
	mddi_wait(4);
	write_client_reg(ASY_DATA, 0x80000059, TRUE);
	write_client_reg(ASY_DATB, 0x00000000, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
	mddi_wait(2);
	write_client_reg(ASY_DATA, 0x80000007, TRUE);
	write_client_reg(ASY_DATB, 0x00004004, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
	mddi_wait(2);
	write_client_reg(PORT, 0x00000000, TRUE);
	write_client_reg(PXL, 0x00000000, TRUE);
	write_client_reg(START, 0x00000000, TRUE);
	write_client_reg(REGENB, 0x00000001, TRUE);
	/* Sleep in sequence */
	write_client_reg(ASY_DATA, 0x80000010, TRUE);
	write_client_reg(ASY_DATB, 0x00000302, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
}

/*
 * Wake the secondary panel from sleep and re-run the display-on /
 * LTPS power-up ladder.  No-op on the WVGA point-to-point panel.
 */
static void toshiba_sec_sleep_out(struct msm_fb_data_type *mfd)
{
	if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
		return;

	write_client_reg(VSYNIF, 0x00000000, TRUE);
	write_client_reg(PORT_ENB, 0x00000002, TRUE);
	write_client_reg(ASY_DATA, 0x80000010, TRUE);
	write_client_reg(ASY_DATB, 0x00000300, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
	/* Display ON sequence */
	write_client_reg(ASY_DATA, 0x80000011, TRUE);
	write_client_reg(ASY_DATB, 0x00000812, TRUE);
	write_client_reg(ASY_DATC, 0x80000012, TRUE);
	write_client_reg(ASY_DATD, 0x00000003, TRUE);
	write_client_reg(ASY_DATE, 0x80000013, TRUE);
	write_client_reg(ASY_DATF, 0x00000909, TRUE);
	write_client_reg(ASY_DATG, 0x80000010, TRUE);
	write_client_reg(ASY_DATH, 0x00000040, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
	mddi_wait(4);
	write_client_reg(ASY_DATA, 0x80000010, TRUE);
	write_client_reg(ASY_DATB, 0x00000340, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
	mddi_wait(6);
	write_client_reg(ASY_DATA, 0x80000010, TRUE);
	write_client_reg(ASY_DATB, 0x00003340, TRUE);
	write_client_reg(ASY_DATC, 0x80000007, TRUE);
	write_client_reg(ASY_DATD, 0x00004007, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000009, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000008, TRUE);
	mddi_wait(1);
	write_client_reg(ASY_DATA, 0x80000007, TRUE);
	write_client_reg(ASY_DATB, 0x00004017, TRUE);
	write_client_reg(ASY_DATC, 0x8000005B, TRUE);
	write_client_reg(ASY_DATD, 0x00000000, TRUE);
	write_client_reg(ASY_DATE, 0x80000059, TRUE);
	write_client_reg(ASY_DATF, 0x00000011, TRUE);
	write_client_reg(ASY_CMDSET, 0x0000000D, TRUE);
	write_client_reg(ASY_CMDSET, 0x0000000C, TRUE);
	mddi_wait(2);
	write_client_reg(ASY_DATA, 0x80000059, TRUE);
	write_client_reg(ASY_DATB, 0x00000019, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
	mddi_wait(2);
	write_client_reg(ASY_DATA, 0x80000059, TRUE);
	write_client_reg(ASY_DATB, 0x00000079, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
	mddi_wait(2);
	write_client_reg(ASY_DATA, 0x80000059, TRUE);
	write_client_reg(ASY_DATB, 0x000003FD, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
	mddi_wait(2);
}

/*
 * Set the primary backlight level from mfd->bl_level.  Tries the
 * board's PMIC backlight hook first, then a board-supplied level
 * translation, and finally falls back to a linear mapping onto the
 * client's PWM0 duty cycle.
 */
static void mddi_toshiba_lcd_set_backlight(struct msm_fb_data_type *mfd)
{
	int32 level;
	int ret = -EPERM;
	int max =
mfd->panel_info.bl_max;
	int min = mfd->panel_info.bl_min;

	/* Board PMIC hook wins outright when it succeeds. */
	if (mddi_toshiba_pdata && mddi_toshiba_pdata->pmic_backlight) {
		ret = mddi_toshiba_pdata->pmic_backlight(mfd->bl_level);
		if (!ret)
			return;
	}
	if (ret && mddi_toshiba_pdata && mddi_toshiba_pdata->backlight_level) {
		/* Board-specific mapping of bl_level into a PWM count. */
		level = mddi_toshiba_pdata->backlight_level(mfd->bl_level,
							    max, min);
		if (level < 0)
			return;
		if (TM_GET_PID(mfd->panel.id) == LCD_SHARP_2P4_VGA)
			write_client_reg(TIMER0LOAD, 0x00001388, TRUE);
	} else {
		/* Linear fallback onto the 0..4999 PWM range. */
		if (!max)
			level = 0;
		else
			level = (mfd->bl_level * 4999) / max;
	}
	write_client_reg(PWM0OFF, level, TRUE);
}

/*
 * Register a one-shot vsync callback and arm client-initiated wakeup.
 * Only one handler may be pending at a time; a second registration
 * before the first fires is reported and dropped.
 */
static void mddi_toshiba_vsync_set_handler(msm_fb_vsync_handler_type handler,
					   void *arg)
{
	boolean error = FALSE;
	unsigned long flags;

	/* Serialize against the MDDI host ISR. */
	spin_lock_irqsave(&mddi_host_spin_lock, flags);
	if (mddi_toshiba_vsync_handler != NULL) {
		error = TRUE;
	} else {
		mddi_toshiba_vsync_handler = handler;
		mddi_toshiba_vsync_handler_arg = arg;
	}
	spin_unlock_irqrestore(&mddi_host_spin_lock, flags);

	if (error) {
		MDDI_MSG_ERR("MDDI: Previous Vsync handler never called\n");
	} else {
		/* Unmask the client's vsync wakeup interrupt. */
		mddi_queue_register_write(INTMSK, 0x0000, FALSE, 0);
		mddi_toshiba_vsync_attempts = 1;
		mddi_vsync_detect_enabled = TRUE;
	}
}				/* mddi_toshiba_vsync_set_handler */

/*
 * Called by the MDDI host when a client wakeup is (or fails to be)
 * detected.  Optionally measures the real refresh rate from the time
 * between detections, fires the pending vsync handler, then disarms
 * and acknowledges the wakeup interrupt.  Gives up after 5 missed
 * attempts.
 */
static void mddi_toshiba_lcd_vsync_detected(boolean detected)
{
	static struct timeval start_time;	/* time of last detection */
	static boolean first_time = TRUE;	/* no baseline yet */
	struct timeval now;
	uint32 elapsed_us;
	uint32 num_vsyncs;

	if ((detected) || (mddi_toshiba_vsync_attempts > 5)) {
		if ((detected) && (mddi_toshiba_monitor_refresh_value)) {
			if (!first_time) {
				jiffies_to_timeval(jiffies, &now);
				elapsed_us =
				    (now.tv_sec - start_time.tv_sec) * 1000000 +
				    now.tv_usec - start_time.tv_usec;
				/*
				 * Round to the nearest whole number of
				 * refresh periods to get the vsync count
				 * since the last measurement.
				 */
				num_vsyncs = (elapsed_us +
					      (mddi_toshiba_usecs_per_refresh >>
					       1)) /
				    mddi_toshiba_usecs_per_refresh;
				/*
				 * Recompute rows/second from the measured
				 * interval so the MDP can track the panel's
				 * actual refresh rate.
				 */
				mddi_toshiba_rows_per_second =
				    (mddi_toshiba_rows_per_refresh * 1000 *
				     num_vsyncs) / (elapsed_us / 1000);
			}
			first_time = FALSE;
			jiffies_to_timeval(jiffies, &start_time);
			if (mddi_toshiba_report_refresh_measurements) {
				(void)mddi_queue_register_read_int(VPOS,
								   &mddi_toshiba_curr_vpos);
			}
		}
		/* Fire the pending one-shot handler, if any. */
		if (mddi_toshiba_vsync_handler != NULL) {
			(*mddi_toshiba_vsync_handler)
			    (mddi_toshiba_vsync_handler_arg);
			mddi_toshiba_vsync_handler = NULL;
		}
		mddi_vsync_detect_enabled = FALSE;
		mddi_toshiba_vsync_attempts = 0;
		/* Re-mask the client wakeup interrupt. */
		if (!mddi_queue_register_write_int(INTMSK, 0x0001))
			MDDI_MSG_ERR("Vsync interrupt disable failed!\n");
		if (!detected) {
			/* Gave up after 5 failed attempts. */
			MDDI_MSG_NOTICE("Vsync detection failed!\n");
		} else if ((mddi_toshiba_monitor_refresh_value) &&
			   (mddi_toshiba_report_refresh_measurements)) {
			MDDI_MSG_NOTICE(" Last Line Counter=%d!\n",
					mddi_toshiba_curr_vpos);
			MDDI_MSG_NOTICE(" Lines Per Second=%d!\n",
					mddi_toshiba_rows_per_second);
		}
		/* Acknowledge the wakeup interrupt on the client. */
		if (!mddi_queue_register_write_int(INTFLG, 0x0001))
			MDDI_MSG_ERR("Vsync interrupt clear failed!\n");
	} else {
		/*
		 * Woke from hibernation without a client-initiated
		 * wakeup; keep counting attempts until the cutoff above.
		 */
		mddi_toshiba_vsync_attempts++;
	}
}

/*
 * Drive the panel state machine to "primary panel on", running
 * whatever intermediate teardown/setup the current state requires.
 */
static void mddi_toshiba_prim_init(struct msm_fb_data_type *mfd)
{
	switch (toshiba_state) {
	case TOSHIBA_STATE_PRIM_SEC_READY:
		break;
	case TOSHIBA_STATE_OFF:
		toshiba_state = TOSHIBA_STATE_PRIM_SEC_STANDBY;
		toshiba_common_initial_setup(mfd);
		break;
	case TOSHIBA_STATE_PRIM_SEC_STANDBY:
		toshiba_common_initial_setup(mfd);
		break;
	case TOSHIBA_STATE_SEC_NORMAL_MODE:
		/* Tear the secondary display down first. */
		toshiba_sec_cont_update_stop(mfd);
		toshiba_sec_sleep_in(mfd);
		toshiba_sec_sleep_out(mfd);
		toshiba_sec_lcd_off(mfd);
		toshiba_common_initial_setup(mfd);
		break;
	default:
		MDDI_MSG_ERR("mddi_toshiba_prim_init from state %d\n",
			     toshiba_state);
	}
	toshiba_prim_start(mfd);
	if (TM_GET_PID(mfd->panel.id) == LCD_SHARP_2P4_VGA)
		gordon_disp_init();
	mddi_host_write_pix_attr_reg(0x00C3);
}

/*
 * Drive the panel state machine to "secondary panel on", including
 * backlight and continuous update.
 */
static void mddi_toshiba_sec_init(struct msm_fb_data_type *mfd)
{
	switch (toshiba_state) {
	case TOSHIBA_STATE_PRIM_SEC_READY:
		break;
	case TOSHIBA_STATE_PRIM_SEC_STANDBY:
		toshiba_common_initial_setup(mfd);
		break;
	case TOSHIBA_STATE_PRIM_NORMAL_MODE:
		/* Primary must be off before the secondary comes up. */
		toshiba_prim_lcd_off(mfd);
		toshiba_common_initial_setup(mfd);
		break;
	default:
		MDDI_MSG_ERR("mddi_toshiba_sec_init from state %d\n",
			     toshiba_state);
	}
	toshiba_sec_start(mfd);
	toshiba_sec_backlight_on(mfd);
	toshiba_sec_cont_update_start(mfd);
	mddi_host_write_pix_attr_reg(0x0400);
}

/*
 * Power down whichever display is active.  From the READY state the
 * primary is first brought fully up so that the normal power-down
 * path applies (single level of recursion).
 */
static void mddi_toshiba_lcd_powerdown(struct msm_fb_data_type *mfd)
{
	switch (toshiba_state) {
	case TOSHIBA_STATE_PRIM_SEC_READY:
		mddi_toshiba_prim_init(mfd);
		mddi_toshiba_lcd_powerdown(mfd);
		return;
	case TOSHIBA_STATE_PRIM_SEC_STANDBY:
		break;
	case TOSHIBA_STATE_PRIM_NORMAL_MODE:
		toshiba_prim_lcd_off(mfd);
		break;
	case TOSHIBA_STATE_SEC_NORMAL_MODE:
		toshiba_sec_cont_update_stop(mfd);
		toshiba_sec_sleep_in(mfd);
		toshiba_sec_sleep_out(mfd);
		toshiba_sec_lcd_off(mfd);
		break;
	default:
		MDDI_MSG_ERR("mddi_toshiba_lcd_powerdown from state %d\n",
			     toshiba_state);
	}
}

/* One-shot flag: first panel-on of the Sharp panel needs a REGENB kick. */
static int mddi_sharpgordon_firsttime = 1;

/*
 * Panel-on hook: bring the device's display (primary or secondary,
 * per the device id) into normal mode.
 */
static int mddi_toshiba_lcd_on(struct platform_device *pdev)
{
	struct
msm_fb_data_type *mfd; mfd = platform_get_drvdata(pdev); if (!mfd) return -ENODEV; if (mfd->key != MFD_KEY) return -EINVAL; if (TM_GET_DID(mfd->panel.id) == TOSHIBA_VGA_PRIM) mddi_toshiba_prim_init(mfd); else mddi_toshiba_sec_init(mfd); if (TM_GET_PID(mfd->panel.id) == LCD_SHARP_2P4_VGA) { if (mddi_sharpgordon_firsttime) { mddi_sharpgordon_firsttime = 0; write_client_reg(REGENB, 0x00000001, TRUE); } } return 0; } static int mddi_toshiba_lcd_off(struct platform_device *pdev) { mddi_toshiba_lcd_powerdown(platform_get_drvdata(pdev)); return 0; } static int __init mddi_toshiba_lcd_probe(struct platform_device *pdev) { if (pdev->id == 0) { mddi_toshiba_pdata = pdev->dev.platform_data; return 0; } msm_fb_add_device(pdev); return 0; } static struct platform_driver this_driver = { .probe = mddi_toshiba_lcd_probe, .driver = { .name = "mddi_toshiba", }, }; static struct msm_fb_panel_data toshiba_panel_data = { .on = mddi_toshiba_lcd_on, .off = mddi_toshiba_lcd_off, }; static int ch_used[3]; int mddi_toshiba_device_register(struct msm_panel_info *pinfo, u32 channel, u32 panel) { struct platform_device *pdev = NULL; int ret; if ((channel >= 3) || ch_used[channel]) return -ENODEV; if ((channel != TOSHIBA_VGA_PRIM) && mddi_toshiba_pdata && mddi_toshiba_pdata->panel_num) if (mddi_toshiba_pdata->panel_num() < 2) return -ENODEV; ch_used[channel] = TRUE; pdev = platform_device_alloc("mddi_toshiba", (panel << 8)|channel); if (!pdev) return -ENOMEM; if (channel == TOSHIBA_VGA_PRIM) { toshiba_panel_data.set_backlight = mddi_toshiba_lcd_set_backlight; if (pinfo->lcd.vsync_enable) { toshiba_panel_data.set_vsync_notifier = mddi_toshiba_vsync_set_handler; mddi_lcd.vsync_detected = mddi_toshiba_lcd_vsync_detected; } } else { toshiba_panel_data.set_backlight = NULL; toshiba_panel_data.set_vsync_notifier = NULL; } toshiba_panel_data.panel_info = *pinfo; ret = platform_device_add_data(pdev, &toshiba_panel_data, sizeof(toshiba_panel_data)); if (ret) { printk(KERN_ERR "%s: 
platform_device_add_data failed!\n", __func__); goto err_device_put; } ret = platform_device_add(pdev); if (ret) { printk(KERN_ERR "%s: platform_device_register failed!\n", __func__); goto err_device_put; } return 0; err_device_put: platform_device_put(pdev); return ret; } static int __init mddi_toshiba_lcd_init(void) { return platform_driver_register(&this_driver); } module_init(mddi_toshiba_lcd_init);
gpl-2.0
Tommy-Geenexus/android_kernel_sony_msm8974_togari_5.x
arch/s390/appldata/appldata_os.c
4594
6243
/* * arch/s390/appldata/appldata_os.c * * Data gathering module for Linux-VM Monitor Stream, Stage 1. * Collects misc. OS related data (CPU utilization, running processes). * * Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH. * * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> */ #define KMSG_COMPONENT "appldata" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/kernel_stat.h> #include <linux/netdevice.h> #include <linux/sched.h> #include <asm/appldata.h> #include <asm/smp.h> #include "appldata.h" #define LOAD_INT(x) ((x) >> FSHIFT) #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) /* * OS data * * This is accessed as binary data by z/VM. If changes to it can't be avoided, * the structure version (product ID, see appldata_base.c) needs to be changed * as well and all documentation and z/VM applications using it must be * updated. * * The record layout is documented in the Linux for zSeries Device Drivers * book: * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml */ struct appldata_os_per_cpu { u32 per_cpu_user; /* timer ticks spent in user mode */ u32 per_cpu_nice; /* ... spent with modified priority */ u32 per_cpu_system; /* ... spent in kernel mode */ u32 per_cpu_idle; /* ... spent in idle mode */ /* New in 2.6 */ u32 per_cpu_irq; /* ... spent in interrupts */ u32 per_cpu_softirq; /* ... spent in softirqs */ u32 per_cpu_iowait; /* ... spent while waiting for I/O */ /* New in modification level 01 */ u32 per_cpu_steal; /* ... stolen by hypervisor */ u32 cpu_id; /* number of this CPU */ } __attribute__((packed)); struct appldata_os_data { u64 timestamp; u32 sync_count_1; /* after VM collected the record data, */ u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the same. 
If not, the record has been updated on
				   the Linux side while VM was collecting
				   the (possibly corrupt) data */
	u32 nr_cpus;		/* number of (virtual) CPUs */
	u32 per_cpu_size;	/* size of the per-cpu data struct */
	u32 cpu_offset;		/* offset of the first per-cpu data struct */
	u32 nr_running;		/* number of runnable threads */
	u32 nr_threads;		/* number of threads */
	u32 avenrun[3];		/* average nr. of running processes during */
				/* the last 1, 5 and 15 minutes */
	/* New in 2.6 */
	u32 nr_iowait;		/* number of blocked threads
				   (waiting for I/O) */
	/* per cpu data */
	struct appldata_os_per_cpu os_cpu[0];
} __attribute__((packed));

static struct appldata_os_data *appldata_os_data;

static struct appldata_ops ops = {
	.name	   = "os",
	.record_nr = APPLDATA_RECORD_OS_ID,
	.owner	   = THIS_MODULE,
	.mod_lvl   = {0xF0, 0xF1},		/* EBCDIC "01" */
};


/*
 * appldata_get_os_data()
 *
 * gather OS data
 *
 * Fills @data (an appldata_os_data record) with scheduler statistics and
 * per-online-cpu cputime counters.  sync_count_1 is bumped before and
 * sync_count_2 after the update so z/VM can detect a record it sampled
 * mid-update (the counters then differ).
 */
static void appldata_get_os_data(void *data)
{
	int i, j, rc;
	struct appldata_os_data *os_data;
	unsigned int new_size;

	os_data = data;
	os_data->sync_count_1++;

	os_data->nr_threads = nr_threads;
	os_data->nr_running = nr_running();
	os_data->nr_iowait = nr_iowait();
	/* FIXED_1/200 rounds the fixed-point load averages */
	os_data->avenrun[0] = avenrun[0] + (FIXED_1/200);
	os_data->avenrun[1] = avenrun[1] + (FIXED_1/200);
	os_data->avenrun[2] = avenrun[2] + (FIXED_1/200);

	/* j indexes the packed record; i is the (possibly sparse) cpu id */
	j = 0;
	for_each_online_cpu(i) {
		os_data->os_cpu[j].per_cpu_user =
			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]);
		os_data->os_cpu[j].per_cpu_nice =
			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]);
		os_data->os_cpu[j].per_cpu_system =
			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]);
		os_data->os_cpu[j].per_cpu_idle =
			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]);
		os_data->os_cpu[j].per_cpu_irq =
			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]);
		os_data->os_cpu[j].per_cpu_softirq =
			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]);
		os_data->os_cpu[j].per_cpu_iowait =
			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]);
		os_data->os_cpu[j].per_cpu_steal =
			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]);
		os_data->os_cpu[j].cpu_id = i;
		j++;
	}
	os_data->nr_cpus = j;

	/*
	 * The record size depends on the number of online CPUs; if it
	 * changed (CPU hotplug), restart the z/VM collection with the new
	 * size before stopping the old one.
	 */
	new_size = sizeof(struct appldata_os_data) +
		   (os_data->nr_cpus * sizeof(struct appldata_os_per_cpu));
	if (ops.size != new_size) {
		if (ops.active) {
			rc = appldata_diag(APPLDATA_RECORD_OS_ID,
					   APPLDATA_START_INTERVAL_REC,
					   (unsigned long) ops.data, new_size,
					   ops.mod_lvl);
			if (rc != 0)
				pr_err("Starting a new OS data collection "
				       "failed with rc=%d\n", rc);

			rc = appldata_diag(APPLDATA_RECORD_OS_ID,
					   APPLDATA_STOP_REC,
					   (unsigned long) ops.data, ops.size,
					   ops.mod_lvl);
			if (rc != 0)
				pr_err("Stopping a faulty OS data "
				       "collection failed with rc=%d\n", rc);
		}
		ops.size = new_size;
	}
	os_data->timestamp = get_clock();
	os_data->sync_count_2++;
}


/*
 * appldata_os_init()
 *
 * init data, register ops
 *
 * Allocates the record at its worst-case size (NR_CPUS entries, DMA-able
 * memory as required by the diagnose interface) and registers the ops.
 */
static int __init appldata_os_init(void)
{
	int rc, max_size;

	max_size = sizeof(struct appldata_os_data) +
		   (NR_CPUS * sizeof(struct appldata_os_per_cpu));
	if (max_size > APPLDATA_MAX_REC_SIZE) {
		pr_err("Maximum OS record size %i exceeds the maximum "
		       "record size %i\n", max_size, APPLDATA_MAX_REC_SIZE);
		rc = -ENOMEM;
		goto out;
	}

	appldata_os_data = kzalloc(max_size, GFP_KERNEL | GFP_DMA);
	if (appldata_os_data == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	appldata_os_data->per_cpu_size = sizeof(struct appldata_os_per_cpu);
	appldata_os_data->cpu_offset   = offsetof(struct appldata_os_data,
						  os_cpu);

	ops.data = appldata_os_data;
	ops.callback  = &appldata_get_os_data;
	rc = appldata_register_ops(&ops);
	if (rc != 0)
		kfree(appldata_os_data);
out:
	return rc;
}

/*
 * appldata_os_exit()
 *
 * unregister ops
 */
static void __exit appldata_os_exit(void)
{
	appldata_unregister_ops(&ops);
	kfree(appldata_os_data);
}


module_init(appldata_os_init);
module_exit(appldata_os_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gerald Schaefer");
MODULE_DESCRIPTION("Linux-VM Monitor Stream, OS statistics");
gpl-2.0
c0llal0/kernel_xperia_z
sound/soc/mxs/mxs-pcm.c
4850
6286
/* * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved. * * Based on sound/soc/imx/imx-pcm-dma-mx2.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/dmaengine.h> #include <linux/fsl/mxs-dma.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/dmaengine_pcm.h> #include "mxs-pcm.h" struct mxs_pcm_dma_data { struct mxs_dma_data dma_data; struct mxs_pcm_dma_params *dma_params; }; static struct snd_pcm_hardware snd_mxs_hardware = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_INTERLEAVED, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE, .channels_min = 2, .channels_max = 2, .period_bytes_min = 32, .period_bytes_max = 8192, .periods_min = 1, .periods_max = 52, .buffer_bytes_max = 64 * 1024, .fifo_size = 32, }; static bool filter(struct dma_chan *chan, void *param) { struct mxs_pcm_dma_data *pcm_dma_data = param; struct mxs_pcm_dma_params 
*dma_params = pcm_dma_data->dma_params;

	/* only channels on the APBX DMA engine can serve PCM audio */
	if (!mxs_dma_is_apbx(chan))
		return false;

	if (chan->chan_id != dma_params->chan_num)
		return false;

	/* hand the mxs-specific data to the dmaengine driver */
	chan->private = &pcm_dma_data->dma_data;

	return true;
}

/* Buffers are preallocated in mxs_pcm_new(); just point the runtime at one */
static int snd_mxs_pcm_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *params)
{
	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);

	return 0;
}

/*
 * Substream open: allocate the per-stream DMA bookkeeping, request a
 * matching dmaengine channel through filter() and install the hardware
 * constraints.  The bookkeeping is stashed in the dmaengine-pcm data so
 * snd_mxs_close() can free it.
 */
static int snd_mxs_open(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mxs_pcm_dma_data *pcm_dma_data;
	int ret;

	pcm_dma_data = kzalloc(sizeof(*pcm_dma_data), GFP_KERNEL);
	if (pcm_dma_data == NULL)
		return -ENOMEM;

	pcm_dma_data->dma_params = snd_soc_dai_get_dma_data(rtd->cpu_dai,
							substream);
	pcm_dma_data->dma_data.chan_irq = pcm_dma_data->dma_params->chan_irq;

	ret = snd_dmaengine_pcm_open(substream, filter, pcm_dma_data);
	if (ret) {
		kfree(pcm_dma_data);
		return ret;
	}

	snd_soc_set_runtime_hwparams(substream, &snd_mxs_hardware);

	snd_dmaengine_pcm_set_data(substream, pcm_dma_data);

	return 0;
}

static int snd_mxs_close(struct snd_pcm_substream *substream)
{
	struct mxs_pcm_dma_data *pcm_dma_data =
		snd_dmaengine_pcm_get_data(substream);

	snd_dmaengine_pcm_close(substream);
	kfree(pcm_dma_data);

	return 0;
}

/* mmap the write-combined DMA buffer into userspace */
static int snd_mxs_pcm_mmap(struct snd_pcm_substream *substream,
		struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	return dma_mmap_writecombine(substream->pcm->card->dev, vma,
					runtime->dma_area,
					runtime->dma_addr,
					runtime->dma_bytes);
}

static struct snd_pcm_ops mxs_pcm_ops = {
	.open		= snd_mxs_open,
	.close		= snd_mxs_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= snd_mxs_pcm_hw_params,
	.trigger	= snd_dmaengine_pcm_trigger,
	.pointer	= snd_dmaengine_pcm_pointer,
	.mmap		= snd_mxs_pcm_mmap,
};

/*
 * Allocate the maximum-size write-combined DMA buffer for one stream
 * direction; freed again in mxs_pcm_free().
 */
static int mxs_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
{
	struct snd_pcm_substream *substream = pcm->streams[stream].substream;
	struct snd_dma_buffer *buf = &substream->dma_buffer;
	size_t size = snd_mxs_hardware.buffer_bytes_max;

	buf->dev.type = SNDRV_DMA_TYPE_DEV;
	buf->dev.dev = pcm->card->dev;
	buf->private_data = NULL;
	buf->area = dma_alloc_writecombine(pcm->card->dev, size,
					   &buf->addr, GFP_KERNEL);
	if (!buf->area)
		return -ENOMEM;
	buf->bytes = size;

	return 0;
}

static u64 mxs_pcm_dmamask = DMA_BIT_MASK(32);

/*
 * PCM instantiation: set 32-bit DMA masks if the card has none yet and
 * preallocate buffers for whichever directions exist.
 */
static int mxs_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	struct snd_pcm *pcm = rtd->pcm;
	int ret = 0;

	if (!card->dev->dma_mask)
		card->dev->dma_mask = &mxs_pcm_dmamask;
	if (!card->dev->coherent_dma_mask)
		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);

	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
		ret = mxs_pcm_preallocate_dma_buffer(pcm,
			SNDRV_PCM_STREAM_PLAYBACK);
		if (ret)
			goto out;
	}

	if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
		ret = mxs_pcm_preallocate_dma_buffer(pcm,
			SNDRV_PCM_STREAM_CAPTURE);
		if (ret)
			goto out;
	}

out:
	return ret;
}

/* Release the buffers allocated by mxs_pcm_preallocate_dma_buffer() */
static void mxs_pcm_free(struct snd_pcm *pcm)
{
	struct snd_pcm_substream *substream;
	struct snd_dma_buffer *buf;
	int stream;

	for (stream = 0; stream < 2; stream++) {
		substream = pcm->streams[stream].substream;
		if (!substream)
			continue;

		buf = &substream->dma_buffer;
		if (!buf->area)
			continue;

		dma_free_writecombine(pcm->card->dev, buf->bytes,
				      buf->area, buf->addr);
		buf->area = NULL;
	}
}

static struct snd_soc_platform_driver mxs_soc_platform = {
	.ops		= &mxs_pcm_ops,
	.pcm_new	= mxs_pcm_new,
	.pcm_free	= mxs_pcm_free,
};

static int __devinit mxs_soc_platform_probe(struct platform_device *pdev)
{
	return snd_soc_register_platform(&pdev->dev, &mxs_soc_platform);
}

static int __devexit mxs_soc_platform_remove(struct platform_device *pdev)
{
	snd_soc_unregister_platform(&pdev->dev);

	return 0;
}

static struct platform_driver mxs_pcm_driver = {
	.driver = {
		.name = "mxs-pcm-audio",
		.owner = THIS_MODULE,
	},
	.probe = mxs_soc_platform_probe,
	.remove = __devexit_p(mxs_soc_platform_remove),
};

module_platform_driver(mxs_pcm_driver);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-pcm-audio");
gpl-2.0
bilalliberty/android_kernel_htc_liberty-villec2
net/ax25/ax25_out.c
4850
9085
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

/* serialises fragment skb allocation in ax25_output() */
static DEFINE_SPINLOCK(ax25_frag_lock);

/*
 * Queue @skb for transmission from @src to @dest on @dev, reusing an
 * existing control block when one matches or creating (and connecting) a
 * new one.  Returns the control block with a reference held for the
 * caller, or NULL on failure (the caller keeps ownership of skb then only
 * in the NULL case — ax25_output() consumes it otherwise).
 */
ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen,
	ax25_address *src, ax25_address *dest,
	ax25_digi *digi, struct net_device *dev)
{
	ax25_dev *ax25_dev;
	ax25_cb *ax25;

	/*
	 * Take the default packet length for the device if zero is
	 * specified.
	 */
	if (paclen == 0) {
		if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
			return NULL;

		paclen = ax25_dev->values[AX25_VALUES_PACLEN];
	}

	/*
	 * Look for an existing connection.
	 */
	if ((ax25 = ax25_find_cb(src, dest, digi, dev)) != NULL) {
		ax25_output(ax25, paclen, skb);
		return ax25;		/* It already existed */
	}

	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
		return NULL;

	if ((ax25 = ax25_create_cb()) == NULL)
		return NULL;

	ax25_fillin_cb(ax25, ax25_dev);

	ax25->source_addr = *src;
	ax25->dest_addr   = *dest;

	if (digi != NULL) {
		ax25->digipeat = kmemdup(digi, sizeof(*digi), GFP_ATOMIC);
		if (ax25->digipeat == NULL) {
			ax25_cb_put(ax25);
			return NULL;
		}
	}

	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		ax25_std_establish_data_link(ax25);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
	case AX25_PROTO_DAMA_SLAVE:
		if (ax25_dev->dama.slave)
			ax25_ds_establish_data_link(ax25);
		else
			ax25_std_establish_data_link(ax25);
		break;
#endif
	}

	/*
	 * There is one ref for the state machine; a caller needs
	 * one more to put it back, just like with the existing one.
	 */
	ax25_cb_hold(ax25);
	ax25_cb_add(ax25);

	ax25->state = AX25_STATE_1;

	ax25_start_heartbeat(ax25);

	ax25_output(ax25, paclen, skb);

	return ax25;			/* We had to create it */
}

EXPORT_SYMBOL(ax25_send_frame);

/*
 * All outgoing AX.25 I frames pass via this routine. Therefore this is
 * where the fragmentation of frames takes place. If fragment is set to
 * zero then we are not allowed to do fragmentation, even if the frame
 * is too large.
 */
void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb)
{
	struct sk_buff *skbn;
	unsigned char *p;
	int frontlen, len, fragno, ka9qfrag, first = 1;

	/* refuse nonsensically small packet lengths — drop the frame */
	if (paclen < 16) {
		WARN_ON_ONCE(1);
		kfree_skb(skb);
		return;
	}

	if ((skb->len - 1) > paclen) {
		if (*skb->data == AX25_P_TEXT) {
			/* text frames are split without segmentation headers */
			skb_pull(skb, 1); /* skip PID */
			ka9qfrag = 0;
		} else {
			paclen -= 2;	/* Allow for fragment control info */
			ka9qfrag = 1;
		}

		fragno = skb->len / paclen;
		if (skb->len % paclen == 0) fragno--;

		frontlen = skb_headroom(skb);	/* Address space + CTRL */

		while (skb->len > 0) {
			spin_lock_bh(&ax25_frag_lock);
			if ((skbn = alloc_skb(paclen + 2 + frontlen, GFP_ATOMIC)) == NULL) {
				spin_unlock_bh(&ax25_frag_lock);
				printk(KERN_CRIT "AX.25: ax25_output - out of memory\n");
				return;
			}

			if (skb->sk != NULL)
				skb_set_owner_w(skbn, skb->sk);

			spin_unlock_bh(&ax25_frag_lock);

			len = (paclen > skb->len) ? skb->len : paclen;

			if (ka9qfrag == 1) {
				skb_reserve(skbn, frontlen + 2);
				skb_set_network_header(skbn,
						      skb_network_offset(skb));
				skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
				p = skb_push(skbn, 2);

				*p++ = AX25_P_SEGMENT;

				/* fragno counts down; FIRST flag on fragment 1 */
				*p = fragno--;
				if (first) {
					*p |= AX25_SEG_FIRST;
					first = 0;
				}
			} else {
				skb_reserve(skbn, frontlen + 1);
				skb_set_network_header(skbn,
						      skb_network_offset(skb));
				skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
				p = skb_push(skbn, 1);
				*p = AX25_P_TEXT;
			}

			skb_pull(skb, len);
			skb_queue_tail(&ax25->write_queue, skbn); /* Throw it on the queue */
		}

		kfree_skb(skb);
	} else {
		skb_queue_tail(&ax25->write_queue, skb);	  /* Throw it on the queue */
	}

	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		ax25_kick(ax25);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
	/*
	 * A DAMA slave is _required_ to work as normal AX.25L2V2
	 * if no DAMA master is available.
	 */
	case AX25_PROTO_DAMA_SLAVE:
		if (!ax25->ax25_dev->dama.slave) ax25_kick(ax25);
		break;
#endif
	}
}

/*
 * This procedure is passed a buffer descriptor for an iframe.
It builds
 * the rest of the control part of the frame and then writes it out.
 */
static void ax25_send_iframe(ax25_cb *ax25, struct sk_buff *skb, int poll_bit)
{
	unsigned char *frame;

	if (skb == NULL)
		return;

	skb_reset_network_header(skb);

	if (ax25->modulus == AX25_MODULUS) {
		/* mod-8: one control byte carrying N(R), N(S) and P/F */
		frame = skb_push(skb, 1);

		*frame = AX25_I;
		*frame |= (poll_bit) ? AX25_PF : 0;
		*frame |= (ax25->vr << 5);
		*frame |= (ax25->vs << 1);
	} else {
		/* mod-128 (extended): two control bytes */
		frame = skb_push(skb, 2);

		frame[0] = AX25_I;
		frame[0] |= (ax25->vs << 1);
		frame[1] = (poll_bit) ? AX25_EPF : 0;
		frame[1] |= (ax25->vr << 1);
	}

	ax25_start_idletimer(ax25);

	ax25_transmit_buffer(ax25, skb, AX25_COMMAND);
}

/*
 * Drain the write queue while the send window is open: clone each frame,
 * transmit the clone and park the original on the ack queue until it is
 * acknowledged.
 */
void ax25_kick(ax25_cb *ax25)
{
	struct sk_buff *skb, *skbn;
	int last = 1;
	unsigned short start, end, next;

	if (ax25->state != AX25_STATE_3 && ax25->state != AX25_STATE_4)
		return;

	if (ax25->condition & AX25_COND_PEER_RX_BUSY)
		return;

	if (skb_peek(&ax25->write_queue) == NULL)
		return;

	start = (skb_peek(&ax25->ack_queue) == NULL) ? ax25->va : ax25->vs;
	end   = (ax25->va + ax25->window) % ax25->modulus;

	if (start == end)
		return;

	/*
	 * Transmit data until either we're out of data to send or
	 * the window is full. Send a poll on the final I frame if
	 * the window is filled.
	 */

	/*
	 * Dequeue the frame and copy it.
	 * Check for race with ax25_clear_queues().
	 */
	skb = skb_dequeue(&ax25->write_queue);
	if (!skb)
		return;

	ax25->vs = start;

	do {
		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
			skb_queue_head(&ax25->write_queue, skb);
			break;
		}

		if (skb->sk != NULL)
			skb_set_owner_w(skbn, skb->sk);

		next = (ax25->vs + 1) % ax25->modulus;
		last = (next == end);

		/*
		 * Transmit the frame copy.
		 * bke 960114: do not set the Poll bit on the last frame
		 * in DAMA mode.
		 */
		switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
		case AX25_PROTO_STD_SIMPLEX:
		case AX25_PROTO_STD_DUPLEX:
			ax25_send_iframe(ax25, skbn, (last) ? AX25_POLLON : AX25_POLLOFF);
			break;

#ifdef CONFIG_AX25_DAMA_SLAVE
		case AX25_PROTO_DAMA_SLAVE:
			ax25_send_iframe(ax25, skbn, AX25_POLLOFF);
			break;
#endif
		}

		ax25->vs = next;

		/*
		 * Requeue the original data frame.
		 */
		skb_queue_tail(&ax25->ack_queue, skb);

	} while (!last && (skb = skb_dequeue(&ax25->write_queue)) != NULL);

	ax25->condition &= ~AX25_COND_ACK_PENDING;

	if (!ax25_t1timer_running(ax25)) {
		ax25_stop_t3timer(ax25);
		ax25_calculate_t1(ax25);
		ax25_start_t1timer(ax25);
	}
}

/*
 * Prepend the AX.25 address field (growing headroom if needed) and hand
 * the frame to the device.  Consumes @skb on every path.
 */
void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type)
{
	struct sk_buff *skbn;
	unsigned char *ptr;
	int headroom;

	if (ax25->ax25_dev == NULL) {
		ax25_disconnect(ax25, ENETUNREACH);
		return;
	}

	headroom = ax25_addr_size(ax25->digipeat);

	if (skb_headroom(skb) < headroom) {
		if ((skbn = skb_realloc_headroom(skb, headroom)) == NULL) {
			printk(KERN_CRIT "AX.25: ax25_transmit_buffer - out of memory\n");
			kfree_skb(skb);
			return;
		}

		if (skb->sk != NULL)
			skb_set_owner_w(skbn, skb->sk);

		kfree_skb(skb);
		skb = skbn;
	}

	ptr = skb_push(skb, headroom);

	ax25_addr_build(ptr, &ax25->source_addr, &ax25->dest_addr,
			ax25->digipeat, type, ax25->modulus);

	ax25_queue_xmit(skb, ax25->ax25_dev->dev);
}

/*
 * A small shim to dev_queue_xmit to add the KISS control byte, and do
 * any packet forwarding in operation.
 */
void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned char *ptr;

	skb->protocol = ax25_type_trans(skb, ax25_fwd_dev(dev));

	ptr = skb_push(skb, 1);
	*ptr = 0x00;			/* KISS */

	dev_queue_xmit(skb);
}

/*
 * Process an acknowledgement N(R).  Returns 1 if any outstanding frames
 * were acked (timers are adjusted accordingly), 0 if nothing changed.
 */
int ax25_check_iframes_acked(ax25_cb *ax25, unsigned short nr)
{
	if (ax25->vs == nr) {
		/* everything outstanding is acked: restart idle polling */
		ax25_frames_acked(ax25, nr);
		ax25_calculate_rtt(ax25);
		ax25_stop_t1timer(ax25);
		ax25_start_t3timer(ax25);
		return 1;
	} else {
		if (ax25->va != nr) {
			/* partial ack: keep the retransmission timer running */
			ax25_frames_acked(ax25, nr);
			ax25_calculate_t1(ax25);
			ax25_start_t1timer(ax25);
			return 1;
		}
	}
	return 0;
}
gpl-2.0
invisiblek/android_kernel_htc_msm8960
net/sched/sch_mq.c
4850
5771
/*
 * net/sched/sch_mq.c		Classful multiqueue dummy scheduler
 *
 * Copyright (c) 2009 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

/* private data: one child qdisc per device TX queue, until attach time */
struct mq_sched {
	struct Qdisc		**qdiscs;
};

/*
 * Tear down whatever children are still owned by us.  After mq_attach()
 * has run, priv->qdiscs is NULL and ownership has moved to the TX queues.
 */
static void mq_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (!priv->qdiscs)
		return;
	for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
		qdisc_destroy(priv->qdiscs[ntx]);
	kfree(priv->qdiscs);
}

/*
 * Create one pfifo_fast child per TX queue.  mq may only be used as the
 * root qdisc of a multiqueue device.  Children use minor numbers 1..n.
 */
static int mq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	unsigned int ntx;

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdiscs, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (priv->qdiscs == NULL)
		return -ENOMEM;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		dev_queue = netdev_get_tx_queue(dev, ntx);
		qdisc = qdisc_create_dflt(dev_queue, &pfifo_fast_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(ntx + 1)));
		if (qdisc == NULL)
			goto err;
		priv->qdiscs[ntx] = qdisc;
	}

	sch->flags |= TCQ_F_MQROOT;
	return 0;

err:
	mq_destroy(sch);
	return -ENOMEM;
}

/*
 * Graft the pre-allocated children onto their TX queues.  Ownership is
 * transferred to the queues, so the private array is released here and
 * any qdisc previously on a queue is destroyed.
 */
static void mq_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc;
	unsigned int ntx;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (qdisc)
			qdisc_destroy(qdisc);
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

/* Sum the per-queue child statistics into this (dummy) root qdisc */
static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *qdisc;
	unsigned int ntx;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
		spin_lock_bh(qdisc_lock(qdisc));
		sch->q.qlen		+= qdisc->q.qlen;
		sch->bstats.bytes	+= qdisc->bstats.bytes;
		sch->bstats.packets	+= qdisc->bstats.packets;
		sch->qstats.qlen	+= qdisc->qstats.qlen;
		sch->qstats.backlog	+= qdisc->qstats.backlog;
		sch->qstats.drops	+= qdisc->qstats.drops;
		sch->qstats.requeues	+= qdisc->qstats.requeues;
		sch->qstats.overlimits	+= qdisc->qstats.overlimits;
		spin_unlock_bh(qdisc_lock(qdisc));
	}
	return 0;
}

/* Class id cl maps to TX queue cl - 1; NULL for out-of-range ids */
static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

/* Pick the queue named by tcm_parent, falling back to queue 0 */
static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
					    struct tcmsg *tcm)
{
	unsigned int ntx = TC_H_MIN(tcm->tcm_parent);
	struct netdev_queue *dev_queue = mq_queue_get(sch, ntx);

	if (!dev_queue) {
		struct net_device *dev = qdisc_dev(sch);

		return netdev_get_tx_queue(dev, 0);
	}
	return dev_queue;
}

/*
 * Replace the qdisc on one TX queue; the device is quiesced around the
 * swap.  The displaced qdisc is returned through *old for the caller.
 */
static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
		    struct Qdisc **old)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
	struct net_device *dev = qdisc_dev(sch);

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);

	if (dev->flags & IFF_UP)
		dev_activate(dev);
	return 0;
}

static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	return dev_queue->qdisc_sleeping;
}

/* classes are not refcounted; validate the id and echo it back */
static unsigned long mq_get(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!mq_queue_get(sch, ntx))
		return 0;
	return ntx;
}

static void mq_put(struct Qdisc *sch, unsigned long cl)
{
}

static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
			 struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	return 0;
}

/* Report the child qdisc's own stats for class cl */
static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
			       struct gnet_dump *d)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	sch = dev_queue->qdisc_sleeping;
	sch->qstats.qlen = sch->q.qlen;
	if (gnet_stats_copy_basic(d, &sch->bstats) < 0 ||
	    gnet_stats_copy_queue(d, &sch->qstats) < 0)
		return -1;
	return 0;
}

/* Iterate class ids 1..num_tx_queues for netlink class walks */
static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops mq_class_ops = {
	.select_queue	= mq_select_queue,
	.graft		= mq_graft,
	.leaf		= mq_leaf,
	.get		= mq_get,
	.put		= mq_put,
	.walk		= mq_walk,
	.dump		= mq_dump_class,
	.dump_stats	= mq_dump_class_stats,
};

struct Qdisc_ops mq_qdisc_ops __read_mostly = {
	.cl_ops		= &mq_class_ops,
	.id		= "mq",
	.priv_size	= sizeof(struct mq_sched),
	.init		= mq_init,
	.destroy	= mq_destroy,
	.attach		= mq_attach,
	.dump		= mq_dump,
	.owner		= THIS_MODULE,
};
gpl-2.0
araca/Zen-Kernel-Huawei-P7
arch/arm/mach-imx/mach-pcm037_eet.c
7666
4339
/*
 * Copyright (C) 2009
 * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * Board support for the EET variant of the phyCORE pcm037 (i.MX31):
 * routes a set of i.MX31 pads to GPIO for keys/LEDs, registers the
 * DAC124S085 SPI DAC and a gpio-keys input device.
 */
#include <linux/gpio.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

#include <mach/common.h>
#include <mach/iomux-mx3.h>

#include <asm/mach-types.h>

#include "pcm037.h"
#include "devices-imx31.h"

/*
 * IOMUX setup: every pad below is switched to GPIO mode.  The trailing
 * numeric comments are the resulting GPIO numbers used by the
 * gpio-keys table further down.
 */
static unsigned int pcm037_eet_pins[] = {
	/* Reserve and hardwire GPIO 57 high - S6E63D6 chipselect */
	IOMUX_MODE(MX31_PIN_KEY_COL7, IOMUX_CONFIG_GPIO),

	/* GPIO keys */
	IOMUX_MODE(MX31_PIN_GPIO1_0, IOMUX_CONFIG_GPIO),	/* 0 */
	IOMUX_MODE(MX31_PIN_GPIO1_1, IOMUX_CONFIG_GPIO),	/* 1 */
	IOMUX_MODE(MX31_PIN_GPIO1_2, IOMUX_CONFIG_GPIO),	/* 2 */
	IOMUX_MODE(MX31_PIN_GPIO1_3, IOMUX_CONFIG_GPIO),	/* 3 */
	IOMUX_MODE(MX31_PIN_SVEN0, IOMUX_CONFIG_GPIO),		/* 32 */
	IOMUX_MODE(MX31_PIN_STX0, IOMUX_CONFIG_GPIO),		/* 33 */
	IOMUX_MODE(MX31_PIN_SRX0, IOMUX_CONFIG_GPIO),		/* 34 */
	IOMUX_MODE(MX31_PIN_SIMPD0, IOMUX_CONFIG_GPIO),		/* 35 */
	IOMUX_MODE(MX31_PIN_RTS1, IOMUX_CONFIG_GPIO),		/* 38 */
	IOMUX_MODE(MX31_PIN_CTS1, IOMUX_CONFIG_GPIO),		/* 39 */
	IOMUX_MODE(MX31_PIN_KEY_ROW4, IOMUX_CONFIG_GPIO),	/* 50 */
	IOMUX_MODE(MX31_PIN_KEY_ROW5, IOMUX_CONFIG_GPIO),	/* 51 */
	IOMUX_MODE(MX31_PIN_KEY_ROW6, IOMUX_CONFIG_GPIO),	/* 52 */
	IOMUX_MODE(MX31_PIN_KEY_ROW7, IOMUX_CONFIG_GPIO),	/* 53 */

	/* LEDs */
	IOMUX_MODE(MX31_PIN_DTR_DTE1, IOMUX_CONFIG_GPIO),	/* 44 */
	IOMUX_MODE(MX31_PIN_DSR_DTE1, IOMUX_CONFIG_GPIO),	/* 45 */
	IOMUX_MODE(MX31_PIN_KEY_COL5, IOMUX_CONFIG_GPIO),	/* 55 */
	IOMUX_MODE(MX31_PIN_KEY_COL6, IOMUX_CONFIG_GPIO),	/* 56 */
};

/* SPI */
static struct spi_board_info pcm037_spi_dev[] = {
	{
		.modalias	= "dac124s085",
		.max_speed_hz	= 400000,
		.bus_num	= 0,
		.chip_select	= 0,		/* Index in pcm037_spi1_cs[] */
		.mode		= SPI_CPHA,
	},
};

/* Platform Data for MXC CSPI */
/* chipselect 1 is native, the DAC CS is driven via the KEY_COL7 GPIO */
static int pcm037_spi1_cs[] = {MXC_SPI_CS(1), IOMUX_TO_GPIO(MX31_PIN_KEY_COL7)};

static const struct spi_imx_master pcm037_spi1_pdata __initconst = {
	.chipselect = pcm037_spi1_cs,
	.num_chipselect = ARRAY_SIZE(pcm037_spi1_cs),
};

/* GPIO-keys input device */
/* GPIO numbers match the pad setup in pcm037_eet_pins[] above */
static struct gpio_keys_button pcm037_gpio_keys[] = {
	{
		.type	= EV_KEY,
		.code	= KEY_L,
		.gpio	= 0,
		.desc	= "Wheel Manual",
		.wakeup	= 0,
	}, {
		.type	= EV_KEY,
		.code	= KEY_A,
		.gpio	= 1,
		.desc	= "Wheel AF",
		.wakeup	= 0,
	}, {
		.type	= EV_KEY,
		.code	= KEY_V,
		.gpio	= 2,
		.desc	= "Wheel View",
		.wakeup	= 0,
	}, {
		.type	= EV_KEY,
		.code	= KEY_M,
		.gpio	= 3,
		.desc	= "Wheel Menu",
		.wakeup	= 0,
	}, {
		.type	= EV_KEY,
		.code	= KEY_UP,
		.gpio	= 32,
		.desc	= "Nav Pad Up",
		.wakeup	= 0,
	}, {
		.type	= EV_KEY,
		.code	= KEY_RIGHT,
		.gpio	= 33,
		.desc	= "Nav Pad Right",
		.wakeup	= 0,
	}, {
		.type	= EV_KEY,
		.code	= KEY_DOWN,
		.gpio	= 34,
		.desc	= "Nav Pad Down",
		.wakeup	= 0,
	}, {
		.type	= EV_KEY,
		.code	= KEY_LEFT,
		.gpio	= 35,
		.desc	= "Nav Pad Left",
		.wakeup	= 0,
	}, {
		.type	= EV_KEY,
		.code	= KEY_ENTER,
		.gpio	= 38,
		.desc	= "Nav Pad Ok",
		.wakeup	= 0,
	}, {
		.type	= EV_KEY,
		.code	= KEY_O,
		.gpio	= 39,
		.desc	= "Wheel Off",
		.wakeup	= 0,
	}, {
		.type	= EV_KEY,
		.code	= BTN_FORWARD,
		.gpio	= 50,
		.desc	= "Focus Forward",
		.wakeup	= 0,
	}, {
		.type	= EV_KEY,
		.code	= BTN_BACK,
		.gpio	= 51,
		.desc	= "Focus Backward",
		.wakeup	= 0,
	}, {
		.type	= EV_KEY,
		.code	= BTN_MIDDLE,
		.gpio	= 52,
		.desc	= "Release Half",
		.wakeup	= 0,
	}, {
		.type	= EV_KEY,
		.code	= BTN_EXTRA,
		.gpio	= 53,
		.desc	= "Release Full",
		.wakeup	= 0,
	},
};

static const struct gpio_keys_platform_data
		pcm037_gpio_keys_platform_data __initconst = {
	.buttons	= pcm037_gpio_keys,
	.nbuttons	= ARRAY_SIZE(pcm037_gpio_keys),
	.rep		= 0, /* No auto-repeat */
};

/*
 * Register the EET-specific devices.  Runs as a late_initcall so it
 * executes after the base pcm037 machine init; bails out early on any
 * other machine or pcm037 variant.
 */
static int __init eet_init_devices(void)
{
	if (!machine_is_pcm037() || pcm037_variant() != PCM037_EET)
		return 0;

	/* NOTE(review): return value of the pin setup is not checked —
	 * presumably acceptable at init time, but confirm */
	mxc_iomux_setup_multiple_pins(pcm037_eet_pins,
				ARRAY_SIZE(pcm037_eet_pins), "pcm037_eet");

	/* SPI */
	spi_register_board_info(pcm037_spi_dev, ARRAY_SIZE(pcm037_spi_dev));
	imx31_add_spi_imx0(&pcm037_spi1_pdata);

	imx_add_gpio_keys(&pcm037_gpio_keys_platform_data);

	return 0;
}
late_initcall(eet_init_devices);
gpl-2.0
bq/linux-e60q22
arch/mips/pmc-sierra/msp71xx/msp_prom.c
8690
11614
/* * BRIEF MODULE DESCRIPTION * PROM library initialisation code, assuming a version of * pmon is the boot code. * * Copyright 2000,2001 MontaVista Software Inc. * Author: MontaVista Software, Inc. * ppopov@mvista.com or source@mvista.com * * This file was derived from Carsten Langgaard's * arch/mips/mips-boards/xx files. * * Carsten Langgaard, carstenl@mips.com * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/string.h> #include <linux/interrupt.h> #include <linux/mm.h> #include <linux/slab.h> #include <asm/addrspace.h> #include <asm/bootinfo.h> #include <asm-generic/sections.h> #include <asm/page.h> #include <msp_prom.h> #include <msp_regs.h> /* global PROM environment variables and pointers */ int prom_argc; char **prom_argv, **prom_envp; int *prom_vec; /* debug flag */ int init_debug = 1; /* memory blocks */ struct prom_pmemblock mdesc[PROM_MAX_PMEMBLOCKS]; /* default feature sets */ static char msp_default_features[] = #if defined(CONFIG_PMC_MSP4200_EVAL) \ || defined(CONFIG_PMC_MSP4200_GW) "ERER"; #elif defined(CONFIG_PMC_MSP7120_EVAL) \ || defined(CONFIG_PMC_MSP7120_GW) "EMEMSP"; #elif defined(CONFIG_PMC_MSP7120_FPGA) "EMEM"; #endif /* conversion functions */ static inline unsigned char str2hexnum(unsigned char c) { if (c >= '0' && c <= '9') return c - '0'; if (c >= 'a' && c <= 'f') return c - 'a' + 10; return 0; /* foo */ } static inline int str2eaddr(unsigned char *ea, unsigned char *str) { int index = 0; unsigned char num = 0; while (*str != '\0') { if ((*str == '.') || (*str == ':')) { ea[index++] = num; num = 0; str++; } else { num = num << 4; num |= str2hexnum(*str++); } } if (index == 5) { ea[index++] = num; return 0; } else return -1; } EXPORT_SYMBOL(str2eaddr); static inline unsigned long str2hex(unsigned char *str) { int value = 0; while (*str) { value = value << 4; value |= str2hexnum(*str++); } return value; } /* function to query the system information */ const char *get_system_type(void) { #if defined(CONFIG_PMC_MSP4200_EVAL) return "PMC-Sierra MSP4200 Eval Board"; #elif defined(CONFIG_PMC_MSP4200_GW) return "PMC-Sierra MSP4200 VoIP Gateway"; #elif defined(CONFIG_PMC_MSP7120_EVAL) return "PMC-Sierra MSP7120 Eval Board"; #elif defined(CONFIG_PMC_MSP7120_GW) return "PMC-Sierra MSP7120 Residential Gateway"; #elif defined(CONFIG_PMC_MSP7120_FPGA) return 
"PMC-Sierra MSP7120 FPGA"; #else #error "What is the type of *your* MSP?" #endif } int get_ethernet_addr(char *ethaddr_name, char *ethernet_addr) { char *ethaddr_str; ethaddr_str = prom_getenv(ethaddr_name); if (!ethaddr_str) { printk(KERN_WARNING "%s not set in boot prom\n", ethaddr_name); return -1; } if (str2eaddr(ethernet_addr, ethaddr_str) == -1) { printk(KERN_WARNING "%s badly formatted-<%s>\n", ethaddr_name, ethaddr_str); return -1; } if (init_debug > 1) { int i; printk(KERN_DEBUG "get_ethernet_addr: for %s ", ethaddr_name); for (i = 0; i < 5; i++) printk(KERN_DEBUG "%02x:", (unsigned char)*(ethernet_addr+i)); printk(KERN_DEBUG "%02x\n", *(ethernet_addr+i)); } return 0; } EXPORT_SYMBOL(get_ethernet_addr); static char *get_features(void) { char *feature = prom_getenv(FEATURES); if (feature == NULL) { /* default features based on MACHINE_TYPE */ feature = msp_default_features; } return feature; } static char test_feature(char c) { char *feature = get_features(); while (*feature) { if (*feature++ == c) return *feature; feature++; } return FEATURE_NOEXIST; } unsigned long get_deviceid(void) { char *deviceid = prom_getenv(DEVICEID); if (deviceid == NULL) return *DEV_ID_REG; else return str2hex(deviceid); } char identify_pci(void) { return test_feature(PCI_KEY); } EXPORT_SYMBOL(identify_pci); char identify_pcimux(void) { return test_feature(PCIMUX_KEY); } char identify_sec(void) { return test_feature(SEC_KEY); } EXPORT_SYMBOL(identify_sec); char identify_spad(void) { return test_feature(SPAD_KEY); } EXPORT_SYMBOL(identify_spad); char identify_tdm(void) { return test_feature(TDM_KEY); } EXPORT_SYMBOL(identify_tdm); char identify_zsp(void) { return test_feature(ZSP_KEY); } EXPORT_SYMBOL(identify_zsp); static char identify_enetfeature(char key, unsigned long interface_num) { char *feature = get_features(); while (*feature) { if (*feature++ == key && interface_num-- == 0) return *feature; feature++; } return FEATURE_NOEXIST; } char identify_enet(unsigned long 
interface_num) { return identify_enetfeature(ENET_KEY, interface_num); } EXPORT_SYMBOL(identify_enet); char identify_enetTxD(unsigned long interface_num) { return identify_enetfeature(ENETTXD_KEY, interface_num); } EXPORT_SYMBOL(identify_enetTxD); unsigned long identify_family(void) { unsigned long deviceid; deviceid = get_deviceid(); return deviceid & CPU_DEVID_FAMILY; } EXPORT_SYMBOL(identify_family); unsigned long identify_revision(void) { unsigned long deviceid; deviceid = get_deviceid(); return deviceid & CPU_DEVID_REVISION; } EXPORT_SYMBOL(identify_revision); /* PROM environment functions */ char *prom_getenv(char *env_name) { /* * Return a pointer to the given environment variable. prom_envp * points to a null terminated array of pointers to variables. * Environment variables are stored in the form of "memsize=64" */ char **var = prom_envp; int i = strlen(env_name); while (*var) { if (strncmp(env_name, *var, i) == 0) { return (*var + strlen(env_name) + 1); } var++; } return NULL; } /* PROM commandline functions */ void __init prom_init_cmdline(void) { char *cp; int actr; actr = 1; /* Always ignore argv[0] */ cp = &(arcs_cmdline[0]); while (actr < prom_argc) { strcpy(cp, prom_argv[actr]); cp += strlen(prom_argv[actr]); *cp++ = ' '; actr++; } if (cp != &(arcs_cmdline[0])) /* get rid of trailing space */ --cp; *cp = '\0'; } /* memory allocation functions */ static int __init prom_memtype_classify(unsigned int type) { switch (type) { case yamon_free: return BOOT_MEM_RAM; case yamon_prom: return BOOT_MEM_ROM_DATA; default: return BOOT_MEM_RESERVED; } } void __init prom_meminit(void) { struct prom_pmemblock *p; p = prom_getmdesc(); while (p->size) { long type; unsigned long base, size; type = prom_memtype_classify(p->type); base = p->base; size = p->size; add_memory_region(base, size, type); p++; } } void __init prom_free_prom_memory(void) { int argc; char **argv; char **envp; char *ptr; int len = 0; int i; unsigned long addr; /* * preserve environment variables 
and command line from pmon/bbload * first preserve the command line */ for (argc = 0; argc < prom_argc; argc++) { len += sizeof(char *); /* length of pointer */ len += strlen(prom_argv[argc]) + 1; /* length of string */ } len += sizeof(char *); /* plus length of null pointer */ argv = kmalloc(len, GFP_KERNEL); ptr = (char *) &argv[prom_argc + 1]; /* strings follow array */ for (argc = 0; argc < prom_argc; argc++) { argv[argc] = ptr; strcpy(ptr, prom_argv[argc]); ptr += strlen(prom_argv[argc]) + 1; } argv[prom_argc] = NULL; /* end array with null pointer */ prom_argv = argv; /* next preserve the environment variables */ len = 0; i = 0; for (envp = prom_envp; *envp != NULL; envp++) { i++; /* count number of environment variables */ len += sizeof(char *); /* length of pointer */ len += strlen(*envp) + 1; /* length of string */ } len += sizeof(char *); /* plus length of null pointer */ envp = kmalloc(len, GFP_KERNEL); ptr = (char *) &envp[i+1]; for (argc = 0; argc < i; argc++) { envp[argc] = ptr; strcpy(ptr, prom_envp[argc]); ptr += strlen(prom_envp[argc]) + 1; } envp[i] = NULL; /* end array with null pointer */ prom_envp = envp; for (i = 0; i < boot_mem_map.nr_map; i++) { if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA) continue; addr = boot_mem_map.map[i].addr; free_init_pages("prom memory", addr, addr + boot_mem_map.map[i].size); } } struct prom_pmemblock *__init prom_getmdesc(void) { static char memsz_env[] __initdata = "memsize"; static char heaptop_env[] __initdata = "heaptop"; char *str; unsigned int memsize; unsigned int heaptop; int i; str = prom_getenv(memsz_env); if (!str) { ppfinit("memsize not set in boot prom, " "set to default (32Mb)\n"); memsize = 0x02000000; } else { memsize = simple_strtol(str, NULL, 0); if (memsize == 0) { /* if memsize is a bad size, use reasonable default */ memsize = 0x02000000; } /* convert to physical address (removing caching bits, etc) */ memsize = CPHYSADDR(memsize); } str = prom_getenv(heaptop_env); if (!str) { heaptop = 
CPHYSADDR((u32)&_text); ppfinit("heaptop not set in boot prom, " "set to default 0x%08x\n", heaptop); } else { heaptop = simple_strtol(str, NULL, 16); if (heaptop == 0) { /* heaptop conversion bad, might have 0xValue */ heaptop = simple_strtol(str, NULL, 0); if (heaptop == 0) { /* heaptop still bad, use reasonable default */ heaptop = CPHYSADDR((u32)&_text); } } /* convert to physical address (removing caching bits, etc) */ heaptop = CPHYSADDR((u32)heaptop); } /* the base region */ i = 0; mdesc[i].type = BOOT_MEM_RESERVED; mdesc[i].base = 0x00000000; mdesc[i].size = PAGE_ALIGN(0x300 + 0x80); /* jtag interrupt vector + sizeof vector */ /* PMON data */ if (heaptop > mdesc[i].base + mdesc[i].size) { i++; /* 1 */ mdesc[i].type = BOOT_MEM_ROM_DATA; mdesc[i].base = mdesc[i-1].base + mdesc[i-1].size; mdesc[i].size = heaptop - mdesc[i].base; } /* end of PMON data to start of kernel -- probably zero .. */ if (heaptop != CPHYSADDR((u32)_text)) { i++; /* 2 */ mdesc[i].type = BOOT_MEM_RAM; mdesc[i].base = heaptop; mdesc[i].size = CPHYSADDR((u32)_text) - mdesc[i].base; } /* kernel proper */ i++; /* 3 */ mdesc[i].type = BOOT_MEM_RESERVED; mdesc[i].base = CPHYSADDR((u32)_text); mdesc[i].size = CPHYSADDR(PAGE_ALIGN((u32)_end)) - mdesc[i].base; /* Remainder of RAM -- under memsize */ i++; /* 5 */ mdesc[i].type = yamon_free; mdesc[i].base = mdesc[i-1].base + mdesc[i-1].size; mdesc[i].size = memsize - mdesc[i].base; return &mdesc[0]; }
gpl-2.0
CalcProgrammer1/archos-gen8-kernel-3.2.1
arch/arm/mach-pxa/clock.c
9202
1532
/* * linux/arch/arm/mach-sa1100/clock.c */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/clk.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/clkdev.h> #include "clock.h" static DEFINE_SPINLOCK(clocks_lock); int clk_enable(struct clk *clk) { unsigned long flags; spin_lock_irqsave(&clocks_lock, flags); if (clk->enabled++ == 0) clk->ops->enable(clk); spin_unlock_irqrestore(&clocks_lock, flags); if (clk->delay) udelay(clk->delay); return 0; } EXPORT_SYMBOL(clk_enable); void clk_disable(struct clk *clk) { unsigned long flags; WARN_ON(clk->enabled == 0); spin_lock_irqsave(&clocks_lock, flags); if (--clk->enabled == 0) clk->ops->disable(clk); spin_unlock_irqrestore(&clocks_lock, flags); } EXPORT_SYMBOL(clk_disable); unsigned long clk_get_rate(struct clk *clk) { unsigned long rate; rate = clk->rate; if (clk->ops->getrate) rate = clk->ops->getrate(clk); return rate; } EXPORT_SYMBOL(clk_get_rate); int clk_set_rate(struct clk *clk, unsigned long rate) { unsigned long flags; int ret = -EINVAL; if (clk->ops->setrate) { spin_lock_irqsave(&clocks_lock, flags); ret = clk->ops->setrate(clk, rate); spin_unlock_irqrestore(&clocks_lock, flags); } return ret; } EXPORT_SYMBOL(clk_set_rate); void clk_dummy_enable(struct clk *clk) { } void clk_dummy_disable(struct clk *clk) { } const struct clkops clk_dummy_ops = { .enable = clk_dummy_enable, .disable = clk_dummy_disable, }; struct clk clk_dummy = { .ops = &clk_dummy_ops, };
gpl-2.0
championswimmer/android_kernel_sony_huashan
arch/blackfin/kernel/early_printk.c
9202
6140
/* * allow a console to be used for early printk * derived from arch/x86/kernel/early_printk.c * * Copyright 2007-2009 Analog Devices Inc. * * Licensed under the GPL-2 */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/serial_core.h> #include <linux/console.h> #include <linux/string.h> #include <linux/reboot.h> #include <asm/blackfin.h> #include <asm/irq_handler.h> #include <asm/early_printk.h> #ifdef CONFIG_SERIAL_BFIN extern struct console *bfin_earlyserial_init(unsigned int port, unsigned int cflag); #endif #ifdef CONFIG_BFIN_JTAG_COMM extern struct console *bfin_jc_early_init(void); #endif static struct console *early_console; /* Default console */ #define DEFAULT_PORT 0 #define DEFAULT_CFLAG CS8|B57600 /* Default console for early crashes */ #define DEFAULT_EARLY_PORT "serial,uart0,57600" #ifdef CONFIG_SERIAL_CORE /* What should get here is "0,57600" */ static struct console * __init earlyserial_init(char *buf) { int baud, bit; char parity; unsigned int serial_port = DEFAULT_PORT; unsigned int cflag = DEFAULT_CFLAG; serial_port = simple_strtoul(buf, &buf, 10); buf++; cflag = 0; baud = simple_strtoul(buf, &buf, 10); switch (baud) { case 1200: cflag |= B1200; break; case 2400: cflag |= B2400; break; case 4800: cflag |= B4800; break; case 9600: cflag |= B9600; break; case 19200: cflag |= B19200; break; case 38400: cflag |= B38400; break; case 115200: cflag |= B115200; break; default: cflag |= B57600; } parity = buf[0]; buf++; switch (parity) { case 'e': cflag |= PARENB; break; case 'o': cflag |= PARODD; break; } bit = simple_strtoul(buf, &buf, 10); switch (bit) { case 5: cflag |= CS5; break; case 6: cflag |= CS6; break; case 7: cflag |= CS7; break; default: cflag |= CS8; } #ifdef CONFIG_SERIAL_BFIN return bfin_earlyserial_init(serial_port, cflag); #else return NULL; #endif } #endif int __init setup_early_printk(char *buf) { /* Crashing in here would be really bad, so check both the var and the pointer before we start using it */ if (!buf) return 
0; if (!*buf) return 0; if (early_console != NULL) return 0; #ifdef CONFIG_SERIAL_BFIN /* Check for Blackfin Serial */ if (!strncmp(buf, "serial,uart", 11)) { buf += 11; early_console = earlyserial_init(buf); } #endif #ifdef CONFIG_BFIN_JTAG_COMM /* Check for Blackfin JTAG */ if (!strncmp(buf, "jtag", 4)) { buf += 4; early_console = bfin_jc_early_init(); } #endif #ifdef CONFIG_FB /* TODO: add framebuffer console support */ #endif if (likely(early_console)) { early_console->flags |= CON_BOOT; register_console(early_console); printk(KERN_INFO "early printk enabled on %s%d\n", early_console->name, early_console->index); } return 0; } /* * Set up a temporary Event Vector Table, so if something bad happens before * the kernel is fully started, it doesn't vector off into somewhere we don't * know */ asmlinkage void __init init_early_exception_vectors(void) { u32 evt; SSYNC(); /* * This starts up the shadow buffer, incase anything crashes before * setup arch */ mark_shadow_error(); early_shadow_puts(linux_banner); early_shadow_stamp(); if (CPUID != bfin_cpuid()) { early_shadow_puts("Running on wrong machine type, expected"); early_shadow_reg(CPUID, 16); early_shadow_puts(", but running on"); early_shadow_reg(bfin_cpuid(), 16); early_shadow_puts("\n"); } /* cannot program in software: * evt0 - emulation (jtag) * evt1 - reset */ for (evt = EVT2; evt <= EVT15; evt += 4) bfin_write32(evt, early_trap); CSYNC(); /* Set all the return from interrupt, exception, NMI to a known place * so if we do a RETI, RETX or RETN by mistake - we go somewhere known * Note - don't change RETS - we are in a subroutine, or * RETE - since it might screw up if emulator is attached */ asm("\tRETI = %0; RETX = %0; RETN = %0;\n" : : "p"(early_trap)); } __attribute__((__noreturn__)) asmlinkage void __init early_trap_c(struct pt_regs *fp, void *retaddr) { /* This can happen before the uart is initialized, so initialize * the UART now (but only if we are running on the processor we think * we are 
compiled for - otherwise we write to MMRs that don't exist, * and cause other problems. Nothing comes out the UART, but it does * end up in the __buf_log. */ if (likely(early_console == NULL) && CPUID == bfin_cpuid()) setup_early_printk(DEFAULT_EARLY_PORT); if (!shadow_console_enabled()) { /* crap - we crashed before setup_arch() */ early_shadow_puts("panic before setup_arch\n"); early_shadow_puts("IPEND:"); early_shadow_reg(fp->ipend, 16); if (fp->seqstat & SEQSTAT_EXCAUSE) { early_shadow_puts("\nEXCAUSE:"); early_shadow_reg(fp->seqstat & SEQSTAT_EXCAUSE, 8); } if (fp->seqstat & SEQSTAT_HWERRCAUSE) { early_shadow_puts("\nHWERRCAUSE:"); early_shadow_reg( (fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14, 8); } early_shadow_puts("\nErr @"); if (fp->ipend & EVT_EVX) early_shadow_reg(fp->retx, 32); else early_shadow_reg(fp->pc, 32); #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON early_shadow_puts("\nTrace:"); if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) { while (bfin_read_TBUFSTAT() & TBUFCNT) { early_shadow_puts("\nT :"); early_shadow_reg(bfin_read_TBUF(), 32); early_shadow_puts("\n S :"); early_shadow_reg(bfin_read_TBUF(), 32); } } #endif early_shadow_puts("\nUse bfin-elf-addr2line to determine " "function names\n"); /* * We should panic(), but we can't - since panic calls printk, * and printk uses memcpy. * we want to reboot, but if the machine type is different, * can't due to machine specific reboot sequences */ if (CPUID == bfin_cpuid()) { early_shadow_puts("Trying to restart\n"); machine_restart(""); } early_shadow_puts("Halting, since it is not safe to restart\n"); while (1) asm volatile ("EMUEXCPT; IDLE;\n"); } else { printk(KERN_EMERG "Early panic\n"); show_regs(fp); dump_bfin_trace_buffer(); } panic("Died early"); } early_param("earlyprintk", setup_early_printk);
gpl-2.0
Planet15/linux
arch/arm/common/locomo.c
12018
24146
/* * linux/arch/arm/common/locomo.c * * Sharp LoCoMo support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This file contains all generic LoCoMo support. * * All initialization functions provided here are intended to be called * from machine specific code with proper arguments when required. * * Based on sa1111.c */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/io.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/mach/irq.h> #include <asm/hardware/locomo.h> /* LoCoMo Interrupts */ #define IRQ_LOCOMO_KEY (0) #define IRQ_LOCOMO_GPIO (1) #define IRQ_LOCOMO_LT (2) #define IRQ_LOCOMO_SPI (3) /* M62332 output channel selection */ #define M62332_EVR_CH 1 /* M62332 volume channel number */ /* 0 : CH.1 , 1 : CH. 
2 */ /* DAC send data */ #define M62332_SLAVE_ADDR 0x4e /* Slave address */ #define M62332_W_BIT 0x00 /* W bit (0 only) */ #define M62332_SUB_ADDR 0x00 /* Sub address */ #define M62332_A_BIT 0x00 /* A bit (0 only) */ /* DAC setup and hold times (expressed in us) */ #define DAC_BUS_FREE_TIME 5 /* 4.7 us */ #define DAC_START_SETUP_TIME 5 /* 4.7 us */ #define DAC_STOP_SETUP_TIME 4 /* 4.0 us */ #define DAC_START_HOLD_TIME 5 /* 4.7 us */ #define DAC_SCL_LOW_HOLD_TIME 5 /* 4.7 us */ #define DAC_SCL_HIGH_HOLD_TIME 4 /* 4.0 us */ #define DAC_DATA_SETUP_TIME 1 /* 250 ns */ #define DAC_DATA_HOLD_TIME 1 /* 300 ns */ #define DAC_LOW_SETUP_TIME 1 /* 300 ns */ #define DAC_HIGH_SETUP_TIME 1 /* 1000 ns */ /* the following is the overall data for the locomo chip */ struct locomo { struct device *dev; unsigned long phys; unsigned int irq; int irq_base; spinlock_t lock; void __iomem *base; #ifdef CONFIG_PM void *saved_state; #endif }; struct locomo_dev_info { unsigned long offset; unsigned long length; unsigned int devid; unsigned int irq[1]; const char * name; }; /* All the locomo devices. If offset is non-zero, the mapbase for the * locomo_dev will be set to the chip base plus offset. If offset is * zero, then the mapbase for the locomo_dev will be set to zero. 
An * offset of zero means the device only uses GPIOs or other helper * functions inside this file */ static struct locomo_dev_info locomo_devices[] = { { .devid = LOCOMO_DEVID_KEYBOARD, .irq = { IRQ_LOCOMO_KEY }, .name = "locomo-keyboard", .offset = LOCOMO_KEYBOARD, .length = 16, }, { .devid = LOCOMO_DEVID_FRONTLIGHT, .irq = {}, .name = "locomo-frontlight", .offset = LOCOMO_FRONTLIGHT, .length = 8, }, { .devid = LOCOMO_DEVID_BACKLIGHT, .irq = {}, .name = "locomo-backlight", .offset = LOCOMO_BACKLIGHT, .length = 8, }, { .devid = LOCOMO_DEVID_AUDIO, .irq = {}, .name = "locomo-audio", .offset = LOCOMO_AUDIO, .length = 4, }, { .devid = LOCOMO_DEVID_LED, .irq = {}, .name = "locomo-led", .offset = LOCOMO_LED, .length = 8, }, { .devid = LOCOMO_DEVID_UART, .irq = {}, .name = "locomo-uart", .offset = 0, .length = 0, }, { .devid = LOCOMO_DEVID_SPI, .irq = {}, .name = "locomo-spi", .offset = LOCOMO_SPI, .length = 0x30, }, }; static void locomo_handler(unsigned int irq, struct irq_desc *desc) { struct locomo *lchip = irq_get_chip_data(irq); int req, i; /* Acknowledge the parent IRQ */ desc->irq_data.chip->irq_ack(&desc->irq_data); /* check why this interrupt was generated */ req = locomo_readl(lchip->base + LOCOMO_ICR) & 0x0f00; if (req) { /* generate the next interrupt(s) */ irq = lchip->irq_base; for (i = 0; i <= 3; i++, irq++) { if (req & (0x0100 << i)) { generic_handle_irq(irq); } } } } static void locomo_ack_irq(struct irq_data *d) { } static void locomo_mask_irq(struct irq_data *d) { struct locomo *lchip = irq_data_get_irq_chip_data(d); unsigned int r; r = locomo_readl(lchip->base + LOCOMO_ICR); r &= ~(0x0010 << (d->irq - lchip->irq_base)); locomo_writel(r, lchip->base + LOCOMO_ICR); } static void locomo_unmask_irq(struct irq_data *d) { struct locomo *lchip = irq_data_get_irq_chip_data(d); unsigned int r; r = locomo_readl(lchip->base + LOCOMO_ICR); r |= (0x0010 << (d->irq - lchip->irq_base)); locomo_writel(r, lchip->base + LOCOMO_ICR); } static struct irq_chip 
locomo_chip = { .name = "LOCOMO", .irq_ack = locomo_ack_irq, .irq_mask = locomo_mask_irq, .irq_unmask = locomo_unmask_irq, }; static void locomo_setup_irq(struct locomo *lchip) { int irq = lchip->irq_base; /* * Install handler for IRQ_LOCOMO_HW. */ irq_set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING); irq_set_chip_data(lchip->irq, lchip); irq_set_chained_handler(lchip->irq, locomo_handler); /* Install handlers for IRQ_LOCOMO_* */ for ( ; irq <= lchip->irq_base + 3; irq++) { irq_set_chip_and_handler(irq, &locomo_chip, handle_level_irq); irq_set_chip_data(irq, lchip); set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); } } static void locomo_dev_release(struct device *_dev) { struct locomo_dev *dev = LOCOMO_DEV(_dev); kfree(dev); } static int locomo_init_one_child(struct locomo *lchip, struct locomo_dev_info *info) { struct locomo_dev *dev; int ret; dev = kzalloc(sizeof(struct locomo_dev), GFP_KERNEL); if (!dev) { ret = -ENOMEM; goto out; } /* * If the parent device has a DMA mask associated with it, * propagate it down to the children. */ if (lchip->dev->dma_mask) { dev->dma_mask = *lchip->dev->dma_mask; dev->dev.dma_mask = &dev->dma_mask; } dev_set_name(&dev->dev, "%s", info->name); dev->devid = info->devid; dev->dev.parent = lchip->dev; dev->dev.bus = &locomo_bus_type; dev->dev.release = locomo_dev_release; dev->dev.coherent_dma_mask = lchip->dev->coherent_dma_mask; if (info->offset) dev->mapbase = lchip->base + info->offset; else dev->mapbase = 0; dev->length = info->length; dev->irq[0] = (lchip->irq_base == NO_IRQ) ? 
NO_IRQ : lchip->irq_base + info->irq[0]; ret = device_register(&dev->dev); if (ret) { out: kfree(dev); } return ret; } #ifdef CONFIG_PM struct locomo_save_data { u16 LCM_GPO; u16 LCM_SPICT; u16 LCM_GPE; u16 LCM_ASD; u16 LCM_SPIMD; }; static int locomo_suspend(struct platform_device *dev, pm_message_t state) { struct locomo *lchip = platform_get_drvdata(dev); struct locomo_save_data *save; unsigned long flags; save = kmalloc(sizeof(struct locomo_save_data), GFP_KERNEL); if (!save) return -ENOMEM; lchip->saved_state = save; spin_lock_irqsave(&lchip->lock, flags); save->LCM_GPO = locomo_readl(lchip->base + LOCOMO_GPO); /* GPIO */ locomo_writel(0x00, lchip->base + LOCOMO_GPO); save->LCM_SPICT = locomo_readl(lchip->base + LOCOMO_SPI + LOCOMO_SPICT); /* SPI */ locomo_writel(0x40, lchip->base + LOCOMO_SPI + LOCOMO_SPICT); save->LCM_GPE = locomo_readl(lchip->base + LOCOMO_GPE); /* GPIO */ locomo_writel(0x00, lchip->base + LOCOMO_GPE); save->LCM_ASD = locomo_readl(lchip->base + LOCOMO_ASD); /* ADSTART */ locomo_writel(0x00, lchip->base + LOCOMO_ASD); save->LCM_SPIMD = locomo_readl(lchip->base + LOCOMO_SPI + LOCOMO_SPIMD); /* SPI */ locomo_writel(0x3C14, lchip->base + LOCOMO_SPI + LOCOMO_SPIMD); locomo_writel(0x00, lchip->base + LOCOMO_PAIF); locomo_writel(0x00, lchip->base + LOCOMO_DAC); locomo_writel(0x00, lchip->base + LOCOMO_BACKLIGHT + LOCOMO_TC); if ((locomo_readl(lchip->base + LOCOMO_LED + LOCOMO_LPT0) & 0x88) && (locomo_readl(lchip->base + LOCOMO_LED + LOCOMO_LPT1) & 0x88)) locomo_writel(0x00, lchip->base + LOCOMO_C32K); /* CLK32 off */ else /* 18MHz already enabled, so no wait */ locomo_writel(0xc1, lchip->base + LOCOMO_C32K); /* CLK32 on */ locomo_writel(0x00, lchip->base + LOCOMO_TADC); /* 18MHz clock off*/ locomo_writel(0x00, lchip->base + LOCOMO_AUDIO + LOCOMO_ACC); /* 22MHz/24MHz clock off */ locomo_writel(0x00, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); /* FL */ spin_unlock_irqrestore(&lchip->lock, flags); return 0; } static int locomo_resume(struct 
platform_device *dev) { struct locomo *lchip = platform_get_drvdata(dev); struct locomo_save_data *save; unsigned long r; unsigned long flags; save = lchip->saved_state; if (!save) return 0; spin_lock_irqsave(&lchip->lock, flags); locomo_writel(save->LCM_GPO, lchip->base + LOCOMO_GPO); locomo_writel(save->LCM_SPICT, lchip->base + LOCOMO_SPI + LOCOMO_SPICT); locomo_writel(save->LCM_GPE, lchip->base + LOCOMO_GPE); locomo_writel(save->LCM_ASD, lchip->base + LOCOMO_ASD); locomo_writel(save->LCM_SPIMD, lchip->base + LOCOMO_SPI + LOCOMO_SPIMD); locomo_writel(0x00, lchip->base + LOCOMO_C32K); locomo_writel(0x90, lchip->base + LOCOMO_TADC); locomo_writel(0, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KSC); r = locomo_readl(lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC); r &= 0xFEFF; locomo_writel(r, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC); locomo_writel(0x1, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KCMD); spin_unlock_irqrestore(&lchip->lock, flags); lchip->saved_state = NULL; kfree(save); return 0; } #endif /** * locomo_probe - probe for a single LoCoMo chip. * @phys_addr: physical address of device. * * Probe for a LoCoMo chip. This must be called * before any other locomo-specific code. * * Returns: * %-ENODEV device not found. * %-EBUSY physical address already marked in-use. * %0 successful. */ static int __locomo_probe(struct device *me, struct resource *mem, int irq) { struct locomo_platform_data *pdata = me->platform_data; struct locomo *lchip; unsigned long r; int i, ret = -ENODEV; lchip = kzalloc(sizeof(struct locomo), GFP_KERNEL); if (!lchip) return -ENOMEM; spin_lock_init(&lchip->lock); lchip->dev = me; dev_set_drvdata(lchip->dev, lchip); lchip->phys = mem->start; lchip->irq = irq; lchip->irq_base = (pdata) ? pdata->irq_base : NO_IRQ; /* * Map the whole region. This also maps the * registers for our children. 
*/ lchip->base = ioremap(mem->start, PAGE_SIZE); if (!lchip->base) { ret = -ENOMEM; goto out; } /* locomo initialize */ locomo_writel(0, lchip->base + LOCOMO_ICR); /* KEYBOARD */ locomo_writel(0, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC); /* GPIO */ locomo_writel(0, lchip->base + LOCOMO_GPO); locomo_writel((LOCOMO_GPIO(1) | LOCOMO_GPIO(2) | LOCOMO_GPIO(13) | LOCOMO_GPIO(14)) , lchip->base + LOCOMO_GPE); locomo_writel((LOCOMO_GPIO(1) | LOCOMO_GPIO(2) | LOCOMO_GPIO(13) | LOCOMO_GPIO(14)) , lchip->base + LOCOMO_GPD); locomo_writel(0, lchip->base + LOCOMO_GIE); /* Frontlight */ locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD); /* Longtime timer */ locomo_writel(0, lchip->base + LOCOMO_LTINT); /* SPI */ locomo_writel(0, lchip->base + LOCOMO_SPI + LOCOMO_SPIIE); locomo_writel(6 + 8 + 320 + 30 - 10, lchip->base + LOCOMO_ASD); r = locomo_readl(lchip->base + LOCOMO_ASD); r |= 0x8000; locomo_writel(r, lchip->base + LOCOMO_ASD); locomo_writel(6 + 8 + 320 + 30 - 10 - 128 + 4, lchip->base + LOCOMO_HSD); r = locomo_readl(lchip->base + LOCOMO_HSD); r |= 0x8000; locomo_writel(r, lchip->base + LOCOMO_HSD); locomo_writel(128 / 8, lchip->base + LOCOMO_HSC); /* XON */ locomo_writel(0x80, lchip->base + LOCOMO_TADC); udelay(1000); /* CLK9MEN */ r = locomo_readl(lchip->base + LOCOMO_TADC); r |= 0x10; locomo_writel(r, lchip->base + LOCOMO_TADC); udelay(100); /* init DAC */ r = locomo_readl(lchip->base + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB; locomo_writel(r, lchip->base + LOCOMO_DAC); r = locomo_readl(lchip->base + LOCOMO_VER); printk(KERN_INFO "LoCoMo Chip: %lu%lu\n", (r >> 8), (r & 0xff)); /* * The interrupt controller must be initialised before any * other device to ensure that the interrupts are available. 
*/ if (lchip->irq != NO_IRQ && lchip->irq_base != NO_IRQ) locomo_setup_irq(lchip); for (i = 0; i < ARRAY_SIZE(locomo_devices); i++) locomo_init_one_child(lchip, &locomo_devices[i]); return 0; out: kfree(lchip); return ret; } static int locomo_remove_child(struct device *dev, void *data) { device_unregister(dev); return 0; } static void __locomo_remove(struct locomo *lchip) { device_for_each_child(lchip->dev, NULL, locomo_remove_child); if (lchip->irq != NO_IRQ) { irq_set_chained_handler(lchip->irq, NULL); irq_set_handler_data(lchip->irq, NULL); } iounmap(lchip->base); kfree(lchip); } static int locomo_probe(struct platform_device *dev) { struct resource *mem; int irq; mem = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!mem) return -EINVAL; irq = platform_get_irq(dev, 0); if (irq < 0) return -ENXIO; return __locomo_probe(&dev->dev, mem, irq); } static int locomo_remove(struct platform_device *dev) { struct locomo *lchip = platform_get_drvdata(dev); if (lchip) { __locomo_remove(lchip); platform_set_drvdata(dev, NULL); } return 0; } /* * Not sure if this should be on the system bus or not yet. * We really want some way to register a system device at * the per-machine level, and then have this driver pick * up the registered devices. 
*/ static struct platform_driver locomo_device_driver = { .probe = locomo_probe, .remove = locomo_remove, #ifdef CONFIG_PM .suspend = locomo_suspend, .resume = locomo_resume, #endif .driver = { .name = "locomo", }, }; /* * Get the parent device driver (us) structure * from a child function device */ static inline struct locomo *locomo_chip_driver(struct locomo_dev *ldev) { return (struct locomo *)dev_get_drvdata(ldev->dev.parent); } void locomo_gpio_set_dir(struct device *dev, unsigned int bits, unsigned int dir) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int r; if (!lchip) return; spin_lock_irqsave(&lchip->lock, flags); r = locomo_readl(lchip->base + LOCOMO_GPD); if (dir) r |= bits; else r &= ~bits; locomo_writel(r, lchip->base + LOCOMO_GPD); r = locomo_readl(lchip->base + LOCOMO_GPE); if (dir) r |= bits; else r &= ~bits; locomo_writel(r, lchip->base + LOCOMO_GPE); spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_gpio_set_dir); int locomo_gpio_read_level(struct device *dev, unsigned int bits) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int ret; if (!lchip) return -ENODEV; spin_lock_irqsave(&lchip->lock, flags); ret = locomo_readl(lchip->base + LOCOMO_GPL); spin_unlock_irqrestore(&lchip->lock, flags); ret &= bits; return ret; } EXPORT_SYMBOL(locomo_gpio_read_level); int locomo_gpio_read_output(struct device *dev, unsigned int bits) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int ret; if (!lchip) return -ENODEV; spin_lock_irqsave(&lchip->lock, flags); ret = locomo_readl(lchip->base + LOCOMO_GPO); spin_unlock_irqrestore(&lchip->lock, flags); ret &= bits; return ret; } EXPORT_SYMBOL(locomo_gpio_read_output); void locomo_gpio_write(struct device *dev, unsigned int bits, unsigned int set) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int r; if (!lchip) return; spin_lock_irqsave(&lchip->lock, flags); r = 
locomo_readl(lchip->base + LOCOMO_GPO); if (set) r |= bits; else r &= ~bits; locomo_writel(r, lchip->base + LOCOMO_GPO); spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_gpio_write); static void locomo_m62332_sendbit(void *mapbase, int bit) { unsigned int r; r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_DATA_HOLD_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ if (bit & 1) { r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ } else { r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ } udelay(DAC_DATA_SETUP_TIME); /* 250 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.0 usec */ } void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int channel) { struct locomo *lchip = locomo_chip_driver(ldev); int i; unsigned char data; unsigned int r; void *mapbase = lchip->base; unsigned long flags; spin_lock_irqsave(&lchip->lock, flags); /* Start */ udelay(DAC_BUS_FREE_TIME); /* 5.0 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.0 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_START_HOLD_TIME); /* 5.0 usec */ udelay(DAC_DATA_HOLD_TIME); /* 300 nsec */ /* Send slave address and W 
bit (LSB is W bit) */ data = (M62332_SLAVE_ADDR << 1) | M62332_W_BIT; for (i = 1; i <= 8; i++) { locomo_m62332_sendbit(mapbase, data >> (8 - i)); } /* Check A bit */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */ if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */ printk(KERN_WARNING "locomo: m62332_senddata Error 1\n"); goto out; } /* Send Sub address (LSB is channel select) */ /* channel = 0 : ch1 select */ /* = 1 : ch2 select */ data = M62332_SUB_ADDR + channel; for (i = 1; i <= 8; i++) { locomo_m62332_sendbit(mapbase, data >> (8 - i)); } /* Check A bit */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */ if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */ printk(KERN_WARNING "locomo: m62332_senddata Error 2\n"); goto out; } /* Send DAC data */ for (i = 1; i <= 8; i++) { locomo_m62332_sendbit(mapbase, dac_data >> (8 - i)); } /* Check A bit */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + 
LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */ if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */ printk(KERN_WARNING "locomo: m62332_senddata Error 3\n"); } out: /* stop */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_m62332_senddata); /* * Frontlight control */ void locomo_frontlight_set(struct locomo_dev *dev, int duty, int vr, int bpwf) { unsigned long flags; struct locomo *lchip = locomo_chip_driver(dev); if (vr) locomo_gpio_write(dev->dev.parent, LOCOMO_GPIO_FL_VR, 1); else locomo_gpio_write(dev->dev.parent, LOCOMO_GPIO_FL_VR, 0); spin_lock_irqsave(&lchip->lock, flags); locomo_writel(bpwf, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); udelay(100); locomo_writel(duty, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD); 
locomo_writel(bpwf | LOCOMO_ALC_EN, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_frontlight_set); /* * LoCoMo "Register Access Bus." * * We model this as a regular bus type, and hang devices directly * off this. */ static int locomo_match(struct device *_dev, struct device_driver *_drv) { struct locomo_dev *dev = LOCOMO_DEV(_dev); struct locomo_driver *drv = LOCOMO_DRV(_drv); return dev->devid == drv->devid; } static int locomo_bus_suspend(struct device *dev, pm_message_t state) { struct locomo_dev *ldev = LOCOMO_DEV(dev); struct locomo_driver *drv = LOCOMO_DRV(dev->driver); int ret = 0; if (drv && drv->suspend) ret = drv->suspend(ldev, state); return ret; } static int locomo_bus_resume(struct device *dev) { struct locomo_dev *ldev = LOCOMO_DEV(dev); struct locomo_driver *drv = LOCOMO_DRV(dev->driver); int ret = 0; if (drv && drv->resume) ret = drv->resume(ldev); return ret; } static int locomo_bus_probe(struct device *dev) { struct locomo_dev *ldev = LOCOMO_DEV(dev); struct locomo_driver *drv = LOCOMO_DRV(dev->driver); int ret = -ENODEV; if (drv->probe) ret = drv->probe(ldev); return ret; } static int locomo_bus_remove(struct device *dev) { struct locomo_dev *ldev = LOCOMO_DEV(dev); struct locomo_driver *drv = LOCOMO_DRV(dev->driver); int ret = 0; if (drv->remove) ret = drv->remove(ldev); return ret; } struct bus_type locomo_bus_type = { .name = "locomo-bus", .match = locomo_match, .probe = locomo_bus_probe, .remove = locomo_bus_remove, .suspend = locomo_bus_suspend, .resume = locomo_bus_resume, }; int locomo_driver_register(struct locomo_driver *driver) { driver->drv.bus = &locomo_bus_type; return driver_register(&driver->drv); } EXPORT_SYMBOL(locomo_driver_register); void locomo_driver_unregister(struct locomo_driver *driver) { driver_unregister(&driver->drv); } EXPORT_SYMBOL(locomo_driver_unregister); static int __init locomo_init(void) { int ret = bus_register(&locomo_bus_type); if (ret == 
0) platform_driver_register(&locomo_device_driver); return ret; } static void __exit locomo_exit(void) { platform_driver_unregister(&locomo_device_driver); bus_unregister(&locomo_bus_type); } module_init(locomo_init); module_exit(locomo_exit); MODULE_DESCRIPTION("Sharp LoCoMo core driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("John Lenz <lenz@cs.wisc.edu>");
gpl-2.0
bluechiptechnology/linux-bctrx3
drivers/mtd/maps/sun_uflash.c
243
3648
/* sun_uflash.c - Driver for user-programmable flash on * Sun Microsystems SME boardsets. * * This driver does NOT provide access to the OBP-flash for * safety reasons-- use <linux>/drivers/sbus/char/flash.c instead. * * Copyright (c) 2001 Eric Brower (ebrower@usa.net) */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/slab.h> #include <asm/prom.h> #include <asm/uaccess.h> #include <asm/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #define UFLASH_OBPNAME "flashprom" #define DRIVER_NAME "sun_uflash" #define PFX DRIVER_NAME ": " #define UFLASH_WINDOW_SIZE 0x200000 #define UFLASH_BUSWIDTH 1 /* EBus is 8-bit */ MODULE_AUTHOR("Eric Brower <ebrower@usa.net>"); MODULE_DESCRIPTION("User-programmable flash device on Sun Microsystems boardsets"); MODULE_SUPPORTED_DEVICE(DRIVER_NAME); MODULE_LICENSE("GPL"); MODULE_VERSION("2.1"); struct uflash_dev { const char *name; /* device name */ struct map_info map; /* mtd map info */ struct mtd_info *mtd; /* mtd info */ }; struct map_info uflash_map_templ = { .name = "SUNW,???-????", .size = UFLASH_WINDOW_SIZE, .bankwidth = UFLASH_BUSWIDTH, }; int uflash_devinit(struct platform_device *op, struct device_node *dp) { struct uflash_dev *up; if (op->resource[1].flags) { /* Non-CFI userflash device-- once I find one we * can work on supporting it. 
*/ printk(KERN_ERR PFX "Unsupported device at %s, 0x%llx\n", dp->full_name, (unsigned long long)op->resource[0].start); return -ENODEV; } up = kzalloc(sizeof(struct uflash_dev), GFP_KERNEL); if (!up) { printk(KERN_ERR PFX "Cannot allocate struct uflash_dev\n"); return -ENOMEM; } /* copy defaults and tweak parameters */ memcpy(&up->map, &uflash_map_templ, sizeof(uflash_map_templ)); up->map.size = resource_size(&op->resource[0]); up->name = of_get_property(dp, "model", NULL); if (up->name && 0 < strlen(up->name)) up->map.name = up->name; up->map.phys = op->resource[0].start; up->map.virt = of_ioremap(&op->resource[0], 0, up->map.size, DRIVER_NAME); if (!up->map.virt) { printk(KERN_ERR PFX "Failed to map device.\n"); kfree(up); return -EINVAL; } simple_map_init(&up->map); /* MTD registration */ up->mtd = do_map_probe("cfi_probe", &up->map); if (!up->mtd) { of_iounmap(&op->resource[0], up->map.virt, up->map.size); kfree(up); return -ENXIO; } up->mtd->owner = THIS_MODULE; mtd_device_register(up->mtd, NULL, 0); dev_set_drvdata(&op->dev, up); return 0; } static int uflash_probe(struct platform_device *op) { struct device_node *dp = op->dev.of_node; /* Flashprom must have the "user" property in order to * be used by this driver. */ if (!of_find_property(dp, "user", NULL)) return -ENODEV; return uflash_devinit(op, dp); } static int uflash_remove(struct platform_device *op) { struct uflash_dev *up = dev_get_drvdata(&op->dev); if (up->mtd) { mtd_device_unregister(up->mtd); map_destroy(up->mtd); } if (up->map.virt) { of_iounmap(&op->resource[0], up->map.virt, up->map.size); up->map.virt = NULL; } kfree(up); return 0; } static const struct of_device_id uflash_match[] = { { .name = UFLASH_OBPNAME, }, {}, }; MODULE_DEVICE_TABLE(of, uflash_match); static struct platform_driver uflash_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, .of_match_table = uflash_match, }, .probe = uflash_probe, .remove = uflash_remove, }; module_platform_driver(uflash_driver);
gpl-2.0
Phoenix-Silver/ZTE-Blade-2.6.35.10
mm/slab.c
499
121166
/* * linux/mm/slab.c * Written by Mark Hemment, 1996/97. * (markhe@nextd.demon.co.uk) * * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli * * Major cleanup, different bufctl logic, per-cpu arrays * (c) 2000 Manfred Spraul * * Cleanup, make the head arrays unconditional, preparation for NUMA * (c) 2002 Manfred Spraul * * An implementation of the Slab Allocator as described in outline in; * UNIX Internals: The New Frontiers by Uresh Vahalia * Pub: Prentice Hall ISBN 0-13-101908-2 * or with a little more detail in; * The Slab Allocator: An Object-Caching Kernel Memory Allocator * Jeff Bonwick (Sun Microsystems). * Presented at: USENIX Summer 1994 Technical Conference * * The memory is organized in caches, one cache for each object type. * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct) * Each cache consists out of many slabs (they are small (usually one * page long) and always contiguous), and each slab contains multiple * initialized objects. * * This means, that your constructor is used only for newly allocated * slabs and you must pass objects with the same initializations to * kmem_cache_free. * * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM, * normal). If you need a special memory type, then must create a new * cache for that memory type. * * In order to reduce fragmentation, the slabs are sorted in 3 groups: * full slabs with 0 free objects * partial slabs * empty slabs with no allocated objects * * If partial slabs exist, then new allocations come from these slabs, * otherwise from empty slabs or new slabs are allocated. * * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache * during kmem_cache_destroy(). The caller must prevent concurrent allocs. * * Each cache has a short per-cpu head array, most allocs * and frees go into that array, and if that array overflows, then 1/2 * of the entries in the array are given back into the global cache. 
* The head array is strictly LIFO and should improve the cache hit rates. * On SMP, it additionally reduces the spinlock operations. * * The c_cpuarray may not be read with enabled local interrupts - * it's changed with a smp_call_function(). * * SMP synchronization: * constructors and destructors are called without any locking. * Several members in struct kmem_cache and struct slab never change, they * are accessed without any locking. * The per-cpu arrays are never accessed from the wrong cpu, no locking, * and local interrupts are disabled so slab code is preempt-safe. * The non-constant members are protected with a per-cache irq spinlock. * * Many thanks to Mark Hemment, who wrote another per-cpu slab patch * in 2000 - many ideas in the current implementation are derived from * his patch. * * Further notes from the original documentation: * * 11 April '97. Started multi-threading - markhe * The global cache-chain is protected by the mutex 'cache_chain_mutex'. * The sem is only needed when accessing/extending the cache-chain, which * can never happen inside an interrupt (kmem_cache_create(), * kmem_cache_shrink() and kmem_cache_reap()). * * At present, each engine can be growing a cache. This should be blocked. * * 15 March 2005. NUMA slab allocator. * Shai Fultheim <shai@scalex86.org>. * Shobhit Dayal <shobhit@calsoftinc.com> * Alok N Kataria <alokk@calsoftinc.com> * Christoph Lameter <christoph@lameter.com> * * Modified the slab allocator to be node aware on NUMA systems. * Each node has its own list of partial, free and full slabs. * All object allocations for a node occur from node specific slab lists. 
*/ #include <linux/slab.h> #include <linux/mm.h> #include <linux/poison.h> #include <linux/swap.h> #include <linux/cache.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/compiler.h> #include <linux/cpuset.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/notifier.h> #include <linux/kallsyms.h> #include <linux/cpu.h> #include <linux/sysctl.h> #include <linux/module.h> #include <linux/kmemtrace.h> #include <linux/rcupdate.h> #include <linux/string.h> #include <linux/uaccess.h> #include <linux/nodemask.h> #include <linux/kmemleak.h> #include <linux/mempolicy.h> #include <linux/mutex.h> #include <linux/fault-inject.h> #include <linux/rtmutex.h> #include <linux/reciprocal_div.h> #include <linux/debugobjects.h> #include <linux/kmemcheck.h> #include <linux/memory.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/page.h> /* * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON. * 0 for faster, smaller code (especially in the critical paths). * * STATS - 1 to collect stats for /proc/slabinfo. * 0 for faster, smaller code (especially in the critical paths). * * FORCED_DEBUG - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible) */ #ifdef CONFIG_DEBUG_SLAB #define DEBUG 1 #define STATS 1 #define FORCED_DEBUG 1 #else #define DEBUG 0 #define STATS 0 #define FORCED_DEBUG 0 #endif /* Shouldn't this be in a header file somewhere? */ #define BYTES_PER_WORD sizeof(void *) #define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long)) #ifndef ARCH_KMALLOC_FLAGS #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN #endif /* Legal flag mask for kmem_cache_create(). 
*/ #if DEBUG # define CREATE_MASK (SLAB_RED_ZONE | \ SLAB_POISON | SLAB_HWCACHE_ALIGN | \ SLAB_CACHE_DMA | \ SLAB_STORE_USER | \ SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \ SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK) #else # define CREATE_MASK (SLAB_HWCACHE_ALIGN | \ SLAB_CACHE_DMA | \ SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \ SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK) #endif /* * kmem_bufctl_t: * * Bufctl's are used for linking objs within a slab * linked offsets. * * This implementation relies on "struct page" for locating the cache & * slab an object belongs to. * This allows the bufctl structure to be small (one int), but limits * the number of objects a slab (not a cache) can contain when off-slab * bufctls are used. The limit is the size of the largest general cache * that does not use off-slab slabs. * For 32bit archs with 4 kB pages, is this 56. * This is not serious, as it is only for large objects, when it is unwise * to have too many per slab. * Note: This limit can be raised by introducing a general cache whose size * is less than 512 (PAGE_SIZE<<3), but greater than 256. */ typedef unsigned int kmem_bufctl_t; #define BUFCTL_END (((kmem_bufctl_t)(~0U))-0) #define BUFCTL_FREE (((kmem_bufctl_t)(~0U))-1) #define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2) #define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3) /* * struct slab * * Manages the objs in a slab. Placed either at the beginning of mem allocated * for a slab, or allocated from an general cache. * Slabs are chained into three list: fully used, partial, fully free slabs. */ struct slab { struct list_head list; unsigned long colouroff; void *s_mem; /* including colour offset */ unsigned int inuse; /* num of objs active in slab */ kmem_bufctl_t free; unsigned short nodeid; }; /* * struct slab_rcu * * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to * arrange for kmem_freepages to be called via RCU. 
This is useful if * we need to approach a kernel structure obliquely, from its address * obtained without the usual locking. We can lock the structure to * stabilize it and check it's still at the given address, only if we * can be sure that the memory has not been meanwhile reused for some * other kind of object (which our subsystem's lock might corrupt). * * rcu_read_lock before reading the address, then rcu_read_unlock after * taking the spinlock within the structure expected at that address. * * We assume struct slab_rcu can overlay struct slab when destroying. */ struct slab_rcu { struct rcu_head head; struct kmem_cache *cachep; void *addr; }; /* * struct array_cache * * Purpose: * - LIFO ordering, to hand out cache-warm objects from _alloc * - reduce the number of linked list operations * - reduce spinlock operations * * The limit is stored in the per-cpu structure to reduce the data cache * footprint. * */ struct array_cache { unsigned int avail; unsigned int limit; unsigned int batchcount; unsigned int touched; spinlock_t lock; void *entry[]; /* * Must have this definition in here for the proper * alignment of array_cache. Also simplifies accessing * the entries. */ }; /* * bootstrap: The caches do not work without cpuarrays anymore, but the * cpuarrays are allocated from the generic caches... */ #define BOOT_CPUCACHE_ENTRIES 1 struct arraycache_init { struct array_cache cache; void *entries[BOOT_CPUCACHE_ENTRIES]; }; /* * The slab lists for all objects. 
*/ struct kmem_list3 { struct list_head slabs_partial; /* partial list first, better asm code */ struct list_head slabs_full; struct list_head slabs_free; unsigned long free_objects; unsigned int free_limit; unsigned int colour_next; /* Per-node cache coloring */ spinlock_t list_lock; struct array_cache *shared; /* shared per node */ struct array_cache **alien; /* on other nodes */ unsigned long next_reap; /* updated without locking */ int free_touched; /* updated without locking */ }; /* * Need this for bootstrapping a per node allocator. */ #define NUM_INIT_LISTS (3 * MAX_NUMNODES) struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS]; #define CACHE_CACHE 0 #define SIZE_AC MAX_NUMNODES #define SIZE_L3 (2 * MAX_NUMNODES) static int drain_freelist(struct kmem_cache *cache, struct kmem_list3 *l3, int tofree); static void free_block(struct kmem_cache *cachep, void **objpp, int len, int node); static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp); static void cache_reap(struct work_struct *unused); /* * This function must be completely optimized away if a constant is passed to * it. Mostly the same as what is in linux/slab.h except it returns an index. 
*/ static __always_inline int index_of(const size_t size) { extern void __bad_size(void); if (__builtin_constant_p(size)) { int i = 0; #define CACHE(x) \ if (size <=x) \ return i; \ else \ i++; #include <linux/kmalloc_sizes.h> #undef CACHE __bad_size(); } else __bad_size(); return 0; } static int slab_early_init = 1; #define INDEX_AC index_of(sizeof(struct arraycache_init)) #define INDEX_L3 index_of(sizeof(struct kmem_list3)) static void kmem_list3_init(struct kmem_list3 *parent) { INIT_LIST_HEAD(&parent->slabs_full); INIT_LIST_HEAD(&parent->slabs_partial); INIT_LIST_HEAD(&parent->slabs_free); parent->shared = NULL; parent->alien = NULL; parent->colour_next = 0; spin_lock_init(&parent->list_lock); parent->free_objects = 0; parent->free_touched = 0; } #define MAKE_LIST(cachep, listp, slab, nodeid) \ do { \ INIT_LIST_HEAD(listp); \ list_splice(&(cachep->nodelists[nodeid]->slab), listp); \ } while (0) #define MAKE_ALL_LISTS(cachep, ptr, nodeid) \ do { \ MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \ MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \ MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \ } while (0) #define CFLGS_OFF_SLAB (0x80000000UL) #define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB) #define BATCHREFILL_LIMIT 16 /* * Optimization question: fewer reaps means less probability for unnessary * cpucache drain/refill cycles. * * OTOH the cpuarrays can contain lots of objects, * which could lock up otherwise freeable slabs. 
*/ #define REAPTIMEOUT_CPUC (2*HZ) #define REAPTIMEOUT_LIST3 (4*HZ) #if STATS #define STATS_INC_ACTIVE(x) ((x)->num_active++) #define STATS_DEC_ACTIVE(x) ((x)->num_active--) #define STATS_INC_ALLOCED(x) ((x)->num_allocations++) #define STATS_INC_GROWN(x) ((x)->grown++) #define STATS_ADD_REAPED(x,y) ((x)->reaped += (y)) #define STATS_SET_HIGH(x) \ do { \ if ((x)->num_active > (x)->high_mark) \ (x)->high_mark = (x)->num_active; \ } while (0) #define STATS_INC_ERR(x) ((x)->errors++) #define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++) #define STATS_INC_NODEFREES(x) ((x)->node_frees++) #define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++) #define STATS_SET_FREEABLE(x, i) \ do { \ if ((x)->max_freeable < i) \ (x)->max_freeable = i; \ } while (0) #define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit) #define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss) #define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit) #define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss) #else #define STATS_INC_ACTIVE(x) do { } while (0) #define STATS_DEC_ACTIVE(x) do { } while (0) #define STATS_INC_ALLOCED(x) do { } while (0) #define STATS_INC_GROWN(x) do { } while (0) #define STATS_ADD_REAPED(x,y) do { } while (0) #define STATS_SET_HIGH(x) do { } while (0) #define STATS_INC_ERR(x) do { } while (0) #define STATS_INC_NODEALLOCS(x) do { } while (0) #define STATS_INC_NODEFREES(x) do { } while (0) #define STATS_INC_ACOVERFLOW(x) do { } while (0) #define STATS_SET_FREEABLE(x, i) do { } while (0) #define STATS_INC_ALLOCHIT(x) do { } while (0) #define STATS_INC_ALLOCMISS(x) do { } while (0) #define STATS_INC_FREEHIT(x) do { } while (0) #define STATS_INC_FREEMISS(x) do { } while (0) #endif #if DEBUG /* * memory layout of objects: * 0 : objp * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that * the end of an object is aligned with the end of the real * allocation. Catches writes behind the end of the allocation. * cachep->obj_offset - BYTES_PER_WORD .. 
cachep->obj_offset - 1: * redzone word. * cachep->obj_offset: The real object. * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long] * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address * [BYTES_PER_WORD long] */ static int obj_offset(struct kmem_cache *cachep) { return cachep->obj_offset; } static int obj_size(struct kmem_cache *cachep) { return cachep->obj_size; } static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp) { BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); return (unsigned long long*) (objp + obj_offset(cachep) - sizeof(unsigned long long)); } static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp) { BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); if (cachep->flags & SLAB_STORE_USER) return (unsigned long long *)(objp + cachep->buffer_size - sizeof(unsigned long long) - REDZONE_ALIGN); return (unsigned long long *) (objp + cachep->buffer_size - sizeof(unsigned long long)); } static void **dbg_userword(struct kmem_cache *cachep, void *objp) { BUG_ON(!(cachep->flags & SLAB_STORE_USER)); return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD); } #else #define obj_offset(x) 0 #define obj_size(cachep) (cachep->buffer_size) #define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) #define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) #define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;}) #endif #ifdef CONFIG_TRACING size_t slab_buffer_size(struct kmem_cache *cachep) { return cachep->buffer_size; } EXPORT_SYMBOL(slab_buffer_size); #endif /* * Do not go above this order unless 0 objects fit into the slab. */ #define BREAK_GFP_ORDER_HI 1 #define BREAK_GFP_ORDER_LO 0 static int slab_break_gfp_order = BREAK_GFP_ORDER_LO; /* * Functions for storing/retrieving the cachep and or slab from the page * allocator. These are used to find the slab an obj belongs to. With kfree(), * these are used to find the cache which an obj belongs to. 
*/ static inline void page_set_cache(struct page *page, struct kmem_cache *cache) { page->lru.next = (struct list_head *)cache; } static inline struct kmem_cache *page_get_cache(struct page *page) { page = compound_head(page); BUG_ON(!PageSlab(page)); return (struct kmem_cache *)page->lru.next; } static inline void page_set_slab(struct page *page, struct slab *slab) { page->lru.prev = (struct list_head *)slab; } static inline struct slab *page_get_slab(struct page *page) { BUG_ON(!PageSlab(page)); return (struct slab *)page->lru.prev; } static inline struct kmem_cache *virt_to_cache(const void *obj) { struct page *page = virt_to_head_page(obj); return page_get_cache(page); } static inline struct slab *virt_to_slab(const void *obj) { struct page *page = virt_to_head_page(obj); return page_get_slab(page); } static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab, unsigned int idx) { return slab->s_mem + cache->buffer_size * idx; } /* * We want to avoid an expensive divide : (offset / cache->buffer_size) * Using the fact that buffer_size is a constant for a particular cache, * we can replace (offset / cache->buffer_size) by * reciprocal_divide(offset, cache->reciprocal_buffer_size) */ static inline unsigned int obj_to_index(const struct kmem_cache *cache, const struct slab *slab, void *obj) { u32 offset = (obj - slab->s_mem); return reciprocal_divide(offset, cache->reciprocal_buffer_size); } /* * These are the default caches for kmalloc. Custom caches can have other sizes. */ struct cache_sizes malloc_sizes[] = { #define CACHE(x) { .cs_size = (x) }, #include <linux/kmalloc_sizes.h> CACHE(ULONG_MAX) #undef CACHE }; EXPORT_SYMBOL(malloc_sizes); /* Must match cache_sizes above. Out of line to keep cache footprint low. 
*/ struct cache_names { char *name; char *name_dma; }; static struct cache_names __initdata cache_names[] = { #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" }, #include <linux/kmalloc_sizes.h> {NULL,} #undef CACHE }; static struct arraycache_init initarray_cache __initdata = { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; static struct arraycache_init initarray_generic = { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; /* internal cache of cache description objs */ static struct kmem_cache cache_cache = { .batchcount = 1, .limit = BOOT_CPUCACHE_ENTRIES, .shared = 1, .buffer_size = sizeof(struct kmem_cache), .name = "kmem_cache", }; #define BAD_ALIEN_MAGIC 0x01020304ul /* * chicken and egg problem: delay the per-cpu array allocation * until the general caches are up. */ static enum { NONE, PARTIAL_AC, PARTIAL_L3, EARLY, FULL } g_cpucache_up; /* * used by boot code to determine if it can use slab based allocator */ int slab_is_available(void) { return g_cpucache_up >= EARLY; } #ifdef CONFIG_LOCKDEP /* * Slab sometimes uses the kmalloc slabs to store the slab headers * for other slabs "off slab". * The locking for this is tricky in that it nests within the locks * of all other slabs in a few places; to deal with this special * locking we put on-slab caches into a separate lock-class. * * We set lock class for alien array caches which are up during init. 
 * The lock annotation will be lost if all cpus of a node goes down and
 * then comes back up during hotplug
 */
static struct lock_class_key on_slab_l3_key;
static struct lock_class_key on_slab_alc_key;

/*
 * Assign the separate lockdep classes to the list and alien-cache locks of
 * every on-slab kmalloc cache on node @q.  Only meaningful once the slab
 * bootstrap is complete (g_cpucache_up == FULL).
 */
static void init_node_lock_keys(int q)
{
	struct cache_sizes *s = malloc_sizes;

	if (g_cpucache_up != FULL)
		return;

	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
		struct array_cache **alc;
		struct kmem_list3 *l3;
		int r;

		l3 = s->cs_cachep->nodelists[q];
		if (!l3 || OFF_SLAB(s->cs_cachep))
			continue;
		lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
		alc = l3->alien;
		/*
		 * FIXME: This check for BAD_ALIEN_MAGIC
		 * should go away when common slab code is taught to
		 * work even without alien caches.
		 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
		 * for alloc_alien_cache,
		 */
		if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
			continue;
		for_each_node(r) {
			if (alc[r])
				lockdep_set_class(&alc[r]->lock,
						  &on_slab_alc_key);
		}
	}
}

/* Apply the lockdep annotations above on every node. */
static inline void init_lock_keys(void)
{
	int node;

	for_each_node(node)
		init_node_lock_keys(node);
}
#else
static void init_node_lock_keys(int q)
{
}

static inline void init_lock_keys(void)
{
}
#endif

/*
 * Guard access to the cache-chain.
 */
static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;

/* Per-cpu delayed work that drives periodic cache reaping (cache_reap). */
static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

/* Return the calling cpu's head array for @cachep.  Caller must not migrate. */
static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return cachep->array[smp_processor_id()];
}

/*
 * Map an allocation size (and GFP_DMA flag) to the matching general
 * (kmalloc) cache.  Returns ZERO_SIZE_PTR for zero-byte requests.
 */
static inline struct kmem_cache *__find_general_cachep(size_t size,
							gfp_t gfpflags)
{
	struct cache_sizes *csizep = malloc_sizes;

#if DEBUG
	/* This happens if someone tries to call
	 * kmem_cache_create(), or __kmalloc(), before
	 * the generic caches are initialized.
	 */
	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
#endif
	if (!size)
		return ZERO_SIZE_PTR;

	while (size > csizep->cs_size)
		csizep++;

	/*
	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
	 * has cs_{dma,}cachep==NULL. Thus no special case
	 * for large kmalloc calls required.
	 */
#ifdef CONFIG_ZONE_DMA
	if (unlikely(gfpflags & GFP_DMA))
		return csizep->cs_dmacachep;
#endif
	return csizep->cs_cachep;
}

static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
{
	return __find_general_cachep(size, gfpflags);
}

/* Size of the on-slab management area: struct slab + one bufctl per object. */
static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{
	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
			   size_t align, int flags, size_t *left_over,
			   unsigned int *num)
{
	int nr_objs;
	size_t mgmt_size;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - The struct slab
	 * - One kmem_bufctl_t for each object
	 * - Padding to respect alignment of @align
	 * - @buffer_size bytes for each object
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & CFLGS_OFF_SLAB) {
		mgmt_size = 0;
		nr_objs = slab_size / buffer_size;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;
	} else {
		/*
		 * Ignore padding for the initial guess. The padding
		 * is at most @align-1 bytes, and @buffer_size is at
		 * least @align. In the worst case, this result will
		 * be one greater than the number of objects that fit
		 * into the memory allocation when taking the padding
		 * into account.
		 */
		nr_objs = (slab_size - sizeof(struct slab)) /
			  (buffer_size + sizeof(kmem_bufctl_t));

		/*
		 * This calculated number will be either the right
		 * amount, or one greater than what we want.
		 */
		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
		       > slab_size)
			nr_objs--;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;

		mgmt_size = slab_mgmt_size(nr_objs, align);
	}
	*num = nr_objs;
	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}

#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

/* Report a cache consistency problem with a stack trace. */
static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
}

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line
 */
static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, slab_reap_node);

/* Pick the first node after @cpu's node as this cpu's starting reap target. */
static void init_reap_node(int cpu)
{
	int node;

	node = next_node(cpu_to_mem(cpu), node_online_map);
	if (node == MAX_NUMNODES)
		node = first_node(node_online_map);

	per_cpu(slab_reap_node, cpu) = node;
}

/* Advance this cpu's reap target to the next online node (wrapping). */
static void next_reap_node(void)
{
	int node = __get_cpu_var(slab_reap_node);

	node = next_node(node, node_online_map);
	if (unlikely(node >= MAX_NUMNODES))
		node = first_node(node_online_map);
	__get_cpu_var(slab_reap_node) = node;
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
* Add the CPU number into the expiration time to minimize the possibility of * the CPUs getting into lockstep and contending for the global cache chain * lock. */ static void __cpuinit start_cpu_timer(int cpu) { struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu); /* * When this gets called from do_initcalls via cpucache_init(), * init_workqueues() has already run, so keventd will be setup * at that time. */ if (keventd_up() && reap_work->work.func == NULL) { init_reap_node(cpu); INIT_DELAYED_WORK(reap_work, cache_reap); schedule_delayed_work_on(cpu, reap_work, __round_jiffies_relative(HZ, cpu)); } } static struct array_cache *alloc_arraycache(int node, int entries, int batchcount, gfp_t gfp) { int memsize = sizeof(void *) * entries + sizeof(struct array_cache); struct array_cache *nc = NULL; nc = kmalloc_node(memsize, gfp, node); /* * The array_cache structures contain pointers to free object. * However, when such objects are allocated or transfered to another * cache the pointers are not cleared and they could be counted as * valid references during a kmemleak scan. Therefore, kmemleak must * not scan such objects. */ kmemleak_no_scan(nc); if (nc) { nc->avail = 0; nc->limit = entries; nc->batchcount = batchcount; nc->touched = 0; spin_lock_init(&nc->lock); } return nc; } /* * Transfer objects in one arraycache to another. * Locking must be handled by the caller. * * Return the number of entries transferred. 
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min(min(from->avail, max), to->limit - to->avail);

	if (!nr)
		return 0;

	/* Move the nr newest entries from @from onto the end of @to. */
	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
			sizeof(void *) *nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}

#ifndef CONFIG_NUMA

/* !NUMA: alien caches do not exist; all of these collapse to no-ops. */
#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, l3) do { } while (0)

static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	/* Non-NULL sentinel so callers can't mistake this for OOM. */
	return (struct array_cache **)BAD_ALIEN_MAGIC;
}

static inline void free_alien_cache(struct array_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		 gfp_t flags, int nodeid)
{
	return NULL;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

/*
 * Allocate the per-node array of alien caches for @node: one entry per
 * possible node, with caches only for the other online nodes.  Returns NULL
 * on allocation failure (all partially built entries are freed).
 */
static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	struct array_cache **ac_ptr;
	int memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
	ac_ptr = kzalloc_node(memsize, gfp, node);
	if (ac_ptr) {
		for_each_node(i) {
			if (i == node || !node_online(i))
				continue;
			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
			if (!ac_ptr[i]) {
				/* Unwind: earlier slots are either valid or NULL. */
				for (i--; i >= 0; i--)
					kfree(ac_ptr[i]);
				kfree(ac_ptr);
				return NULL;
			}
		}
	}
	return ac_ptr;
}

/* Free an alien-cache array previously built by alloc_alien_cache(). */
static void free_alien_cache(struct array_cache **ac_ptr)
{
	int i;

	if (!ac_ptr)
		return;
	for_each_node(i)
		kfree(ac_ptr[i]);
	kfree(ac_ptr);
}

/*
 * Flush all objects held in alien cache @ac back to their home @node.
 * Caller must hold ac->lock.
 */
static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node)
{
	struct kmem_list3 *rl3 = cachep->nodelists[node];

	if (ac->avail) {
		spin_lock(&rl3->list_lock);
		/*
		 * Stuff objects into the remote nodes shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (rl3->shared)
			transfer_objects(rl3->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node);
		ac->avail = 0;
		spin_unlock(&rl3->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
{
	int node = __get_cpu_var(slab_reap_node);

	if (l3->alien) {
		struct array_cache *ac = l3->alien[node];

		/* trylock: reaping is best-effort, never block here */
		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
			__drain_alien_cache(cachep, ac, node);
			spin_unlock_irq(&ac->lock);
		}
	}
}

/* Unconditionally drain every alien cache in @alien back to its home node. */
static void drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache **alien)
{
	int i = 0;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		ac = alien[i];
		if (ac) {
			spin_lock_irqsave(&ac->lock, flags);
			__drain_alien_cache(cachep, ac, i);
			spin_unlock_irqrestore(&ac->lock, flags);
		}
	}
}

/*
 * Free @objp to its home node if it was allocated on a different node than
 * the one we are running on.  Returns 1 if the object was handled here
 * (staged in an alien cache or freed remotely), 0 if it belongs to the
 * local node and the caller should free it normally.
 */
static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	struct slab *slabp = virt_to_slab(objp);
	int nodeid = slabp->nodeid;
	struct kmem_list3 *l3;
	struct array_cache *alien = NULL;
	int node;

	node = numa_mem_id();

	/*
	 * Make sure we are not freeing an object from another node to the
	 * array cache on this cpu.
	 */
	if (likely(slabp->nodeid == node))
		return 0;

	l3 = cachep->nodelists[node];
	STATS_INC_NODEFREES(cachep);
	if (l3->alien && l3->alien[nodeid]) {
		alien = l3->alien[nodeid];
		spin_lock(&alien->lock);
		if (unlikely(alien->avail == alien->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, alien, nodeid);
		}
		alien->entry[alien->avail++] = objp;
		spin_unlock(&alien->lock);
	} else {
		/* No alien cache for that node: free directly to its lists. */
		spin_lock(&(cachep->nodelists[nodeid])->list_lock);
		free_block(cachep, &objp, 1, nodeid);
		spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
	}
	return 1;
}
#endif

/*
 * Allocates and initializes nodelists for a node on each slab cache, used for
 * either memory or cpu hotplug.
 * If memory is being hot-added, the kmem_list3
 * will be allocated off-node since memory is not yet online for the new node.
 * When hotplugging memory or a cpu, existing nodelists are not replaced if
 * already in use.
 *
 * Must hold cache_chain_mutex.
 */
static int init_cache_nodelists_node(int node)
{
	struct kmem_cache *cachep;
	struct kmem_list3 *l3;
	const int memsize = sizeof(struct kmem_list3);

	list_for_each_entry(cachep, &cache_chain, next) {
		/*
		 * Set up the size64 kmemlist for cpu before we can
		 * begin anything. Make sure some other cpu on this
		 * node has not already allocated this
		 */
		if (!cachep->nodelists[node]) {
			l3 = kmalloc_node(memsize, GFP_KERNEL, node);
			if (!l3)
				return -ENOMEM;
			kmem_list3_init(l3);
			l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
			    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;

			/*
			 * The l3s don't come and go as CPUs come and
			 * go.  cache_chain_mutex is sufficient
			 * protection here.
			 */
			cachep->nodelists[node] = l3;
		}

		/* Grow the free limit to account for the (possibly) new cpu. */
		spin_lock_irq(&cachep->nodelists[node]->list_lock);
		cachep->nodelists[node]->free_limit =
			(1 + nr_cpus_node(node)) *
			cachep->batchcount + cachep->num;
		spin_unlock_irq(&cachep->nodelists[node]->list_lock);
	}
	return 0;
}

/*
 * Tear down the per-cpu state of every cache for a cpu that is going (or
 * failed to come) online: free its head arrays and, if it was the last cpu
 * of its node, the node's shared and alien caches too.
 */
static void __cpuinit cpuup_canceled(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_list3 *l3 = NULL;
	int node = cpu_to_mem(cpu);
	const struct cpumask *mask = cpumask_of_node(node);

	list_for_each_entry(cachep, &cache_chain, next) {
		struct array_cache *nc;
		struct array_cache *shared;
		struct array_cache **alien;

		/* cpu is dead; no one can alloc from it. */
		nc = cachep->array[cpu];
		cachep->array[cpu] = NULL;
		l3 = cachep->nodelists[node];

		if (!l3)
			goto free_array_cache;

		spin_lock_irq(&l3->list_lock);

		/* Free limit for this kmem_list3 */
		l3->free_limit -= cachep->batchcount;
		if (nc)
			free_block(cachep, nc->entry, nc->avail, node);

		/* Other cpus of this node still online: keep shared/alien. */
		if (!cpumask_empty(mask)) {
			spin_unlock_irq(&l3->list_lock);
			goto free_array_cache;
		}

		shared = l3->shared;
		if (shared) {
			free_block(cachep, shared->entry,
				   shared->avail, node);
			l3->shared = NULL;
		}

		alien = l3->alien;
		l3->alien = NULL;

		spin_unlock_irq(&l3->list_lock);

		/* kfree/drain outside the list lock. */
		kfree(shared);
		if (alien) {
			drain_alien_cache(cachep, alien);
			free_alien_cache(alien);
		}
free_array_cache:
		kfree(nc);
	}
	/*
	 * In the previous loop, all the objects were freed to
	 * the respective cache's slabs,  now we can go ahead and
	 * shrink each nodelist to its limit.
	 */
	list_for_each_entry(cachep, &cache_chain, next) {
		l3 = cachep->nodelists[node];
		if (!l3)
			continue;
		drain_freelist(cachep, l3, l3->free_objects);
	}
}

/*
 * Build the per-cpu state of every cache for a cpu that is coming online:
 * its node's kmem_list3s, then a head array plus (optional) shared and alien
 * caches per cache.  On failure everything is rolled back via
 * cpuup_canceled() and -ENOMEM is returned.
 */
static int __cpuinit cpuup_prepare(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_list3 *l3 = NULL;
	int node = cpu_to_mem(cpu);
	int err;

	/*
	 * We need to do this right in the beginning since
	 * alloc_arraycache's are going to use this list.
	 * kmalloc_node allows us to add the slab to the right
	 * kmem_list3 and not this cpu's kmem_list3
	 */
	err = init_cache_nodelists_node(node);
	if (err < 0)
		goto bad;

	/*
	 * Now we can go ahead with allocating the shared arrays and
	 * array caches
	 */
	list_for_each_entry(cachep, &cache_chain, next) {
		struct array_cache *nc;
		struct array_cache *shared = NULL;
		struct array_cache **alien = NULL;

		nc = alloc_arraycache(node, cachep->limit,
					cachep->batchcount, GFP_KERNEL);
		if (!nc)
			goto bad;
		if (cachep->shared) {
			shared = alloc_arraycache(node,
				cachep->shared * cachep->batchcount,
				0xbaadf00d, GFP_KERNEL);
			if (!shared) {
				kfree(nc);
				goto bad;
			}
		}
		if (use_alien_caches) {
			alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
			if (!alien) {
				kfree(shared);
				kfree(nc);
				goto bad;
			}
		}
		cachep->array[cpu] = nc;
		l3 = cachep->nodelists[node];
		BUG_ON(!l3);

		spin_lock_irq(&l3->list_lock);
		if (!l3->shared) {
			/*
			 * We are serialised from CPU_DEAD or
			 * CPU_UP_CANCELLED by the cpucontrol lock
			 */
			l3->shared = shared;
			shared = NULL;
		}
#ifdef CONFIG_NUMA
		if (!l3->alien) {
			l3->alien = alien;
			alien = NULL;
		}
#endif
		spin_unlock_irq(&l3->list_lock);
		/* Free whichever of shared/alien was not installed above. */
		kfree(shared);
		free_alien_cache(alien);
	}
	init_node_lock_keys(node);

	return 0;
bad:
	cpuup_canceled(cpu);
	return -ENOMEM;
}

/* CPU hotplug notifier: dispatch to cpuup_prepare()/cpuup_canceled() etc. */
static int __cpuinit cpuup_callback(struct notifier_block *nfb,
				    unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	int err = 0;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		mutex_lock(&cache_chain_mutex);
		err = cpuup_prepare(cpu);
		mutex_unlock(&cache_chain_mutex);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		start_cpu_timer(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/*
		 * Shutdown cache reaper. Note that the cache_chain_mutex is
		 * held so that if cache_reap() is invoked it cannot do
		 * anything expensive but will only modify reap_work
		 * and reschedule the timer.
		 */
		cancel_rearming_delayed_work(&per_cpu(slab_reap_work, cpu));
		/* Now the cache_reaper is guaranteed to be not running. */
		per_cpu(slab_reap_work, cpu).work.func = NULL;
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/*
		 * Even if all the cpus of a node are down, we don't free the
		 * kmem_list3 of any cache. This to avoid a race between
		 * cpu_down, and a kmalloc allocation from another cpu for
		 * memory from the node of the cpu going down.  The list3
		 * structure is usually allocated from kmem_cache_create() and
		 * gets destroyed at kmem_cache_destroy().
		 */
		/* fall through */
#endif
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		mutex_lock(&cache_chain_mutex);
		cpuup_canceled(cpu);
		mutex_unlock(&cache_chain_mutex);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block __cpuinitdata cpucache_notifier = {
	&cpuup_callback, NULL, 0
};

#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
/*
 * Drains freelist for a node on each slab cache, used for memory hot-remove.
 * Returns -EBUSY if all objects cannot be drained so that the node is not
 * removed.
 *
 * Must hold cache_chain_mutex.
*/ static int __meminit drain_cache_nodelists_node(int node) { struct kmem_cache *cachep; int ret = 0; list_for_each_entry(cachep, &cache_chain, next) { struct kmem_list3 *l3; l3 = cachep->nodelists[node]; if (!l3) continue; drain_freelist(cachep, l3, l3->free_objects); if (!list_empty(&l3->slabs_full) || !list_empty(&l3->slabs_partial)) { ret = -EBUSY; break; } } return ret; } static int __meminit slab_memory_callback(struct notifier_block *self, unsigned long action, void *arg) { struct memory_notify *mnb = arg; int ret = 0; int nid; nid = mnb->status_change_nid; if (nid < 0) goto out; switch (action) { case MEM_GOING_ONLINE: mutex_lock(&cache_chain_mutex); ret = init_cache_nodelists_node(nid); mutex_unlock(&cache_chain_mutex); break; case MEM_GOING_OFFLINE: mutex_lock(&cache_chain_mutex); ret = drain_cache_nodelists_node(nid); mutex_unlock(&cache_chain_mutex); break; case MEM_ONLINE: case MEM_OFFLINE: case MEM_CANCEL_ONLINE: case MEM_CANCEL_OFFLINE: break; } out: return ret ? notifier_from_errno(ret) : NOTIFY_OK; } #endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */ /* * swap the static kmem_list3 with kmalloced memory */ static void __init init_list(struct kmem_cache *cachep, struct kmem_list3 *list, int nodeid) { struct kmem_list3 *ptr; ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_NOWAIT, nodeid); BUG_ON(!ptr); memcpy(ptr, list, sizeof(struct kmem_list3)); /* * Do not assume that spinlocks can be initialized via memcpy: */ spin_lock_init(&ptr->list_lock); MAKE_ALL_LISTS(cachep, ptr, nodeid); cachep->nodelists[nodeid] = ptr; } /* * For setting up all the kmem_list3s for cache whose buffer_size is same as * size of kmem_list3. */ static void __init set_up_list3s(struct kmem_cache *cachep, int index) { int node; for_each_online_node(node) { cachep->nodelists[node] = &initkmem_list3[index + node]; cachep->nodelists[node]->next_reap = jiffies + REAPTIMEOUT_LIST3 + ((unsigned long)cachep) % REAPTIMEOUT_LIST3; } } /* * Initialisation. 
 * Called after the page allocator have been initialised and
 * before smp_init().
 */
void __init kmem_cache_init(void)
{
	size_t left_over;
	struct cache_sizes *sizes;
	struct cache_names *names;
	int i;
	int order;
	int node;

	/* A single node never benefits from alien-cache staging. */
	if (num_possible_nodes() == 1)
		use_alien_caches = 0;

	for (i = 0; i < NUM_INIT_LISTS; i++) {
		kmem_list3_init(&initkmem_list3[i]);
		if (i < MAX_NUMNODES)
			cache_cache.nodelists[i] = NULL;
	}
	set_up_list3s(&cache_cache, CACHE_CACHE);

	/*
	 * Fragmentation resistance on low memory - only use bigger
	 * page orders on machines with more than 32MB of memory.
	 */
	if (totalram_pages > (32 << 20) >> PAGE_SHIFT)
		slab_break_gfp_order = BREAK_GFP_ORDER_HI;

	/* Bootstrap is tricky, because several objects are allocated
	 * from caches that do not exist yet:
	 * 1) initialize the cache_cache cache: it contains the struct
	 *    kmem_cache structures of all caches, except cache_cache itself:
	 *    cache_cache is statically allocated.
	 *    Initially an __init data area is used for the head array and the
	 *    kmem_list3 structures, it's replaced with a kmalloc allocated
	 *    array at the end of the bootstrap.
	 * 2) Create the first kmalloc cache.
	 *    The struct kmem_cache for the new cache is allocated normally.
	 *    An __init data area is used for the head array.
	 * 3) Create the remaining kmalloc caches, with minimally sized
	 *    head arrays.
	 * 4) Replace the __init data head arrays for cache_cache and the first
	 *    kmalloc cache with kmalloc allocated arrays.
	 * 5) Replace the __init data for kmem_list3 for cache_cache and
	 *    the other cache's with kmalloc allocated memory.
	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
	 */

	node = numa_mem_id();

	/* 1) create the cache_cache */
	INIT_LIST_HEAD(&cache_chain);
	list_add(&cache_cache.next, &cache_chain);
	cache_cache.colour_off = cache_line_size();
	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];

	/*
	 * struct kmem_cache size depends on nr_node_ids, which
	 * can be less than MAX_NUMNODES.
	 */
	cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) +
				 nr_node_ids * sizeof(struct kmem_list3 *);
#if DEBUG
	cache_cache.obj_size = cache_cache.buffer_size;
#endif
	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
					cache_line_size());
	cache_cache.reciprocal_buffer_size =
		reciprocal_value(cache_cache.buffer_size);

	/* Find the smallest page order that holds at least one object. */
	for (order = 0; order < MAX_ORDER; order++) {
		cache_estimate(order, cache_cache.buffer_size,
			cache_line_size(), 0, &left_over, &cache_cache.num);
		if (cache_cache.num)
			break;
	}
	BUG_ON(!cache_cache.num);
	cache_cache.gfporder = order;
	cache_cache.colour = left_over / cache_cache.colour_off;
	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
				      sizeof(struct slab), cache_line_size());

	/* 2+3) create the kmalloc caches */
	sizes = malloc_sizes;
	names = cache_names;

	/*
	 * Initialize the caches that provide memory for the array cache and the
	 * kmem_list3 structures first.  Without this, further allocations will
	 * bug.
	 */
	sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
					sizes[INDEX_AC].cs_size,
					ARCH_KMALLOC_MINALIGN,
					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
					NULL);

	if (INDEX_AC != INDEX_L3) {
		sizes[INDEX_L3].cs_cachep =
			kmem_cache_create(names[INDEX_L3].name,
				sizes[INDEX_L3].cs_size,
				ARCH_KMALLOC_MINALIGN,
				ARCH_KMALLOC_FLAGS|SLAB_PANIC,
				NULL);
	}

	slab_early_init = 0;

	while (sizes->cs_size != ULONG_MAX) {
		/*
		 * For performance, all the general caches are L1 aligned.
		 * This should be particularly beneficial on SMP boxes, as it
		 * eliminates "false sharing".
		 * Note for systems short on memory removing the alignment will
		 * allow tighter packing of the smaller caches.
		 */
		if (!sizes->cs_cachep) {
			sizes->cs_cachep = kmem_cache_create(names->name,
					sizes->cs_size,
					ARCH_KMALLOC_MINALIGN,
					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
					NULL);
		}
#ifdef CONFIG_ZONE_DMA
		sizes->cs_dmacachep = kmem_cache_create(
					names->name_dma,
					sizes->cs_size,
					ARCH_KMALLOC_MINALIGN,
					ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
						SLAB_PANIC,
					NULL);
#endif
		sizes++;
		names++;
	}
	/* 4) Replace the bootstrap head arrays */
	{
		struct array_cache *ptr;

		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);

		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
		memcpy(ptr, cpu_cache_get(&cache_cache),
		       sizeof(struct arraycache_init));
		/*
		 * Do not assume that spinlocks can be initialized via memcpy:
		 */
		spin_lock_init(&ptr->lock);

		cache_cache.array[smp_processor_id()] = ptr;

		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);

		BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
		       != &initarray_generic.cache);
		memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
		       sizeof(struct arraycache_init));
		/*
		 * Do not assume that spinlocks can be initialized via memcpy:
		 */
		spin_lock_init(&ptr->lock);

		malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
		    ptr;
	}
	/* 5) Replace the bootstrap kmem_list3's */
	{
		int nid;

		for_each_online_node(nid) {
			init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);

			init_list(malloc_sizes[INDEX_AC].cs_cachep,
				  &initkmem_list3[SIZE_AC + nid], nid);

			if (INDEX_AC != INDEX_L3) {
				init_list(malloc_sizes[INDEX_L3].cs_cachep,
					  &initkmem_list3[SIZE_L3 + nid], nid);
			}
		}
	}

	g_cpucache_up = EARLY;
}

void __init kmem_cache_init_late(void)
{
	struct kmem_cache *cachep;

	/* 6) resize the head arrays to their final sizes */
	mutex_lock(&cache_chain_mutex);
	list_for_each_entry(cachep, &cache_chain, next)
		if (enable_cpucache(cachep, GFP_NOWAIT))
			BUG();
	mutex_unlock(&cache_chain_mutex);

	/* Done! */
	g_cpucache_up = FULL;

	/* Annotate slab for lockdep -- annotate the malloc caches */
	init_lock_keys();

	/*
	 * Register a cpu startup notifier callback that initializes
	 * cpu_cache_get for all new cpus
	 */
	register_cpu_notifier(&cpucache_notifier);

#ifdef CONFIG_NUMA
	/*
	 * Register a memory hotplug callback that initializes and frees
	 * nodelists.
	 */
	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
#endif

	/*
	 * The reap timers are started later, with a module init call: That part
	 * of the kernel is not yet operational.
	 */
}

static int __init cpucache_init(void)
{
	int cpu;

	/*
	 * Register the timers that return unneeded pages to the page allocator
	 */
	for_each_online_cpu(cpu)
		start_cpu_timer(cpu);
	return 0;
}
__initcall(cpucache_init);

/*
 * Interface to system's page allocator. No need to hold the cache-lock.
 *
 * If we requested dmaable memory, we will get it. Even if we
 * did not request dmaable memory, we might get it, but that
 * would be relatively rare and ignorable.
 */
static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
	struct page *page;
	int nr_pages;
	int i;

#ifndef CONFIG_MMU
	/*
	 * Nommu uses slab's for process anonymous memory allocations, and thus
	 * requires __GFP_COMP to properly refcount higher order allocations
	 */
	flags |= __GFP_COMP;
#endif

	flags |= cachep->gfpflags;
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		flags |= __GFP_RECLAIMABLE;

	page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
	if (!page)
		return NULL;

	/* Account the pages against the right vmstat counter. */
	nr_pages = (1 << cachep->gfporder);
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		add_zone_page_state(page_zone(page),
			NR_SLAB_RECLAIMABLE, nr_pages);
	else
		add_zone_page_state(page_zone(page),
			NR_SLAB_UNRECLAIMABLE, nr_pages);
	for (i = 0; i < nr_pages; i++)
		__SetPageSlab(page + i);

	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);

		if (cachep->ctor)
			kmemcheck_mark_uninitialized_pages(page, nr_pages);
		else
			kmemcheck_mark_unallocated_pages(page, nr_pages);
	}

	return page_address(page);
}

/*
 * Interface to system's page release.
 */
static void kmem_freepages(struct kmem_cache *cachep, void *addr)
{
	unsigned long i = (1 << cachep->gfporder);
	struct page *page = virt_to_page(addr);
	const unsigned long nr_freed = i;

	kmemcheck_free_shadow(page, cachep->gfporder);

	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		sub_zone_page_state(page_zone(page),
				NR_SLAB_RECLAIMABLE, nr_freed);
	else
		sub_zone_page_state(page_zone(page),
				NR_SLAB_UNRECLAIMABLE, nr_freed);
	while (i--) {
		BUG_ON(!PageSlab(page));
		__ClearPageSlab(page);
		page++;
	}
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += nr_freed;
	free_pages((unsigned long)addr, cachep->gfporder);
}

/* RCU callback: actually release a SLAB_DESTROY_BY_RCU slab's pages. */
static void kmem_rcu_free(struct rcu_head *head)
{
	struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
	struct kmem_cache *cachep = slab_rcu->cachep;

	kmem_freepages(cachep, slab_rcu->addr);
	if (OFF_SLAB(cachep))
		kmem_cache_free(cachep->slabp_cache, slab_rcu);
}

#if DEBUG

#ifdef CONFIG_DEBUG_PAGEALLOC
/*
 * Record a marker, the caller address, the cpu and a partial stack trace
 * inside a poisoned object so a later corruption report can identify the
 * last owner.  Skipped for objects too small to hold the record.
 */
static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
			    unsigned long caller)
{
	int size = obj_size(cachep);

	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];

	if (size < 5 * sizeof(unsigned long))
		return;

	*addr++ = 0x12345678;
	*addr++ = caller;
	*addr++ = smp_processor_id();
	size -= 3 * sizeof(unsigned long);
	{
		unsigned long *sptr = &caller;
		unsigned long svalue;

		/* Walk the stack, copying text addresses while room remains. */
		while (!kstack_end(sptr)) {
			svalue = *sptr++;
			if (kernel_text_address(svalue)) {
				*addr++ = svalue;
				size -= sizeof(unsigned long);
				if (size <= sizeof(unsigned long))
					break;
			}
		}

	}
	*addr++ = 0x87654321;
}
#endif

/* Fill an object's payload with @val, terminated by the POISON_END byte. */
static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
{
	int size = obj_size(cachep);
	addr = &((char *)addr)[obj_offset(cachep)];

	memset(addr, val, size);
	*(unsigned char *)(addr + size - 1) = POISON_END;
}

/*
 * Hexdump up to @limit bytes at @offset, and diagnose a single flipped bit
 * (likely bad RAM) when exactly one byte differs from the poison pattern.
 */
static void dump_line(char *data, int offset, int limit)
{
	int i;
	unsigned char error = 0;
	int bad_count = 0;

	printk(KERN_ERR "%03x:", offset);
	for (i = 0; i < limit; i++) {
		if (data[offset + i] != POISON_FREE) {
			error = data[offset + i];
			bad_count++;
		}
		printk(" %02x", (unsigned char)data[offset + i]);
	}
	printk("\n");
	if (bad_count == 1) {
		error ^= POISON_FREE;
		/* A power of two means exactly one bit differs. */
		if (!(error & (error - 1))) {
			printk(KERN_ERR "Single bit error detected. Probably "
					"bad RAM.\n");
#ifdef CONFIG_X86
			printk(KERN_ERR "Run memtest86+ or a similar memory "
					"test tool.\n");
#else
			printk(KERN_ERR "Run a memory test tool.\n");
#endif
		}
	}
}
#endif

#if DEBUG

/*
 * Print redzone/last-user debug info for @objp, then hexdump up to @lines
 * 16-byte lines of its payload.
 */
static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
{
	int i, size;
	char *realobj;

	if (cachep->flags & SLAB_RED_ZONE) {
		printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
			*dbg_redzone1(cachep, objp),
			*dbg_redzone2(cachep, objp));
	}

	if (cachep->flags & SLAB_STORE_USER) {
		printk(KERN_ERR "Last user: [<%p>]",
			*dbg_userword(cachep, objp));
		print_symbol("(%s)",
				(unsigned long)*dbg_userword(cachep, objp));
		printk("\n");
	}
	realobj = (char *)objp + obj_offset(cachep);
	size = obj_size(cachep);
	for (i = 0; i < size && lines; i += 16, lines--) {
		int limit;
		limit = 16;
		if (i + limit > size)
			limit = size - i;
		dump_line(realobj, i, limit);
	}
}

/*
 * Verify that a free object still carries the poison pattern; on mismatch
 * dump the corrupted lines (up to 5) and the neighbouring objects.
 */
static void check_poison_obj(struct kmem_cache *cachep, void *objp)
{
	char *realobj;
	int size, i;
	int lines = 0;

	realobj = (char *)objp + obj_offset(cachep);
	size = obj_size(cachep);

	for (i = 0; i < size; i++) {
		char exp = POISON_FREE;
		if (i == size - 1)
			exp = POISON_END;
		if (realobj[i] != exp) {
			int limit;
			/* Mismatch ! */
			/* Print header */
			if (lines == 0) {
				printk(KERN_ERR
					"Slab corruption: %s start=%p, len=%d\n",
					cachep->name, realobj, size);
				print_objinfo(cachep, objp, 0);
			}
			/* Hexdump the affected line */
			i = (i / 16) * 16;
			limit = 16;
			if (i + limit > size)
				limit = size - i;
			dump_line(realobj, i, limit);
			i += 16;
			lines++;
			/* Limit to 5 lines */
			if (lines > 5)
				break;
		}
	}
	if (lines != 0) {
		/* Print some data about the neighboring objects, if they
		 * exist:
		 */
		struct slab *slabp = virt_to_slab(objp);
		unsigned int objnr;

		objnr = obj_to_index(cachep, slabp, objp);
		if (objnr) {
			objp = index_to_obj(cachep, slabp, objnr - 1);
			realobj = (char *)objp + obj_offset(cachep);
			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
			       realobj, size);
			print_objinfo(cachep, objp, 2);
		}
		if (objnr + 1 < cachep->num) {
			objp = index_to_obj(cachep, slabp, objnr + 1);
			realobj = (char *)objp + obj_offset(cachep);
			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
			       realobj, size);
			print_objinfo(cachep, objp, 2);
		}
	}
}
#endif

#if DEBUG
/*
 * Before releasing a slab, check every object for poison corruption and
 * red-zone overwrites, reporting any damage via slab_error().
 */
static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
{
	int i;
	for (i = 0; i < cachep->num; i++) {
		void *objp = index_to_obj(cachep, slabp, i);

		if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
			/* Page-aligned off-slab objects were unmapped instead
			 * of poisoned: re-map them rather than checking. */
			if (cachep->buffer_size % PAGE_SIZE == 0 &&
					OFF_SLAB(cachep))
				kernel_map_pages(virt_to_page(objp),
					cachep->buffer_size / PAGE_SIZE, 1);
			else
				check_poison_obj(cachep, objp);
#else
			check_poison_obj(cachep, objp);
#endif
		}
		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "start of a freed object "
					   "was overwritten");
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "end of a freed object "
					   "was overwritten");
		}
	}
}
#else
static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
{
}
#endif

/**
 * slab_destroy - destroy and release all objects in a slab
 * @cachep: cache pointer being destroyed
 * @slabp: slab pointer being destroyed
 *
 * Destroy all the objs in
 * a slab, and release the mem back to the system.
 * Before calling the slab must have been unlinked from the cache.  The
 * cache-lock is not held/needed.
 */
static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
{
	void *addr = slabp->s_mem - slabp->colouroff;

	slab_destroy_debugcheck(cachep, slabp);
	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
		/*
		 * Defer the page release past a grace period; the slab
		 * header doubles as the rcu_head storage.
		 */
		struct slab_rcu *slab_rcu;

		slab_rcu = (struct slab_rcu *)slabp;
		slab_rcu->cachep = cachep;
		slab_rcu->addr = addr;
		call_rcu(&slab_rcu->head, kmem_rcu_free);
	} else {
		kmem_freepages(cachep, addr);
		if (OFF_SLAB(cachep))
			kmem_cache_free(cachep->slabp_cache, slabp);
	}
}

/*
 * Release everything a cache owns: the per-cpu head arrays, each node's
 * shared/alien caches and kmem_list3, and finally the descriptor itself.
 */
static void __kmem_cache_destroy(struct kmem_cache *cachep)
{
	int i;
	struct kmem_list3 *l3;

	for_each_online_cpu(i)
		kfree(cachep->array[i]);

	/* NUMA: free the list3 structures */
	for_each_online_node(i) {
		l3 = cachep->nodelists[i];
		if (l3) {
			kfree(l3->shared);
			free_alien_cache(l3->alien);
			kfree(l3);
		}
	}
	kmem_cache_free(&cache_cache, cachep);
}

/**
 * calculate_slab_order - calculate size (page order) of slabs
 * @cachep: pointer to the cache that is being created
 * @size: size of objects to be created in this cache.
 * @align: required alignment for the objects.
 * @flags: slab allocation flags
 *
 * Also calculates the number of objects per slab.
 *
 * This could be made much more intelligent.  For now, try to avoid using
 * high order pages for slabs.  When the gfp() functions are more friendly
 * towards high-order requests, this should be changed.
 */
static size_t calculate_slab_order(struct kmem_cache *cachep,
			size_t size, size_t align, unsigned long flags)
{
	unsigned long offslab_limit;
	size_t left_over = 0;
	int gfporder;

	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
		unsigned int num;
		size_t remainder;

		cache_estimate(gfporder, size, align, flags, &remainder, &num);
		if (!num)
			continue;

		if (flags & CFLGS_OFF_SLAB) {
			/*
			 * Max number of objs-per-slab for caches which
			 * use off-slab slabs.  Needed to avoid a possible
			 * looping condition in cache_grow().
			 */
			offslab_limit = size - sizeof(struct slab);
			offslab_limit /= sizeof(kmem_bufctl_t);

			if (num > offslab_limit)
				break;
		}

		/* Found something acceptable - save it away */
		cachep->num = num;
		cachep->gfporder = gfporder;
		left_over = remainder;

		/*
		 * A VFS-reclaimable slab tends to have most allocations
		 * as GFP_NOFS and we really don't want to have to be allocating
		 * higher-order pages when we are unable to shrink dcache.
		 */
		if (flags & SLAB_RECLAIM_ACCOUNT)
			break;

		/*
		 * Large number of objects is good, but very large slabs are
		 * currently bad for the gfp()s.
		 */
		if (gfporder >= slab_break_gfp_order)
			break;

		/*
		 * Acceptable internal fragmentation?
		 */
		if (left_over * 8 <= (PAGE_SIZE << gfporder))
			break;
	}
	return left_over;
}

/*
 * Attach per-cpu and per-node state to a new cache.  During boot the
 * g_cpucache_up state machine (NONE -> PARTIAL_AC -> PARTIAL_L3 -> FULL)
 * decides whether static __init areas or kmalloc'ed memory are used.
 */
static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	if (g_cpucache_up == FULL)
		return enable_cpucache(cachep, gfp);

	if (g_cpucache_up == NONE) {
		/*
		 * Note: the first kmem_cache_create must create the cache
		 * that's used by kmalloc(24), otherwise the creation of
		 * further caches will BUG().
		 */
		cachep->array[smp_processor_id()] = &initarray_generic.cache;

		/*
		 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
		 * the first cache, then we need to set up all its list3s,
		 * otherwise the creation of further caches will BUG().
		 */
		set_up_list3s(cachep, SIZE_AC);
		if (INDEX_AC == INDEX_L3)
			g_cpucache_up = PARTIAL_L3;
		else
			g_cpucache_up = PARTIAL_AC;
	} else {
		cachep->array[smp_processor_id()] =
			kmalloc(sizeof(struct arraycache_init), gfp);

		if (g_cpucache_up == PARTIAL_AC) {
			set_up_list3s(cachep, SIZE_L3);
			g_cpucache_up = PARTIAL_L3;
		} else {
			int node;
			for_each_online_node(node) {
				cachep->nodelists[node] =
				    kmalloc_node(sizeof(struct kmem_list3),
						gfp, node);
				BUG_ON(!cachep->nodelists[node]);
				kmem_list3_init(cachep->nodelists[node]);
			}
		}
	}
	cachep->nodelists[numa_mem_id()]->next_reap =
			jiffies + REAPTIMEOUT_LIST3 +
			((unsigned long)cachep) % REAPTIMEOUT_LIST3;

	cpu_cache_get(cachep)->avail = 0;
	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
	cpu_cache_get(cachep)->batchcount = 1;
	cpu_cache_get(cachep)->touched = 0;
	cachep->batchcount = 1;
	cachep->limit = BOOT_CPUCACHE_ENTRIES;
	return 0;
}

/**
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within a int, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * @name must be valid until the cache is destroyed. This implies that
 * the module calling this has to destroy the cache before getting unloaded.
 * Note that kmem_cache_name() is not guaranteed to return the same pointer,
 * therefore applications must manage it themselves.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.
 * This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *
kmem_cache_create (const char *name, size_t size, size_t align,
	unsigned long flags, void (*ctor)(void *))
{
	size_t left_over, slab_size, ralign;
	struct kmem_cache *cachep = NULL, *pc;
	gfp_t gfp;

	/*
	 * Sanity checks... these are all serious usage bugs.
	 */
	if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
	    size > KMALLOC_MAX_SIZE) {
		printk(KERN_ERR "%s: Early error in slab %s\n", __func__,
				name);
		BUG();
	}

	/*
	 * We use cache_chain_mutex to ensure a consistent view of
	 * cpu_online_mask as well.  Please see cpuup_callback
	 */
	if (slab_is_available()) {
		get_online_cpus();
		mutex_lock(&cache_chain_mutex);
	}

	/* Scan existing caches for name clashes / stale (unloaded) names. */
	list_for_each_entry(pc, &cache_chain, next) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module.  Print a warning.
		 */
		res = probe_kernel_address(pc->name, tmp);
		if (res) {
			printk(KERN_ERR
			       "SLAB: cache with size %d has lost its name\n",
			       pc->buffer_size);
			continue;
		}

		if (!strcmp(pc->name, name)) {
			printk(KERN_ERR
			       "kmem_cache_create: duplicate cache %s\n", name);
			dump_stack();
			goto oops;
		}
	}

#if DEBUG
	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
#if FORCED_DEBUG
	/*
	 * Enable redzoning and last user accounting, except for caches with
	 * large objects, if the increased size would increase the object size
	 * above the next power of two: caches with object sizes just above a
	 * power of two have a significant amount of internal fragmentation.
	 */
	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
						2 * sizeof(unsigned long long)))
		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
	if (!(flags & SLAB_DESTROY_BY_RCU))
		flags |= SLAB_POISON;
#endif
	if (flags & SLAB_DESTROY_BY_RCU)
		BUG_ON(flags & SLAB_POISON);
#endif
	/*
	 * Always checks flags, a caller might be expecting debug support which
	 * isn't available.
	 */
	BUG_ON(flags & ~CREATE_MASK);

	/*
	 * Check that size is in terms of words.  This is needed to avoid
	 * unaligned accesses for some archs when redzoning is used, and makes
	 * sure any on-slab bufctl's are also correctly aligned.
	 */
	if (size & (BYTES_PER_WORD - 1)) {
		size += (BYTES_PER_WORD - 1);
		size &= ~(BYTES_PER_WORD - 1);
	}

	/* calculate the final buffer alignment: */

	/* 1) arch recommendation: can be overridden for debug */
	if (flags & SLAB_HWCACHE_ALIGN) {
		/*
		 * Default alignment: as specified by the arch code.  Except if
		 * an object is really small, then squeeze multiple objects into
		 * one cacheline.
		 */
		ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
	} else {
		ralign = BYTES_PER_WORD;
	}

	/*
	 * Redzoning and user store require word alignment or possibly larger.
	 * Note this will be overridden by architecture or caller mandated
	 * alignment if either is greater than BYTES_PER_WORD.
	 */
	if (flags & SLAB_STORE_USER)
		ralign = BYTES_PER_WORD;

	if (flags & SLAB_RED_ZONE) {
		ralign = REDZONE_ALIGN;
		/* If redzoning, ensure that the second redzone is suitably
		 * aligned, by adjusting the object size accordingly. */
		size += REDZONE_ALIGN - 1;
		size &= ~(REDZONE_ALIGN - 1);
	}

	/* 2) arch mandated alignment */
	if (ralign < ARCH_SLAB_MINALIGN) {
		ralign = ARCH_SLAB_MINALIGN;
	}
	/* 3) caller mandated alignment */
	if (ralign < align) {
		ralign = align;
	}
	/* disable debug if not aligning with REDZONE_ALIGN */
	if (ralign & (__alignof__(unsigned long long) - 1))
		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
	/*
	 * 4) Store it.
	 */
	align = ralign;

	if (slab_is_available())
		gfp = GFP_KERNEL;
	else
		gfp = GFP_NOWAIT;

	/* Get cache's description obj. */
	cachep = kmem_cache_zalloc(&cache_cache, gfp);
	if (!cachep)
		goto oops;

#if DEBUG
	cachep->obj_size = size;

	/*
	 * Both debugging options require word-alignment which is calculated
	 * into align above.
	 */
	if (flags & SLAB_RED_ZONE) {
		/* add space for red zone words */
		cachep->obj_offset += align;
		size += align + sizeof(unsigned long long);
	}
	if (flags & SLAB_STORE_USER) {
		/* user store requires one word storage behind the end of
		 * the real object. But if the second red zone needs to be
		 * aligned to 64 bits, we must allow that much space.
		 */
		if (flags & SLAB_RED_ZONE)
			size += REDZONE_ALIGN;
		else
			size += BYTES_PER_WORD;
	}
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
	    && cachep->obj_size > cache_line_size()
	    && ALIGN(size, align) < PAGE_SIZE) {
		cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
		size = PAGE_SIZE;
	}
#endif
#endif

	/*
	 * Determine if the slab management is 'on' or 'off' slab.
	 * (bootstrapping cannot cope with offslab caches so don't do
	 * it too early on. Always use on-slab management when
	 * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
	 */
	if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init &&
	    !(flags & SLAB_NOLEAKTRACE))
		/*
		 * Size is large, assume best to place the slab management obj
		 * off-slab (should allow better packing of objs).
		 */
		flags |= CFLGS_OFF_SLAB;

	size = ALIGN(size, align);

	left_over = calculate_slab_order(cachep, size, align, flags);

	if (!cachep->num) {
		printk(KERN_ERR
		       "kmem_cache_create: couldn't create cache %s.\n", name);
		kmem_cache_free(&cache_cache, cachep);
		cachep = NULL;
		goto oops;
	}
	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
			  + sizeof(struct slab), align);

	/*
	 * If the slab has been placed off-slab, and we have enough space then
	 * move it on-slab. This is at the expense of any extra colouring.
	 */
	if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
		flags &= ~CFLGS_OFF_SLAB;
		left_over -= slab_size;
	}

	if (flags & CFLGS_OFF_SLAB) {
		/* really off slab. No need for manual alignment */
		slab_size =
		    cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);

#ifdef CONFIG_PAGE_POISONING
		/* If we're going to use the generic kernel_map_pages()
		 * poisoning, then it's going to smash the contents of
		 * the redzone and userword anyhow, so switch them off.
		 */
		if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
#endif
	}

	cachep->colour_off = cache_line_size();
	/* Offset must be a multiple of the alignment. */
	if (cachep->colour_off < align)
		cachep->colour_off = align;
	cachep->colour = left_over / cachep->colour_off;
	cachep->slab_size = slab_size;
	cachep->flags = flags;
	cachep->gfpflags = 0;
	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
		cachep->gfpflags |= GFP_DMA;
	cachep->buffer_size = size;
	cachep->reciprocal_buffer_size = reciprocal_value(size);

	if (flags & CFLGS_OFF_SLAB) {
		cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
		/*
		 * This is a possibility for one of the malloc_sizes caches.
		 * But since we go off slab only for object size greater than
		 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
		 * this should not happen at all.
		 * But leave a BUG_ON for some lucky dude.
		 */
		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
	}
	cachep->ctor = ctor;
	cachep->name = name;

	if (setup_cpu_cache(cachep, gfp)) {
		__kmem_cache_destroy(cachep);
		cachep = NULL;
		goto oops;
	}

	/* cache setup completed, link it into the list */
	list_add(&cachep->next, &cache_chain);
oops:
	if (!cachep && (flags & SLAB_PANIC))
		panic("kmem_cache_create(): failed to create slab `%s'\n",
		      name);
	if (slab_is_available()) {
		mutex_unlock(&cache_chain_mutex);
		put_online_cpus();
	}
	return cachep;
}
EXPORT_SYMBOL(kmem_cache_create);

#if DEBUG
/* Debug-only sanity helpers; compiled away to no-ops when !DEBUG. */
static void check_irq_off(void)
{
	BUG_ON(!irqs_disabled());
}

static void check_irq_on(void)
{
	BUG_ON(irqs_disabled());
}

static void check_spinlock_acquired(struct kmem_cache *cachep)
{
#ifdef CONFIG_SMP
	check_irq_off();
	assert_spin_locked(&cachep->nodelists[numa_mem_id()]->list_lock);
#endif
}

static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
{
#ifdef CONFIG_SMP
	check_irq_off();
	assert_spin_locked(&cachep->nodelists[node]->list_lock);
#endif
}

#else
#define check_irq_off()	do { } while(0)
#define check_irq_on()	do { } while(0)
#define check_spinlock_acquired(x) do { } while(0)
#define check_spinlock_acquired_node(x, y) do { } while(0)
#endif

static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
			struct array_cache *ac,
			int force, int node);

/*
 * IPI handler: flush the calling cpu's array cache of @arg (a kmem_cache)
 * back to the node lists.  Runs with interrupts disabled.
 */
static void do_drain(void *arg)
{
	struct kmem_cache *cachep = arg;
	struct array_cache *ac;
	int node = numa_mem_id();

	check_irq_off();
	ac = cpu_cache_get(cachep);
	spin_lock(&cachep->nodelists[node]->list_lock);
	free_block(cachep, ac->entry, ac->avail, node);
	spin_unlock(&cachep->nodelists[node]->list_lock);
	ac->avail = 0;
}

/* Drain per-cpu, alien and shared array caches of a cache on all nodes. */
static void drain_cpu_caches(struct kmem_cache *cachep)
{
	struct kmem_list3 *l3;
	int node;

	on_each_cpu(do_drain, cachep, 1);
	check_irq_on();
	for_each_online_node(node) {
		l3 = cachep->nodelists[node];
		if (l3 && l3->alien)
			drain_alien_cache(cachep, l3->alien);
	}

	for_each_online_node(node) {
		l3 = cachep->nodelists[node];
		if (l3)
			drain_array(cachep, l3, l3->shared, 1, node);
	}
}

/*
 * Remove slabs from the list of free slabs.
 * Specify the number of slabs to drain in tofree.
 *
 * Returns the actual number of slabs released.
 */
static int drain_freelist(struct kmem_cache *cache,
			struct kmem_list3 *l3, int tofree)
{
	struct list_head *p;
	int nr_freed;
	struct slab *slabp;

	nr_freed = 0;
	while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {

		spin_lock_irq(&l3->list_lock);
		p = l3->slabs_free.prev;
		if (p == &l3->slabs_free) {
			spin_unlock_irq(&l3->list_lock);
			goto out;
		}

		slabp = list_entry(p, struct slab, list);
#if DEBUG
		BUG_ON(slabp->inuse);
#endif
		list_del(&slabp->list);
		/*
		 * Safe to drop the lock. The slab is no longer linked
		 * to the cache.
		 */
		l3->free_objects -= cache->num;
		spin_unlock_irq(&l3->list_lock);
		slab_destroy(cache, slabp);
		nr_freed++;
	}
out:
	return nr_freed;
}

/* Called with cache_chain_mutex held to protect against cpu hotplug */
static int __cache_shrink(struct kmem_cache *cachep)
{
	int ret = 0, i = 0;
	struct kmem_list3 *l3;

	drain_cpu_caches(cachep);

	check_irq_on();
	for_each_online_node(i) {
		l3 = cachep->nodelists[i];
		if (!l3)
			continue;

		drain_freelist(cachep, l3, l3->free_objects);

		/* Non-zero if any node still holds allocated objects. */
		ret += !list_empty(&l3->slabs_full) ||
			!list_empty(&l3->slabs_partial);
	}
	return (ret ? 1 : 0);
}

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret;
	BUG_ON(!cachep || in_interrupt());

	get_online_cpus();
	mutex_lock(&cache_chain_mutex);
	ret = __cache_shrink(cachep);
	mutex_unlock(&cache_chain_mutex);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);

/**
 * kmem_cache_destroy - delete a cache
 * @cachep: the cache to destroy
 *
 * Remove a &struct kmem_cache object from the slab cache.
 *
 * It is expected this function will be called by a module when it is
 * unloaded.  This will remove the cache completely, and avoid a duplicate
 * cache being allocated each time a module is loaded and unloaded, if the
 * module doesn't have persistent in-kernel storage across loads and unloads.
 *
 * The cache must be empty before calling this function.
 *
 * The caller must guarantee that noone will allocate memory from the cache
 * during the kmem_cache_destroy().
 */
void kmem_cache_destroy(struct kmem_cache *cachep)
{
	BUG_ON(!cachep || in_interrupt());

	/* Find the cache in the chain of caches. */
	get_online_cpus();
	mutex_lock(&cache_chain_mutex);
	/*
	 * the chain is never empty, cache_cache is never destroyed
	 */
	list_del(&cachep->next);
	if (__cache_shrink(cachep)) {
		/* Objects still live: relink the cache and bail out. */
		slab_error(cachep, "Can't free all objects");
		list_add(&cachep->next, &cache_chain);
		mutex_unlock(&cache_chain_mutex);
		put_online_cpus();
		return;
	}

	/* Wait for in-flight RCU-delayed slab frees before final teardown. */
	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
		rcu_barrier();

	__kmem_cache_destroy(cachep);
	mutex_unlock(&cache_chain_mutex);
	put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);

/*
 * Get the memory for a slab management obj.
 * For a slab cache when the slab descriptor is off-slab, slab descriptors
 * always come from malloc_sizes caches.  The slab descriptor cannot
 * come from the same cache which is getting created because,
 * when we are searching for an appropriate cache for these
 * descriptors in kmem_cache_create, we search through the malloc_sizes array.
 * If we are creating a malloc_sizes cache here it would not be visible to
 * kmem_find_general_cachep till the initialization is complete.
 * Hence we cannot have slabp_cache same as the original cache.
 */
static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
				   int colour_off, gfp_t local_flags,
				   int nodeid)
{
	struct slab *slabp;

	if (OFF_SLAB(cachep)) {
		/* Slab management obj is off-slab.
*/ slabp = kmem_cache_alloc_node(cachep->slabp_cache, local_flags, nodeid); /* * If the first object in the slab is leaked (it's allocated * but no one has a reference to it), we want to make sure * kmemleak does not treat the ->s_mem pointer as a reference * to the object. Otherwise we will not report the leak. */ kmemleak_scan_area(&slabp->list, sizeof(struct list_head), local_flags); if (!slabp) return NULL; } else { slabp = objp + colour_off; colour_off += cachep->slab_size; } slabp->inuse = 0; slabp->colouroff = colour_off; slabp->s_mem = objp + colour_off; slabp->nodeid = nodeid; slabp->free = 0; return slabp; } static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp) { return (kmem_bufctl_t *) (slabp + 1); } static void cache_init_objs(struct kmem_cache *cachep, struct slab *slabp) { int i; for (i = 0; i < cachep->num; i++) { void *objp = index_to_obj(cachep, slabp, i); #if DEBUG /* need to poison the objs? */ if (cachep->flags & SLAB_POISON) poison_obj(cachep, objp, POISON_FREE); if (cachep->flags & SLAB_STORE_USER) *dbg_userword(cachep, objp) = NULL; if (cachep->flags & SLAB_RED_ZONE) { *dbg_redzone1(cachep, objp) = RED_INACTIVE; *dbg_redzone2(cachep, objp) = RED_INACTIVE; } /* * Constructors are not allowed to allocate memory from the same * cache which they are a constructor for. Otherwise, deadlock. * They must also be threaded. 
*/ if (cachep->ctor && !(cachep->flags & SLAB_POISON)) cachep->ctor(objp + obj_offset(cachep)); if (cachep->flags & SLAB_RED_ZONE) { if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) slab_error(cachep, "constructor overwrote the" " end of an object"); if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) slab_error(cachep, "constructor overwrote the" " start of an object"); } if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep) && cachep->flags & SLAB_POISON) kernel_map_pages(virt_to_page(objp), cachep->buffer_size / PAGE_SIZE, 0); #else if (cachep->ctor) cachep->ctor(objp); #endif slab_bufctl(slabp)[i] = i + 1; } slab_bufctl(slabp)[i - 1] = BUFCTL_END; } static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags) { if (CONFIG_ZONE_DMA_FLAG) { if (flags & GFP_DMA) BUG_ON(!(cachep->gfpflags & GFP_DMA)); else BUG_ON(cachep->gfpflags & GFP_DMA); } } static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, int nodeid) { void *objp = index_to_obj(cachep, slabp, slabp->free); kmem_bufctl_t next; slabp->inuse++; next = slab_bufctl(slabp)[slabp->free]; #if DEBUG slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE; WARN_ON(slabp->nodeid != nodeid); #endif slabp->free = next; return objp; } static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, void *objp, int nodeid) { unsigned int objnr = obj_to_index(cachep, slabp, objp); #if DEBUG /* Verify that the slab belongs to the intended node */ WARN_ON(slabp->nodeid != nodeid); if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) { printk(KERN_ERR "slab: double free detected in cache " "'%s', objp %p\n", cachep->name, objp); BUG(); } #endif slab_bufctl(slabp)[objnr] = slabp->free; slabp->free = objnr; slabp->inuse--; } /* * Map pages beginning at addr to the given cache and slab. This is required * for the slab allocator to be able to lookup the cache and slab of a * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging. 
 */
static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
			   void *addr)
{
	int nr_pages;
	struct page *page;

	page = virt_to_page(addr);

	nr_pages = 1;
	if (likely(!PageCompound(page)))
		nr_pages <<= cache->gfporder;

	do {
		page_set_cache(page, cache);
		page_set_slab(page, slab);
		page++;
	} while (--nr_pages);
}

/*
 * Grow (by 1) the number of slabs within a cache.  This is called by
 * kmem_cache_alloc() when there are no active objs left in a cache.
 *
 * Returns 1 on success, 0 on allocation failure.  If @objp is non-NULL
 * the caller has already allocated the pages for the slab.
 */
static int cache_grow(struct kmem_cache *cachep,
		gfp_t flags, int nodeid, void *objp)
{
	struct slab *slabp;
	size_t offset;
	gfp_t local_flags;
	struct kmem_list3 *l3;

	/*
	 * Be lazy and only check for valid flags here,  keeping it out of the
	 * critical path in kmem_cache_alloc().
	 */
	BUG_ON(flags & GFP_SLAB_BUG_MASK);
	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);

	/* Take the l3 list lock to change the colour_next on this node */
	check_irq_off();
	l3 = cachep->nodelists[nodeid];
	spin_lock(&l3->list_lock);

	/* Get colour for the slab, and cal the next value. */
	offset = l3->colour_next;
	l3->colour_next++;
	if (l3->colour_next >= cachep->colour)
		l3->colour_next = 0;
	spin_unlock(&l3->list_lock);

	offset *= cachep->colour_off;

	/* Sleeping allocations may re-enable interrupts while we allocate. */
	if (local_flags & __GFP_WAIT)
		local_irq_enable();

	/*
	 * The test for missing atomic flag is performed here, rather than
	 * the more obvious place, simply to reduce the critical path length
	 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
	 * will eventually be caught here (where it matters).
	 */
	kmem_flagcheck(cachep, flags);

	/*
	 * Get mem for the objs.  Attempt to allocate a physical page from
	 * 'nodeid'.
	 */
	if (!objp)
		objp = kmem_getpages(cachep, local_flags, nodeid);
	if (!objp)
		goto failed;

	/* Get slab management. */
	slabp = alloc_slabmgmt(cachep, objp, offset,
			local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
	if (!slabp)
		goto opps1;

	slab_map_pages(cachep, slabp, objp);

	cache_init_objs(cachep, slabp);

	if (local_flags & __GFP_WAIT)
		local_irq_disable();
	check_irq_off();
	spin_lock(&l3->list_lock);

	/* Make slab active. */
	list_add_tail(&slabp->list, &(l3->slabs_free));
	STATS_INC_GROWN(cachep);
	l3->free_objects += cachep->num;
	spin_unlock(&l3->list_lock);
	return 1;
opps1:
	kmem_freepages(cachep, objp);
failed:
	if (local_flags & __GFP_WAIT)
		local_irq_disable();
	return 0;
}

#if DEBUG

/*
 * Perform extra freeing checks:
 * - detect bad pointers.
 * - POISON/RED_ZONE checking
 */
static void kfree_debugcheck(const void *objp)
{
	if (!virt_addr_valid(objp)) {
		printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
		       (unsigned long)objp);
		BUG();
	}
}

/* Classify a redzone violation on free: double free vs. overwrite. */
static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
{
	unsigned long long redzone1, redzone2;

	redzone1 = *dbg_redzone1(cache, obj);
	redzone2 = *dbg_redzone2(cache, obj);

	/*
	 * Redzone is ok.
	 */
	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
		return;

	/* Both already inactive means the object was freed twice. */
	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
		slab_error(cache, "double free detected");
	else
		slab_error(cache, "memory outside object was overwritten");

	printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
			obj, redzone1, redzone2);
}

/*
 * Debug-path free checks: validate the pointer, redzones and bufctl state,
 * record the freeing caller and re-poison the object.  Returns the pointer
 * adjusted back to the start of the debug header.
 */
static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
				   void *caller)
{
	struct page *page;
	unsigned int objnr;
	struct slab *slabp;

	BUG_ON(virt_to_cache(objp) != cachep);

	objp -= obj_offset(cachep);
	kfree_debugcheck(objp);
	page = virt_to_head_page(objp);

	slabp = page_get_slab(page);

	if (cachep->flags & SLAB_RED_ZONE) {
		verify_redzone_free(cachep, objp);
		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
	}
	if (cachep->flags & SLAB_STORE_USER)
		*dbg_userword(cachep, objp) = caller;

	objnr = obj_to_index(cachep, slabp, objp);

	BUG_ON(objnr >= cachep->num);
	BUG_ON(objp != index_to_obj(cachep, slabp, objnr));

#ifdef CONFIG_DEBUG_SLAB_LEAK
	slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
#endif
	if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
		if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
			store_stackinfo(cachep, objp, (unsigned long)caller);
			kernel_map_pages(virt_to_page(objp),
					 cachep->buffer_size / PAGE_SIZE, 0);
		} else {
			poison_obj(cachep, objp, POISON_FREE);
		}
#else
		poison_obj(cachep, objp, POISON_FREE);
#endif
	}
	return objp;
}

/* Consistency check of a slab's bufctl free list; BUGs with a hexdump. */
static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
{
	kmem_bufctl_t i;
	int entries = 0;

	/* Check slab's freelist to see if this obj is there. */
	for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
		entries++;
		if (entries > cachep->num || i >= cachep->num)
			goto bad;
	}
	if (entries != cachep->num - slabp->inuse) {
bad:
		printk(KERN_ERR "slab: Internal list corruption detected in "
				"cache '%s'(%d), slabp %p(%d). Hexdump:\n",
				cachep->name, cachep->num, slabp, slabp->inuse);
		for (i = 0;
		     i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
		     i++) {
			if (i % 16 == 0)
				printk("\n%03x:", i);
			printk(" %02x", ((unsigned char *)slabp)[i]);
		}
		printk("\n");
		BUG();
	}
}
#else
#define kfree_debugcheck(x) do { } while(0)
#define cache_free_debugcheck(x,objp,z) (objp)
#define check_slabp(x,y) do { } while(0)
#endif

/*
 * Refill the cpu array cache of a cache from the local node's shared
 * array or its partial/free slabs, growing the cache if necessary.
 * Returns one object (or NULL on failure).  Called with irqs disabled.
 */
static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
{
	int batchcount;
	struct kmem_list3 *l3;
	struct array_cache *ac;
	int node;

retry:
	check_irq_off();
	node = numa_mem_id();
	ac = cpu_cache_get(cachep);
	batchcount = ac->batchcount;
	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
		/*
		 * If there was little recent activity on this cache, then
		 * perform only a partial refill.  Otherwise we could generate
		 * refill bouncing.
		 */
		batchcount = BATCHREFILL_LIMIT;
	}
	l3 = cachep->nodelists[node];

	BUG_ON(ac->avail > 0 || !l3);
	spin_lock(&l3->list_lock);

	/* See if we can refill from the shared array */
	if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) {
		l3->shared->touched = 1;
		goto alloc_done;
	}

	while (batchcount > 0) {
		struct list_head *entry;
		struct slab *slabp;
		/* Get slab alloc is to come from. */
		entry = l3->slabs_partial.next;
		if (entry == &l3->slabs_partial) {
			l3->free_touched = 1;
			entry = l3->slabs_free.next;
			if (entry == &l3->slabs_free)
				goto must_grow;
		}

		slabp = list_entry(entry, struct slab, list);
		check_slabp(cachep, slabp);
		check_spinlock_acquired(cachep);

		/*
		 * The slab was either on partial or free list so
		 * there must be at least one object available for
		 * allocation.
		 */
		BUG_ON(slabp->inuse >= cachep->num);

		while (slabp->inuse < cachep->num && batchcount--) {
			STATS_INC_ALLOCED(cachep);
			STATS_INC_ACTIVE(cachep);
			STATS_SET_HIGH(cachep);

			ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
							    node);
		}
		check_slabp(cachep, slabp);

		/* move slabp to correct slabp list: */
		list_del(&slabp->list);
		if (slabp->free == BUFCTL_END)
			list_add(&slabp->list, &l3->slabs_full);
		else
			list_add(&slabp->list, &l3->slabs_partial);
	}

must_grow:
	l3->free_objects -= ac->avail;
alloc_done:
	spin_unlock(&l3->list_lock);

	if (unlikely(!ac->avail)) {
		int x;
		x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);

		/* cache_grow can reenable interrupts, then ac could change. */
		ac = cpu_cache_get(cachep);
		if (!x && ac->avail == 0)	/* no objects in sight? abort */
			return NULL;

		if (!ac->avail)		/* objects refilled by interrupt? */
			goto retry;
	}
	ac->touched = 1;
	return ac->entry[--ac->avail];
}

/* Pre-allocation debug checks; also catches sleeping-in-atomic misuse. */
static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
						gfp_t flags)
{
	might_sleep_if(flags & __GFP_WAIT);
#if DEBUG
	kmem_flagcheck(cachep, flags);
#endif
}

#if DEBUG
/*
 * Post-allocation debug work: verify poison, arm redzones, record the
 * caller, run the constructor and return the pointer advanced past the
 * debug header.
 */
static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
				gfp_t flags, void *objp, void *caller)
{
	if (!objp)
		return objp;
	if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
		if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
			kernel_map_pages(virt_to_page(objp),
					 cachep->buffer_size / PAGE_SIZE, 1);
		else
			check_poison_obj(cachep, objp);
#else
		check_poison_obj(cachep, objp);
#endif
		poison_obj(cachep, objp, POISON_INUSE);
	}
	if (cachep->flags & SLAB_STORE_USER)
		*dbg_userword(cachep, objp) = caller;

	if (cachep->flags & SLAB_RED_ZONE) {
		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
			slab_error(cachep, "double free, or memory outside"
						" object was overwritten");
			printk(KERN_ERR
				"%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
				objp, *dbg_redzone1(cachep, objp),
				*dbg_redzone2(cachep, objp));
		}
		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
	}
#ifdef CONFIG_DEBUG_SLAB_LEAK
	{
		struct slab *slabp;
		unsigned objnr;

		slabp = page_get_slab(virt_to_head_page(objp));
		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
		slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
	}
#endif
	objp += obj_offset(cachep);
	if (cachep->ctor && cachep->flags & SLAB_POISON)
		cachep->ctor(objp);
#if ARCH_SLAB_MINALIGN
	/*
	 * NOTE(review): the (u32) cast truncates the pointer on 64-bit;
	 * the low bits used for the alignment mask survive, but
	 * (unsigned long) would be the cleaner cast -- verify upstream.
	 */
	if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
		       objp, ARCH_SLAB_MINALIGN);
	}
#endif
	return objp;
}
#else
#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
#endif

/* Fault injection hook; never fail allocations from cache_cache itself. */
static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
{
	if (cachep == &cache_cache)
		return false;

	return should_failslab(obj_size(cachep), flags, cachep->flags);
}

/*
 * Fast-path allocation from the cpu array cache, falling back to
 * cache_alloc_refill() on a miss.  Called with irqs disabled.
 */
static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	void *objp;
	struct array_cache *ac;

	check_irq_off();

	ac = cpu_cache_get(cachep);
	if (likely(ac->avail)) {
		STATS_INC_ALLOCHIT(cachep);
		ac->touched = 1;
		objp = ac->entry[--ac->avail];
	} else {
		STATS_INC_ALLOCMISS(cachep);
		objp = cache_alloc_refill(cachep, flags);
		/*
		 * the 'ac' may be updated by cache_alloc_refill(),
		 * and kmemleak_erase() requires its correct value.
		 */
		ac = cpu_cache_get(cachep);
	}
	/*
	 * To avoid a false negative, if an object that is in one of the
	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
	 * treat the array pointers as a reference to the object.
	 */
	if (objp)
		kmemleak_erase(&ac->entry[ac->avail]);
	return objp;
}

#ifdef CONFIG_NUMA
/*
 * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
 *
 * If we are in_interrupt, then process context, including cpusets and
 * mempolicy, may not apply and should not be used for allocation policy.
 */
static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	int nid_alloc, nid_here;

	if (in_interrupt() || (flags & __GFP_THISNODE))
		return NULL;
	nid_alloc = nid_here = numa_mem_id();
	get_mems_allowed();
	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
		nid_alloc = cpuset_slab_spread_node();
	else if (current->mempolicy)
		nid_alloc = slab_node(current->mempolicy);
	put_mems_allowed();
	if (nid_alloc != nid_here)
		return ____cache_alloc_node(cachep, flags, nid_alloc);
	return NULL;
}

/*
 * Fallback function if there was no memory available and no objects on a
 * certain node and fall back is permitted. First we scan all the
 * available nodelists for available objects. If that fails then we
 * perform an allocation without specifying a node. This allows the page
 * allocator to do its reclaim / fallback magic. We then insert the
 * slab into the proper nodelist and then allocate from it.
 */
static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
{
	struct zonelist *zonelist;
	gfp_t local_flags;
	struct zoneref *z;
	struct zone *zone;
	enum zone_type high_zoneidx = gfp_zone(flags);
	void *obj = NULL;
	int nid;

	if (flags & __GFP_THISNODE)
		return NULL;

	get_mems_allowed();
	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);

retry:
	/*
	 * Look through allowed nodes for objects available
	 * from existing per node queues.
	 */
	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
		nid = zone_to_nid(zone);

		if (cpuset_zone_allowed_hardwall(zone, flags) &&
			cache->nodelists[nid] &&
			cache->nodelists[nid]->free_objects) {
				obj = ____cache_alloc_node(cache,
					flags | GFP_THISNODE, nid);
				if (obj)
					break;
		}
	}

	if (!obj) {
		/*
		 * This allocation will be performed within the constraints
		 * of the current cpuset / memory policy requirements.
		 * We may trigger various forms of reclaim on the allowed
		 * set and go into memory reserves if necessary.
		 */
		if (local_flags & __GFP_WAIT)
			local_irq_enable();
		kmem_flagcheck(cache, flags);
		obj = kmem_getpages(cache, local_flags, numa_mem_id());
		if (local_flags & __GFP_WAIT)
			local_irq_disable();
		if (obj) {
			/*
			 * Insert into the appropriate per node queues
			 */
			nid = page_to_nid(virt_to_page(obj));
			if (cache_grow(cache, flags, nid, obj)) {
				obj = ____cache_alloc_node(cache,
					flags | GFP_THISNODE, nid);
				if (!obj)
					/*
					 * Another processor may allocate the
					 * objects in the slab since we are
					 * not holding any locks.
					 */
					goto retry;
			} else {
				/* cache_grow already freed obj */
				obj = NULL;
			}
		}
	}
	put_mems_allowed();
	return obj;
}

/*
 * A interface to enable slab creation on nodeid
 */
static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
				int nodeid)
{
	struct list_head *entry;
	struct slab *slabp;
	struct kmem_list3 *l3;
	void *obj;
	int x;

	l3 = cachep->nodelists[nodeid];
	BUG_ON(!l3);

retry:
	check_irq_off();
	spin_lock(&l3->list_lock);
	entry = l3->slabs_partial.next;
	if (entry == &l3->slabs_partial) {
		l3->free_touched = 1;
		entry = l3->slabs_free.next;
		if (entry == &l3->slabs_free)
			goto must_grow;
	}

	slabp = list_entry(entry, struct slab, list);
	check_spinlock_acquired_node(cachep, nodeid);
	check_slabp(cachep, slabp);

	STATS_INC_NODEALLOCS(cachep);
	STATS_INC_ACTIVE(cachep);
	STATS_SET_HIGH(cachep);

	BUG_ON(slabp->inuse == cachep->num);

	obj = slab_get_obj(cachep, slabp, nodeid);
	check_slabp(cachep, slabp);
	l3->free_objects--;
	/* move slabp to correct slabp list: */
	list_del(&slabp->list);

	if (slabp->free == BUFCTL_END)
		list_add(&slabp->list, &l3->slabs_full);
	else
		list_add(&slabp->list, &l3->slabs_partial);

	spin_unlock(&l3->list_lock);
	goto done;

must_grow:
	spin_unlock(&l3->list_lock);
	x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
	if (x)
		goto retry;

	return fallback_alloc(cachep, flags);

done:
	return obj;
}

/**
 * kmem_cache_alloc_node - Allocate an object on the specified node
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 * @nodeid: node number of the target node.
 * @caller: return address of caller, used for debug information
 *
 * Identical to kmem_cache_alloc but it will allocate memory on the given
 * node, which can improve the performance for cpu bound structures.
 *
 * Fallback to other node is possible if __GFP_THISNODE is not set.
 */
static __always_inline void *
__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
		   void *caller)
{
	unsigned long save_flags;
	void *ptr;
	int slab_node = numa_mem_id();

	flags &= gfp_allowed_mask;

	lockdep_trace_alloc(flags);

	if (slab_should_failslab(cachep, flags))
		return NULL;

	cache_alloc_debugcheck_before(cachep, flags);
	local_irq_save(save_flags);

	/* nodeid == -1 means "no preference": use the local memory node. */
	if (nodeid == -1)
		nodeid = slab_node;

	if (unlikely(!cachep->nodelists[nodeid])) {
		/* Node not bootstrapped yet */
		ptr = fallback_alloc(cachep, flags);
		goto out;
	}

	if (nodeid == slab_node) {
		/*
		 * Use the locally cached objects if possible.
		 * However ____cache_alloc does not allow fallback
		 * to other nodes. It may fail while we still have
		 * objects on other nodes available.
		 */
		ptr = ____cache_alloc(cachep, flags);
		if (ptr)
			goto out;
	}
	/* ___cache_alloc_node can fall back to other nodes */
	ptr = ____cache_alloc_node(cachep, flags, nodeid);
  out:
	local_irq_restore(save_flags);
	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
				 flags);

	if (likely(ptr))
		kmemcheck_slab_alloc(cachep, flags, ptr, obj_size(cachep));

	if (unlikely((flags & __GFP_ZERO) && ptr))
		memset(ptr, 0, obj_size(cachep));

	return ptr;
}

/*
 * NUMA allocation core: honour slab spreading / mempolicy, then try the
 * local cpu cache and finally other nodes.  Called with irqs disabled.
 */
static __always_inline void *
__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
{
	void *objp;

	if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
		objp = alternate_node_alloc(cache, flags);
		if (objp)
			goto out;
	}
	objp = ____cache_alloc(cache, flags);

	/*
	 * We may just have run out of memory on the local node.
	 * ____cache_alloc_node() knows how to locate memory on other nodes
	 */
	if (!objp)
		objp = ____cache_alloc_node(cache, flags, numa_mem_id());

  out:
	return objp;
}

#else

/* !CONFIG_NUMA: only the local cpu cache / node exists. */
static __always_inline void *
__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	return ____cache_alloc(cachep, flags);
}

#endif /* CONFIG_NUMA */

/*
 * Common allocation entry point: fault injection, debug checks, the
 * actual allocation with irqs disabled, then kmemleak/kmemcheck and
 * optional __GFP_ZERO handling.
 */
static __always_inline void *
__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
{
	unsigned long save_flags;
	void *objp;

	flags &= gfp_allowed_mask;

	lockdep_trace_alloc(flags);

	if (slab_should_failslab(cachep, flags))
		return NULL;

	cache_alloc_debugcheck_before(cachep, flags);
	local_irq_save(save_flags);
	objp = __do_cache_alloc(cachep, flags);
	local_irq_restore(save_flags);
	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
	kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
				 flags);
	prefetchw(objp);

	if (likely(objp))
		kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));

	if (unlikely((flags & __GFP_ZERO) && objp))
		memset(objp, 0, obj_size(cachep));

	return objp;
}

/*
 * Caller needs to acquire correct kmem_list's list_lock
 */
static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
		       int node)
{
	int i;
	struct kmem_list3 *l3;

	for (i = 0; i < nr_objects; i++) {
		void *objp = objpp[i];
		struct slab *slabp;

		slabp = virt_to_slab(objp);
		l3 = cachep->nodelists[node];
		list_del(&slabp->list);
		check_spinlock_acquired_node(cachep, node);
		check_slabp(cachep, slabp);
		slab_put_obj(cachep, slabp, objp, node);
		STATS_DEC_ACTIVE(cachep);
		l3->free_objects++;
		check_slabp(cachep, slabp);

		/* fixup slab chains */
		if (slabp->inuse == 0) {
			if (l3->free_objects > l3->free_limit) {
				l3->free_objects -= cachep->num;
				/* No need to drop any previously held
				 * lock here, even if we have a off-slab slab
				 * descriptor it is guaranteed to come from
				 * a different cache, refer to comments before
				 * alloc_slabmgmt.
				 */
				slab_destroy(cachep, slabp);
			} else {
				list_add(&slabp->list, &l3->slabs_free);
			}
		} else {
			/* Unconditionally move a slab to the end of the
			 * partial list on free - maximum time for the
			 * other objects to be freed, too.
			 */
			list_add_tail(&slabp->list, &l3->slabs_partial);
		}
	}
}

/*
 * Flush up to ac->batchcount objects from a cpu array cache back to the
 * local node, preferring the shared array when it has room.  Called with
 * irqs disabled, used when the array cache overflows on free.
 */
static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
{
	int batchcount;
	struct kmem_list3 *l3;
	int node = numa_mem_id();

	batchcount = ac->batchcount;
#if DEBUG
	BUG_ON(!batchcount || batchcount > ac->avail);
#endif
	check_irq_off();
	l3 = cachep->nodelists[node];
	spin_lock(&l3->list_lock);
	if (l3->shared) {
		struct array_cache *shared_array = l3->shared;
		int max = shared_array->limit - shared_array->avail;
		if (max) {
			if (batchcount > max)
				batchcount = max;
			memcpy(&(shared_array->entry[shared_array->avail]),
			       ac->entry, sizeof(void *) * batchcount);
			shared_array->avail += batchcount;
			goto free_done;
		}
	}

	free_block(cachep, ac->entry, batchcount, node);
free_done:
#if STATS
	{
		int i = 0;
		struct list_head *p;

		p = l3->slabs_free.next;
		while (p != &(l3->slabs_free)) {
			struct slab *slabp;

			slabp = list_entry(p, struct slab, list);
			BUG_ON(slabp->inuse);

			i++;
			p = p->next;
		}
		STATS_SET_FREEABLE(cachep, i);
	}
#endif
	spin_unlock(&l3->list_lock);
	ac->avail -= batchcount;
	/* Keep the remaining objects packed at the front of the array. */
	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
}

/*
 * Release an obj back to its cache. If the obj has a constructed state, it must
 * be in this state _before_ it is released.  Called with disabled ints.
 */
static inline void __cache_free(struct kmem_cache *cachep, void *objp)
{
	struct array_cache *ac = cpu_cache_get(cachep);

	check_irq_off();
	kmemleak_free_recursive(objp, cachep->flags);
	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));

	kmemcheck_slab_free(cachep, objp, obj_size(cachep));

	/*
	 * Skip calling cache_free_alien() when the platform is not numa.
	 * This will avoid cache misses that happen while accessing slabp (which
	 * is per page memory  reference) to get nodeid.
Instead use a global * variable to skip the call, which is mostly likely to be present in * the cache. */ if (nr_online_nodes > 1 && cache_free_alien(cachep, objp)) return; if (likely(ac->avail < ac->limit)) { STATS_INC_FREEHIT(cachep); ac->entry[ac->avail++] = objp; return; } else { STATS_INC_FREEMISS(cachep); cache_flusharray(cachep, ac); ac->entry[ac->avail++] = objp; } } /** * kmem_cache_alloc - Allocate an object * @cachep: The cache to allocate from. * @flags: See kmalloc(). * * Allocate an object from this cache. The flags are only relevant * if the cache has no available objects. */ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) { void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0)); trace_kmem_cache_alloc(_RET_IP_, ret, obj_size(cachep), cachep->buffer_size, flags); return ret; } EXPORT_SYMBOL(kmem_cache_alloc); #ifdef CONFIG_TRACING void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags) { return __cache_alloc(cachep, flags, __builtin_return_address(0)); } EXPORT_SYMBOL(kmem_cache_alloc_notrace); #endif /** * kmem_ptr_validate - check if an untrusted pointer might be a slab entry. * @cachep: the cache we're checking against * @ptr: pointer to validate * * This verifies that the untrusted pointer looks sane; * it is _not_ a guarantee that the pointer is actually * part of the slab cache in question, but it at least * validates that the pointer can be dereferenced and * looks half-way sane. * * Currently only used for dentry validation. 
*/ int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr) { unsigned long size = cachep->buffer_size; struct page *page; if (unlikely(!kern_ptr_validate(ptr, size))) goto out; page = virt_to_page(ptr); if (unlikely(!PageSlab(page))) goto out; if (unlikely(page_get_cache(page) != cachep)) goto out; return 1; out: return 0; } #ifdef CONFIG_NUMA void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) { void *ret = __cache_alloc_node(cachep, flags, nodeid, __builtin_return_address(0)); trace_kmem_cache_alloc_node(_RET_IP_, ret, obj_size(cachep), cachep->buffer_size, flags, nodeid); return ret; } EXPORT_SYMBOL(kmem_cache_alloc_node); #ifdef CONFIG_TRACING void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep, gfp_t flags, int nodeid) { return __cache_alloc_node(cachep, flags, nodeid, __builtin_return_address(0)); } EXPORT_SYMBOL(kmem_cache_alloc_node_notrace); #endif static __always_inline void * __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller) { struct kmem_cache *cachep; void *ret; cachep = kmem_find_general_cachep(size, flags); if (unlikely(ZERO_OR_NULL_PTR(cachep))) return cachep; ret = kmem_cache_alloc_node_notrace(cachep, flags, node); trace_kmalloc_node((unsigned long) caller, ret, size, cachep->buffer_size, flags, node); return ret; } #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING) void *__kmalloc_node(size_t size, gfp_t flags, int node) { return __do_kmalloc_node(size, flags, node, __builtin_return_address(0)); } EXPORT_SYMBOL(__kmalloc_node); void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node, unsigned long caller) { return __do_kmalloc_node(size, flags, node, (void *)caller); } EXPORT_SYMBOL(__kmalloc_node_track_caller); #else void *__kmalloc_node(size_t size, gfp_t flags, int node) { return __do_kmalloc_node(size, flags, node, NULL); } EXPORT_SYMBOL(__kmalloc_node); #endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */ #endif /* CONFIG_NUMA */ /** * __do_kmalloc - allocate 
memory * @size: how many bytes of memory are required. * @flags: the type of memory to allocate (see kmalloc). * @caller: function caller for debug tracking of the caller */ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, void *caller) { struct kmem_cache *cachep; void *ret; /* If you want to save a few bytes .text space: replace * __ with kmem_. * Then kmalloc uses the uninlined functions instead of the inline * functions. */ cachep = __find_general_cachep(size, flags); if (unlikely(ZERO_OR_NULL_PTR(cachep))) return cachep; ret = __cache_alloc(cachep, flags, caller); trace_kmalloc((unsigned long) caller, ret, size, cachep->buffer_size, flags); return ret; } #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING) void *__kmalloc(size_t size, gfp_t flags) { return __do_kmalloc(size, flags, __builtin_return_address(0)); } EXPORT_SYMBOL(__kmalloc); void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller) { return __do_kmalloc(size, flags, (void *)caller); } EXPORT_SYMBOL(__kmalloc_track_caller); #else void *__kmalloc(size_t size, gfp_t flags) { return __do_kmalloc(size, flags, NULL); } EXPORT_SYMBOL(__kmalloc); #endif /** * kmem_cache_free - Deallocate an object * @cachep: The cache the allocation was from. * @objp: The previously allocated object. * * Free an object which was previously allocated from this * cache. */ void kmem_cache_free(struct kmem_cache *cachep, void *objp) { unsigned long flags; local_irq_save(flags); debug_check_no_locks_freed(objp, obj_size(cachep)); if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) debug_check_no_obj_freed(objp, obj_size(cachep)); __cache_free(cachep, objp); local_irq_restore(flags); trace_kmem_cache_free(_RET_IP_, objp); } EXPORT_SYMBOL(kmem_cache_free); /** * kfree - free previously allocated memory * @objp: pointer returned by kmalloc. * * If @objp is NULL, no operation is performed. * * Don't free memory not originally allocated by kmalloc() * or you will run into trouble. 
*/ void kfree(const void *objp) { struct kmem_cache *c; unsigned long flags; trace_kfree(_RET_IP_, objp); if (unlikely(ZERO_OR_NULL_PTR(objp))) return; local_irq_save(flags); kfree_debugcheck(objp); c = virt_to_cache(objp); debug_check_no_locks_freed(objp, obj_size(c)); debug_check_no_obj_freed(objp, obj_size(c)); __cache_free(c, (void *)objp); local_irq_restore(flags); } EXPORT_SYMBOL(kfree); unsigned int kmem_cache_size(struct kmem_cache *cachep) { return obj_size(cachep); } EXPORT_SYMBOL(kmem_cache_size); const char *kmem_cache_name(struct kmem_cache *cachep) { return cachep->name; } EXPORT_SYMBOL_GPL(kmem_cache_name); /* * This initializes kmem_list3 or resizes various caches for all nodes. */ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp) { int node; struct kmem_list3 *l3; struct array_cache *new_shared; struct array_cache **new_alien = NULL; for_each_online_node(node) { if (use_alien_caches) { new_alien = alloc_alien_cache(node, cachep->limit, gfp); if (!new_alien) goto fail; } new_shared = NULL; if (cachep->shared) { new_shared = alloc_arraycache(node, cachep->shared*cachep->batchcount, 0xbaadf00d, gfp); if (!new_shared) { free_alien_cache(new_alien); goto fail; } } l3 = cachep->nodelists[node]; if (l3) { struct array_cache *shared = l3->shared; spin_lock_irq(&l3->list_lock); if (shared) free_block(cachep, shared->entry, shared->avail, node); l3->shared = new_shared; if (!l3->alien) { l3->alien = new_alien; new_alien = NULL; } l3->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num; spin_unlock_irq(&l3->list_lock); kfree(shared); free_alien_cache(new_alien); continue; } l3 = kmalloc_node(sizeof(struct kmem_list3), gfp, node); if (!l3) { free_alien_cache(new_alien); kfree(new_shared); goto fail; } kmem_list3_init(l3); l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + ((unsigned long)cachep) % REAPTIMEOUT_LIST3; l3->shared = new_shared; l3->alien = new_alien; l3->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount + 
cachep->num; cachep->nodelists[node] = l3; } return 0; fail: if (!cachep->next.next) { /* Cache is not active yet. Roll back what we did */ node--; while (node >= 0) { if (cachep->nodelists[node]) { l3 = cachep->nodelists[node]; kfree(l3->shared); free_alien_cache(l3->alien); kfree(l3); cachep->nodelists[node] = NULL; } node--; } } return -ENOMEM; } struct ccupdate_struct { struct kmem_cache *cachep; struct array_cache *new[NR_CPUS]; }; static void do_ccupdate_local(void *info) { struct ccupdate_struct *new = info; struct array_cache *old; check_irq_off(); old = cpu_cache_get(new->cachep); new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()]; new->new[smp_processor_id()] = old; } /* Always called with the cache_chain_mutex held */ static int do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount, int shared, gfp_t gfp) { struct ccupdate_struct *new; int i; new = kzalloc(sizeof(*new), gfp); if (!new) return -ENOMEM; for_each_online_cpu(i) { new->new[i] = alloc_arraycache(cpu_to_mem(i), limit, batchcount, gfp); if (!new->new[i]) { for (i--; i >= 0; i--) kfree(new->new[i]); kfree(new); return -ENOMEM; } } new->cachep = cachep; on_each_cpu(do_ccupdate_local, (void *)new, 1); check_irq_on(); cachep->batchcount = batchcount; cachep->limit = limit; cachep->shared = shared; for_each_online_cpu(i) { struct array_cache *ccold = new->new[i]; if (!ccold) continue; spin_lock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock); free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i)); spin_unlock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock); kfree(ccold); } kfree(new); return alloc_kmemlist(cachep, gfp); } /* Called with cache_chain_mutex held always */ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) { int err; int limit, shared; /* * The head array serves three purposes: * - create a LIFO ordering, i.e. return objects that are cache-warm * - reduce the number of spinlock operations. 
* - reduce the number of linked list operations on the slab and * bufctl chains: array operations are cheaper. * The numbers are guessed, we should auto-tune as described by * Bonwick. */ if (cachep->buffer_size > 131072) limit = 1; else if (cachep->buffer_size > PAGE_SIZE) limit = 8; else if (cachep->buffer_size > 1024) limit = 24; else if (cachep->buffer_size > 256) limit = 54; else limit = 120; /* * CPU bound tasks (e.g. network routing) can exhibit cpu bound * allocation behaviour: Most allocs on one cpu, most free operations * on another cpu. For these cases, an efficient object passing between * cpus is necessary. This is provided by a shared array. The array * replaces Bonwick's magazine layer. * On uniprocessor, it's functionally equivalent (but less efficient) * to a larger limit. Thus disabled by default. */ shared = 0; if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1) shared = 8; #if DEBUG /* * With debugging enabled, large batchcount lead to excessively long * periods with disabled local interrupts. Limit the batchcount */ if (limit > 32) limit = 32; #endif err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared, gfp); if (err) printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n", cachep->name, -err); return err; } /* * Drain an array if it contains any elements taking the l3 lock only if * necessary. Note that the l3 listlock also protects the array_cache * if drain_array() is used on the shared array. */ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, struct array_cache *ac, int force, int node) { int tofree; if (!ac || !ac->avail) return; if (ac->touched && !force) { ac->touched = 0; } else { spin_lock_irq(&l3->list_lock); if (ac->avail) { tofree = force ? 
ac->avail : (ac->limit + 4) / 5; if (tofree > ac->avail) tofree = (ac->avail + 1) / 2; free_block(cachep, ac->entry, tofree, node); ac->avail -= tofree; memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail); } spin_unlock_irq(&l3->list_lock); } } /** * cache_reap - Reclaim memory from caches. * @w: work descriptor * * Called from workqueue/eventd every few seconds. * Purpose: * - clear the per-cpu caches for this CPU. * - return freeable pages to the main free memory pool. * * If we cannot acquire the cache chain mutex then just give up - we'll try * again on the next iteration. */ static void cache_reap(struct work_struct *w) { struct kmem_cache *searchp; struct kmem_list3 *l3; int node = numa_mem_id(); struct delayed_work *work = to_delayed_work(w); if (!mutex_trylock(&cache_chain_mutex)) /* Give up. Setup the next iteration. */ goto out; list_for_each_entry(searchp, &cache_chain, next) { check_irq_on(); /* * We only take the l3 lock if absolutely necessary and we * have established with reasonable certainty that * we can do some work if the lock was obtained. */ l3 = searchp->nodelists[node]; reap_alien(searchp, l3); drain_array(searchp, l3, cpu_cache_get(searchp), 0, node); /* * These are racy checks but it does not matter * if we skip one check or scan twice. 
*/ if (time_after(l3->next_reap, jiffies)) goto next; l3->next_reap = jiffies + REAPTIMEOUT_LIST3; drain_array(searchp, l3, l3->shared, 0, node); if (l3->free_touched) l3->free_touched = 0; else { int freed; freed = drain_freelist(searchp, l3, (l3->free_limit + 5 * searchp->num - 1) / (5 * searchp->num)); STATS_ADD_REAPED(searchp, freed); } next: cond_resched(); } check_irq_on(); mutex_unlock(&cache_chain_mutex); next_reap_node(); out: /* Set up the next iteration */ schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC)); } #ifdef CONFIG_SLABINFO static void print_slabinfo_header(struct seq_file *m) { /* * Output format version, so at least we can change it * without _too_ many complaints. */ #if STATS seq_puts(m, "slabinfo - version: 2.1 (statistics)\n"); #else seq_puts(m, "slabinfo - version: 2.1\n"); #endif seq_puts(m, "# name <active_objs> <num_objs> <objsize> " "<objperslab> <pagesperslab>"); seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>"); seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>"); #if STATS seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> " "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>"); seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>"); #endif seq_putc(m, '\n'); } static void *s_start(struct seq_file *m, loff_t *pos) { loff_t n = *pos; mutex_lock(&cache_chain_mutex); if (!n) print_slabinfo_header(m); return seq_list_start(&cache_chain, *pos); } static void *s_next(struct seq_file *m, void *p, loff_t *pos) { return seq_list_next(p, &cache_chain, pos); } static void s_stop(struct seq_file *m, void *p) { mutex_unlock(&cache_chain_mutex); } static int s_show(struct seq_file *m, void *p) { struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next); struct slab *slabp; unsigned long active_objs; unsigned long num_objs; unsigned long active_slabs = 0; unsigned long num_slabs, free_objects = 0, shared_avail = 0; const char *name; char *error = 
NULL; int node; struct kmem_list3 *l3; active_objs = 0; num_slabs = 0; for_each_online_node(node) { l3 = cachep->nodelists[node]; if (!l3) continue; check_irq_on(); spin_lock_irq(&l3->list_lock); list_for_each_entry(slabp, &l3->slabs_full, list) { if (slabp->inuse != cachep->num && !error) error = "slabs_full accounting error"; active_objs += cachep->num; active_slabs++; } list_for_each_entry(slabp, &l3->slabs_partial, list) { if (slabp->inuse == cachep->num && !error) error = "slabs_partial inuse accounting error"; if (!slabp->inuse && !error) error = "slabs_partial/inuse accounting error"; active_objs += slabp->inuse; active_slabs++; } list_for_each_entry(slabp, &l3->slabs_free, list) { if (slabp->inuse && !error) error = "slabs_free/inuse accounting error"; num_slabs++; } free_objects += l3->free_objects; if (l3->shared) shared_avail += l3->shared->avail; spin_unlock_irq(&l3->list_lock); } num_slabs += active_slabs; num_objs = num_slabs * cachep->num; if (num_objs - active_objs != free_objects && !error) error = "free_objects accounting error"; name = cachep->name; if (error) printk(KERN_ERR "slab: cache %s error: %s\n", name, error); seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", name, active_objs, num_objs, cachep->buffer_size, cachep->num, (1 << cachep->gfporder)); seq_printf(m, " : tunables %4u %4u %4u", cachep->limit, cachep->batchcount, cachep->shared); seq_printf(m, " : slabdata %6lu %6lu %6lu", active_slabs, num_slabs, shared_avail); #if STATS { /* list3 stats */ unsigned long high = cachep->high_mark; unsigned long allocs = cachep->num_allocations; unsigned long grown = cachep->grown; unsigned long reaped = cachep->reaped; unsigned long errors = cachep->errors; unsigned long max_freeable = cachep->max_freeable; unsigned long node_allocs = cachep->node_allocs; unsigned long node_frees = cachep->node_frees; unsigned long overflows = cachep->node_overflow; seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu " "%4lu %4lu %4lu %4lu %4lu", allocs, high, grown, 
reaped, errors, max_freeable, node_allocs, node_frees, overflows); } /* cpu stats */ { unsigned long allochit = atomic_read(&cachep->allochit); unsigned long allocmiss = atomic_read(&cachep->allocmiss); unsigned long freehit = atomic_read(&cachep->freehit); unsigned long freemiss = atomic_read(&cachep->freemiss); seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu", allochit, allocmiss, freehit, freemiss); } #endif seq_putc(m, '\n'); return 0; } /* * slabinfo_op - iterator that generates /proc/slabinfo * * Output layout: * cache-name * num-active-objs * total-objs * object size * num-active-slabs * total-slabs * num-pages-per-slab * + further values on SMP and with statistics enabled */ static const struct seq_operations slabinfo_op = { .start = s_start, .next = s_next, .stop = s_stop, .show = s_show, }; #define MAX_SLABINFO_WRITE 128 /** * slabinfo_write - Tuning for the slab allocator * @file: unused * @buffer: user buffer * @count: data length * @ppos: unused */ ssize_t slabinfo_write(struct file *file, const char __user * buffer, size_t count, loff_t *ppos) { char kbuf[MAX_SLABINFO_WRITE + 1], *tmp; int limit, batchcount, shared, res; struct kmem_cache *cachep; if (count > MAX_SLABINFO_WRITE) return -EINVAL; if (copy_from_user(&kbuf, buffer, count)) return -EFAULT; kbuf[MAX_SLABINFO_WRITE] = '\0'; tmp = strchr(kbuf, ' '); if (!tmp) return -EINVAL; *tmp = '\0'; tmp++; if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3) return -EINVAL; /* Find the cache in the chain of caches. 
*/ mutex_lock(&cache_chain_mutex); res = -EINVAL; list_for_each_entry(cachep, &cache_chain, next) { if (!strcmp(cachep->name, kbuf)) { if (limit < 1 || batchcount < 1 || batchcount > limit || shared < 0) { res = 0; } else { res = do_tune_cpucache(cachep, limit, batchcount, shared, GFP_KERNEL); } break; } } mutex_unlock(&cache_chain_mutex); if (res >= 0) res = count; return res; } static int slabinfo_open(struct inode *inode, struct file *file) { return seq_open(file, &slabinfo_op); } static const struct file_operations proc_slabinfo_operations = { .open = slabinfo_open, .read = seq_read, .write = slabinfo_write, .llseek = seq_lseek, .release = seq_release, }; #ifdef CONFIG_DEBUG_SLAB_LEAK static void *leaks_start(struct seq_file *m, loff_t *pos) { mutex_lock(&cache_chain_mutex); return seq_list_start(&cache_chain, *pos); } static inline int add_caller(unsigned long *n, unsigned long v) { unsigned long *p; int l; if (!v) return 1; l = n[1]; p = n + 2; while (l) { int i = l/2; unsigned long *q = p + 2 * i; if (*q == v) { q[1]++; return 1; } if (*q > v) { l = i; } else { p = q + 2; l -= i + 1; } } if (++n[1] == n[0]) return 0; memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n)); p[0] = v; p[1] = 1; return 1; } static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s) { void *p; int i; if (n[0] == n[1]) return; for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) { if (slab_bufctl(s)[i] != BUFCTL_ACTIVE) continue; if (!add_caller(n, (unsigned long)*dbg_userword(c, p))) return; } } static void show_symbol(struct seq_file *m, unsigned long address) { #ifdef CONFIG_KALLSYMS unsigned long offset, size; char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN]; if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) { seq_printf(m, "%s+%#lx/%#lx", name, offset, size); if (modname[0]) seq_printf(m, " [%s]", modname); return; } #endif seq_printf(m, "%p", (void *)address); } static int leaks_show(struct seq_file 
*m, void *p) { struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next); struct slab *slabp; struct kmem_list3 *l3; const char *name; unsigned long *n = m->private; int node; int i; if (!(cachep->flags & SLAB_STORE_USER)) return 0; if (!(cachep->flags & SLAB_RED_ZONE)) return 0; /* OK, we can do it */ n[1] = 0; for_each_online_node(node) { l3 = cachep->nodelists[node]; if (!l3) continue; check_irq_on(); spin_lock_irq(&l3->list_lock); list_for_each_entry(slabp, &l3->slabs_full, list) handle_slab(n, cachep, slabp); list_for_each_entry(slabp, &l3->slabs_partial, list) handle_slab(n, cachep, slabp); spin_unlock_irq(&l3->list_lock); } name = cachep->name; if (n[0] == n[1]) { /* Increase the buffer size */ mutex_unlock(&cache_chain_mutex); m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL); if (!m->private) { /* Too bad, we are really out */ m->private = n; mutex_lock(&cache_chain_mutex); return -ENOMEM; } *(unsigned long *)m->private = n[0] * 2; kfree(n); mutex_lock(&cache_chain_mutex); /* Now make sure this entry will be retried */ m->count = m->size; return 0; } for (i = 0; i < n[1]; i++) { seq_printf(m, "%s: %lu ", name, n[2*i+3]); show_symbol(m, n[2*i+2]); seq_putc(m, '\n'); } return 0; } static const struct seq_operations slabstats_op = { .start = leaks_start, .next = s_next, .stop = s_stop, .show = leaks_show, }; static int slabstats_open(struct inode *inode, struct file *file) { unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL); int ret = -ENOMEM; if (n) { ret = seq_open(file, &slabstats_op); if (!ret) { struct seq_file *m = file->private_data; *n = PAGE_SIZE / (2 * sizeof(unsigned long)); m->private = n; n = NULL; } kfree(n); } return ret; } static const struct file_operations proc_slabstats_operations = { .open = slabstats_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; #endif static int __init slab_proc_init(void) { proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations); #ifdef 
CONFIG_DEBUG_SLAB_LEAK proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations); #endif return 0; } module_init(slab_proc_init); #endif /** * ksize - get the actual amount of memory allocated for a given object * @objp: Pointer to the object * * kmalloc may internally round up allocations and return more memory * than requested. ksize() can be used to determine the actual amount of * memory allocated. The caller may use this additional memory, even though * a smaller amount of memory was initially specified with the kmalloc call. * The caller must guarantee that objp points to a valid object previously * allocated with either kmalloc() or kmem_cache_alloc(). The object * must not be freed during the duration of the call. */ size_t ksize(const void *objp) { BUG_ON(!objp); if (unlikely(objp == ZERO_SIZE_PTR)) return 0; return obj_size(virt_to_cache(objp)); } EXPORT_SYMBOL(ksize);
gpl-2.0
hafidzduddin/codina
arch/arm/mach-davinci/board-sffsdr.c
499
4535
/* * Lyrtech SFFSDR board support. * * Copyright (C) 2008 Philip Balister, OpenSDR <philip@opensdr.com> * Copyright (C) 2008 Lyrtech <www.lyrtech.com> * * Based on DV-EVM platform, original copyright follows: * * Copyright (C) 2007 MontaVista Software, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/i2c.h> #include <linux/i2c/at24.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/flash.h> #include <mach/dm644x.h> #include <mach/common.h> #include <mach/i2c.h> #include <mach/serial.h> #include <mach/mux.h> #include <mach/usb.h> #define SFFSDR_PHY_ID "0:01" static struct mtd_partition davinci_sffsdr_nandflash_partition[] = { /* U-Boot Environment: Block 0 * UBL: Block 1 * U-Boot: Blocks 6-7 (256 kb) * Integrity Kernel: Blocks 8-31 (3 Mb) * Integrity Data: Blocks 100-END */ { .name = "Linux Kernel", .offset = 32 * SZ_128K, .size = 16 * SZ_128K, /* 2 Mb */ .mask_flags = MTD_WRITEABLE, /* Force read-only */ }, { .name = "Linux ROOT", .offset = MTDPART_OFS_APPEND, .size = 256 * SZ_128K, /* 32 Mb */ .mask_flags = 0, /* R/W */ }, }; static struct flash_platform_data davinci_sffsdr_nandflash_data = { .parts = davinci_sffsdr_nandflash_partition, .nr_parts 
= ARRAY_SIZE(davinci_sffsdr_nandflash_partition), }; static struct resource davinci_sffsdr_nandflash_resource[] = { { .start = DM644X_ASYNC_EMIF_DATA_CE0_BASE, .end = DM644X_ASYNC_EMIF_DATA_CE0_BASE + SZ_16M - 1, .flags = IORESOURCE_MEM, }, { .start = DM644X_ASYNC_EMIF_CONTROL_BASE, .end = DM644X_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device davinci_sffsdr_nandflash_device = { .name = "davinci_nand", /* Name of driver */ .id = 0, .dev = { .platform_data = &davinci_sffsdr_nandflash_data, }, .num_resources = ARRAY_SIZE(davinci_sffsdr_nandflash_resource), .resource = davinci_sffsdr_nandflash_resource, }; static struct at24_platform_data eeprom_info = { .byte_len = (64*1024) / 8, .page_size = 32, .flags = AT24_FLAG_ADDR16, }; static struct i2c_board_info __initdata i2c_info[] = { { I2C_BOARD_INFO("24lc64", 0x50), .platform_data = &eeprom_info, }, /* Other I2C devices: * MSP430, addr 0x23 (not used) * PCA9543, addr 0x70 (setup done by U-Boot) * ADS7828, addr 0x48 (ADC for voltage monitoring.) */ }; static struct davinci_i2c_platform_data i2c_pdata = { .bus_freq = 20 /* kHz */, .bus_delay = 100 /* usec */, }; static void __init sffsdr_init_i2c(void) { davinci_init_i2c(&i2c_pdata); i2c_register_board_info(1, i2c_info, ARRAY_SIZE(i2c_info)); } static struct platform_device *davinci_sffsdr_devices[] __initdata = { &davinci_sffsdr_nandflash_device, }; static struct davinci_uart_config uart_config __initdata = { .enabled_uarts = (1 << 0), }; static void __init davinci_sffsdr_map_io(void) { dm644x_init(); } static __init void davinci_sffsdr_init(void) { struct davinci_soc_info *soc_info = &davinci_soc_info; platform_add_devices(davinci_sffsdr_devices, ARRAY_SIZE(davinci_sffsdr_devices)); sffsdr_init_i2c(); davinci_serial_init(&uart_config); soc_info->emac_pdata->phy_id = SFFSDR_PHY_ID; davinci_setup_usb(0, 0); /* We support only peripheral mode. 
*/ /* mux VLYNQ pins */ davinci_cfg_reg(DM644X_VLYNQEN); davinci_cfg_reg(DM644X_VLYNQWD); } MACHINE_START(SFFSDR, "Lyrtech SFFSDR") /* Maintainer: Hugo Villeneuve hugo.villeneuve@lyrtech.com */ .boot_params = (DAVINCI_DDR_BASE + 0x100), .map_io = davinci_sffsdr_map_io, .init_irq = davinci_irq_init, .timer = &davinci_timer, .init_machine = davinci_sffsdr_init, .dma_zone_size = SZ_128M, MACHINE_END
gpl-2.0
helio-x20/linux
drivers/clk/clk-nomadik.c
499
13864
/* * Nomadik clock implementation * Copyright (C) 2013 ST-Ericsson AB * License terms: GNU General Public License (GPL) version 2 * Author: Linus Walleij <linus.walleij@linaro.org> */ #define pr_fmt(fmt) "Nomadik SRC clocks: " fmt #include <linux/bitops.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/io.h> #include <linux/clk-provider.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/spinlock.h> #include <linux/reboot.h> /* * The Nomadik clock tree is described in the STN8815A12 DB V4.2 * reference manual for the chip, page 94 ff. * Clock IDs are in the STn8815 Reference Manual table 3, page 27. */ #define SRC_CR 0x00U #define SRC_CR_T0_ENSEL BIT(15) #define SRC_CR_T1_ENSEL BIT(17) #define SRC_CR_T2_ENSEL BIT(19) #define SRC_CR_T3_ENSEL BIT(21) #define SRC_CR_T4_ENSEL BIT(23) #define SRC_CR_T5_ENSEL BIT(25) #define SRC_CR_T6_ENSEL BIT(27) #define SRC_CR_T7_ENSEL BIT(29) #define SRC_XTALCR 0x0CU #define SRC_XTALCR_XTALTIMEN BIT(20) #define SRC_XTALCR_SXTALDIS BIT(19) #define SRC_XTALCR_MXTALSTAT BIT(2) #define SRC_XTALCR_MXTALEN BIT(1) #define SRC_XTALCR_MXTALOVER BIT(0) #define SRC_PLLCR 0x10U #define SRC_PLLCR_PLLTIMEN BIT(29) #define SRC_PLLCR_PLL2EN BIT(28) #define SRC_PLLCR_PLL1STAT BIT(2) #define SRC_PLLCR_PLL1EN BIT(1) #define SRC_PLLCR_PLL1OVER BIT(0) #define SRC_PLLFR 0x14U #define SRC_PCKEN0 0x24U #define SRC_PCKDIS0 0x28U #define SRC_PCKENSR0 0x2CU #define SRC_PCKSR0 0x30U #define SRC_PCKEN1 0x34U #define SRC_PCKDIS1 0x38U #define SRC_PCKENSR1 0x3CU #define SRC_PCKSR1 0x40U /* Lock protecting the SRC_CR register */ static DEFINE_SPINLOCK(src_lock); /* Base address of the SRC */ static void __iomem *src_base; static int nomadik_clk_reboot_handler(struct notifier_block *this, unsigned long code, void *unused) { u32 val; /* The main chrystal need to be enabled for reboot to work */ val = readl(src_base + SRC_XTALCR); val &= ~SRC_XTALCR_MXTALOVER; val |= 
SRC_XTALCR_MXTALEN; pr_crit("force-enabling MXTALO\n"); writel(val, src_base + SRC_XTALCR); return NOTIFY_OK; } static struct notifier_block nomadik_clk_reboot_notifier = { .notifier_call = nomadik_clk_reboot_handler, }; static const struct of_device_id nomadik_src_match[] __initconst = { { .compatible = "stericsson,nomadik-src" }, { /* sentinel */ } }; static void __init nomadik_src_init(void) { struct device_node *np; u32 val; np = of_find_matching_node(NULL, nomadik_src_match); if (!np) { pr_crit("no matching node for SRC, aborting clock init\n"); return; } src_base = of_iomap(np, 0); if (!src_base) { pr_err("%s: must have src parent node with REGS (%s)\n", __func__, np->name); return; } /* Set all timers to use the 2.4 MHz TIMCLK */ val = readl(src_base + SRC_CR); val |= SRC_CR_T0_ENSEL; val |= SRC_CR_T1_ENSEL; val |= SRC_CR_T2_ENSEL; val |= SRC_CR_T3_ENSEL; val |= SRC_CR_T4_ENSEL; val |= SRC_CR_T5_ENSEL; val |= SRC_CR_T6_ENSEL; val |= SRC_CR_T7_ENSEL; writel(val, src_base + SRC_CR); val = readl(src_base + SRC_XTALCR); pr_info("SXTALO is %s\n", (val & SRC_XTALCR_SXTALDIS) ? "disabled" : "enabled"); pr_info("MXTAL is %s\n", (val & SRC_XTALCR_MXTALSTAT) ? 
"enabled" : "disabled"); if (of_property_read_bool(np, "disable-sxtalo")) { /* The machine uses an external oscillator circuit */ val |= SRC_XTALCR_SXTALDIS; pr_info("disabling SXTALO\n"); } if (of_property_read_bool(np, "disable-mxtalo")) { /* Disable this too: also run by external oscillator */ val |= SRC_XTALCR_MXTALOVER; val &= ~SRC_XTALCR_MXTALEN; pr_info("disabling MXTALO\n"); } writel(val, src_base + SRC_XTALCR); register_reboot_notifier(&nomadik_clk_reboot_notifier); } /** * struct clk_pll1 - Nomadik PLL1 clock * @hw: corresponding clock hardware entry * @id: PLL instance: 1 or 2 */ struct clk_pll { struct clk_hw hw; int id; }; /** * struct clk_src - Nomadik src clock * @hw: corresponding clock hardware entry * @id: the clock ID * @group1: true if the clock is in group1, else it is in group0 * @clkbit: bit 0...31 corresponding to the clock in each clock register */ struct clk_src { struct clk_hw hw; int id; bool group1; u32 clkbit; }; #define to_pll(_hw) container_of(_hw, struct clk_pll, hw) #define to_src(_hw) container_of(_hw, struct clk_src, hw) static int pll_clk_enable(struct clk_hw *hw) { struct clk_pll *pll = to_pll(hw); u32 val; spin_lock(&src_lock); val = readl(src_base + SRC_PLLCR); if (pll->id == 1) { if (val & SRC_PLLCR_PLL1OVER) { val |= SRC_PLLCR_PLL1EN; writel(val, src_base + SRC_PLLCR); } } else if (pll->id == 2) { val |= SRC_PLLCR_PLL2EN; writel(val, src_base + SRC_PLLCR); } spin_unlock(&src_lock); return 0; } static void pll_clk_disable(struct clk_hw *hw) { struct clk_pll *pll = to_pll(hw); u32 val; spin_lock(&src_lock); val = readl(src_base + SRC_PLLCR); if (pll->id == 1) { if (val & SRC_PLLCR_PLL1OVER) { val &= ~SRC_PLLCR_PLL1EN; writel(val, src_base + SRC_PLLCR); } } else if (pll->id == 2) { val &= ~SRC_PLLCR_PLL2EN; writel(val, src_base + SRC_PLLCR); } spin_unlock(&src_lock); } static int pll_clk_is_enabled(struct clk_hw *hw) { struct clk_pll *pll = to_pll(hw); u32 val; val = readl(src_base + SRC_PLLCR); if (pll->id == 1) { if (val & 
SRC_PLLCR_PLL1OVER) return !!(val & SRC_PLLCR_PLL1EN); } else if (pll->id == 2) { return !!(val & SRC_PLLCR_PLL2EN); } return 1; } static unsigned long pll_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct clk_pll *pll = to_pll(hw); u32 val; val = readl(src_base + SRC_PLLFR); if (pll->id == 1) { u8 mul; u8 div; mul = (val >> 8) & 0x3FU; mul += 2; div = val & 0x07U; return (parent_rate * mul) >> div; } if (pll->id == 2) { u8 mul; mul = (val >> 24) & 0x3FU; mul += 2; return (parent_rate * mul); } /* Unknown PLL */ return 0; } static const struct clk_ops pll_clk_ops = { .enable = pll_clk_enable, .disable = pll_clk_disable, .is_enabled = pll_clk_is_enabled, .recalc_rate = pll_clk_recalc_rate, }; static struct clk * __init pll_clk_register(struct device *dev, const char *name, const char *parent_name, u32 id) { struct clk *clk; struct clk_pll *pll; struct clk_init_data init; if (id != 1 && id != 2) { pr_err("%s: the Nomadik has only PLL 1 & 2\n", __func__); return ERR_PTR(-EINVAL); } pll = kzalloc(sizeof(*pll), GFP_KERNEL); if (!pll) { pr_err("%s: could not allocate PLL clk\n", __func__); return ERR_PTR(-ENOMEM); } init.name = name; init.ops = &pll_clk_ops; init.parent_names = (parent_name ? &parent_name : NULL); init.num_parents = (parent_name ? 1 : 0); pll->hw.init = &init; pll->id = id; pr_debug("register PLL1 clock \"%s\"\n", name); clk = clk_register(dev, &pll->hw); if (IS_ERR(clk)) kfree(pll); return clk; } /* * The Nomadik SRC clocks are gated, but not in the sense that * you read-modify-write a register. Instead there are separate * clock enable and clock disable registers. Writing a '1' bit in * the enable register for a certain clock ungates that clock without * affecting the other clocks. The disable register works the opposite * way. */ static int src_clk_enable(struct clk_hw *hw) { struct clk_src *sclk = to_src(hw); u32 enreg = sclk->group1 ? SRC_PCKEN1 : SRC_PCKEN0; u32 sreg = sclk->group1 ? 
SRC_PCKSR1 : SRC_PCKSR0; writel(sclk->clkbit, src_base + enreg); /* spin until enabled */ while (!(readl(src_base + sreg) & sclk->clkbit)) cpu_relax(); return 0; } static void src_clk_disable(struct clk_hw *hw) { struct clk_src *sclk = to_src(hw); u32 disreg = sclk->group1 ? SRC_PCKDIS1 : SRC_PCKDIS0; u32 sreg = sclk->group1 ? SRC_PCKSR1 : SRC_PCKSR0; writel(sclk->clkbit, src_base + disreg); /* spin until disabled */ while (readl(src_base + sreg) & sclk->clkbit) cpu_relax(); } static int src_clk_is_enabled(struct clk_hw *hw) { struct clk_src *sclk = to_src(hw); u32 sreg = sclk->group1 ? SRC_PCKSR1 : SRC_PCKSR0; u32 val = readl(src_base + sreg); return !!(val & sclk->clkbit); } static unsigned long src_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { return parent_rate; } static const struct clk_ops src_clk_ops = { .enable = src_clk_enable, .disable = src_clk_disable, .is_enabled = src_clk_is_enabled, .recalc_rate = src_clk_recalc_rate, }; static struct clk * __init src_clk_register(struct device *dev, const char *name, const char *parent_name, u8 id) { struct clk *clk; struct clk_src *sclk; struct clk_init_data init; sclk = kzalloc(sizeof(*sclk), GFP_KERNEL); if (!sclk) { pr_err("could not allocate SRC clock %s\n", name); return ERR_PTR(-ENOMEM); } init.name = name; init.ops = &src_clk_ops; /* Do not force-disable the static SDRAM controller */ if (id == 2) init.flags = CLK_IGNORE_UNUSED; else init.flags = 0; init.parent_names = (parent_name ? &parent_name : NULL); init.num_parents = (parent_name ? 
1 : 0); sclk->hw.init = &init; sclk->id = id; sclk->group1 = (id > 31); sclk->clkbit = BIT(id & 0x1f); pr_debug("register clock \"%s\" ID: %d group: %d bits: %08x\n", name, id, sclk->group1, sclk->clkbit); clk = clk_register(dev, &sclk->hw); if (IS_ERR(clk)) kfree(sclk); return clk; } #ifdef CONFIG_DEBUG_FS static u32 src_pcksr0_boot; static u32 src_pcksr1_boot; static const char * const src_clk_names[] = { "HCLKDMA0 ", "HCLKSMC ", "HCLKSDRAM ", "HCLKDMA1 ", "HCLKCLCD ", "PCLKIRDA ", "PCLKSSP ", "PCLKUART0 ", "PCLKSDI ", "PCLKI2C0 ", "PCLKI2C1 ", "PCLKUART1 ", "PCLMSP0 ", "HCLKUSB ", "HCLKDIF ", "HCLKSAA ", "HCLKSVA ", "PCLKHSI ", "PCLKXTI ", "PCLKUART2 ", "PCLKMSP1 ", "PCLKMSP2 ", "PCLKOWM ", "HCLKHPI ", "PCLKSKE ", "PCLKHSEM ", "HCLK3D ", "HCLKHASH ", "HCLKCRYP ", "PCLKMSHC ", "HCLKUSBM ", "HCLKRNG ", "RESERVED ", "RESERVED ", "RESERVED ", "RESERVED ", "CLDCLK ", "IRDACLK ", "SSPICLK ", "UART0CLK ", "SDICLK ", "I2C0CLK ", "I2C1CLK ", "UART1CLK ", "MSPCLK0 ", "USBCLK ", "DIFCLK ", "IPI2CCLK ", "IPBMCCLK ", "HSICLKRX ", "HSICLKTX ", "UART2CLK ", "MSPCLK1 ", "MSPCLK2 ", "OWMCLK ", "RESERVED ", "SKECLK ", "RESERVED ", "3DCLK ", "PCLKMSP3 ", "MSPCLK3 ", "MSHCCLK ", "USBMCLK ", "RNGCCLK ", }; static int nomadik_src_clk_show(struct seq_file *s, void *what) { int i; u32 src_pcksr0 = readl(src_base + SRC_PCKSR0); u32 src_pcksr1 = readl(src_base + SRC_PCKSR1); u32 src_pckensr0 = readl(src_base + SRC_PCKENSR0); u32 src_pckensr1 = readl(src_base + SRC_PCKENSR1); seq_printf(s, "Clock: Boot: Now: Request: ASKED:\n"); for (i = 0; i < ARRAY_SIZE(src_clk_names); i++) { u32 pcksrb = (i < 0x20) ? src_pcksr0_boot : src_pcksr1_boot; u32 pcksr = (i < 0x20) ? src_pcksr0 : src_pcksr1; u32 pckreq = (i < 0x20) ? src_pckensr0 : src_pckensr1; u32 mask = BIT(i & 0x1f); seq_printf(s, "%s %s %s %s\n", src_clk_names[i], (pcksrb & mask) ? "on " : "off", (pcksr & mask) ? "on " : "off", (pckreq & mask) ? 
"on " : "off"); } return 0; } static int nomadik_src_clk_open(struct inode *inode, struct file *file) { return single_open(file, nomadik_src_clk_show, NULL); } static const struct file_operations nomadik_src_clk_debugfs_ops = { .open = nomadik_src_clk_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init nomadik_src_clk_init_debugfs(void) { /* Vital for multiplatform */ if (!src_base) return -ENODEV; src_pcksr0_boot = readl(src_base + SRC_PCKSR0); src_pcksr1_boot = readl(src_base + SRC_PCKSR1); debugfs_create_file("nomadik-src-clk", S_IFREG | S_IRUGO, NULL, NULL, &nomadik_src_clk_debugfs_ops); return 0; } device_initcall(nomadik_src_clk_init_debugfs); #endif static void __init of_nomadik_pll_setup(struct device_node *np) { struct clk *clk = ERR_PTR(-EINVAL); const char *clk_name = np->name; const char *parent_name; u32 pll_id; if (!src_base) nomadik_src_init(); if (of_property_read_u32(np, "pll-id", &pll_id)) { pr_err("%s: PLL \"%s\" missing pll-id property\n", __func__, clk_name); return; } parent_name = of_clk_get_parent_name(np, 0); clk = pll_clk_register(NULL, clk_name, parent_name, pll_id); if (!IS_ERR(clk)) of_clk_add_provider(np, of_clk_src_simple_get, clk); } CLK_OF_DECLARE(nomadik_pll_clk, "st,nomadik-pll-clock", of_nomadik_pll_setup); static void __init of_nomadik_hclk_setup(struct device_node *np) { struct clk *clk = ERR_PTR(-EINVAL); const char *clk_name = np->name; const char *parent_name; if (!src_base) nomadik_src_init(); parent_name = of_clk_get_parent_name(np, 0); /* * The HCLK divides PLL1 with 1 (passthru), 2, 3 or 4. 
*/ clk = clk_register_divider(NULL, clk_name, parent_name, 0, src_base + SRC_CR, 13, 2, CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, &src_lock); if (!IS_ERR(clk)) of_clk_add_provider(np, of_clk_src_simple_get, clk); } CLK_OF_DECLARE(nomadik_hclk_clk, "st,nomadik-hclk-clock", of_nomadik_hclk_setup); static void __init of_nomadik_src_clk_setup(struct device_node *np) { struct clk *clk = ERR_PTR(-EINVAL); const char *clk_name = np->name; const char *parent_name; u32 clk_id; if (!src_base) nomadik_src_init(); if (of_property_read_u32(np, "clock-id", &clk_id)) { pr_err("%s: SRC clock \"%s\" missing clock-id property\n", __func__, clk_name); return; } parent_name = of_clk_get_parent_name(np, 0); clk = src_clk_register(NULL, clk_name, parent_name, clk_id); if (!IS_ERR(clk)) of_clk_add_provider(np, of_clk_src_simple_get, clk); } CLK_OF_DECLARE(nomadik_src_clk, "st,nomadik-src-clock", of_nomadik_src_clk_setup);
gpl-2.0
perpe/EasyPad_971_Dual_Core
drivers/staging/msm/mddi_sharp.c
3059
25279
/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ #include "msm_fb.h" #include "mddihost.h" #include "mddihosti.h" #define SHARP_QVGA_PRIM 1 #define SHARP_128X128_SECD 2 extern uint32 mddi_host_core_version; static boolean mddi_debug_prim_wait = FALSE; static boolean mddi_sharp_vsync_wake = TRUE; static boolean mddi_sharp_monitor_refresh_value = TRUE; static boolean mddi_sharp_report_refresh_measurements = FALSE; static uint32 mddi_sharp_rows_per_second = 13830; /* 5200000/376 */ static uint32 mddi_sharp_rows_per_refresh = 338; static uint32 mddi_sharp_usecs_per_refresh = 24440; /* (376+338)/5200000 */ static boolean mddi_sharp_debug_60hz_refresh = FALSE; extern mddi_gpio_info_type mddi_gpio; extern boolean mddi_vsync_detect_enabled; static msm_fb_vsync_handler_type mddi_sharp_vsync_handler; static void *mddi_sharp_vsync_handler_arg; static uint16 mddi_sharp_vsync_attempts; static void mddi_sharp_prim_lcd_init(void); static void mddi_sharp_sub_lcd_init(void); static void mddi_sharp_lcd_set_backlight(struct msm_fb_data_type *mfd); static void mddi_sharp_vsync_set_handler(msm_fb_vsync_handler_type handler, void *); static void mddi_sharp_lcd_vsync_detected(boolean detected); static struct msm_panel_common_pdata *mddi_sharp_pdata; #define REG_SYSCTL 0x0000 #define REG_INTR 0x0006 #define 
REG_CLKCNF 0x000C #define REG_CLKDIV1 0x000E #define REG_CLKDIV2 0x0010 #define REG_GIOD 0x0040 #define REG_GIOA 0x0042 #define REG_AGM 0x010A #define REG_FLFT 0x0110 #define REG_FRGT 0x0112 #define REG_FTOP 0x0114 #define REG_FBTM 0x0116 #define REG_FSTRX 0x0118 #define REG_FSTRY 0x011A #define REG_VRAM 0x0202 #define REG_SSDCTL 0x0330 #define REG_SSD0 0x0332 #define REG_PSTCTL1 0x0400 #define REG_PSTCTL2 0x0402 #define REG_PTGCTL 0x042A #define REG_PTHP 0x042C #define REG_PTHB 0x042E #define REG_PTHW 0x0430 #define REG_PTHF 0x0432 #define REG_PTVP 0x0434 #define REG_PTVB 0x0436 #define REG_PTVW 0x0438 #define REG_PTVF 0x043A #define REG_VBLKS 0x0458 #define REG_VBLKE 0x045A #define REG_SUBCTL 0x0700 #define REG_SUBTCMD 0x0702 #define REG_SUBTCMDD 0x0704 #define REG_REVBYTE 0x0A02 #define REG_REVCNT 0x0A04 #define REG_REVATTR 0x0A06 #define REG_REVFMT 0x0A08 #define SHARP_SUB_UNKNOWN 0xffffffff #define SHARP_SUB_HYNIX 1 #define SHARP_SUB_ROHM 2 static uint32 sharp_subpanel_type = SHARP_SUB_UNKNOWN; static void sub_through_write(int sub_rs, uint32 sub_data) { mddi_queue_register_write(REG_SUBTCMDD, sub_data, FALSE, 0); /* CS=1,RD=1,WE=1,RS=sub_rs */ mddi_queue_register_write(REG_SUBTCMD, 0x000e | sub_rs, FALSE, 0); /* CS=0,RD=1,WE=1,RS=sub_rs */ mddi_queue_register_write(REG_SUBTCMD, 0x0006 | sub_rs, FALSE, 0); /* CS=0,RD=1,WE=0,RS=sub_rs */ mddi_queue_register_write(REG_SUBTCMD, 0x0004 | sub_rs, FALSE, 0); /* CS=0,RD=1,WE=1,RS=sub_rs */ mddi_queue_register_write(REG_SUBTCMD, 0x0006 | sub_rs, FALSE, 0); /* CS=1,RD=1,WE=1,RS=sub_rs */ mddi_queue_register_write(REG_SUBTCMD, 0x000e | sub_rs, TRUE, 0); } static uint32 sub_through_read(int sub_rs) { uint32 sub_data; /* CS=1,RD=1,WE=1,RS=sub_rs */ mddi_queue_register_write(REG_SUBTCMD, 0x000e | sub_rs, FALSE, 0); /* CS=0,RD=1,WE=1,RS=sub_rs */ mddi_queue_register_write(REG_SUBTCMD, 0x0006 | sub_rs, FALSE, 0); /* CS=0,RD=1,WE=0,RS=sub_rs */ mddi_queue_register_write(REG_SUBTCMD, 0x0002 | sub_rs, TRUE, 0); 
mddi_queue_register_read(REG_SUBTCMDD, &sub_data, TRUE, 0); /* CS=0,RD=1,WE=1,RS=sub_rs */ mddi_queue_register_write(REG_SUBTCMD, 0x0006 | sub_rs, FALSE, 0); /* CS=1,RD=1,WE=1,RS=sub_rs */ mddi_queue_register_write(REG_SUBTCMD, 0x000e | sub_rs, TRUE, 0); return sub_data; } static void serigo(uint32 ssd) { uint32 ssdctl; mddi_queue_register_read(REG_SSDCTL, &ssdctl, TRUE, 0); ssdctl = ((ssdctl & 0xE7) | 0x02); mddi_queue_register_write(REG_SSD0, ssd, FALSE, 0); mddi_queue_register_write(REG_SSDCTL, ssdctl, TRUE, 0); do { mddi_queue_register_read(REG_SSDCTL, &ssdctl, TRUE, 0); } while ((ssdctl & 0x0002) != 0); if (mddi_debug_prim_wait) mddi_wait(2); } static void mddi_sharp_lcd_powerdown(void) { serigo(0x0131); serigo(0x0300); mddi_wait(40); serigo(0x0135); mddi_wait(20); serigo(0x2122); mddi_wait(20); serigo(0x0201); mddi_wait(20); serigo(0x2100); mddi_wait(20); serigo(0x2000); mddi_wait(20); mddi_queue_register_write(REG_PSTCTL1, 0x1, TRUE, 0); mddi_wait(100); mddi_queue_register_write(REG_PSTCTL1, 0x0, TRUE, 0); mddi_wait(2); mddi_queue_register_write(REG_SYSCTL, 0x1, TRUE, 0); mddi_wait(2); mddi_queue_register_write(REG_CLKDIV1, 0x3, TRUE, 0); mddi_wait(2); mddi_queue_register_write(REG_SSDCTL, 0x0000, TRUE, 0); /* SSDRESET */ mddi_queue_register_write(REG_SYSCTL, 0x0, TRUE, 0); mddi_wait(2); } static void mddi_sharp_lcd_set_backlight(struct msm_fb_data_type *mfd) { uint32 regdata; int32 level; int max = mfd->panel_info.bl_max; int min = mfd->panel_info.bl_min; if (mddi_sharp_pdata && mddi_sharp_pdata->backlight_level) { level = mddi_sharp_pdata->backlight_level(mfd->bl_level, max, min); if (level < 0) return; /* use Rodem GPIO(2:0) to give 8 levels of backlight (7-0) */ /* Set lower 3 GPIOs as Outputs (set to 0) */ mddi_queue_register_read(REG_GIOA, &regdata, TRUE, 0); mddi_queue_register_write(REG_GIOA, regdata & 0xfff8, TRUE, 0); /* Set lower 3 GPIOs as level */ mddi_queue_register_read(REG_GIOD, &regdata, TRUE, 0); mddi_queue_register_write(REG_GIOD, (regdata 
& 0xfff8) | (0x07 & level), TRUE, 0); } } static void mddi_sharp_prim_lcd_init(void) { mddi_queue_register_write(REG_SYSCTL, 0x4000, TRUE, 0); mddi_wait(1); mddi_queue_register_write(REG_SYSCTL, 0x0000, TRUE, 0); mddi_wait(5); mddi_queue_register_write(REG_SYSCTL, 0x0001, FALSE, 0); mddi_queue_register_write(REG_CLKDIV1, 0x000b, FALSE, 0); /* new reg write below */ if (mddi_sharp_debug_60hz_refresh) mddi_queue_register_write(REG_CLKCNF, 0x070d, FALSE, 0); else mddi_queue_register_write(REG_CLKCNF, 0x0708, FALSE, 0); mddi_queue_register_write(REG_SYSCTL, 0x0201, FALSE, 0); mddi_queue_register_write(REG_PTGCTL, 0x0010, FALSE, 0); mddi_queue_register_write(REG_PTHP, 4, FALSE, 0); mddi_queue_register_write(REG_PTHB, 40, FALSE, 0); mddi_queue_register_write(REG_PTHW, 240, FALSE, 0); if (mddi_sharp_debug_60hz_refresh) mddi_queue_register_write(REG_PTHF, 12, FALSE, 0); else mddi_queue_register_write(REG_PTHF, 92, FALSE, 0); mddi_wait(1); mddi_queue_register_write(REG_PTVP, 1, FALSE, 0); mddi_queue_register_write(REG_PTVB, 2, FALSE, 0); mddi_queue_register_write(REG_PTVW, 320, FALSE, 0); mddi_queue_register_write(REG_PTVF, 15, FALSE, 0); mddi_wait(1); /* vram_color set REG_AGM???? 
*/ mddi_queue_register_write(REG_AGM, 0x0000, TRUE, 0); mddi_queue_register_write(REG_SSDCTL, 0x0000, FALSE, 0); mddi_queue_register_write(REG_SSDCTL, 0x0001, TRUE, 0); mddi_wait(1); mddi_queue_register_write(REG_PSTCTL1, 0x0001, TRUE, 0); mddi_wait(10); serigo(0x0701); /* software reset */ mddi_wait(1); /* Wait over 50us */ serigo(0x0400); /* DCLK~ACHSYNC~ACVSYNC polarity setting */ serigo(0x2900); /* EEPROM start read address setting */ serigo(0x2606); /* EEPROM start read register setting */ mddi_wait(20); /* Wait over 20ms */ serigo(0x0503); /* Horizontal timing setting */ serigo(0x062C); /* Veritical timing setting */ serigo(0x2001); /* power initialize setting(VDC2) */ mddi_wait(20); /* Wait over 20ms */ serigo(0x2120); /* Initialize power setting(CPS) */ mddi_wait(20); /* Wait over 20ms */ serigo(0x2130); /* Initialize power setting(CPS) */ mddi_wait(20); /* Wait over 20ms */ serigo(0x2132); /* Initialize power setting(CPS) */ mddi_wait(10); /* Wait over 10ms */ serigo(0x2133); /* Initialize power setting(CPS) */ mddi_wait(20); /* Wait over 20ms */ serigo(0x0200); /* Panel initialize release(INIT) */ mddi_wait(1); /* Wait over 1ms */ serigo(0x0131); /* Panel setting(CPS) */ mddi_wait(1); /* Wait over 1ms */ mddi_queue_register_write(REG_PSTCTL1, 0x0003, TRUE, 0); /* if (FFA LCD is upside down) -> serigo(0x0100); */ serigo(0x0130); /* Black mask release(display ON) */ mddi_wait(1); /* Wait over 1ms */ if (mddi_sharp_vsync_wake) { mddi_queue_register_write(REG_VBLKS, 0x1001, TRUE, 0); mddi_queue_register_write(REG_VBLKE, 0x1002, TRUE, 0); } /* Set the MDP pixel data attributes for Primary Display */ mddi_host_write_pix_attr_reg(0x00C3); return; } void mddi_sharp_sub_lcd_init(void) { mddi_queue_register_write(REG_SYSCTL, 0x4000, FALSE, 0); mddi_queue_register_write(REG_SYSCTL, 0x0000, TRUE, 0); mddi_wait(100); mddi_queue_register_write(REG_SYSCTL, 0x0001, FALSE, 0); mddi_queue_register_write(REG_CLKDIV1, 0x000b, FALSE, 0); mddi_queue_register_write(REG_CLKCNF, 
0x0708, FALSE, 0); mddi_queue_register_write(REG_SYSCTL, 0x0201, FALSE, 0); mddi_queue_register_write(REG_PTGCTL, 0x0010, FALSE, 0); mddi_queue_register_write(REG_PTHP, 4, FALSE, 0); mddi_queue_register_write(REG_PTHB, 40, FALSE, 0); mddi_queue_register_write(REG_PTHW, 128, FALSE, 0); mddi_queue_register_write(REG_PTHF, 92, FALSE, 0); mddi_queue_register_write(REG_PTVP, 1, FALSE, 0); mddi_queue_register_write(REG_PTVB, 2, FALSE, 0); mddi_queue_register_write(REG_PTVW, 128, FALSE, 0); mddi_queue_register_write(REG_PTVF, 15, FALSE, 0); /* Now the sub display..... */ /* Reset High */ mddi_queue_register_write(REG_SUBCTL, 0x0200, FALSE, 0); /* CS=1,RD=1,WE=1,RS=1 */ mddi_queue_register_write(REG_SUBTCMD, 0x000f, TRUE, 0); mddi_wait(1); /* Wait 5us */ if (sharp_subpanel_type == SHARP_SUB_UNKNOWN) { uint32 data; sub_through_write(1, 0x05); sub_through_write(1, 0x6A); sub_through_write(1, 0x1D); sub_through_write(1, 0x05); data = sub_through_read(1); if (data == 0x6A) { sharp_subpanel_type = SHARP_SUB_HYNIX; } else { sub_through_write(0, 0x36); sub_through_write(1, 0xA8); sub_through_write(0, 0x09); data = sub_through_read(1); data = sub_through_read(1); if (data == 0x54) { sub_through_write(0, 0x36); sub_through_write(1, 0x00); sharp_subpanel_type = SHARP_SUB_ROHM; } } } if (sharp_subpanel_type == SHARP_SUB_HYNIX) { sub_through_write(1, 0x00); /* Display setting 1 */ sub_through_write(1, 0x04); sub_through_write(1, 0x01); sub_through_write(1, 0x05); sub_through_write(1, 0x0280); sub_through_write(1, 0x0301); sub_through_write(1, 0x0402); sub_through_write(1, 0x0500); sub_through_write(1, 0x0681); sub_through_write(1, 0x077F); sub_through_write(1, 0x08C0); sub_through_write(1, 0x0905); sub_through_write(1, 0x0A02); sub_through_write(1, 0x0B00); sub_through_write(1, 0x0C00); sub_through_write(1, 0x0D00); sub_through_write(1, 0x0E00); sub_through_write(1, 0x0F00); sub_through_write(1, 0x100B); /* Display setting 2 */ sub_through_write(1, 0x1103); sub_through_write(1, 
0x1237); sub_through_write(1, 0x1300); sub_through_write(1, 0x1400); sub_through_write(1, 0x1500); sub_through_write(1, 0x1605); sub_through_write(1, 0x1700); sub_through_write(1, 0x1800); sub_through_write(1, 0x192E); sub_through_write(1, 0x1A00); sub_through_write(1, 0x1B00); sub_through_write(1, 0x1C00); sub_through_write(1, 0x151A); /* Power setting */ sub_through_write(1, 0x2002); /* Gradation Palette setting */ sub_through_write(1, 0x2107); sub_through_write(1, 0x220C); sub_through_write(1, 0x2310); sub_through_write(1, 0x2414); sub_through_write(1, 0x2518); sub_through_write(1, 0x261C); sub_through_write(1, 0x2720); sub_through_write(1, 0x2824); sub_through_write(1, 0x2928); sub_through_write(1, 0x2A2B); sub_through_write(1, 0x2B2E); sub_through_write(1, 0x2C31); sub_through_write(1, 0x2D34); sub_through_write(1, 0x2E37); sub_through_write(1, 0x2F3A); sub_through_write(1, 0x303C); sub_through_write(1, 0x313E); sub_through_write(1, 0x323F); sub_through_write(1, 0x3340); sub_through_write(1, 0x3441); sub_through_write(1, 0x3543); sub_through_write(1, 0x3646); sub_through_write(1, 0x3749); sub_through_write(1, 0x384C); sub_through_write(1, 0x394F); sub_through_write(1, 0x3A52); sub_through_write(1, 0x3B59); sub_through_write(1, 0x3C60); sub_through_write(1, 0x3D67); sub_through_write(1, 0x3E6E); sub_through_write(1, 0x3F7F); sub_through_write(1, 0x4001); sub_through_write(1, 0x4107); sub_through_write(1, 0x420C); sub_through_write(1, 0x4310); sub_through_write(1, 0x4414); sub_through_write(1, 0x4518); sub_through_write(1, 0x461C); sub_through_write(1, 0x4720); sub_through_write(1, 0x4824); sub_through_write(1, 0x4928); sub_through_write(1, 0x4A2B); sub_through_write(1, 0x4B2E); sub_through_write(1, 0x4C31); sub_through_write(1, 0x4D34); sub_through_write(1, 0x4E37); sub_through_write(1, 0x4F3A); sub_through_write(1, 0x503C); sub_through_write(1, 0x513E); sub_through_write(1, 0x523F); sub_through_write(1, 0x5340); sub_through_write(1, 0x5441); 
sub_through_write(1, 0x5543); sub_through_write(1, 0x5646); sub_through_write(1, 0x5749); sub_through_write(1, 0x584C); sub_through_write(1, 0x594F); sub_through_write(1, 0x5A52); sub_through_write(1, 0x5B59); sub_through_write(1, 0x5C60); sub_through_write(1, 0x5D67); sub_through_write(1, 0x5E6E); sub_through_write(1, 0x5F7E); sub_through_write(1, 0x6000); sub_through_write(1, 0x6107); sub_through_write(1, 0x620C); sub_through_write(1, 0x6310); sub_through_write(1, 0x6414); sub_through_write(1, 0x6518); sub_through_write(1, 0x661C); sub_through_write(1, 0x6720); sub_through_write(1, 0x6824); sub_through_write(1, 0x6928); sub_through_write(1, 0x6A2B); sub_through_write(1, 0x6B2E); sub_through_write(1, 0x6C31); sub_through_write(1, 0x6D34); sub_through_write(1, 0x6E37); sub_through_write(1, 0x6F3A); sub_through_write(1, 0x703C); sub_through_write(1, 0x713E); sub_through_write(1, 0x723F); sub_through_write(1, 0x7340); sub_through_write(1, 0x7441); sub_through_write(1, 0x7543); sub_through_write(1, 0x7646); sub_through_write(1, 0x7749); sub_through_write(1, 0x784C); sub_through_write(1, 0x794F); sub_through_write(1, 0x7A52); sub_through_write(1, 0x7B59); sub_through_write(1, 0x7C60); sub_through_write(1, 0x7D67); sub_through_write(1, 0x7E6E); sub_through_write(1, 0x7F7D); sub_through_write(1, 0x1851); /* Display on */ mddi_queue_register_write(REG_AGM, 0x0000, TRUE, 0); /* 1 pixel / 1 post clock */ mddi_queue_register_write(REG_CLKDIV2, 0x3b00, FALSE, 0); /* SUB LCD select */ mddi_queue_register_write(REG_PSTCTL2, 0x0080, FALSE, 0); /* RS=0,command initiate number=0,select master mode */ mddi_queue_register_write(REG_SUBCTL, 0x0202, FALSE, 0); /* Sub LCD Data transform start */ mddi_queue_register_write(REG_PSTCTL1, 0x0003, FALSE, 0); } else if (sharp_subpanel_type == SHARP_SUB_ROHM) { sub_through_write(0, 0x01); /* Display setting */ sub_through_write(1, 0x00); mddi_wait(1); /* Wait 100us <----- ******* Update 2005/01/24 */ sub_through_write(0, 0xB6); 
sub_through_write(1, 0x0C); sub_through_write(1, 0x4A); sub_through_write(1, 0x20); sub_through_write(0, 0x3A); sub_through_write(1, 0x05); sub_through_write(0, 0xB7); sub_through_write(1, 0x01); sub_through_write(0, 0xBA); sub_through_write(1, 0x20); sub_through_write(1, 0x02); sub_through_write(0, 0x25); sub_through_write(1, 0x4F); sub_through_write(0, 0xBB); sub_through_write(1, 0x00); sub_through_write(0, 0x36); sub_through_write(1, 0x00); sub_through_write(0, 0xB1); sub_through_write(1, 0x05); sub_through_write(0, 0xBE); sub_through_write(1, 0x80); sub_through_write(0, 0x26); sub_through_write(1, 0x01); sub_through_write(0, 0x2A); sub_through_write(1, 0x02); sub_through_write(1, 0x81); sub_through_write(0, 0x2B); sub_through_write(1, 0x00); sub_through_write(1, 0x7F); sub_through_write(0, 0x2C); sub_through_write(0, 0x11); /* Sleep mode off */ mddi_wait(1); /* Wait 100 ms <----- ******* Update 2005/01/24 */ sub_through_write(0, 0x29); /* Display on */ sub_through_write(0, 0xB3); sub_through_write(1, 0x20); sub_through_write(1, 0xAA); sub_through_write(1, 0xA0); sub_through_write(1, 0x20); sub_through_write(1, 0x30); sub_through_write(1, 0xA6); sub_through_write(1, 0xFF); sub_through_write(1, 0x9A); sub_through_write(1, 0x9F); sub_through_write(1, 0xAF); sub_through_write(1, 0xBC); sub_through_write(1, 0xCF); sub_through_write(1, 0xDF); sub_through_write(1, 0x20); sub_through_write(1, 0x9C); sub_through_write(1, 0x8A); sub_through_write(0, 0x002C); /* Display on */ /* 1 pixel / 2 post clock */ mddi_queue_register_write(REG_CLKDIV2, 0x7b00, FALSE, 0); /* SUB LCD select */ mddi_queue_register_write(REG_PSTCTL2, 0x0080, FALSE, 0); /* RS=1,command initiate number=0,select master mode */ mddi_queue_register_write(REG_SUBCTL, 0x0242, FALSE, 0); /* Sub LCD Data transform start */ mddi_queue_register_write(REG_PSTCTL1, 0x0003, FALSE, 0); } /* Set the MDP pixel data attributes for Sub Display */ mddi_host_write_pix_attr_reg(0x00C0); } void 
mddi_sharp_lcd_vsync_detected(boolean detected) { /* static timetick_type start_time = 0; */ static struct timeval start_time; static boolean first_time = TRUE; /* uint32 mdp_cnt_val = 0; */ /* timetick_type elapsed_us; */ struct timeval now; uint32 elapsed_us; uint32 num_vsyncs; if ((detected) || (mddi_sharp_vsync_attempts > 5)) { if ((detected) && (mddi_sharp_monitor_refresh_value)) { /* if (start_time != 0) */ if (!first_time) { jiffies_to_timeval(jiffies, &now); elapsed_us = (now.tv_sec - start_time.tv_sec) * 1000000 + now.tv_usec - start_time.tv_usec; /* * LCD is configured for a refresh every usecs, * so to determine the number of vsyncs that * have occurred since the last measurement add * half that to the time difference and divide * by the refresh rate. */ num_vsyncs = (elapsed_us + (mddi_sharp_usecs_per_refresh >> 1)) / mddi_sharp_usecs_per_refresh; /* * LCD is configured for * hsyncs (rows) per * refresh cycle. Calculate new rows_per_second * value based upon these new measurements. * MDP can update with this new value. 
*/ mddi_sharp_rows_per_second = (mddi_sharp_rows_per_refresh * 1000 * num_vsyncs) / (elapsed_us / 1000); } /* start_time = timetick_get(); */ first_time = FALSE; jiffies_to_timeval(jiffies, &start_time); if (mddi_sharp_report_refresh_measurements) { /* mdp_cnt_val = MDP_LINE_COUNT; */ } } /* if detected = TRUE, client initiated wakeup was detected */ if (mddi_sharp_vsync_handler != NULL) { (*mddi_sharp_vsync_handler) (mddi_sharp_vsync_handler_arg); mddi_sharp_vsync_handler = NULL; } mddi_vsync_detect_enabled = FALSE; mddi_sharp_vsync_attempts = 0; /* need to clear this vsync wakeup */ if (!mddi_queue_register_write_int(REG_INTR, 0x0000)) { MDDI_MSG_ERR("Vsync interrupt clear failed!\n"); } if (!detected) { /* give up after 5 failed attempts but show error */ MDDI_MSG_NOTICE("Vsync detection failed!\n"); } else if ((mddi_sharp_monitor_refresh_value) && (mddi_sharp_report_refresh_measurements)) { MDDI_MSG_NOTICE(" Lines Per Second=%d!\n", mddi_sharp_rows_per_second); } } else /* if detected = FALSE, we woke up from hibernation, but did not * detect client initiated wakeup. 
*/ mddi_sharp_vsync_attempts++; } /* ISR to be executed */ void mddi_sharp_vsync_set_handler(msm_fb_vsync_handler_type handler, void *arg) { boolean error = FALSE; unsigned long flags; /* Disable interrupts */ spin_lock_irqsave(&mddi_host_spin_lock, flags); /* INTLOCK(); */ if (mddi_sharp_vsync_handler != NULL) error = TRUE; /* Register the handler for this particular GROUP interrupt source */ mddi_sharp_vsync_handler = handler; mddi_sharp_vsync_handler_arg = arg; /* Restore interrupts */ spin_unlock_irqrestore(&mddi_host_spin_lock, flags); /* INTFREE(); */ if (error) MDDI_MSG_ERR("MDDI: Previous Vsync handler never called\n"); /* Enable the vsync wakeup */ mddi_queue_register_write(REG_INTR, 0x8100, FALSE, 0); mddi_sharp_vsync_attempts = 1; mddi_vsync_detect_enabled = TRUE; } /* mddi_sharp_vsync_set_handler */ static int mddi_sharp_lcd_on(struct platform_device *pdev) { struct msm_fb_data_type *mfd; mfd = platform_get_drvdata(pdev); if (!mfd) return -ENODEV; if (mfd->key != MFD_KEY) return -EINVAL; if (mfd->panel.id == SHARP_QVGA_PRIM) mddi_sharp_prim_lcd_init(); else mddi_sharp_sub_lcd_init(); return 0; } static int mddi_sharp_lcd_off(struct platform_device *pdev) { mddi_sharp_lcd_powerdown(); return 0; } static int __init mddi_sharp_probe(struct platform_device *pdev) { if (pdev->id == 0) { mddi_sharp_pdata = pdev->dev.platform_data; return 0; } msm_fb_add_device(pdev); return 0; } static struct platform_driver this_driver = { .probe = mddi_sharp_probe, .driver = { .name = "mddi_sharp_qvga", }, }; static struct msm_fb_panel_data mddi_sharp_panel_data0 = { .on = mddi_sharp_lcd_on, .off = mddi_sharp_lcd_off, .set_backlight = mddi_sharp_lcd_set_backlight, .set_vsync_notifier = mddi_sharp_vsync_set_handler, }; static struct platform_device this_device_0 = { .name = "mddi_sharp_qvga", .id = SHARP_QVGA_PRIM, .dev = { .platform_data = &mddi_sharp_panel_data0, } }; static struct msm_fb_panel_data mddi_sharp_panel_data1 = { .on = mddi_sharp_lcd_on, .off = 
mddi_sharp_lcd_off, }; static struct platform_device this_device_1 = { .name = "mddi_sharp_qvga", .id = SHARP_128X128_SECD, .dev = { .platform_data = &mddi_sharp_panel_data1, } }; static int __init mddi_sharp_init(void) { int ret; struct msm_panel_info *pinfo; #ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT u32 id; ret = msm_fb_detect_client("mddi_sharp_qvga"); if (ret == -ENODEV) return 0; if (ret) { id = mddi_get_client_id(); if (((id >> 16) != 0x0) || ((id & 0xffff) != 0x8835)) return 0; } #endif if (mddi_host_core_version > 8) { /* can use faster refresh with newer hw revisions */ mddi_sharp_debug_60hz_refresh = TRUE; /* Timing variables for tracking vsync */ /* dot_clock = 6.00MHz * horizontal count = 296 * vertical count = 338 * refresh rate = 6000000/(296+338) = 60Hz */ mddi_sharp_rows_per_second = 20270; /* 6000000/296 */ mddi_sharp_rows_per_refresh = 338; mddi_sharp_usecs_per_refresh = 16674; /* (296+338)/6000000 */ } else { /* Timing variables for tracking vsync */ /* dot_clock = 5.20MHz * horizontal count = 376 * vertical count = 338 * refresh rate = 5200000/(376+338) = 41Hz */ mddi_sharp_rows_per_second = 13830; /* 5200000/376 */ mddi_sharp_rows_per_refresh = 338; mddi_sharp_usecs_per_refresh = 24440; /* (376+338)/5200000 */ } ret = platform_driver_register(&this_driver); if (!ret) { pinfo = &mddi_sharp_panel_data0.panel_info; pinfo->xres = 240; pinfo->yres = 320; pinfo->type = MDDI_PANEL; pinfo->pdest = DISPLAY_1; pinfo->mddi.vdopkt = MDDI_DEFAULT_PRIM_PIX_ATTR; pinfo->wait_cycle = 0; pinfo->bpp = 18; pinfo->fb_num = 2; pinfo->clk_rate = 122880000; pinfo->clk_min = 120000000; pinfo->clk_max = 125000000; pinfo->lcd.vsync_enable = TRUE; pinfo->lcd.refx100 = (mddi_sharp_rows_per_second * 100) / mddi_sharp_rows_per_refresh; pinfo->lcd.v_back_porch = 12; pinfo->lcd.v_front_porch = 6; pinfo->lcd.v_pulse_width = 0; pinfo->lcd.hw_vsync_mode = FALSE; pinfo->lcd.vsync_notifier_period = (1 * HZ); pinfo->bl_max = 7; pinfo->bl_min = 1; ret = 
platform_device_register(&this_device_0); if (ret) platform_driver_unregister(&this_driver); pinfo = &mddi_sharp_panel_data1.panel_info; pinfo->xres = 128; pinfo->yres = 128; pinfo->type = MDDI_PANEL; pinfo->pdest = DISPLAY_2; pinfo->mddi.vdopkt = 0x400; pinfo->wait_cycle = 0; pinfo->bpp = 18; pinfo->clk_rate = 122880000; pinfo->clk_min = 120000000; pinfo->clk_max = 125000000; pinfo->fb_num = 2; ret = platform_device_register(&this_device_1); if (ret) { platform_device_unregister(&this_device_0); platform_driver_unregister(&this_driver); } } if (!ret) mddi_lcd.vsync_detected = mddi_sharp_lcd_vsync_detected; return ret; } module_init(mddi_sharp_init);
gpl-2.0
CyanogenMod/android_kernel_samsung_piranha
sound/core/rtctimer.c
4339
4239
/* * RTC based high-frequency timer * * Copyright (C) 2000 Takashi Iwai * based on rtctimer.c by Steve Ratcliffe * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/moduleparam.h> #include <linux/log2.h> #include <sound/core.h> #include <sound/timer.h> #if defined(CONFIG_RTC) || defined(CONFIG_RTC_MODULE) #include <linux/mc146818rtc.h> #define RTC_FREQ 1024 /* default frequency */ #define NANO_SEC 1000000000L /* 10^9 in sec */ /* * prototypes */ static int rtctimer_open(struct snd_timer *t); static int rtctimer_close(struct snd_timer *t); static int rtctimer_start(struct snd_timer *t); static int rtctimer_stop(struct snd_timer *t); /* * The hardware dependent description for this timer. 
*/ static struct snd_timer_hardware rtc_hw = { .flags = SNDRV_TIMER_HW_AUTO | SNDRV_TIMER_HW_FIRST | SNDRV_TIMER_HW_TASKLET, .ticks = 100000000L, /* FIXME: XXX */ .open = rtctimer_open, .close = rtctimer_close, .start = rtctimer_start, .stop = rtctimer_stop, }; static int rtctimer_freq = RTC_FREQ; /* frequency */ static struct snd_timer *rtctimer; static struct tasklet_struct rtc_tasklet; static rtc_task_t rtc_task; static int rtctimer_open(struct snd_timer *t) { int err; err = rtc_register(&rtc_task); if (err < 0) return err; t->private_data = &rtc_task; return 0; } static int rtctimer_close(struct snd_timer *t) { rtc_task_t *rtc = t->private_data; if (rtc) { rtc_unregister(rtc); tasklet_kill(&rtc_tasklet); t->private_data = NULL; } return 0; } static int rtctimer_start(struct snd_timer *timer) { rtc_task_t *rtc = timer->private_data; if (snd_BUG_ON(!rtc)) return -EINVAL; rtc_control(rtc, RTC_IRQP_SET, rtctimer_freq); rtc_control(rtc, RTC_PIE_ON, 0); return 0; } static int rtctimer_stop(struct snd_timer *timer) { rtc_task_t *rtc = timer->private_data; if (snd_BUG_ON(!rtc)) return -EINVAL; rtc_control(rtc, RTC_PIE_OFF, 0); return 0; } static void rtctimer_tasklet(unsigned long data) { snd_timer_interrupt((struct snd_timer *)data, 1); } /* * interrupt */ static void rtctimer_interrupt(void *private_data) { tasklet_schedule(private_data); } /* * ENTRY functions */ static int __init rtctimer_init(void) { int err; struct snd_timer *timer; if (rtctimer_freq < 2 || rtctimer_freq > 8192 || !is_power_of_2(rtctimer_freq)) { snd_printk(KERN_ERR "rtctimer: invalid frequency %d\n", rtctimer_freq); return -EINVAL; } /* Create a new timer and set up the fields */ err = snd_timer_global_new("rtc", SNDRV_TIMER_GLOBAL_RTC, &timer); if (err < 0) return err; timer->module = THIS_MODULE; strcpy(timer->name, "RTC timer"); timer->hw = rtc_hw; timer->hw.resolution = NANO_SEC / rtctimer_freq; tasklet_init(&rtc_tasklet, rtctimer_tasklet, (unsigned long)timer); /* set up RTC callback */ 
rtc_task.func = rtctimer_interrupt; rtc_task.private_data = &rtc_tasklet; err = snd_timer_global_register(timer); if (err < 0) { snd_timer_global_free(timer); return err; } rtctimer = timer; /* remember this */ return 0; } static void __exit rtctimer_exit(void) { if (rtctimer) { snd_timer_global_free(rtctimer); rtctimer = NULL; } } /* * exported stuff */ module_init(rtctimer_init) module_exit(rtctimer_exit) module_param(rtctimer_freq, int, 0444); MODULE_PARM_DESC(rtctimer_freq, "timer frequency in Hz"); MODULE_LICENSE("GPL"); MODULE_ALIAS("snd-timer-" __stringify(SNDRV_TIMER_GLOBAL_RTC)); #endif /* CONFIG_RTC || CONFIG_RTC_MODULE */
gpl-2.0
4rch0x0/void-kernel
sound/core/rtctimer.c
4339
4239
/* * RTC based high-frequency timer * * Copyright (C) 2000 Takashi Iwai * based on rtctimer.c by Steve Ratcliffe * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/moduleparam.h> #include <linux/log2.h> #include <sound/core.h> #include <sound/timer.h> #if defined(CONFIG_RTC) || defined(CONFIG_RTC_MODULE) #include <linux/mc146818rtc.h> #define RTC_FREQ 1024 /* default frequency */ #define NANO_SEC 1000000000L /* 10^9 in sec */ /* * prototypes */ static int rtctimer_open(struct snd_timer *t); static int rtctimer_close(struct snd_timer *t); static int rtctimer_start(struct snd_timer *t); static int rtctimer_stop(struct snd_timer *t); /* * The hardware dependent description for this timer. 
*/ static struct snd_timer_hardware rtc_hw = { .flags = SNDRV_TIMER_HW_AUTO | SNDRV_TIMER_HW_FIRST | SNDRV_TIMER_HW_TASKLET, .ticks = 100000000L, /* FIXME: XXX */ .open = rtctimer_open, .close = rtctimer_close, .start = rtctimer_start, .stop = rtctimer_stop, }; static int rtctimer_freq = RTC_FREQ; /* frequency */ static struct snd_timer *rtctimer; static struct tasklet_struct rtc_tasklet; static rtc_task_t rtc_task; static int rtctimer_open(struct snd_timer *t) { int err; err = rtc_register(&rtc_task); if (err < 0) return err; t->private_data = &rtc_task; return 0; } static int rtctimer_close(struct snd_timer *t) { rtc_task_t *rtc = t->private_data; if (rtc) { rtc_unregister(rtc); tasklet_kill(&rtc_tasklet); t->private_data = NULL; } return 0; } static int rtctimer_start(struct snd_timer *timer) { rtc_task_t *rtc = timer->private_data; if (snd_BUG_ON(!rtc)) return -EINVAL; rtc_control(rtc, RTC_IRQP_SET, rtctimer_freq); rtc_control(rtc, RTC_PIE_ON, 0); return 0; } static int rtctimer_stop(struct snd_timer *timer) { rtc_task_t *rtc = timer->private_data; if (snd_BUG_ON(!rtc)) return -EINVAL; rtc_control(rtc, RTC_PIE_OFF, 0); return 0; } static void rtctimer_tasklet(unsigned long data) { snd_timer_interrupt((struct snd_timer *)data, 1); } /* * interrupt */ static void rtctimer_interrupt(void *private_data) { tasklet_schedule(private_data); } /* * ENTRY functions */ static int __init rtctimer_init(void) { int err; struct snd_timer *timer; if (rtctimer_freq < 2 || rtctimer_freq > 8192 || !is_power_of_2(rtctimer_freq)) { snd_printk(KERN_ERR "rtctimer: invalid frequency %d\n", rtctimer_freq); return -EINVAL; } /* Create a new timer and set up the fields */ err = snd_timer_global_new("rtc", SNDRV_TIMER_GLOBAL_RTC, &timer); if (err < 0) return err; timer->module = THIS_MODULE; strcpy(timer->name, "RTC timer"); timer->hw = rtc_hw; timer->hw.resolution = NANO_SEC / rtctimer_freq; tasklet_init(&rtc_tasklet, rtctimer_tasklet, (unsigned long)timer); /* set up RTC callback */ 
rtc_task.func = rtctimer_interrupt; rtc_task.private_data = &rtc_tasklet; err = snd_timer_global_register(timer); if (err < 0) { snd_timer_global_free(timer); return err; } rtctimer = timer; /* remember this */ return 0; } static void __exit rtctimer_exit(void) { if (rtctimer) { snd_timer_global_free(rtctimer); rtctimer = NULL; } } /* * exported stuff */ module_init(rtctimer_init) module_exit(rtctimer_exit) module_param(rtctimer_freq, int, 0444); MODULE_PARM_DESC(rtctimer_freq, "timer frequency in Hz"); MODULE_LICENSE("GPL"); MODULE_ALIAS("snd-timer-" __stringify(SNDRV_TIMER_GLOBAL_RTC)); #endif /* CONFIG_RTC || CONFIG_RTC_MODULE */
gpl-2.0
UniqueDroid/lge-kernel-p880
drivers/input/joystick/analog.c
5619
20374
/* * Copyright (c) 1996-2001 Vojtech Pavlik */ /* * Analog joystick and gamepad driver for Linux */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/delay.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/bitops.h> #include <linux/init.h> #include <linux/input.h> #include <linux/gameport.h> #include <linux/jiffies.h> #include <linux/timex.h> #define DRIVER_DESC "Analog joystick and gamepad driver" MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); /* * Option parsing. */ #define ANALOG_PORTS 16 static char *js[ANALOG_PORTS]; static unsigned int js_nargs; static int analog_options[ANALOG_PORTS]; module_param_array_named(map, js, charp, &js_nargs, 0); MODULE_PARM_DESC(map, "Describes analog joysticks type/capabilities"); /* * Times, feature definitions. 
*/ #define ANALOG_RUDDER 0x00004 #define ANALOG_THROTTLE 0x00008 #define ANALOG_AXES_STD 0x0000f #define ANALOG_BTNS_STD 0x000f0 #define ANALOG_BTNS_CHF 0x00100 #define ANALOG_HAT1_CHF 0x00200 #define ANALOG_HAT2_CHF 0x00400 #define ANALOG_HAT_FCS 0x00800 #define ANALOG_HATS_ALL 0x00e00 #define ANALOG_BTN_TL 0x01000 #define ANALOG_BTN_TR 0x02000 #define ANALOG_BTN_TL2 0x04000 #define ANALOG_BTN_TR2 0x08000 #define ANALOG_BTNS_TLR 0x03000 #define ANALOG_BTNS_TLR2 0x0c000 #define ANALOG_BTNS_GAMEPAD 0x0f000 #define ANALOG_HBTN_CHF 0x10000 #define ANALOG_ANY_CHF 0x10700 #define ANALOG_SAITEK 0x20000 #define ANALOG_EXTENSIONS 0x7ff00 #define ANALOG_GAMEPAD 0x80000 #define ANALOG_MAX_TIME 3 /* 3 ms */ #define ANALOG_LOOP_TIME 2000 /* 2 * loop */ #define ANALOG_SAITEK_DELAY 200 /* 200 us */ #define ANALOG_SAITEK_TIME 2000 /* 2000 us */ #define ANALOG_AXIS_TIME 2 /* 2 * refresh */ #define ANALOG_INIT_RETRIES 8 /* 8 times */ #define ANALOG_FUZZ_BITS 2 /* 2 bit more */ #define ANALOG_FUZZ_MAGIC 36 /* 36 u*ms/loop */ #define ANALOG_MAX_NAME_LENGTH 128 #define ANALOG_MAX_PHYS_LENGTH 32 static short analog_axes[] = { ABS_X, ABS_Y, ABS_RUDDER, ABS_THROTTLE }; static short analog_hats[] = { ABS_HAT0X, ABS_HAT0Y, ABS_HAT1X, ABS_HAT1Y, ABS_HAT2X, ABS_HAT2Y }; static short analog_pads[] = { BTN_Y, BTN_Z, BTN_TL, BTN_TR }; static short analog_exts[] = { ANALOG_HAT1_CHF, ANALOG_HAT2_CHF, ANALOG_HAT_FCS }; static short analog_pad_btn[] = { BTN_A, BTN_B, BTN_C, BTN_X, BTN_TL2, BTN_TR2, BTN_SELECT, BTN_START, BTN_MODE, BTN_BASE }; static short analog_joy_btn[] = { BTN_TRIGGER, BTN_THUMB, BTN_TOP, BTN_TOP2, BTN_BASE, BTN_BASE2, BTN_BASE3, BTN_BASE4, BTN_BASE5, BTN_BASE6 }; static unsigned char analog_chf[] = { 0xf, 0x0, 0x1, 0x9, 0x2, 0x4, 0xc, 0x8, 0x3, 0x5, 0xb, 0x7, 0xd, 0xe, 0xa, 0x6 }; struct analog { struct input_dev *dev; int mask; short *buttons; char name[ANALOG_MAX_NAME_LENGTH]; char phys[ANALOG_MAX_PHYS_LENGTH]; }; struct analog_port { struct gameport *gameport; struct analog 
analog[2]; unsigned char mask; char saitek; char cooked; int bads; int reads; int speed; int loop; int fuzz; int axes[4]; int buttons; int initial[4]; int axtime; }; /* * Time macros. */ #ifdef __i386__ #include <linux/i8253.h> #define GET_TIME(x) do { if (cpu_has_tsc) rdtscl(x); else x = get_time_pit(); } while (0) #define DELTA(x,y) (cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? PIT_TICK_RATE / HZ : 0))) #define TIME_NAME (cpu_has_tsc?"TSC":"PIT") static unsigned int get_time_pit(void) { unsigned long flags; unsigned int count; raw_spin_lock_irqsave(&i8253_lock, flags); outb_p(0x00, 0x43); count = inb_p(0x40); count |= inb_p(0x40) << 8; raw_spin_unlock_irqrestore(&i8253_lock, flags); return count; } #elif defined(__x86_64__) #define GET_TIME(x) rdtscl(x) #define DELTA(x,y) ((y)-(x)) #define TIME_NAME "TSC" #elif defined(__alpha__) #define GET_TIME(x) do { x = get_cycles(); } while (0) #define DELTA(x,y) ((y)-(x)) #define TIME_NAME "PCC" #elif defined(CONFIG_MN10300) #define GET_TIME(x) do { x = get_cycles(); } while (0) #define DELTA(x, y) ((x) - (y)) #define TIME_NAME "TSC" #else #define FAKE_TIME static unsigned long analog_faketime = 0; #define GET_TIME(x) do { x = analog_faketime++; } while(0) #define DELTA(x,y) ((y)-(x)) #define TIME_NAME "Unreliable" #warning Precise timer not defined for this architecture. #endif /* * analog_decode() decodes analog joystick data and reports input events. 
*/ static void analog_decode(struct analog *analog, int *axes, int *initial, int buttons) { struct input_dev *dev = analog->dev; int i, j; if (analog->mask & ANALOG_HAT_FCS) for (i = 0; i < 4; i++) if (axes[3] < ((initial[3] * ((i << 1) + 1)) >> 3)) { buttons |= 1 << (i + 14); break; } for (i = j = 0; i < 6; i++) if (analog->mask & (0x10 << i)) input_report_key(dev, analog->buttons[j++], (buttons >> i) & 1); if (analog->mask & ANALOG_HBTN_CHF) for (i = 0; i < 4; i++) input_report_key(dev, analog->buttons[j++], (buttons >> (i + 10)) & 1); if (analog->mask & ANALOG_BTN_TL) input_report_key(dev, analog_pads[0], axes[2] < (initial[2] >> 1)); if (analog->mask & ANALOG_BTN_TR) input_report_key(dev, analog_pads[1], axes[3] < (initial[3] >> 1)); if (analog->mask & ANALOG_BTN_TL2) input_report_key(dev, analog_pads[2], axes[2] > (initial[2] + (initial[2] >> 1))); if (analog->mask & ANALOG_BTN_TR2) input_report_key(dev, analog_pads[3], axes[3] > (initial[3] + (initial[3] >> 1))); for (i = j = 0; i < 4; i++) if (analog->mask & (1 << i)) input_report_abs(dev, analog_axes[j++], axes[i]); for (i = j = 0; i < 3; i++) if (analog->mask & analog_exts[i]) { input_report_abs(dev, analog_hats[j++], ((buttons >> ((i << 2) + 7)) & 1) - ((buttons >> ((i << 2) + 9)) & 1)); input_report_abs(dev, analog_hats[j++], ((buttons >> ((i << 2) + 8)) & 1) - ((buttons >> ((i << 2) + 6)) & 1)); } input_sync(dev); } /* * analog_cooked_read() reads analog joystick data. 
*/ static int analog_cooked_read(struct analog_port *port) { struct gameport *gameport = port->gameport; unsigned int time[4], start, loop, now, loopout, timeout; unsigned char data[4], this, last; unsigned long flags; int i, j; loopout = (ANALOG_LOOP_TIME * port->loop) / 1000; timeout = ANALOG_MAX_TIME * port->speed; local_irq_save(flags); gameport_trigger(gameport); GET_TIME(now); local_irq_restore(flags); start = now; this = port->mask; i = 0; do { loop = now; last = this; local_irq_disable(); this = gameport_read(gameport) & port->mask; GET_TIME(now); local_irq_restore(flags); if ((last ^ this) && (DELTA(loop, now) < loopout)) { data[i] = last ^ this; time[i] = now; i++; } } while (this && (i < 4) && (DELTA(start, now) < timeout)); this <<= 4; for (--i; i >= 0; i--) { this |= data[i]; for (j = 0; j < 4; j++) if (data[i] & (1 << j)) port->axes[j] = (DELTA(start, time[i]) << ANALOG_FUZZ_BITS) / port->loop; } return -(this != port->mask); } static int analog_button_read(struct analog_port *port, char saitek, char chf) { unsigned char u; int t = 1, i = 0; int strobe = gameport_time(port->gameport, ANALOG_SAITEK_TIME); u = gameport_read(port->gameport); if (!chf) { port->buttons = (~u >> 4) & 0xf; return 0; } port->buttons = 0; while ((~u & 0xf0) && (i < 16) && t) { port->buttons |= 1 << analog_chf[(~u >> 4) & 0xf]; if (!saitek) return 0; udelay(ANALOG_SAITEK_DELAY); t = strobe; gameport_trigger(port->gameport); while (((u = gameport_read(port->gameport)) & port->mask) && t) t--; i++; } return -(!t || (i == 16)); } /* * analog_poll() repeatedly polls the Analog joysticks. */ static void analog_poll(struct gameport *gameport) { struct analog_port *port = gameport_get_drvdata(gameport); int i; char saitek = !!(port->analog[0].mask & ANALOG_SAITEK); char chf = !!(port->analog[0].mask & ANALOG_ANY_CHF); if (port->cooked) { port->bads -= gameport_cooked_read(port->gameport, port->axes, &port->buttons); if (chf) port->buttons = port->buttons ? 
(1 << analog_chf[port->buttons]) : 0; port->reads++; } else { if (!port->axtime--) { port->bads -= analog_cooked_read(port); port->bads -= analog_button_read(port, saitek, chf); port->reads++; port->axtime = ANALOG_AXIS_TIME - 1; } else { if (!saitek) analog_button_read(port, saitek, chf); } } for (i = 0; i < 2; i++) if (port->analog[i].mask) analog_decode(port->analog + i, port->axes, port->initial, port->buttons); } /* * analog_open() is a callback from the input open routine. */ static int analog_open(struct input_dev *dev) { struct analog_port *port = input_get_drvdata(dev); gameport_start_polling(port->gameport); return 0; } /* * analog_close() is a callback from the input close routine. */ static void analog_close(struct input_dev *dev) { struct analog_port *port = input_get_drvdata(dev); gameport_stop_polling(port->gameport); } /* * analog_calibrate_timer() calibrates the timer and computes loop * and timeout values for a joystick port. */ static void analog_calibrate_timer(struct analog_port *port) { struct gameport *gameport = port->gameport; unsigned int i, t, tx, t1, t2, t3; unsigned long flags; local_irq_save(flags); GET_TIME(t1); #ifdef FAKE_TIME analog_faketime += 830; #endif mdelay(1); GET_TIME(t2); GET_TIME(t3); local_irq_restore(flags); port->speed = DELTA(t1, t2) - DELTA(t2, t3); tx = ~0; for (i = 0; i < 50; i++) { local_irq_save(flags); GET_TIME(t1); for (t = 0; t < 50; t++) { gameport_read(gameport); GET_TIME(t2); } GET_TIME(t3); local_irq_restore(flags); udelay(i); t = DELTA(t1, t2) - DELTA(t2, t3); if (t < tx) tx = t; } port->loop = tx / 50; } /* * analog_name() constructs a name for an analog joystick. 
*/ static void analog_name(struct analog *analog) { snprintf(analog->name, sizeof(analog->name), "Analog %d-axis %d-button", hweight8(analog->mask & ANALOG_AXES_STD), hweight8(analog->mask & ANALOG_BTNS_STD) + !!(analog->mask & ANALOG_BTNS_CHF) * 2 + hweight16(analog->mask & ANALOG_BTNS_GAMEPAD) + !!(analog->mask & ANALOG_HBTN_CHF) * 4); if (analog->mask & ANALOG_HATS_ALL) snprintf(analog->name, sizeof(analog->name), "%s %d-hat", analog->name, hweight16(analog->mask & ANALOG_HATS_ALL)); if (analog->mask & ANALOG_HAT_FCS) strlcat(analog->name, " FCS", sizeof(analog->name)); if (analog->mask & ANALOG_ANY_CHF) strlcat(analog->name, (analog->mask & ANALOG_SAITEK) ? " Saitek" : " CHF", sizeof(analog->name)); strlcat(analog->name, (analog->mask & ANALOG_GAMEPAD) ? " gamepad": " joystick", sizeof(analog->name)); } /* * analog_init_device() */ static int analog_init_device(struct analog_port *port, struct analog *analog, int index) { struct input_dev *input_dev; int i, j, t, v, w, x, y, z; int error; analog_name(analog); snprintf(analog->phys, sizeof(analog->phys), "%s/input%d", port->gameport->phys, index); analog->buttons = (analog->mask & ANALOG_GAMEPAD) ? analog_pad_btn : analog_joy_btn; analog->dev = input_dev = input_allocate_device(); if (!input_dev) return -ENOMEM; input_dev->name = analog->name; input_dev->phys = analog->phys; input_dev->id.bustype = BUS_GAMEPORT; input_dev->id.vendor = GAMEPORT_ID_VENDOR_ANALOG; input_dev->id.product = analog->mask >> 4; input_dev->id.version = 0x0100; input_dev->dev.parent = &port->gameport->dev; input_set_drvdata(input_dev, port); input_dev->open = analog_open; input_dev->close = analog_close; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); for (i = j = 0; i < 4; i++) if (analog->mask & (1 << i)) { t = analog_axes[j]; x = port->axes[i]; y = (port->axes[0] + port->axes[1]) >> 1; z = y - port->axes[i]; z = z > 0 ? 
z : -z; v = (x >> 3); w = (x >> 3); if ((i == 2 || i == 3) && (j == 2 || j == 3) && (z > (y >> 3))) x = y; if (analog->mask & ANALOG_SAITEK) { if (i == 2) x = port->axes[i]; v = x - (x >> 2); w = (x >> 4); } input_set_abs_params(input_dev, t, v, (x << 1) - v, port->fuzz, w); j++; } for (i = j = 0; i < 3; i++) if (analog->mask & analog_exts[i]) for (x = 0; x < 2; x++) { t = analog_hats[j++]; input_set_abs_params(input_dev, t, -1, 1, 0, 0); } for (i = j = 0; i < 4; i++) if (analog->mask & (0x10 << i)) set_bit(analog->buttons[j++], input_dev->keybit); if (analog->mask & ANALOG_BTNS_CHF) for (i = 0; i < 2; i++) set_bit(analog->buttons[j++], input_dev->keybit); if (analog->mask & ANALOG_HBTN_CHF) for (i = 0; i < 4; i++) set_bit(analog->buttons[j++], input_dev->keybit); for (i = 0; i < 4; i++) if (analog->mask & (ANALOG_BTN_TL << i)) set_bit(analog_pads[i], input_dev->keybit); analog_decode(analog, port->axes, port->initial, port->buttons); error = input_register_device(analog->dev); if (error) { input_free_device(analog->dev); return error; } return 0; } /* * analog_init_devices() sets up device-specific values and registers the input devices. */ static int analog_init_masks(struct analog_port *port) { int i; struct analog *analog = port->analog; int max[4]; if (!port->mask) return -1; if ((port->mask & 3) != 3 && port->mask != 0xc) { printk(KERN_WARNING "analog.c: Unknown joystick device found " "(data=%#x, %s), probably not analog joystick.\n", port->mask, port->gameport->phys); return -1; } i = analog_options[0]; /* FIXME !!! - need to specify options for different ports */ analog[0].mask = i & 0xfffff; analog[0].mask &= ~(ANALOG_AXES_STD | ANALOG_HAT_FCS | ANALOG_BTNS_GAMEPAD) | port->mask | ((port->mask << 8) & ANALOG_HAT_FCS) | ((port->mask << 10) & ANALOG_BTNS_TLR) | ((port->mask << 12) & ANALOG_BTNS_TLR2); analog[0].mask &= ~(ANALOG_HAT2_CHF) | ((analog[0].mask & ANALOG_HBTN_CHF) ? 
0 : ANALOG_HAT2_CHF); analog[0].mask &= ~(ANALOG_THROTTLE | ANALOG_BTN_TR | ANALOG_BTN_TR2) | ((~analog[0].mask & ANALOG_HAT_FCS) >> 8) | ((~analog[0].mask & ANALOG_HAT_FCS) << 2) | ((~analog[0].mask & ANALOG_HAT_FCS) << 4); analog[0].mask &= ~(ANALOG_THROTTLE | ANALOG_RUDDER) | (((~analog[0].mask & ANALOG_BTNS_TLR ) >> 10) & ((~analog[0].mask & ANALOG_BTNS_TLR2) >> 12)); analog[1].mask = ((i >> 20) & 0xff) | ((i >> 12) & 0xf0000); analog[1].mask &= (analog[0].mask & ANALOG_EXTENSIONS) ? ANALOG_GAMEPAD : (((ANALOG_BTNS_STD | port->mask) & ~analog[0].mask) | ANALOG_GAMEPAD); if (port->cooked) { for (i = 0; i < 4; i++) max[i] = port->axes[i] << 1; if ((analog[0].mask & 0x7) == 0x7) max[2] = (max[0] + max[1]) >> 1; if ((analog[0].mask & 0xb) == 0xb) max[3] = (max[0] + max[1]) >> 1; if ((analog[0].mask & ANALOG_BTN_TL) && !(analog[0].mask & ANALOG_BTN_TL2)) max[2] >>= 1; if ((analog[0].mask & ANALOG_BTN_TR) && !(analog[0].mask & ANALOG_BTN_TR2)) max[3] >>= 1; if ((analog[0].mask & ANALOG_HAT_FCS)) max[3] >>= 1; gameport_calibrate(port->gameport, port->axes, max); } for (i = 0; i < 4; i++) port->initial[i] = port->axes[i]; return -!(analog[0].mask || analog[1].mask); } static int analog_init_port(struct gameport *gameport, struct gameport_driver *drv, struct analog_port *port) { int i, t, u, v; port->gameport = gameport; gameport_set_drvdata(gameport, port); if (!gameport_open(gameport, drv, GAMEPORT_MODE_RAW)) { analog_calibrate_timer(port); gameport_trigger(gameport); t = gameport_read(gameport); msleep(ANALOG_MAX_TIME); port->mask = (gameport_read(gameport) ^ t) & t & 0xf; port->fuzz = (port->speed * ANALOG_FUZZ_MAGIC) / port->loop / 1000 + ANALOG_FUZZ_BITS; for (i = 0; i < ANALOG_INIT_RETRIES; i++) { if (!analog_cooked_read(port)) break; msleep(ANALOG_MAX_TIME); } u = v = 0; msleep(ANALOG_MAX_TIME); t = gameport_time(gameport, ANALOG_MAX_TIME * 1000); gameport_trigger(gameport); while ((gameport_read(port->gameport) & port->mask) && (u < t)) u++; 
udelay(ANALOG_SAITEK_DELAY); t = gameport_time(gameport, ANALOG_SAITEK_TIME); gameport_trigger(gameport); while ((gameport_read(port->gameport) & port->mask) && (v < t)) v++; if (v < (u >> 1)) { /* FIXME - more than one port */ analog_options[0] |= /* FIXME - more than one port */ ANALOG_SAITEK | ANALOG_BTNS_CHF | ANALOG_HBTN_CHF | ANALOG_HAT1_CHF; return 0; } gameport_close(gameport); } if (!gameport_open(gameport, drv, GAMEPORT_MODE_COOKED)) { for (i = 0; i < ANALOG_INIT_RETRIES; i++) if (!gameport_cooked_read(gameport, port->axes, &port->buttons)) break; for (i = 0; i < 4; i++) if (port->axes[i] != -1) port->mask |= 1 << i; port->fuzz = gameport->fuzz; port->cooked = 1; return 0; } return gameport_open(gameport, drv, GAMEPORT_MODE_RAW); } static int analog_connect(struct gameport *gameport, struct gameport_driver *drv) { struct analog_port *port; int i; int err; if (!(port = kzalloc(sizeof(struct analog_port), GFP_KERNEL))) return - ENOMEM; err = analog_init_port(gameport, drv, port); if (err) goto fail1; err = analog_init_masks(port); if (err) goto fail2; gameport_set_poll_handler(gameport, analog_poll); gameport_set_poll_interval(gameport, 10); for (i = 0; i < 2; i++) if (port->analog[i].mask) { err = analog_init_device(port, port->analog + i, i); if (err) goto fail3; } return 0; fail3: while (--i >= 0) if (port->analog[i].mask) input_unregister_device(port->analog[i].dev); fail2: gameport_close(gameport); fail1: gameport_set_drvdata(gameport, NULL); kfree(port); return err; } static void analog_disconnect(struct gameport *gameport) { struct analog_port *port = gameport_get_drvdata(gameport); int i; for (i = 0; i < 2; i++) if (port->analog[i].mask) input_unregister_device(port->analog[i].dev); gameport_close(gameport); gameport_set_drvdata(gameport, NULL); printk(KERN_INFO "analog.c: %d out of %d reads (%d%%) on %s failed\n", port->bads, port->reads, port->reads ? 
(port->bads * 100 / port->reads) : 0, port->gameport->phys); kfree(port); } struct analog_types { char *name; int value; }; static struct analog_types analog_types[] = { { "none", 0x00000000 }, { "auto", 0x000000ff }, { "2btn", 0x0000003f }, { "y-joy", 0x0cc00033 }, { "y-pad", 0x8cc80033 }, { "fcs", 0x000008f7 }, { "chf", 0x000002ff }, { "fullchf", 0x000007ff }, { "gamepad", 0x000830f3 }, { "gamepad8", 0x0008f0f3 }, { NULL, 0 } }; static void analog_parse_options(void) { int i, j; char *end; for (i = 0; i < js_nargs; i++) { for (j = 0; analog_types[j].name; j++) if (!strcmp(analog_types[j].name, js[i])) { analog_options[i] = analog_types[j].value; break; } if (analog_types[j].name) continue; analog_options[i] = simple_strtoul(js[i], &end, 0); if (end != js[i]) continue; analog_options[i] = 0xff; if (!strlen(js[i])) continue; printk(KERN_WARNING "analog.c: Bad config for port %d - \"%s\"\n", i, js[i]); } for (; i < ANALOG_PORTS; i++) analog_options[i] = 0xff; } /* * The gameport device structure. */ static struct gameport_driver analog_drv = { .driver = { .name = "analog", }, .description = DRIVER_DESC, .connect = analog_connect, .disconnect = analog_disconnect, }; static int __init analog_init(void) { analog_parse_options(); return gameport_register_driver(&analog_drv); } static void __exit analog_exit(void) { gameport_unregister_driver(&analog_drv); } module_init(analog_init); module_exit(analog_exit);
gpl-2.0
toastido/N900TUVUDNF4
crypto/md4.c
7155
6308
/*
 * Cryptographic API.
 *
 * MD4 Message Digest Algorithm (RFC1320).
 *
 * Implementation derived from Andrew Tridgell and Steve French's
 * CIFS MD4 implementation, and the cryptoapi implementation
 * originally based on the public domain implementation written
 * by Colin Plumb in 1993.
 *
 * Copyright (c) Andrew Tridgell 1997-1998.
 * Modified by Steve French (sfrench@us.ibm.com) 2002
 * Copyright (c) Cryptoapi developers.
 * Copyright (c) 2002 David S. Miller (davem@redhat.com)
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>

#define MD4_DIGEST_SIZE		16
#define MD4_HMAC_BLOCK_SIZE	64
#define MD4_BLOCK_WORDS		16
#define MD4_HASH_WORDS		4

/* Per-request hash state: running digest, 64-byte input buffer, total length. */
struct md4_ctx {
	u32 hash[MD4_HASH_WORDS];
	u32 block[MD4_BLOCK_WORDS];
	u64 byte_count;
};

/*
 * 32-bit left rotate.  Callers only pass s in 1..31, so the (32 - s)
 * shift never reaches the undefined shift-by-32 case.
 */
static inline u32 lshift(u32 x, unsigned int s)
{
	x &= 0xFFFFFFFF;
	return ((x << s) & 0xFFFFFFFF) | (x >> (32 - s));
}

/* The three RFC1320 auxiliary functions for rounds 1, 2 and 3. */
static inline u32 F(u32 x, u32 y, u32 z)
{
	return (x & y) | ((~x) & z);
}

static inline u32 G(u32 x, u32 y, u32 z)
{
	return (x & y) | (x & z) | (y & z);
}

static inline u32 H(u32 x, u32 y, u32 z)
{
	return x ^ y ^ z;
}

/* One step of each round; the round-2/3 constants come from RFC1320. */
#define ROUND1(a,b,c,d,k,s) (a = lshift(a + F(b,c,d) + k, s))
#define ROUND2(a,b,c,d,k,s) (a = lshift(a + G(b,c,d) + k + (u32)0x5A827999,s))
#define ROUND3(a,b,c,d,k,s) (a = lshift(a + H(b,c,d) + k + (u32)0x6ED9EBA1,s))

/* XXX: this stuff can be optimized */
static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
{
	while (words--) {
		__le32_to_cpus(buf);
		buf++;
	}
}

static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
{
	while (words--) {
		__cpu_to_le32s(buf);
		buf++;
	}
}

/*
 * Core compression function: mixes one 16-word (64-byte) block, already
 * converted to host byte order, into the running hash state.
 */
static void md4_transform(u32 *hash, u32 const *in)
{
	u32 a, b, c, d;

	a = hash[0];
	b = hash[1];
	c = hash[2];
	d = hash[3];

	ROUND1(a, b, c, d, in[0], 3);
	ROUND1(d, a, b, c, in[1], 7);
	ROUND1(c, d, a, b, in[2], 11);
	ROUND1(b, c, d, a, in[3], 19);
	ROUND1(a, b, c, d, in[4], 3);
	ROUND1(d, a, b, c, in[5], 7);
	ROUND1(c, d, a, b, in[6], 11);
	ROUND1(b, c, d, a, in[7], 19);
	ROUND1(a, b, c, d, in[8], 3);
	ROUND1(d, a, b, c, in[9], 7);
	ROUND1(c, d, a, b, in[10], 11);
	ROUND1(b, c, d, a, in[11], 19);
	ROUND1(a, b, c, d, in[12], 3);
	ROUND1(d, a, b, c, in[13], 7);
	ROUND1(c, d, a, b, in[14], 11);
	ROUND1(b, c, d, a, in[15], 19);

	ROUND2(a, b, c, d, in[0], 3);
	ROUND2(d, a, b, c, in[4], 5);
	ROUND2(c, d, a, b, in[8], 9);
	ROUND2(b, c, d, a, in[12], 13);
	ROUND2(a, b, c, d, in[1], 3);
	ROUND2(d, a, b, c, in[5], 5);
	ROUND2(c, d, a, b, in[9], 9);
	ROUND2(b, c, d, a, in[13], 13);
	ROUND2(a, b, c, d, in[2], 3);
	ROUND2(d, a, b, c, in[6], 5);
	ROUND2(c, d, a, b, in[10], 9);
	ROUND2(b, c, d, a, in[14], 13);
	ROUND2(a, b, c, d, in[3], 3);
	ROUND2(d, a, b, c, in[7], 5);
	ROUND2(c, d, a, b, in[11], 9);
	ROUND2(b, c, d, a, in[15], 13);

	ROUND3(a, b, c, d, in[0], 3);
	ROUND3(d, a, b, c, in[8], 9);
	ROUND3(c, d, a, b, in[4], 11);
	ROUND3(b, c, d, a, in[12], 15);
	ROUND3(a, b, c, d, in[2], 3);
	ROUND3(d, a, b, c, in[10], 9);
	ROUND3(c, d, a, b, in[6], 11);
	ROUND3(b, c, d, a, in[14], 15);
	ROUND3(a, b, c, d, in[1], 3);
	ROUND3(d, a, b, c, in[9], 9);
	ROUND3(c, d, a, b, in[5], 11);
	ROUND3(b, c, d, a, in[13], 15);
	ROUND3(a, b, c, d, in[3], 3);
	ROUND3(d, a, b, c, in[11], 9);
	ROUND3(c, d, a, b, in[7], 11);
	ROUND3(b, c, d, a, in[15], 15);

	hash[0] += a;
	hash[1] += b;
	hash[2] += c;
	hash[3] += d;
}

/* Byte-swap the buffered block to host order, then compress it. */
static inline void md4_transform_helper(struct md4_ctx *ctx)
{
	le32_to_cpu_array(ctx->block, ARRAY_SIZE(ctx->block));
	md4_transform(ctx->hash, ctx->block);
}

/* .init: load the RFC1320 initial chaining values and reset the count. */
static int md4_init(struct shash_desc *desc)
{
	struct md4_ctx *mctx = shash_desc_ctx(desc);

	mctx->hash[0] = 0x67452301;
	mctx->hash[1] = 0xefcdab89;
	mctx->hash[2] = 0x98badcfe;
	mctx->hash[3] = 0x10325476;
	mctx->byte_count = 0;

	return 0;
}

/*
 * .update: buffer incoming data into the 64-byte block, compressing each
 * time the block fills; any tail shorter than a block stays buffered.
 */
static int md4_update(struct shash_desc *desc, const u8 *data, unsigned int len)
{
	struct md4_ctx *mctx = shash_desc_ctx(desc);
	/* Room left in the partial block (byte_count & 0x3f = bytes used). */
	const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);

	mctx->byte_count += len;

	if (avail > len) {
		/* Not enough to complete a block; just buffer and return. */
		memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
		       data, len);
		return 0;
	}

	/* Complete the partial block and compress it. */
	memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
	       data, avail);

	md4_transform_helper(mctx);
	data += avail;
	len -= avail;

	/* Compress any further full blocks directly from the input. */
	while (len >= sizeof(mctx->block)) {
		memcpy(mctx->block, data, sizeof(mctx->block));
		md4_transform_helper(mctx);
		data += sizeof(mctx->block);
		len -= sizeof(mctx->block);
	}

	/* Stash the remainder for the next update/final. */
	memcpy(mctx->block, data, len);

	return 0;
}

/*
 * .final: apply MD4 padding (0x80, zeros, 64-bit little-endian bit count
 * in words 14/15), compress, emit the little-endian digest, and wipe the
 * context.
 */
static int md4_final(struct shash_desc *desc, u8 *out)
{
	struct md4_ctx *mctx = shash_desc_ctx(desc);
	const unsigned int offset = mctx->byte_count & 0x3f;
	char *p = (char *)mctx->block + offset;
	int padding = 56 - (offset + 1);

	*p++ = 0x80;
	if (padding < 0) {
		/* No room for the length in this block: pad it out,
		 * compress, and continue padding in a fresh block. */
		memset(p, 0x00, padding + sizeof (u64));
		md4_transform_helper(mctx);
		p = (char *)mctx->block;
		padding = 56;
	}

	memset(p, 0, padding);
	/* Message length in bits, split across two 32-bit words. */
	mctx->block[14] = mctx->byte_count << 3;
	mctx->block[15] = mctx->byte_count >> 29;
	/* Words 0-13 are raw buffered bytes and need the LE conversion;
	 * 14/15 were just written in host order, so they are excluded. */
	le32_to_cpu_array(mctx->block, (sizeof(mctx->block) -
	                  sizeof(u64)) / sizeof(u32));
	md4_transform(mctx->hash, mctx->block);
	cpu_to_le32_array(mctx->hash, ARRAY_SIZE(mctx->hash));
	memcpy(out, mctx->hash, sizeof(mctx->hash));
	memset(mctx, 0, sizeof(*mctx));

	return 0;
}

static struct shash_alg alg = {
	.digestsize	=	MD4_DIGEST_SIZE,
	.init		=	md4_init,
	.update		=	md4_update,
	.final		=	md4_final,
	.descsize	=	sizeof(struct md4_ctx),
	.base		=	{
		.cra_name	=	"md4",
		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize	=	MD4_HMAC_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
};

static int __init md4_mod_init(void)
{
	return crypto_register_shash(&alg);
}

static void __exit md4_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}

module_init(md4_mod_init);
module_exit(md4_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD4 Message Digest Algorithm");
gpl-2.0
denkem/enru-3.1.10-g7f360be
arch/microblaze/lib/memset.c
7667
2427
/* * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> * Copyright (C) 2008-2009 PetaLogix * Copyright (C) 2007 John Williams * * Reasonably optimised generic C-code for memset on Microblaze * This is generic C code to do efficient, alignment-aware memcpy. * * It is based on demo code originally Copyright 2001 by Intel Corp, taken from * http://www.embedded.com/showArticle.jhtml?articleID=19205567 * * Attempts were made, unsuccessfully, to contact the original * author of this code (Michael Morrow, Intel). Below is the original * copyright notice. * * This software has been developed by Intel Corporation. * Intel specifically disclaims all warranties, express or * implied, and all liability, including consequential and * other indirect damages, for the use of this program, including * liability for infringement of any proprietary rights, * and including the warranties of merchantability and fitness * for a particular purpose. Intel does not assume any * responsibility for and errors which may appear in this program * not any responsibility to update it. */ #include <linux/types.h> #include <linux/stddef.h> #include <linux/compiler.h> #include <linux/module.h> #include <linux/string.h> #ifdef __HAVE_ARCH_MEMSET #ifndef CONFIG_OPT_LIB_FUNCTION void *memset(void *v_src, int c, __kernel_size_t n) { char *src = v_src; /* Truncate c to 8 bits */ c = (c & 0xFF); /* Simple, byte oriented memset or the rest of count. 
*/ while (n--) *src++ = c; return v_src; } #else /* CONFIG_OPT_LIB_FUNCTION */ void *memset(void *v_src, int c, __kernel_size_t n) { char *src = v_src; uint32_t *i_src; uint32_t w32 = 0; /* Truncate c to 8 bits */ c = (c & 0xFF); if (unlikely(c)) { /* Make a repeating word out of it */ w32 = c; w32 |= w32 << 8; w32 |= w32 << 16; } if (likely(n >= 4)) { /* Align the destination to a word boundary */ /* This is done in an endian independent manner */ switch ((unsigned) src & 3) { case 1: *src++ = c; --n; case 2: *src++ = c; --n; case 3: *src++ = c; --n; } i_src = (void *)src; /* Do as many full-word copies as we can */ for (; n >= 4; n -= 4) *i_src++ = w32; src = (void *)i_src; } /* Simple, byte oriented memset or the rest of count. */ while (n--) *src++ = c; return v_src; } #endif /* CONFIG_OPT_LIB_FUNCTION */ EXPORT_SYMBOL(memset); #endif /* __HAVE_ARCH_MEMSET */
gpl-2.0
ISTweak/android_kernel_sharp_is14sh
tools/perf/builtin-help.c
9203
11439
/*
 * builtin-help.c
 *
 * Builtin help command
 */
#include "perf.h"
#include "util/cache.h"
#include "builtin.h"
#include "util/exec_cmd.h"
#include "common-cmds.h"
#include "util/parse-options.h"
#include "util/run-command.h"
#include "util/help.h"

/* Ordered list of man viewers configured via "man.viewer". */
static struct man_viewer_list {
	struct man_viewer_list *next;
	char name[FLEX_ARRAY];
} *man_viewer_list;

/* Per-viewer path/cmd values from "man.<tool>.path" / "man.<tool>.cmd". */
static struct man_viewer_info_list {
	struct man_viewer_info_list *next;
	const char *info;
	char name[FLEX_ARRAY];
} *man_viewer_info_list;

enum help_format {
	HELP_FORMAT_MAN,
	HELP_FORMAT_INFO,
	HELP_FORMAT_WEB,
};

static bool show_all = false;
static enum help_format help_format = HELP_FORMAT_MAN;
static struct option builtin_help_options[] = {
	OPT_BOOLEAN('a', "all", &show_all, "print all available commands"),
	OPT_SET_UINT('m', "man", &help_format, "show man page", HELP_FORMAT_MAN),
	OPT_SET_UINT('w', "web", &help_format, "show manual in web browser",
			HELP_FORMAT_WEB),
	OPT_SET_UINT('i', "info", &help_format, "show info page",
			HELP_FORMAT_INFO),
	OPT_END(),
};

static const char * const builtin_help_usage[] = {
	"perf help [--all] [--man|--web|--info] [command]",
	NULL
};

/* Map a format name from config/CLI to the enum; dies on unknown input. */
static enum help_format parse_help_format(const char *format)
{
	if (!strcmp(format, "man"))
		return HELP_FORMAT_MAN;
	if (!strcmp(format, "info"))
		return HELP_FORMAT_INFO;
	if (!strcmp(format, "web") || !strcmp(format, "html"))
		return HELP_FORMAT_WEB;
	die("unrecognized help format '%s'", format);
}

/* Look up the configured path/cmd for a viewer; NULL when unconfigured. */
static const char *get_man_viewer_info(const char *name)
{
	struct man_viewer_info_list *viewer;

	for (viewer = man_viewer_info_list; viewer; viewer = viewer->next) {
		if (!strcasecmp(name, viewer->name))
			return viewer->info;
	}
	return NULL;
}

/*
 * Run "emacsclient --version" and verify it is version >= 22 (needed for
 * woman support).  Returns 0 when new enough, -1 otherwise.
 */
static int check_emacsclient_version(void)
{
	struct strbuf buffer = STRBUF_INIT;
	struct child_process ec_process;
	const char *argv_ec[] = { "emacsclient", "--version", NULL };
	int version;

	/* emacsclient prints its version number on stderr */
	memset(&ec_process, 0, sizeof(ec_process));
	ec_process.argv = argv_ec;
	ec_process.err = -1;
	ec_process.stdout_to_stderr = 1;
	if (start_command(&ec_process)) {
		fprintf(stderr, "Failed to start emacsclient.\n");
		return -1;
	}
	strbuf_read(&buffer, ec_process.err, 20);
	close(ec_process.err);

	/*
	 * Don't bother checking return value, because "emacsclient --version"
	 * seems to always exits with code 1.
	 */
	finish_command(&ec_process);

	if (prefixcmp(buffer.buf, "emacsclient")) {
		fprintf(stderr, "Failed to parse emacsclient version.\n");
		strbuf_release(&buffer);
		return -1;
	}

	strbuf_remove(&buffer, 0, strlen("emacsclient"));
	version = atoi(buffer.buf);

	if (version < 22) {
		fprintf(stderr,
			"emacsclient version '%d' too old (< 22).\n",
			version);
		strbuf_release(&buffer);
		return -1;
	}

	strbuf_release(&buffer);
	return 0;
}

/* Display the page via emacsclient's woman mode; only returns on failure. */
static void exec_woman_emacs(const char *path, const char *page)
{
	if (!check_emacsclient_version()) {
		/* This works only with emacsclient version >= 22. */
		struct strbuf man_page = STRBUF_INIT;

		if (!path)
			path = "emacsclient";
		strbuf_addf(&man_page, "(woman \"%s\")", page);
		execlp(path, "emacsclient", "-e", man_page.buf, NULL);
		warning("failed to exec '%s': %s", path, strerror(errno));
	}
}

/*
 * Display the page in konqueror via kfmclient; requires DISPLAY to be
 * set.  Only returns on failure (or without X).
 */
static void exec_man_konqueror(const char *path, const char *page)
{
	const char *display = getenv("DISPLAY");

	if (display && *display) {
		struct strbuf man_page = STRBUF_INIT;
		const char *filename = "kfmclient";

		/* It's simpler to launch konqueror using kfmclient. */
		if (path) {
			const char *file = strrchr(path, '/');
			if (file && !strcmp(file + 1, "konqueror")) {
				char *new = strdup(path);
				char *dest = strrchr(new, '/');

				/* strlen("konqueror") == strlen("kfmclient") */
				strcpy(dest + 1, "kfmclient");
				path = new;
			}
			if (file)
				filename = file;
		} else
			path = "kfmclient";
		strbuf_addf(&man_page, "man:%s(1)", page);
		execlp(path, filename, "newTab", man_page.buf, NULL);
		warning("failed to exec '%s': %s", path, strerror(errno));
	}
}

/* Display the page with man(1); only returns on failure. */
static void exec_man_man(const char *path, const char *page)
{
	if (!path)
		path = "man";
	execlp(path, "man", page, NULL);
	warning("failed to exec '%s': %s", path, strerror(errno));
}

/* Run a user-configured viewer command via the shell. */
static void exec_man_cmd(const char *cmd, const char *page)
{
	struct strbuf shell_cmd = STRBUF_INIT;

	strbuf_addf(&shell_cmd, "%s %s", cmd, page);
	execl("/bin/sh", "sh", "-c", shell_cmd.buf, NULL);
	warning("failed to exec '%s': %s", cmd, strerror(errno));
}

/* Append a viewer name to the end of the viewer list (config order). */
static void add_man_viewer(const char *name)
{
	struct man_viewer_list **p = &man_viewer_list;
	size_t len = strlen(name);

	while (*p)
		p = &((*p)->next);
	/* zalloc zero-fills, so the copied name is NUL-terminated. */
	*p = zalloc(sizeof(**p) + len + 1);
	strncpy((*p)->name, name, len);
}

/* Viewers for which a "path" setting is meaningful. */
static int supported_man_viewer(const char *name, size_t len)
{
	return (!strncasecmp("man", name, len) ||
		!strncasecmp("woman", name, len) ||
		!strncasecmp("konqueror", name, len));
}

/* Prepend a (name, value) entry to the viewer-info list. */
static void do_add_man_viewer_info(const char *name,
				   size_t len,
				   const char *value)
{
	struct man_viewer_info_list *new = zalloc(sizeof(*new) + len + 1);

	strncpy(new->name, name, len);
	new->info = strdup(value);
	new->next = man_viewer_info_list;
	man_viewer_info_list = new;
}

/* Handle "man.<tool>.path": only valid for the built-in viewers. */
static int add_man_viewer_path(const char *name,
			       size_t len,
			       const char *value)
{
	if (supported_man_viewer(name, len))
		do_add_man_viewer_info(name, len, value);
	else
		warning("'%s': path for unsupported man viewer.\n"
			"Please consider using 'man.<tool>.cmd' instead.",
			name);

	return 0;
}

/* Handle "man.<tool>.cmd": only valid for viewers perf doesn't know. */
static int add_man_viewer_cmd(const char *name,
			      size_t len,
			      const char *value)
{
	if (supported_man_viewer(name, len))
		warning("'%s': cmd for supported man viewer.\n"
			"Please consider using 'man.<tool>.path' instead.",
			name);
	else
		do_add_man_viewer_info(name, len, value);

	return 0;
}

/* Dispatch "man.<tool>.(path|cmd)" config keys; var starts with "man.". */
static int add_man_viewer_info(const char *var, const char *value)
{
	const char *name = var + 4;
	const char *subkey = strrchr(name, '.');

	if (!subkey)
		return error("Config with no key for man viewer: %s", name);

	if (!strcmp(subkey, ".path")) {
		if (!value)
			return config_error_nonbool(var);
		return add_man_viewer_path(name, subkey - name, value);
	}
	if (!strcmp(subkey, ".cmd")) {
		if (!value)
			return config_error_nonbool(var);
		return add_man_viewer_cmd(name, subkey - name, value);
	}

	warning("'%s': unsupported man viewer sub key.", subkey);
	return 0;
}

/* perf_config() callback: help.format, man.viewer and man.* keys. */
static int perf_help_config(const char *var, const char *value, void *cb)
{
	if (!strcmp(var, "help.format")) {
		if (!value)
			return config_error_nonbool(var);
		help_format = parse_help_format(value);
		return 0;
	}
	if (!strcmp(var, "man.viewer")) {
		if (!value)
			return config_error_nonbool(var);
		add_man_viewer(value);
		return 0;
	}
	if (!prefixcmp(var, "man."))
		return add_man_viewer_info(var, value);

	return perf_default_config(var, value, cb);
}

static struct cmdnames main_cmds, other_cmds;

/* Print the common_cmds table, aligned on the longest command name. */
void list_common_cmds_help(void)
{
	unsigned int i, longest = 0;

	for (i = 0; i < ARRAY_SIZE(common_cmds); i++) {
		if (longest < strlen(common_cmds[i].name))
			longest = strlen(common_cmds[i].name);
	}

	puts(" The most commonly used perf commands are:");
	for (i = 0; i < ARRAY_SIZE(common_cmds); i++) {
		printf(" %-*s ", longest, common_cmds[i].name);
		puts(common_cmds[i].help);
	}
}

/* True when s is a known perf subcommand (builtin or external). */
static int is_perf_command(const char *s)
{
	return is_in_cmdlist(&main_cmds, s) ||
		is_in_cmdlist(&other_cmds, s);
}

/* Return a newly malloc'd "prefix + cmd" string (caller owns it). */
static const char *prepend(const char *prefix, const char *cmd)
{
	size_t pre_len = strlen(prefix);
	size_t cmd_len = strlen(cmd);
	char *p = malloc(pre_len + cmd_len + 1);
	memcpy(p, prefix, pre_len);
	strcpy(p + pre_len, cmd);
	return p;
}

/* Map a subcommand to its man-page name ("record" -> "perf-record"). */
static const char *cmd_to_page(const char *perf_cmd)
{
	if (!perf_cmd)
		return "perf";
	else if (!prefixcmp(perf_cmd, "perf"))
		return perf_cmd;
	else
		return prepend("perf-", perf_cmd);
}

/* Put perf's man directory at the front of MANPATH. */
static void setup_man_path(void)
{
	struct strbuf new_path = STRBUF_INIT;
	const char *old_path = getenv("MANPATH");

	/* We should always put ':' after our path. If there is no
	 * old_path, the ':' at the end will let 'man' to try
	 * system-wide paths after ours to find the manual page. If
	 * there is old_path, we need ':' as delimiter. */
	strbuf_addstr(&new_path, system_path(PERF_MAN_PATH));
	strbuf_addch(&new_path, ':');
	if (old_path)
		strbuf_addstr(&new_path, old_path);

	setenv("MANPATH", new_path.buf, 1);

	strbuf_release(&new_path);
}

/* Dispatch to the right exec_* helper for a named viewer. */
static void exec_viewer(const char *name, const char *page)
{
	const char *info = get_man_viewer_info(name);

	if (!strcasecmp(name, "man"))
		exec_man_man(info, page);
	else if (!strcasecmp(name, "woman"))
		exec_woman_emacs(info, page);
	else if (!strcasecmp(name, "konqueror"))
		exec_man_konqueror(info, page);
	else if (info)
		exec_man_cmd(info, page);
	else
		warning("'%s': unknown man viewer.", name);
}

/*
 * Try each configured viewer, then $PERF_MAN_VIEWER, then plain man.
 * Each exec_viewer call only returns when the viewer failed to launch;
 * if all of them fail we die.
 */
static void show_man_page(const char *perf_cmd)
{
	struct man_viewer_list *viewer;
	const char *page = cmd_to_page(perf_cmd);
	const char *fallback = getenv("PERF_MAN_VIEWER");

	setup_man_path();
	for (viewer = man_viewer_list; viewer; viewer = viewer->next)
		exec_viewer(viewer->name, page); /* will return when unable */
	if (fallback)
		exec_viewer(fallback, page);
	exec_viewer("man", page);
	die("no man viewer handled the request");
}

/* Show the page with info(1); only returns if exec fails. */
static void show_info_page(const char *perf_cmd)
{
	const char *page = cmd_to_page(perf_cmd);
	setenv("INFOPATH", system_path(PERF_INFO_PATH), 1);
	execlp("info", "info", "perfman", page, NULL);
}

/* Build the path to the HTML doc for page; dies if docs are missing. */
static void get_html_page_path(struct strbuf *page_path, const char *page)
{
	struct stat st;
	const char *html_path = system_path(PERF_HTML_PATH);

	/* Check that we have a perf documentation directory. */
	if (stat(mkpath("%s/perf.html", html_path), &st)
	    || !S_ISREG(st.st_mode))
		die("'%s': not a documentation directory.", html_path);

	strbuf_init(page_path, 0);
	strbuf_addf(page_path, "%s/%s.html", html_path, page);
}

/*
 * If open_html is not defined in a platform-specific way (see for
 * example compat/mingw.h), we use the script web--browse to display
 * HTML.
 */
#ifndef open_html
static void open_html(const char *path)
{
	execl_perf_cmd("web--browse", "-c", "help.browser", path, NULL);
}
#endif

/* Show the HTML version of the page in a browser. */
static void show_html_page(const char *perf_cmd)
{
	const char *page = cmd_to_page(perf_cmd);
	struct strbuf page_path; /* it leaks but we exec below */

	get_html_page_path(&page_path, page);

	open_html(page_path.buf);
}

/*
 * Entry point for "perf help": with --all list every command, with no
 * argument print the usage summary, otherwise show the requested
 * command's documentation in the selected format.
 */
int cmd_help(int argc, const char **argv, const char *prefix __used)
{
	const char *alias;

	load_command_list("perf-", &main_cmds, &other_cmds);

	perf_config(perf_help_config, NULL);

	argc = parse_options(argc, argv, builtin_help_options,
			builtin_help_usage, 0);

	if (show_all) {
		printf("\n usage: %s\n\n", perf_usage_string);
		list_commands("perf commands", &main_cmds, &other_cmds);
		printf(" %s\n\n", perf_more_info_string);
		return 0;
	}

	if (!argv[0]) {
		printf("\n usage: %s\n\n", perf_usage_string);
		list_common_cmds_help();
		printf("\n %s\n\n", perf_more_info_string);
		return 0;
	}

	alias = alias_lookup(argv[0]);
	if (alias && !is_perf_command(argv[0])) {
		printf("`perf %s' is aliased to `%s'\n", argv[0], alias);
		return 0;
	}

	switch (help_format) {
	case HELP_FORMAT_MAN:
		show_man_page(argv[0]);
		break;
	case HELP_FORMAT_INFO:
		show_info_page(argv[0]);
		break;
	case HELP_FORMAT_WEB:
		show_html_page(argv[0]);
		/* fallthrough (show_html_page returns only on exec failure) */
	default:
		break;
	}

	return 0;
}
gpl-2.0
KINGbabasula/KING_kernel
arch/x86/pci/irq.c
10483
33408
/* * Low-Level PCI Support for PC -- Routing of Interrupts * * (c) 1999--2000 Martin Mares <mj@ucw.cz> */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/dmi.h> #include <linux/io.h> #include <linux/smp.h> #include <asm/io_apic.h> #include <linux/irq.h> #include <linux/acpi.h> #include <asm/pci_x86.h> #define PIRQ_SIGNATURE (('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24)) #define PIRQ_VERSION 0x0100 static int broken_hp_bios_irq9; static int acer_tm360_irqrouting; static struct irq_routing_table *pirq_table; static int pirq_enable_irq(struct pci_dev *dev); /* * Never use: 0, 1, 2 (timer, keyboard, and cascade) * Avoid using: 13, 14 and 15 (FP error and IDE). * Penalize: 3, 4, 6, 7, 12 (known ISA uses: serial, floppy, parallel and mouse) */ unsigned int pcibios_irq_mask = 0xfff8; static int pirq_penalty[16] = { 1000000, 1000000, 1000000, 1000, 1000, 0, 1000, 1000, 0, 0, 0, 0, 1000, 100000, 100000, 100000 }; struct irq_router { char *name; u16 vendor, device; int (*get)(struct pci_dev *router, struct pci_dev *dev, int pirq); int (*set)(struct pci_dev *router, struct pci_dev *dev, int pirq, int new); }; struct irq_router_handler { u16 vendor; int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device); }; int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq; void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL; /* * Check passed address for the PCI IRQ Routing Table signature * and perform checksum verification. 
*/ static inline struct irq_routing_table *pirq_check_routing_table(u8 *addr) { struct irq_routing_table *rt; int i; u8 sum; rt = (struct irq_routing_table *) addr; if (rt->signature != PIRQ_SIGNATURE || rt->version != PIRQ_VERSION || rt->size % 16 || rt->size < sizeof(struct irq_routing_table)) return NULL; sum = 0; for (i = 0; i < rt->size; i++) sum += addr[i]; if (!sum) { DBG(KERN_DEBUG "PCI: Interrupt Routing Table found at 0x%p\n", rt); return rt; } return NULL; } /* * Search 0xf0000 -- 0xfffff for the PCI IRQ Routing Table. */ static struct irq_routing_table * __init pirq_find_routing_table(void) { u8 *addr; struct irq_routing_table *rt; if (pirq_table_addr) { rt = pirq_check_routing_table((u8 *) __va(pirq_table_addr)); if (rt) return rt; printk(KERN_WARNING "PCI: PIRQ table NOT found at pirqaddr\n"); } for (addr = (u8 *) __va(0xf0000); addr < (u8 *) __va(0x100000); addr += 16) { rt = pirq_check_routing_table(addr); if (rt) return rt; } return NULL; } /* * If we have a IRQ routing table, use it to search for peer host * bridges. It's a gross hack, but since there are no other known * ways how to get a list of buses, we have to go this way. 
*/ static void __init pirq_peer_trick(void) { struct irq_routing_table *rt = pirq_table; u8 busmap[256]; int i; struct irq_info *e; memset(busmap, 0, sizeof(busmap)); for (i = 0; i < (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); i++) { e = &rt->slots[i]; #ifdef DEBUG { int j; DBG(KERN_DEBUG "%02x:%02x slot=%02x", e->bus, e->devfn/8, e->slot); for (j = 0; j < 4; j++) DBG(" %d:%02x/%04x", j, e->irq[j].link, e->irq[j].bitmap); DBG("\n"); } #endif busmap[e->bus] = 1; } for (i = 1; i < 256; i++) { int node; if (!busmap[i] || pci_find_bus(0, i)) continue; node = get_mp_bus_to_node(i); if (pci_scan_bus_on_node(i, &pci_root_ops, node)) printk(KERN_INFO "PCI: Discovered primary peer " "bus %02x [IRQ]\n", i); } pcibios_last_bus = -1; } /* * Code for querying and setting of IRQ routes on various interrupt routers. */ void eisa_set_level_irq(unsigned int irq) { unsigned char mask = 1 << (irq & 7); unsigned int port = 0x4d0 + (irq >> 3); unsigned char val; static u16 eisa_irq_mask; if (irq >= 16 || (1 << irq) & eisa_irq_mask) return; eisa_irq_mask |= (1 << irq); printk(KERN_DEBUG "PCI: setting IRQ %u as level-triggered\n", irq); val = inb(port); if (!(val & mask)) { DBG(KERN_DEBUG " -> edge"); outb(val | mask, port); } } /* * Common IRQ routing practice: nibbles in config space, * offset by some magic constant. */ static unsigned int read_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr) { u8 x; unsigned reg = offset + (nr >> 1); pci_read_config_byte(router, reg, &x); return (nr & 1) ? (x >> 4) : (x & 0xf); } static void write_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr, unsigned int val) { u8 x; unsigned reg = offset + (nr >> 1); pci_read_config_byte(router, reg, &x); x = (nr & 1) ? ((x & 0x0f) | (val << 4)) : ((x & 0xf0) | val); pci_write_config_byte(router, reg, x); } /* * ALI pirq entries are damn ugly, and completely undocumented. 
* This has been figured out from pirq tables, and it's not a pretty * picture. */ static int pirq_ali_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { static const unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 }; WARN_ON_ONCE(pirq > 16); return irqmap[read_config_nybble(router, 0x48, pirq-1)]; } static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { static const unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 }; unsigned int val = irqmap[irq]; WARN_ON_ONCE(pirq > 16); if (val) { write_config_nybble(router, 0x48, pirq-1, val); return 1; } return 0; } /* * The Intel PIIX4 pirq rules are fairly simple: "pirq" is * just a pointer to the config space. */ static int pirq_piix_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { u8 x; pci_read_config_byte(router, pirq, &x); return (x < 16) ? x : 0; } static int pirq_piix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { pci_write_config_byte(router, pirq, irq); return 1; } /* * The VIA pirq rules are nibble-based, like ALI, * but without the ugly irq number munging. * However, PIRQD is in the upper instead of lower 4 bits. */ static int pirq_via_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { return read_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq); } static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { write_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq, irq); return 1; } /* * The VIA pirq rules are nibble-based, like ALI, * but without the ugly irq number munging. * However, for 82C586, nibble map is different . 
*/ static int pirq_via586_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 }; WARN_ON_ONCE(pirq > 5); return read_config_nybble(router, 0x55, pirqmap[pirq-1]); } static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 }; WARN_ON_ONCE(pirq > 5); write_config_nybble(router, 0x55, pirqmap[pirq-1], irq); return 1; } /* * ITE 8330G pirq rules are nibble-based * FIXME: pirqmap may be { 1, 0, 3, 2 }, * 2+3 are both mapped to irq 9 on my system */ static int pirq_ite_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { static const unsigned char pirqmap[4] = { 1, 0, 2, 3 }; WARN_ON_ONCE(pirq > 4); return read_config_nybble(router, 0x43, pirqmap[pirq-1]); } static int pirq_ite_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { static const unsigned char pirqmap[4] = { 1, 0, 2, 3 }; WARN_ON_ONCE(pirq > 4); write_config_nybble(router, 0x43, pirqmap[pirq-1], irq); return 1; } /* * OPTI: high four bits are nibble pointer.. * I wonder what the low bits do? */ static int pirq_opti_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { return read_config_nybble(router, 0xb8, pirq >> 4); } static int pirq_opti_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { write_config_nybble(router, 0xb8, pirq >> 4, irq); return 1; } /* * Cyrix: nibble offset 0x5C * 0x5C bits 7:4 is INTB bits 3:0 is INTA * 0x5D bits 7:4 is INTD bits 3:0 is INTC */ static int pirq_cyrix_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { return read_config_nybble(router, 0x5C, (pirq-1)^1); } static int pirq_cyrix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { write_config_nybble(router, 0x5C, (pirq-1)^1, irq); return 1; } /* * PIRQ routing for SiS 85C503 router used in several SiS chipsets. 
* We have to deal with the following issues here: * - vendors have different ideas about the meaning of link values * - some onboard devices (integrated in the chipset) have special * links and are thus routed differently (i.e. not via PCI INTA-INTD) * - different revision of the router have a different layout for * the routing registers, particularly for the onchip devices * * For all routing registers the common thing is we have one byte * per routeable link which is defined as: * bit 7 IRQ mapping enabled (0) or disabled (1) * bits [6:4] reserved (sometimes used for onchip devices) * bits [3:0] IRQ to map to * allowed: 3-7, 9-12, 14-15 * reserved: 0, 1, 2, 8, 13 * * The config-space registers located at 0x41/0x42/0x43/0x44 are * always used to route the normal PCI INT A/B/C/D respectively. * Apparently there are systems implementing PCI routing table using * link values 0x01-0x04 and others using 0x41-0x44 for PCI INTA..D. * We try our best to handle both link mappings. * * Currently (2003-05-21) it appears most SiS chipsets follow the * definition of routing registers from the SiS-5595 southbridge. * According to the SiS 5595 datasheets the revision id's of the * router (ISA-bridge) should be 0x01 or 0xb0. * * Furthermore we've also seen lspci dumps with revision 0x00 and 0xb1. * Looks like these are used in a number of SiS 5xx/6xx/7xx chipsets. * They seem to work with the current routing code. However there is * some concern because of the two USB-OHCI HCs (original SiS 5595 * had only one). YMMV. * * Onchip routing for router rev-id 0x01/0xb0 and probably 0x00/0xb1: * * 0x61: IDEIRQ: * bits [6:5] must be written 01 * bit 4 channel-select primary (0), secondary (1) * * 0x62: USBIRQ: * bit 6 OHCI function disabled (0), enabled (1) * * 0x6a: ACPI/SCI IRQ: bits 4-6 reserved * * 0x7e: Data Acq. Module IRQ - bits 4-6 reserved * * We support USBIRQ (in addition to INTA-INTD) and keep the * IDE, ACPI and DAQ routing untouched as set by the BIOS. 
* * Currently the only reported exception is the new SiS 65x chipset * which includes the SiS 69x southbridge. Here we have the 85C503 * router revision 0x04 and there are changes in the register layout * mostly related to the different USB HCs with USB 2.0 support. * * Onchip routing for router rev-id 0x04 (try-and-error observation) * * 0x60/0x61/0x62/0x63: 1xEHCI and 3xOHCI (companion) USB-HCs * bit 6-4 are probably unused, not like 5595 */ #define PIRQ_SIS_IRQ_MASK 0x0f #define PIRQ_SIS_IRQ_DISABLE 0x80 #define PIRQ_SIS_USB_ENABLE 0x40 static int pirq_sis_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { u8 x; int reg; reg = pirq; if (reg >= 0x01 && reg <= 0x04) reg += 0x40; pci_read_config_byte(router, reg, &x); return (x & PIRQ_SIS_IRQ_DISABLE) ? 0 : (x & PIRQ_SIS_IRQ_MASK); } static int pirq_sis_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { u8 x; int reg; reg = pirq; if (reg >= 0x01 && reg <= 0x04) reg += 0x40; pci_read_config_byte(router, reg, &x); x &= ~(PIRQ_SIS_IRQ_MASK | PIRQ_SIS_IRQ_DISABLE); x |= irq ? irq: PIRQ_SIS_IRQ_DISABLE; pci_write_config_byte(router, reg, x); return 1; } /* * VLSI: nibble offset 0x74 - educated guess due to routing table and * config space of VLSI 82C534 PCI-bridge/router (1004:0102) * Tested on HP OmniBook 800 covering PIRQ 1, 2, 4, 8 for onboard * devices, PIRQ 3 for non-pci(!) soundchip and (untested) PIRQ 6 * for the busbridge to the docking station. 
*/ static int pirq_vlsi_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { WARN_ON_ONCE(pirq >= 9); if (pirq > 8) { dev_info(&dev->dev, "VLSI router PIRQ escape (%d)\n", pirq); return 0; } return read_config_nybble(router, 0x74, pirq-1); } static int pirq_vlsi_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { WARN_ON_ONCE(pirq >= 9); if (pirq > 8) { dev_info(&dev->dev, "VLSI router PIRQ escape (%d)\n", pirq); return 0; } write_config_nybble(router, 0x74, pirq-1, irq); return 1; } /* * ServerWorks: PCI interrupts mapped to system IRQ lines through Index * and Redirect I/O registers (0x0c00 and 0x0c01). The Index register * format is (PCIIRQ## | 0x10), e.g.: PCIIRQ10=0x1a. The Redirect * register is a straight binary coding of desired PIC IRQ (low nibble). * * The 'link' value in the PIRQ table is already in the correct format * for the Index register. There are some special index values: * 0x00 for ACPI (SCI), 0x01 for USB, 0x02 for IDE0, 0x04 for IDE1, * and 0x03 for SMBus. */ static int pirq_serverworks_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { outb(pirq, 0xc00); return inb(0xc01) & 0xf; } static int pirq_serverworks_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { outb(pirq, 0xc00); outb(irq, 0xc01); return 1; } /* Support for AMD756 PCI IRQ Routing * Jhon H. Caicedo <jhcaiced@osso.org.co> * Jun/21/2001 0.2.0 Release, fixed to use "nybble" functions... 
(jhcaiced) * Jun/19/2001 Alpha Release 0.1.0 (jhcaiced) * The AMD756 pirq rules are nibble-based * offset 0x56 0-3 PIRQA 4-7 PIRQB * offset 0x57 0-3 PIRQC 4-7 PIRQD */ static int pirq_amd756_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { u8 irq; irq = 0; if (pirq <= 4) irq = read_config_nybble(router, 0x56, pirq - 1); dev_info(&dev->dev, "AMD756: dev [%04x:%04x], router PIRQ %d get IRQ %d\n", dev->vendor, dev->device, pirq, irq); return irq; } static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { dev_info(&dev->dev, "AMD756: dev [%04x:%04x], router PIRQ %d set IRQ %d\n", dev->vendor, dev->device, pirq, irq); if (pirq <= 4) write_config_nybble(router, 0x56, pirq - 1, irq); return 1; } /* * PicoPower PT86C523 */ static int pirq_pico_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { outb(0x10 + ((pirq - 1) >> 1), 0x24); return ((pirq - 1) & 1) ? (inb(0x26) >> 4) : (inb(0x26) & 0xf); } static int pirq_pico_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { unsigned int x; outb(0x10 + ((pirq - 1) >> 1), 0x24); x = inb(0x26); x = ((pirq - 1) & 1) ? 
((x & 0x0f) | (irq << 4)) : ((x & 0xf0) | (irq)); outb(x, 0x26); return 1; } #ifdef CONFIG_PCI_BIOS static int pirq_bios_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { struct pci_dev *bridge; int pin = pci_get_interrupt_pin(dev, &bridge); return pcibios_set_irq_routing(bridge, pin - 1, irq); } #endif static __init int intel_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { static struct pci_device_id __initdata pirq_440gx[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2) }, { }, }; /* 440GX has a proprietary PIRQ router -- don't use it */ if (pci_dev_present(pirq_440gx)) return 0; switch (device) { case PCI_DEVICE_ID_INTEL_82371FB_0: case PCI_DEVICE_ID_INTEL_82371SB_0: case PCI_DEVICE_ID_INTEL_82371AB_0: case PCI_DEVICE_ID_INTEL_82371MX: case PCI_DEVICE_ID_INTEL_82443MX_0: case PCI_DEVICE_ID_INTEL_82801AA_0: case PCI_DEVICE_ID_INTEL_82801AB_0: case PCI_DEVICE_ID_INTEL_82801BA_0: case PCI_DEVICE_ID_INTEL_82801BA_10: case PCI_DEVICE_ID_INTEL_82801CA_0: case PCI_DEVICE_ID_INTEL_82801CA_12: case PCI_DEVICE_ID_INTEL_82801DB_0: case PCI_DEVICE_ID_INTEL_82801E_0: case PCI_DEVICE_ID_INTEL_82801EB_0: case PCI_DEVICE_ID_INTEL_ESB_1: case PCI_DEVICE_ID_INTEL_ICH6_0: case PCI_DEVICE_ID_INTEL_ICH6_1: case PCI_DEVICE_ID_INTEL_ICH7_0: case PCI_DEVICE_ID_INTEL_ICH7_1: case PCI_DEVICE_ID_INTEL_ICH7_30: case PCI_DEVICE_ID_INTEL_ICH7_31: case PCI_DEVICE_ID_INTEL_TGP_LPC: case PCI_DEVICE_ID_INTEL_ESB2_0: case PCI_DEVICE_ID_INTEL_ICH8_0: case PCI_DEVICE_ID_INTEL_ICH8_1: case PCI_DEVICE_ID_INTEL_ICH8_2: case PCI_DEVICE_ID_INTEL_ICH8_3: case PCI_DEVICE_ID_INTEL_ICH8_4: case PCI_DEVICE_ID_INTEL_ICH9_0: case PCI_DEVICE_ID_INTEL_ICH9_1: case PCI_DEVICE_ID_INTEL_ICH9_2: case PCI_DEVICE_ID_INTEL_ICH9_3: case PCI_DEVICE_ID_INTEL_ICH9_4: case PCI_DEVICE_ID_INTEL_ICH9_5: case PCI_DEVICE_ID_INTEL_EP80579_0: case PCI_DEVICE_ID_INTEL_ICH10_0: case 
PCI_DEVICE_ID_INTEL_ICH10_1: case PCI_DEVICE_ID_INTEL_ICH10_2: case PCI_DEVICE_ID_INTEL_ICH10_3: case PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0: case PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1: r->name = "PIIX/ICH"; r->get = pirq_piix_get; r->set = pirq_piix_set; return 1; } if ((device >= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN && device <= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX) || (device >= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN && device <= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX) || (device >= PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN && device <= PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX) || (device >= PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MIN && device <= PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MAX)) { r->name = "PIIX/ICH"; r->get = pirq_piix_get; r->set = pirq_piix_set; return 1; } return 0; } static __init int via_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { /* FIXME: We should move some of the quirk fixup stuff here */ /* * workarounds for some buggy BIOSes */ if (device == PCI_DEVICE_ID_VIA_82C586_0) { switch (router->device) { case PCI_DEVICE_ID_VIA_82C686: /* * Asus k7m bios wrongly reports 82C686A * as 586-compatible */ device = PCI_DEVICE_ID_VIA_82C686; break; case PCI_DEVICE_ID_VIA_8235: /** * Asus a7v-x bios wrongly reports 8235 * as 586-compatible */ device = PCI_DEVICE_ID_VIA_8235; break; case PCI_DEVICE_ID_VIA_8237: /** * Asus a7v600 bios wrongly reports 8237 * as 586-compatible */ device = PCI_DEVICE_ID_VIA_8237; break; } } switch (device) { case PCI_DEVICE_ID_VIA_82C586_0: r->name = "VIA"; r->get = pirq_via586_get; r->set = pirq_via586_set; return 1; case PCI_DEVICE_ID_VIA_82C596: case PCI_DEVICE_ID_VIA_82C686: case PCI_DEVICE_ID_VIA_8231: case PCI_DEVICE_ID_VIA_8233A: case PCI_DEVICE_ID_VIA_8235: case PCI_DEVICE_ID_VIA_8237: /* FIXME: add new ones for 8233/5 */ r->name = "VIA"; r->get = pirq_via_get; r->set = pirq_via_set; return 1; } return 0; } static __init int vlsi_router_probe(struct irq_router *r, struct pci_dev *router, u16 
device) { switch (device) { case PCI_DEVICE_ID_VLSI_82C534: r->name = "VLSI 82C534"; r->get = pirq_vlsi_get; r->set = pirq_vlsi_set; return 1; } return 0; } static __init int serverworks_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { switch (device) { case PCI_DEVICE_ID_SERVERWORKS_OSB4: case PCI_DEVICE_ID_SERVERWORKS_CSB5: r->name = "ServerWorks"; r->get = pirq_serverworks_get; r->set = pirq_serverworks_set; return 1; } return 0; } static __init int sis_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { if (device != PCI_DEVICE_ID_SI_503) return 0; r->name = "SIS"; r->get = pirq_sis_get; r->set = pirq_sis_set; return 1; } static __init int cyrix_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { switch (device) { case PCI_DEVICE_ID_CYRIX_5520: r->name = "NatSemi"; r->get = pirq_cyrix_get; r->set = pirq_cyrix_set; return 1; } return 0; } static __init int opti_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { switch (device) { case PCI_DEVICE_ID_OPTI_82C700: r->name = "OPTI"; r->get = pirq_opti_get; r->set = pirq_opti_set; return 1; } return 0; } static __init int ite_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { switch (device) { case PCI_DEVICE_ID_ITE_IT8330G_0: r->name = "ITE"; r->get = pirq_ite_get; r->set = pirq_ite_set; return 1; } return 0; } static __init int ali_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { switch (device) { case PCI_DEVICE_ID_AL_M1533: case PCI_DEVICE_ID_AL_M1563: r->name = "ALI"; r->get = pirq_ali_get; r->set = pirq_ali_set; return 1; } return 0; } static __init int amd_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { switch (device) { case PCI_DEVICE_ID_AMD_VIPER_740B: r->name = "AMD756"; break; case PCI_DEVICE_ID_AMD_VIPER_7413: r->name = "AMD766"; break; case PCI_DEVICE_ID_AMD_VIPER_7443: r->name = "AMD768"; break; default: return 0; } r->get = pirq_amd756_get; 
r->set = pirq_amd756_set; return 1; } static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { switch (device) { case PCI_DEVICE_ID_PICOPOWER_PT86C523: r->name = "PicoPower PT86C523"; r->get = pirq_pico_get; r->set = pirq_pico_set; return 1; case PCI_DEVICE_ID_PICOPOWER_PT86C523BBP: r->name = "PicoPower PT86C523 rev. BB+"; r->get = pirq_pico_get; r->set = pirq_pico_set; return 1; } return 0; } static __initdata struct irq_router_handler pirq_routers[] = { { PCI_VENDOR_ID_INTEL, intel_router_probe }, { PCI_VENDOR_ID_AL, ali_router_probe }, { PCI_VENDOR_ID_ITE, ite_router_probe }, { PCI_VENDOR_ID_VIA, via_router_probe }, { PCI_VENDOR_ID_OPTI, opti_router_probe }, { PCI_VENDOR_ID_SI, sis_router_probe }, { PCI_VENDOR_ID_CYRIX, cyrix_router_probe }, { PCI_VENDOR_ID_VLSI, vlsi_router_probe }, { PCI_VENDOR_ID_SERVERWORKS, serverworks_router_probe }, { PCI_VENDOR_ID_AMD, amd_router_probe }, { PCI_VENDOR_ID_PICOPOWER, pico_router_probe }, /* Someone with docs needs to add the ATI Radeon IGP */ { 0, NULL } }; static struct irq_router pirq_router; static struct pci_dev *pirq_router_dev; /* * FIXME: should we have an option to say "generic for * chipset" ? 
*/ static void __init pirq_find_router(struct irq_router *r) { struct irq_routing_table *rt = pirq_table; struct irq_router_handler *h; #ifdef CONFIG_PCI_BIOS if (!rt->signature) { printk(KERN_INFO "PCI: Using BIOS for IRQ routing\n"); r->set = pirq_bios_set; r->name = "BIOS"; return; } #endif /* Default unless a driver reloads it */ r->name = "default"; r->get = NULL; r->set = NULL; DBG(KERN_DEBUG "PCI: Attempting to find IRQ router for [%04x:%04x]\n", rt->rtr_vendor, rt->rtr_device); pirq_router_dev = pci_get_bus_and_slot(rt->rtr_bus, rt->rtr_devfn); if (!pirq_router_dev) { DBG(KERN_DEBUG "PCI: Interrupt router not found at " "%02x:%02x\n", rt->rtr_bus, rt->rtr_devfn); return; } for (h = pirq_routers; h->vendor; h++) { /* First look for a router match */ if (rt->rtr_vendor == h->vendor && h->probe(r, pirq_router_dev, rt->rtr_device)) break; /* Fall back to a device match */ if (pirq_router_dev->vendor == h->vendor && h->probe(r, pirq_router_dev, pirq_router_dev->device)) break; } dev_info(&pirq_router_dev->dev, "%s IRQ router [%04x:%04x]\n", pirq_router.name, pirq_router_dev->vendor, pirq_router_dev->device); /* The device remains referenced for the kernel lifetime */ } static struct irq_info *pirq_get_info(struct pci_dev *dev) { struct irq_routing_table *rt = pirq_table; int entries = (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); struct irq_info *info; for (info = rt->slots; entries--; info++) if (info->bus == dev->bus->number && PCI_SLOT(info->devfn) == PCI_SLOT(dev->devfn)) return info; return NULL; } static int pcibios_lookup_irq(struct pci_dev *dev, int assign) { u8 pin; struct irq_info *info; int i, pirq, newirq; int irq = 0; u32 mask; struct irq_router *r = &pirq_router; struct pci_dev *dev2 = NULL; char *msg = NULL; /* Find IRQ pin */ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); if (!pin) { dev_dbg(&dev->dev, "no interrupt pin\n"); return 0; } if (io_apic_assign_pci_irqs) return 0; /* Find IRQ routing entry */ if 
(!pirq_table) return 0; info = pirq_get_info(dev); if (!info) { dev_dbg(&dev->dev, "PCI INT %c not found in routing table\n", 'A' + pin - 1); return 0; } pirq = info->irq[pin - 1].link; mask = info->irq[pin - 1].bitmap; if (!pirq) { dev_dbg(&dev->dev, "PCI INT %c not routed\n", 'A' + pin - 1); return 0; } dev_dbg(&dev->dev, "PCI INT %c -> PIRQ %02x, mask %04x, excl %04x", 'A' + pin - 1, pirq, mask, pirq_table->exclusive_irqs); mask &= pcibios_irq_mask; /* Work around broken HP Pavilion Notebooks which assign USB to IRQ 9 even though it is actually wired to IRQ 11 */ if (broken_hp_bios_irq9 && pirq == 0x59 && dev->irq == 9) { dev->irq = 11; pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11); r->set(pirq_router_dev, dev, pirq, 11); } /* same for Acer Travelmate 360, but with CB and irq 11 -> 10 */ if (acer_tm360_irqrouting && dev->irq == 11 && dev->vendor == PCI_VENDOR_ID_O2) { pirq = 0x68; mask = 0x400; dev->irq = r->get(pirq_router_dev, dev, pirq); pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); } /* * Find the best IRQ to assign: use the one * reported by the device if possible. 
*/ newirq = dev->irq; if (newirq && !((1 << newirq) & mask)) { if (pci_probe & PCI_USE_PIRQ_MASK) newirq = 0; else dev_warn(&dev->dev, "IRQ %d doesn't match PIRQ mask " "%#x; try pci=usepirqmask\n", newirq, mask); } if (!newirq && assign) { for (i = 0; i < 16; i++) { if (!(mask & (1 << i))) continue; if (pirq_penalty[i] < pirq_penalty[newirq] && can_request_irq(i, IRQF_SHARED)) newirq = i; } } dev_dbg(&dev->dev, "PCI INT %c -> newirq %d", 'A' + pin - 1, newirq); /* Check if it is hardcoded */ if ((pirq & 0xf0) == 0xf0) { irq = pirq & 0xf; msg = "hardcoded"; } else if (r->get && (irq = r->get(pirq_router_dev, dev, pirq)) && \ ((!(pci_probe & PCI_USE_PIRQ_MASK)) || ((1 << irq) & mask))) { msg = "found"; eisa_set_level_irq(irq); } else if (newirq && r->set && (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) { if (r->set(pirq_router_dev, dev, pirq, newirq)) { eisa_set_level_irq(newirq); msg = "assigned"; irq = newirq; } } if (!irq) { if (newirq && mask == (1 << newirq)) { msg = "guessed"; irq = newirq; } else { dev_dbg(&dev->dev, "can't route interrupt\n"); return 0; } } dev_info(&dev->dev, "%s PCI INT %c -> IRQ %d\n", msg, 'A' + pin - 1, irq); /* Update IRQ for all devices with the same pirq value */ for_each_pci_dev(dev2) { pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin); if (!pin) continue; info = pirq_get_info(dev2); if (!info) continue; if (info->irq[pin - 1].link == pirq) { /* * We refuse to override the dev->irq * information. Give a warning! 
*/ if (dev2->irq && dev2->irq != irq && \ (!(pci_probe & PCI_USE_PIRQ_MASK) || \ ((1 << dev2->irq) & mask))) { #ifndef CONFIG_PCI_MSI dev_info(&dev2->dev, "IRQ routing conflict: " "have IRQ %d, want IRQ %d\n", dev2->irq, irq); #endif continue; } dev2->irq = irq; pirq_penalty[irq]++; if (dev != dev2) dev_info(&dev->dev, "sharing IRQ %d with %s\n", irq, pci_name(dev2)); } } return 1; } void __init pcibios_fixup_irqs(void) { struct pci_dev *dev = NULL; u8 pin; DBG(KERN_DEBUG "PCI: IRQ fixup\n"); for_each_pci_dev(dev) { /* * If the BIOS has set an out of range IRQ number, just * ignore it. Also keep track of which IRQ's are * already in use. */ if (dev->irq >= 16) { dev_dbg(&dev->dev, "ignoring bogus IRQ %d\n", dev->irq); dev->irq = 0; } /* * If the IRQ is already assigned to a PCI device, * ignore its ISA use penalty */ if (pirq_penalty[dev->irq] >= 100 && pirq_penalty[dev->irq] < 100000) pirq_penalty[dev->irq] = 0; pirq_penalty[dev->irq]++; } if (io_apic_assign_pci_irqs) return; dev = NULL; for_each_pci_dev(dev) { pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); if (!pin) continue; /* * Still no IRQ? Try to lookup one... 
*/ if (!dev->irq) pcibios_lookup_irq(dev, 0); } } /* * Work around broken HP Pavilion Notebooks which assign USB to * IRQ 9 even though it is actually wired to IRQ 11 */ static int __init fix_broken_hp_bios_irq9(const struct dmi_system_id *d) { if (!broken_hp_bios_irq9) { broken_hp_bios_irq9 = 1; printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident); } return 0; } /* * Work around broken Acer TravelMate 360 Notebooks which assign * Cardbus to IRQ 11 even though it is actually wired to IRQ 10 */ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d) { if (!acer_tm360_irqrouting) { acer_tm360_irqrouting = 1; printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident); } return 0; } static struct dmi_system_id __initdata pciirq_dmi_table[] = { { .callback = fix_broken_hp_bios_irq9, .ident = "HP Pavilion N5400 Series Laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_BIOS_VERSION, "GE.M1.03"), DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook Model GE"), DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"), }, }, { .callback = fix_acer_tm360_irqrouting, .ident = "Acer TravelMate 36x Laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), }, }, { } }; void __init pcibios_irq_init(void) { DBG(KERN_DEBUG "PCI: IRQ init\n"); if (raw_pci_ops == NULL) return; dmi_check_system(pciirq_dmi_table); pirq_table = pirq_find_routing_table(); #ifdef CONFIG_PCI_BIOS if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN)) pirq_table = pcibios_get_irq_routing_table(); #endif if (pirq_table) { pirq_peer_trick(); pirq_find_router(&pirq_router); if (pirq_table->exclusive_irqs) { int i; for (i = 0; i < 16; i++) if (!(pirq_table->exclusive_irqs & (1 << i))) pirq_penalty[i] += 100; } /* * If we're using the I/O APIC, avoid using the PCI IRQ * routing table */ if (io_apic_assign_pci_irqs) pirq_table = NULL; } x86_init.pci.fixup_irqs(); if (io_apic_assign_pci_irqs && 
pci_routeirq) { struct pci_dev *dev = NULL; /* * PCI IRQ routing is set up by pci_enable_device(), but we * also do it here in case there are still broken drivers that * don't use pci_enable_device(). */ printk(KERN_INFO "PCI: Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n"); for_each_pci_dev(dev) pirq_enable_irq(dev); } } static void pirq_penalize_isa_irq(int irq, int active) { /* * If any ISAPnP device reports an IRQ in its list of possible * IRQ's, we try to avoid assigning it to PCI devices. */ if (irq < 16) { if (active) pirq_penalty[irq] += 1000; else pirq_penalty[irq] += 100; } } void pcibios_penalize_isa_irq(int irq, int active) { #ifdef CONFIG_ACPI if (!acpi_noirq) acpi_penalize_isa_irq(irq, active); else #endif pirq_penalize_isa_irq(irq, active); } static int pirq_enable_irq(struct pci_dev *dev) { u8 pin; pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); if (pin && !pcibios_lookup_irq(dev, 1)) { char *msg = ""; if (!io_apic_assign_pci_irqs && dev->irq) return 0; if (io_apic_assign_pci_irqs) { #ifdef CONFIG_X86_IO_APIC struct pci_dev *temp_dev; int irq; struct io_apic_irq_attr irq_attr; irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin - 1, &irq_attr); /* * Busses behind bridges are typically not listed in the MP-table. * In this case we have to look up the IRQ based on the parent bus, * parent slot, and pin number. The SMP code detects such bridged * busses itself so we should get into this branch reliably. 
*/ temp_dev = dev; while (irq < 0 && dev->bus->parent) { /* go back to the bridge */ struct pci_dev *bridge = dev->bus->self; pin = pci_swizzle_interrupt_pin(dev, pin); irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, PCI_SLOT(bridge->devfn), pin - 1, &irq_attr); if (irq >= 0) dev_warn(&dev->dev, "using bridge %s " "INT %c to get IRQ %d\n", pci_name(bridge), 'A' + pin - 1, irq); dev = bridge; } dev = temp_dev; if (irq >= 0) { io_apic_set_pci_routing(&dev->dev, irq, &irq_attr); dev->irq = irq; dev_info(&dev->dev, "PCI->APIC IRQ transform: " "INT %c -> IRQ %d\n", 'A' + pin - 1, irq); return 0; } else msg = "; probably buggy MP table"; #endif } else if (pci_probe & PCI_BIOS_IRQ_SCAN) msg = ""; else msg = "; please try using pci=biosirq"; /* * With IDE legacy devices the IRQ lookup failure is not * a problem.. */ if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE && !(dev->class & 0x5)) return 0; dev_warn(&dev->dev, "can't find IRQ for PCI INT %c%s\n", 'A' + pin - 1, msg); } return 0; }
gpl-2.0
championswimmer/android_kernel_sony_seagull
net/rds/rdma.c
11251
23017
/* * Copyright (c) 2007 Oracle. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/pagemap.h> #include <linux/slab.h> #include <linux/rbtree.h> #include <linux/dma-mapping.h> /* for DMA_*_DEVICE */ #include "rds.h" /* * XXX * - build with sparse * - should we limit the size of a mr region? let transport return failure? * - should we detect duplicate keys on a socket? hmm. * - an rdma is an mlock, apply rlimit? */ /* * get the number of pages by looking at the page indices that the start and * end addresses fall in. * * Returns 0 if the vec is invalid. It is invalid if the number of bytes * causes the address to wrap or overflows an unsigned int. This comes * from being stored in the 'length' member of 'struct scatterlist'. 
*/ static unsigned int rds_pages_in_vec(struct rds_iovec *vec) { if ((vec->addr + vec->bytes <= vec->addr) || (vec->bytes > (u64)UINT_MAX)) return 0; return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) - (vec->addr >> PAGE_SHIFT); } static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key, struct rds_mr *insert) { struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; struct rds_mr *mr; while (*p) { parent = *p; mr = rb_entry(parent, struct rds_mr, r_rb_node); if (key < mr->r_key) p = &(*p)->rb_left; else if (key > mr->r_key) p = &(*p)->rb_right; else return mr; } if (insert) { rb_link_node(&insert->r_rb_node, parent, p); rb_insert_color(&insert->r_rb_node, root); atomic_inc(&insert->r_refcount); } return NULL; } /* * Destroy the transport-specific part of a MR. */ static void rds_destroy_mr(struct rds_mr *mr) { struct rds_sock *rs = mr->r_sock; void *trans_private = NULL; unsigned long flags; rdsdebug("RDS: destroy mr key is %x refcnt %u\n", mr->r_key, atomic_read(&mr->r_refcount)); if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state)) return; spin_lock_irqsave(&rs->rs_rdma_lock, flags); if (!RB_EMPTY_NODE(&mr->r_rb_node)) rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys); trans_private = mr->r_trans_private; mr->r_trans_private = NULL; spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); if (trans_private) mr->r_trans->free_mr(trans_private, mr->r_invalidate); } void __rds_put_mr_final(struct rds_mr *mr) { rds_destroy_mr(mr); kfree(mr); } /* * By the time this is called we can't have any more ioctls called on * the socket so we don't need to worry about racing with others. 
*/ void rds_rdma_drop_keys(struct rds_sock *rs) { struct rds_mr *mr; struct rb_node *node; unsigned long flags; /* Release any MRs associated with this socket */ spin_lock_irqsave(&rs->rs_rdma_lock, flags); while ((node = rb_first(&rs->rs_rdma_keys))) { mr = container_of(node, struct rds_mr, r_rb_node); if (mr->r_trans == rs->rs_transport) mr->r_invalidate = 0; rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys); RB_CLEAR_NODE(&mr->r_rb_node); spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); rds_destroy_mr(mr); rds_mr_put(mr); spin_lock_irqsave(&rs->rs_rdma_lock, flags); } spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); if (rs->rs_transport && rs->rs_transport->flush_mrs) rs->rs_transport->flush_mrs(); } /* * Helper function to pin user pages. */ static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages, struct page **pages, int write) { int ret; ret = get_user_pages_fast(user_addr, nr_pages, write, pages); if (ret >= 0 && ret < nr_pages) { while (ret--) put_page(pages[ret]); ret = -EFAULT; } return ret; } static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args, u64 *cookie_ret, struct rds_mr **mr_ret) { struct rds_mr *mr = NULL, *found; unsigned int nr_pages; struct page **pages = NULL; struct scatterlist *sg; void *trans_private; unsigned long flags; rds_rdma_cookie_t cookie; unsigned int nents; long i; int ret; if (rs->rs_bound_addr == 0) { ret = -ENOTCONN; /* XXX not a great errno */ goto out; } if (!rs->rs_transport->get_mr) { ret = -EOPNOTSUPP; goto out; } nr_pages = rds_pages_in_vec(&args->vec); if (nr_pages == 0) { ret = -EINVAL; goto out; } rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n", args->vec.addr, args->vec.bytes, nr_pages); /* XXX clamp nr_pages to limit the size of this alloc? 
*/ pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); if (!pages) { ret = -ENOMEM; goto out; } mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL); if (!mr) { ret = -ENOMEM; goto out; } atomic_set(&mr->r_refcount, 1); RB_CLEAR_NODE(&mr->r_rb_node); mr->r_trans = rs->rs_transport; mr->r_sock = rs; if (args->flags & RDS_RDMA_USE_ONCE) mr->r_use_once = 1; if (args->flags & RDS_RDMA_INVALIDATE) mr->r_invalidate = 1; if (args->flags & RDS_RDMA_READWRITE) mr->r_write = 1; /* * Pin the pages that make up the user buffer and transfer the page * pointers to the mr's sg array. We check to see if we've mapped * the whole region after transferring the partial page references * to the sg array so that we can have one page ref cleanup path. * * For now we have no flag that tells us whether the mapping is * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to * the zero page. */ ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1); if (ret < 0) goto out; nents = ret; sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL); if (!sg) { ret = -ENOMEM; goto out; } WARN_ON(!nents); sg_init_table(sg, nents); /* Stick all pages into the scatterlist */ for (i = 0 ; i < nents; i++) sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0); rdsdebug("RDS: trans_private nents is %u\n", nents); /* Obtain a transport specific MR. If this succeeds, the * s/g list is now owned by the MR. * Note that dma_map() implies that pending writes are * flushed to RAM, so no dma_sync is needed here. */ trans_private = rs->rs_transport->get_mr(sg, nents, rs, &mr->r_key); if (IS_ERR(trans_private)) { for (i = 0 ; i < nents; i++) put_page(sg_page(&sg[i])); kfree(sg); ret = PTR_ERR(trans_private); goto out; } mr->r_trans_private = trans_private; rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n", mr->r_key, (void *)(unsigned long) args->cookie_addr); /* The user may pass us an unaligned address, but we can only * map page aligned regions. 
So we keep the offset, and build * a 64bit cookie containing <R_Key, offset> and pass that * around. */ cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK); if (cookie_ret) *cookie_ret = cookie; if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long) args->cookie_addr)) { ret = -EFAULT; goto out; } /* Inserting the new MR into the rbtree bumps its * reference count. */ spin_lock_irqsave(&rs->rs_rdma_lock, flags); found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr); spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); BUG_ON(found && found != mr); rdsdebug("RDS: get_mr key is %x\n", mr->r_key); if (mr_ret) { atomic_inc(&mr->r_refcount); *mr_ret = mr; } ret = 0; out: kfree(pages); if (mr) rds_mr_put(mr); return ret; } int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen) { struct rds_get_mr_args args; if (optlen != sizeof(struct rds_get_mr_args)) return -EINVAL; if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval, sizeof(struct rds_get_mr_args))) return -EFAULT; return __rds_rdma_map(rs, &args, NULL, NULL); } int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen) { struct rds_get_mr_for_dest_args args; struct rds_get_mr_args new_args; if (optlen != sizeof(struct rds_get_mr_for_dest_args)) return -EINVAL; if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval, sizeof(struct rds_get_mr_for_dest_args))) return -EFAULT; /* * Initially, just behave like get_mr(). * TODO: Implement get_mr as wrapper around this * and deprecate it. 
*/ new_args.vec = args.vec; new_args.cookie_addr = args.cookie_addr; new_args.flags = args.flags; return __rds_rdma_map(rs, &new_args, NULL, NULL); } /* * Free the MR indicated by the given R_Key */ int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen) { struct rds_free_mr_args args; struct rds_mr *mr; unsigned long flags; if (optlen != sizeof(struct rds_free_mr_args)) return -EINVAL; if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval, sizeof(struct rds_free_mr_args))) return -EFAULT; /* Special case - a null cookie means flush all unused MRs */ if (args.cookie == 0) { if (!rs->rs_transport || !rs->rs_transport->flush_mrs) return -EINVAL; rs->rs_transport->flush_mrs(); return 0; } /* Look up the MR given its R_key and remove it from the rbtree * so nobody else finds it. * This should also prevent races with rds_rdma_unuse. */ spin_lock_irqsave(&rs->rs_rdma_lock, flags); mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL); if (mr) { rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys); RB_CLEAR_NODE(&mr->r_rb_node); if (args.flags & RDS_RDMA_INVALIDATE) mr->r_invalidate = 1; } spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); if (!mr) return -EINVAL; /* * call rds_destroy_mr() ourselves so that we're sure it's done by the time * we return. If we let rds_mr_put() do it it might not happen until * someone else drops their ref. */ rds_destroy_mr(mr); rds_mr_put(mr); return 0; } /* * This is called when we receive an extension header that * tells us this MR was used. 
It allows us to implement * use_once semantics */ void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force) { struct rds_mr *mr; unsigned long flags; int zot_me = 0; spin_lock_irqsave(&rs->rs_rdma_lock, flags); mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL); if (!mr) { printk(KERN_ERR "rds: trying to unuse MR with unknown r_key %u!\n", r_key); spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); return; } if (mr->r_use_once || force) { rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys); RB_CLEAR_NODE(&mr->r_rb_node); zot_me = 1; } spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); /* May have to issue a dma_sync on this memory region. * Note we could avoid this if the operation was a RDMA READ, * but at this point we can't tell. */ if (mr->r_trans->sync_mr) mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE); /* If the MR was marked as invalidate, this will * trigger an async flush. */ if (zot_me) rds_destroy_mr(mr); rds_mr_put(mr); } void rds_rdma_free_op(struct rm_rdma_op *ro) { unsigned int i; for (i = 0; i < ro->op_nents; i++) { struct page *page = sg_page(&ro->op_sg[i]); /* Mark page dirty if it was possibly modified, which * is the case for a RDMA_READ which copies from remote * to local memory */ if (!ro->op_write) { BUG_ON(irqs_disabled()); set_page_dirty(page); } put_page(page); } kfree(ro->op_notifier); ro->op_notifier = NULL; ro->op_active = 0; } void rds_atomic_free_op(struct rm_atomic_op *ao) { struct page *page = sg_page(ao->op_sg); /* Mark page dirty if it was possibly modified, which * is the case for a RDMA_READ which copies from remote * to local memory */ set_page_dirty(page); put_page(page); kfree(ao->op_notifier); ao->op_notifier = NULL; ao->op_active = 0; } /* * Count the number of pages needed to describe an incoming iovec array. 
*/ static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs) { int tot_pages = 0; unsigned int nr_pages; unsigned int i; /* figure out the number of pages in the vector */ for (i = 0; i < nr_iovecs; i++) { nr_pages = rds_pages_in_vec(&iov[i]); if (nr_pages == 0) return -EINVAL; tot_pages += nr_pages; /* * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1, * so tot_pages cannot overflow without first going negative. */ if (tot_pages < 0) return -EINVAL; } return tot_pages; } int rds_rdma_extra_size(struct rds_rdma_args *args) { struct rds_iovec vec; struct rds_iovec __user *local_vec; int tot_pages = 0; unsigned int nr_pages; unsigned int i; local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr; /* figure out the number of pages in the vector */ for (i = 0; i < args->nr_local; i++) { if (copy_from_user(&vec, &local_vec[i], sizeof(struct rds_iovec))) return -EFAULT; nr_pages = rds_pages_in_vec(&vec); if (nr_pages == 0) return -EINVAL; tot_pages += nr_pages; /* * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1, * so tot_pages cannot overflow without first going negative. */ if (tot_pages < 0) return -EINVAL; } return tot_pages * sizeof(struct scatterlist); } /* * The application asks for a RDMA transfer. 
* Extract all arguments and set up the rdma_op */ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, struct cmsghdr *cmsg) { struct rds_rdma_args *args; struct rm_rdma_op *op = &rm->rdma; int nr_pages; unsigned int nr_bytes; struct page **pages = NULL; struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack; int iov_size; unsigned int i, j; int ret = 0; if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args)) || rm->rdma.op_active) return -EINVAL; args = CMSG_DATA(cmsg); if (rs->rs_bound_addr == 0) { ret = -ENOTCONN; /* XXX not a great errno */ goto out; } if (args->nr_local > UIO_MAXIOV) { ret = -EMSGSIZE; goto out; } /* Check whether to allocate the iovec area */ iov_size = args->nr_local * sizeof(struct rds_iovec); if (args->nr_local > UIO_FASTIOV) { iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL); if (!iovs) { ret = -ENOMEM; goto out; } } if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long) args->local_vec_addr, iov_size)) { ret = -EFAULT; goto out; } nr_pages = rds_rdma_pages(iovs, args->nr_local); if (nr_pages < 0) { ret = -EINVAL; goto out; } pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); if (!pages) { ret = -ENOMEM; goto out; } op->op_write = !!(args->flags & RDS_RDMA_READWRITE); op->op_fence = !!(args->flags & RDS_RDMA_FENCE); op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME); op->op_silent = !!(args->flags & RDS_RDMA_SILENT); op->op_active = 1; op->op_recverr = rs->rs_recverr; WARN_ON(!nr_pages); op->op_sg = rds_message_alloc_sgs(rm, nr_pages); if (!op->op_sg) { ret = -ENOMEM; goto out; } if (op->op_notify || op->op_recverr) { /* We allocate an uninitialized notifier here, because * we don't want to do that in the completion handler. We * would have to use GFP_ATOMIC there, and don't want to deal * with failed allocations. 
*/ op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL); if (!op->op_notifier) { ret = -ENOMEM; goto out; } op->op_notifier->n_user_token = args->user_token; op->op_notifier->n_status = RDS_RDMA_SUCCESS; } /* The cookie contains the R_Key of the remote memory region, and * optionally an offset into it. This is how we implement RDMA into * unaligned memory. * When setting up the RDMA, we need to add that offset to the * destination address (which is really an offset into the MR) * FIXME: We may want to move this into ib_rdma.c */ op->op_rkey = rds_rdma_cookie_key(args->cookie); op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie); nr_bytes = 0; rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n", (unsigned long long)args->nr_local, (unsigned long long)args->remote_vec.addr, op->op_rkey); for (i = 0; i < args->nr_local; i++) { struct rds_iovec *iov = &iovs[i]; /* don't need to check, rds_rdma_pages() verified nr will be +nonzero */ unsigned int nr = rds_pages_in_vec(iov); rs->rs_user_addr = iov->addr; rs->rs_user_bytes = iov->bytes; /* If it's a WRITE operation, we want to pin the pages for reading. * If it's a READ operation, we need to pin the pages for writing. 
*/ ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write); if (ret < 0) goto out; rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n", nr_bytes, nr, iov->bytes, iov->addr); nr_bytes += iov->bytes; for (j = 0; j < nr; j++) { unsigned int offset = iov->addr & ~PAGE_MASK; struct scatterlist *sg; sg = &op->op_sg[op->op_nents + j]; sg_set_page(sg, pages[j], min_t(unsigned int, iov->bytes, PAGE_SIZE - offset), offset); rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n", sg->offset, sg->length, iov->addr, iov->bytes); iov->addr += sg->length; iov->bytes -= sg->length; } op->op_nents += nr; } if (nr_bytes > args->remote_vec.bytes) { rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n", nr_bytes, (unsigned int) args->remote_vec.bytes); ret = -EINVAL; goto out; } op->op_bytes = nr_bytes; out: if (iovs != iovstack) sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size); kfree(pages); if (ret) rds_rdma_free_op(op); else rds_stats_inc(s_send_rdma); return ret; } /* * The application wants us to pass an RDMA destination (aka MR) * to the remote */ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm, struct cmsghdr *cmsg) { unsigned long flags; struct rds_mr *mr; u32 r_key; int err = 0; if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) || rm->m_rdma_cookie != 0) return -EINVAL; memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie)); /* We are reusing a previously mapped MR here. Most likely, the * application has written to the buffer, so we need to explicitly * flush those writes to RAM. Otherwise the HCA may not see them * when doing a DMA from that buffer. 
*/ r_key = rds_rdma_cookie_key(rm->m_rdma_cookie); spin_lock_irqsave(&rs->rs_rdma_lock, flags); mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL); if (!mr) err = -EINVAL; /* invalid r_key */ else atomic_inc(&mr->r_refcount); spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); if (mr) { mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE); rm->rdma.op_rdma_mr = mr; } return err; } /* * The application passes us an address range it wants to enable RDMA * to/from. We map the area, and save the <R_Key,offset> pair * in rm->m_rdma_cookie. This causes it to be sent along to the peer * in an extension header. */ int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm, struct cmsghdr *cmsg) { if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) || rm->m_rdma_cookie != 0) return -EINVAL; return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr); } /* * Fill in rds_message for an atomic request. */ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm, struct cmsghdr *cmsg) { struct page *page = NULL; struct rds_atomic_args *args; int ret = 0; if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args)) || rm->atomic.op_active) return -EINVAL; args = CMSG_DATA(cmsg); /* Nonmasked & masked cmsg ops converted to masked hw ops */ switch (cmsg->cmsg_type) { case RDS_CMSG_ATOMIC_FADD: rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD; rm->atomic.op_m_fadd.add = args->fadd.add; rm->atomic.op_m_fadd.nocarry_mask = 0; break; case RDS_CMSG_MASKED_ATOMIC_FADD: rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD; rm->atomic.op_m_fadd.add = args->m_fadd.add; rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask; break; case RDS_CMSG_ATOMIC_CSWP: rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP; rm->atomic.op_m_cswp.compare = args->cswp.compare; rm->atomic.op_m_cswp.swap = args->cswp.swap; rm->atomic.op_m_cswp.compare_mask = ~0; rm->atomic.op_m_cswp.swap_mask = ~0; break; case RDS_CMSG_MASKED_ATOMIC_CSWP: rm->atomic.op_type = 
RDS_ATOMIC_TYPE_CSWP; rm->atomic.op_m_cswp.compare = args->m_cswp.compare; rm->atomic.op_m_cswp.swap = args->m_cswp.swap; rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask; rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask; break; default: BUG(); /* should never happen */ } rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME); rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT); rm->atomic.op_active = 1; rm->atomic.op_recverr = rs->rs_recverr; rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1); if (!rm->atomic.op_sg) { ret = -ENOMEM; goto err; } /* verify 8 byte-aligned */ if (args->local_addr & 0x7) { ret = -EFAULT; goto err; } ret = rds_pin_pages(args->local_addr, 1, &page, 1); if (ret != 1) goto err; ret = 0; sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr)); if (rm->atomic.op_notify || rm->atomic.op_recverr) { /* We allocate an uninitialized notifier here, because * we don't want to do that in the completion handler. We * would have to use GFP_ATOMIC there, and don't want to deal * with failed allocations. */ rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL); if (!rm->atomic.op_notifier) { ret = -ENOMEM; goto err; } rm->atomic.op_notifier->n_user_token = args->user_token; rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS; } rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie); rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie); return ret; err: if (page) put_page(page); kfree(rm->atomic.op_notifier); return ret; }
gpl-2.0
AntaresOne/android_kernel_samsung_jf
net/rds/rdma.c
11251
23017
/* * Copyright (c) 2007 Oracle. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/pagemap.h> #include <linux/slab.h> #include <linux/rbtree.h> #include <linux/dma-mapping.h> /* for DMA_*_DEVICE */ #include "rds.h" /* * XXX * - build with sparse * - should we limit the size of a mr region? let transport return failure? * - should we detect duplicate keys on a socket? hmm. * - an rdma is an mlock, apply rlimit? */ /* * get the number of pages by looking at the page indices that the start and * end addresses fall in. * * Returns 0 if the vec is invalid. It is invalid if the number of bytes * causes the address to wrap or overflows an unsigned int. This comes * from being stored in the 'length' member of 'struct scatterlist'. 
*/ static unsigned int rds_pages_in_vec(struct rds_iovec *vec) { if ((vec->addr + vec->bytes <= vec->addr) || (vec->bytes > (u64)UINT_MAX)) return 0; return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) - (vec->addr >> PAGE_SHIFT); } static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key, struct rds_mr *insert) { struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; struct rds_mr *mr; while (*p) { parent = *p; mr = rb_entry(parent, struct rds_mr, r_rb_node); if (key < mr->r_key) p = &(*p)->rb_left; else if (key > mr->r_key) p = &(*p)->rb_right; else return mr; } if (insert) { rb_link_node(&insert->r_rb_node, parent, p); rb_insert_color(&insert->r_rb_node, root); atomic_inc(&insert->r_refcount); } return NULL; } /* * Destroy the transport-specific part of a MR. */ static void rds_destroy_mr(struct rds_mr *mr) { struct rds_sock *rs = mr->r_sock; void *trans_private = NULL; unsigned long flags; rdsdebug("RDS: destroy mr key is %x refcnt %u\n", mr->r_key, atomic_read(&mr->r_refcount)); if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state)) return; spin_lock_irqsave(&rs->rs_rdma_lock, flags); if (!RB_EMPTY_NODE(&mr->r_rb_node)) rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys); trans_private = mr->r_trans_private; mr->r_trans_private = NULL; spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); if (trans_private) mr->r_trans->free_mr(trans_private, mr->r_invalidate); } void __rds_put_mr_final(struct rds_mr *mr) { rds_destroy_mr(mr); kfree(mr); } /* * By the time this is called we can't have any more ioctls called on * the socket so we don't need to worry about racing with others. 
*/ void rds_rdma_drop_keys(struct rds_sock *rs) { struct rds_mr *mr; struct rb_node *node; unsigned long flags; /* Release any MRs associated with this socket */ spin_lock_irqsave(&rs->rs_rdma_lock, flags); while ((node = rb_first(&rs->rs_rdma_keys))) { mr = container_of(node, struct rds_mr, r_rb_node); if (mr->r_trans == rs->rs_transport) mr->r_invalidate = 0; rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys); RB_CLEAR_NODE(&mr->r_rb_node); spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); rds_destroy_mr(mr); rds_mr_put(mr); spin_lock_irqsave(&rs->rs_rdma_lock, flags); } spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); if (rs->rs_transport && rs->rs_transport->flush_mrs) rs->rs_transport->flush_mrs(); } /* * Helper function to pin user pages. */ static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages, struct page **pages, int write) { int ret; ret = get_user_pages_fast(user_addr, nr_pages, write, pages); if (ret >= 0 && ret < nr_pages) { while (ret--) put_page(pages[ret]); ret = -EFAULT; } return ret; } static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args, u64 *cookie_ret, struct rds_mr **mr_ret) { struct rds_mr *mr = NULL, *found; unsigned int nr_pages; struct page **pages = NULL; struct scatterlist *sg; void *trans_private; unsigned long flags; rds_rdma_cookie_t cookie; unsigned int nents; long i; int ret; if (rs->rs_bound_addr == 0) { ret = -ENOTCONN; /* XXX not a great errno */ goto out; } if (!rs->rs_transport->get_mr) { ret = -EOPNOTSUPP; goto out; } nr_pages = rds_pages_in_vec(&args->vec); if (nr_pages == 0) { ret = -EINVAL; goto out; } rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n", args->vec.addr, args->vec.bytes, nr_pages); /* XXX clamp nr_pages to limit the size of this alloc? 
*/ pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); if (!pages) { ret = -ENOMEM; goto out; } mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL); if (!mr) { ret = -ENOMEM; goto out; } atomic_set(&mr->r_refcount, 1); RB_CLEAR_NODE(&mr->r_rb_node); mr->r_trans = rs->rs_transport; mr->r_sock = rs; if (args->flags & RDS_RDMA_USE_ONCE) mr->r_use_once = 1; if (args->flags & RDS_RDMA_INVALIDATE) mr->r_invalidate = 1; if (args->flags & RDS_RDMA_READWRITE) mr->r_write = 1; /* * Pin the pages that make up the user buffer and transfer the page * pointers to the mr's sg array. We check to see if we've mapped * the whole region after transferring the partial page references * to the sg array so that we can have one page ref cleanup path. * * For now we have no flag that tells us whether the mapping is * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to * the zero page. */ ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1); if (ret < 0) goto out; nents = ret; sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL); if (!sg) { ret = -ENOMEM; goto out; } WARN_ON(!nents); sg_init_table(sg, nents); /* Stick all pages into the scatterlist */ for (i = 0 ; i < nents; i++) sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0); rdsdebug("RDS: trans_private nents is %u\n", nents); /* Obtain a transport specific MR. If this succeeds, the * s/g list is now owned by the MR. * Note that dma_map() implies that pending writes are * flushed to RAM, so no dma_sync is needed here. */ trans_private = rs->rs_transport->get_mr(sg, nents, rs, &mr->r_key); if (IS_ERR(trans_private)) { for (i = 0 ; i < nents; i++) put_page(sg_page(&sg[i])); kfree(sg); ret = PTR_ERR(trans_private); goto out; } mr->r_trans_private = trans_private; rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n", mr->r_key, (void *)(unsigned long) args->cookie_addr); /* The user may pass us an unaligned address, but we can only * map page aligned regions. 
So we keep the offset, and build * a 64bit cookie containing <R_Key, offset> and pass that * around. */ cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK); if (cookie_ret) *cookie_ret = cookie; if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long) args->cookie_addr)) { ret = -EFAULT; goto out; } /* Inserting the new MR into the rbtree bumps its * reference count. */ spin_lock_irqsave(&rs->rs_rdma_lock, flags); found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr); spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); BUG_ON(found && found != mr); rdsdebug("RDS: get_mr key is %x\n", mr->r_key); if (mr_ret) { atomic_inc(&mr->r_refcount); *mr_ret = mr; } ret = 0; out: kfree(pages); if (mr) rds_mr_put(mr); return ret; } int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen) { struct rds_get_mr_args args; if (optlen != sizeof(struct rds_get_mr_args)) return -EINVAL; if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval, sizeof(struct rds_get_mr_args))) return -EFAULT; return __rds_rdma_map(rs, &args, NULL, NULL); } int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen) { struct rds_get_mr_for_dest_args args; struct rds_get_mr_args new_args; if (optlen != sizeof(struct rds_get_mr_for_dest_args)) return -EINVAL; if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval, sizeof(struct rds_get_mr_for_dest_args))) return -EFAULT; /* * Initially, just behave like get_mr(). * TODO: Implement get_mr as wrapper around this * and deprecate it. 
*/ new_args.vec = args.vec; new_args.cookie_addr = args.cookie_addr; new_args.flags = args.flags; return __rds_rdma_map(rs, &new_args, NULL, NULL); } /* * Free the MR indicated by the given R_Key */ int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen) { struct rds_free_mr_args args; struct rds_mr *mr; unsigned long flags; if (optlen != sizeof(struct rds_free_mr_args)) return -EINVAL; if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval, sizeof(struct rds_free_mr_args))) return -EFAULT; /* Special case - a null cookie means flush all unused MRs */ if (args.cookie == 0) { if (!rs->rs_transport || !rs->rs_transport->flush_mrs) return -EINVAL; rs->rs_transport->flush_mrs(); return 0; } /* Look up the MR given its R_key and remove it from the rbtree * so nobody else finds it. * This should also prevent races with rds_rdma_unuse. */ spin_lock_irqsave(&rs->rs_rdma_lock, flags); mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL); if (mr) { rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys); RB_CLEAR_NODE(&mr->r_rb_node); if (args.flags & RDS_RDMA_INVALIDATE) mr->r_invalidate = 1; } spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); if (!mr) return -EINVAL; /* * call rds_destroy_mr() ourselves so that we're sure it's done by the time * we return. If we let rds_mr_put() do it it might not happen until * someone else drops their ref. */ rds_destroy_mr(mr); rds_mr_put(mr); return 0; } /* * This is called when we receive an extension header that * tells us this MR was used. 
It allows us to implement * use_once semantics */ void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force) { struct rds_mr *mr; unsigned long flags; int zot_me = 0; spin_lock_irqsave(&rs->rs_rdma_lock, flags); mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL); if (!mr) { printk(KERN_ERR "rds: trying to unuse MR with unknown r_key %u!\n", r_key); spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); return; } if (mr->r_use_once || force) { rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys); RB_CLEAR_NODE(&mr->r_rb_node); zot_me = 1; } spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); /* May have to issue a dma_sync on this memory region. * Note we could avoid this if the operation was a RDMA READ, * but at this point we can't tell. */ if (mr->r_trans->sync_mr) mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE); /* If the MR was marked as invalidate, this will * trigger an async flush. */ if (zot_me) rds_destroy_mr(mr); rds_mr_put(mr); } void rds_rdma_free_op(struct rm_rdma_op *ro) { unsigned int i; for (i = 0; i < ro->op_nents; i++) { struct page *page = sg_page(&ro->op_sg[i]); /* Mark page dirty if it was possibly modified, which * is the case for a RDMA_READ which copies from remote * to local memory */ if (!ro->op_write) { BUG_ON(irqs_disabled()); set_page_dirty(page); } put_page(page); } kfree(ro->op_notifier); ro->op_notifier = NULL; ro->op_active = 0; } void rds_atomic_free_op(struct rm_atomic_op *ao) { struct page *page = sg_page(ao->op_sg); /* Mark page dirty if it was possibly modified, which * is the case for a RDMA_READ which copies from remote * to local memory */ set_page_dirty(page); put_page(page); kfree(ao->op_notifier); ao->op_notifier = NULL; ao->op_active = 0; } /* * Count the number of pages needed to describe an incoming iovec array. 
*/ static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs) { int tot_pages = 0; unsigned int nr_pages; unsigned int i; /* figure out the number of pages in the vector */ for (i = 0; i < nr_iovecs; i++) { nr_pages = rds_pages_in_vec(&iov[i]); if (nr_pages == 0) return -EINVAL; tot_pages += nr_pages; /* * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1, * so tot_pages cannot overflow without first going negative. */ if (tot_pages < 0) return -EINVAL; } return tot_pages; } int rds_rdma_extra_size(struct rds_rdma_args *args) { struct rds_iovec vec; struct rds_iovec __user *local_vec; int tot_pages = 0; unsigned int nr_pages; unsigned int i; local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr; /* figure out the number of pages in the vector */ for (i = 0; i < args->nr_local; i++) { if (copy_from_user(&vec, &local_vec[i], sizeof(struct rds_iovec))) return -EFAULT; nr_pages = rds_pages_in_vec(&vec); if (nr_pages == 0) return -EINVAL; tot_pages += nr_pages; /* * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1, * so tot_pages cannot overflow without first going negative. */ if (tot_pages < 0) return -EINVAL; } return tot_pages * sizeof(struct scatterlist); } /* * The application asks for a RDMA transfer. 
* Extract all arguments and set up the rdma_op */ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, struct cmsghdr *cmsg) { struct rds_rdma_args *args; struct rm_rdma_op *op = &rm->rdma; int nr_pages; unsigned int nr_bytes; struct page **pages = NULL; struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack; int iov_size; unsigned int i, j; int ret = 0; if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args)) || rm->rdma.op_active) return -EINVAL; args = CMSG_DATA(cmsg); if (rs->rs_bound_addr == 0) { ret = -ENOTCONN; /* XXX not a great errno */ goto out; } if (args->nr_local > UIO_MAXIOV) { ret = -EMSGSIZE; goto out; } /* Check whether to allocate the iovec area */ iov_size = args->nr_local * sizeof(struct rds_iovec); if (args->nr_local > UIO_FASTIOV) { iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL); if (!iovs) { ret = -ENOMEM; goto out; } } if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long) args->local_vec_addr, iov_size)) { ret = -EFAULT; goto out; } nr_pages = rds_rdma_pages(iovs, args->nr_local); if (nr_pages < 0) { ret = -EINVAL; goto out; } pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); if (!pages) { ret = -ENOMEM; goto out; } op->op_write = !!(args->flags & RDS_RDMA_READWRITE); op->op_fence = !!(args->flags & RDS_RDMA_FENCE); op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME); op->op_silent = !!(args->flags & RDS_RDMA_SILENT); op->op_active = 1; op->op_recverr = rs->rs_recverr; WARN_ON(!nr_pages); op->op_sg = rds_message_alloc_sgs(rm, nr_pages); if (!op->op_sg) { ret = -ENOMEM; goto out; } if (op->op_notify || op->op_recverr) { /* We allocate an uninitialized notifier here, because * we don't want to do that in the completion handler. We * would have to use GFP_ATOMIC there, and don't want to deal * with failed allocations. 
*/ op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL); if (!op->op_notifier) { ret = -ENOMEM; goto out; } op->op_notifier->n_user_token = args->user_token; op->op_notifier->n_status = RDS_RDMA_SUCCESS; } /* The cookie contains the R_Key of the remote memory region, and * optionally an offset into it. This is how we implement RDMA into * unaligned memory. * When setting up the RDMA, we need to add that offset to the * destination address (which is really an offset into the MR) * FIXME: We may want to move this into ib_rdma.c */ op->op_rkey = rds_rdma_cookie_key(args->cookie); op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie); nr_bytes = 0; rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n", (unsigned long long)args->nr_local, (unsigned long long)args->remote_vec.addr, op->op_rkey); for (i = 0; i < args->nr_local; i++) { struct rds_iovec *iov = &iovs[i]; /* don't need to check, rds_rdma_pages() verified nr will be +nonzero */ unsigned int nr = rds_pages_in_vec(iov); rs->rs_user_addr = iov->addr; rs->rs_user_bytes = iov->bytes; /* If it's a WRITE operation, we want to pin the pages for reading. * If it's a READ operation, we need to pin the pages for writing. 
*/ ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write); if (ret < 0) goto out; rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n", nr_bytes, nr, iov->bytes, iov->addr); nr_bytes += iov->bytes; for (j = 0; j < nr; j++) { unsigned int offset = iov->addr & ~PAGE_MASK; struct scatterlist *sg; sg = &op->op_sg[op->op_nents + j]; sg_set_page(sg, pages[j], min_t(unsigned int, iov->bytes, PAGE_SIZE - offset), offset); rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n", sg->offset, sg->length, iov->addr, iov->bytes); iov->addr += sg->length; iov->bytes -= sg->length; } op->op_nents += nr; } if (nr_bytes > args->remote_vec.bytes) { rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n", nr_bytes, (unsigned int) args->remote_vec.bytes); ret = -EINVAL; goto out; } op->op_bytes = nr_bytes; out: if (iovs != iovstack) sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size); kfree(pages); if (ret) rds_rdma_free_op(op); else rds_stats_inc(s_send_rdma); return ret; } /* * The application wants us to pass an RDMA destination (aka MR) * to the remote */ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm, struct cmsghdr *cmsg) { unsigned long flags; struct rds_mr *mr; u32 r_key; int err = 0; if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) || rm->m_rdma_cookie != 0) return -EINVAL; memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie)); /* We are reusing a previously mapped MR here. Most likely, the * application has written to the buffer, so we need to explicitly * flush those writes to RAM. Otherwise the HCA may not see them * when doing a DMA from that buffer. 
*/ r_key = rds_rdma_cookie_key(rm->m_rdma_cookie); spin_lock_irqsave(&rs->rs_rdma_lock, flags); mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL); if (!mr) err = -EINVAL; /* invalid r_key */ else atomic_inc(&mr->r_refcount); spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); if (mr) { mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE); rm->rdma.op_rdma_mr = mr; } return err; } /* * The application passes us an address range it wants to enable RDMA * to/from. We map the area, and save the <R_Key,offset> pair * in rm->m_rdma_cookie. This causes it to be sent along to the peer * in an extension header. */ int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm, struct cmsghdr *cmsg) { if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) || rm->m_rdma_cookie != 0) return -EINVAL; return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr); } /* * Fill in rds_message for an atomic request. */ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm, struct cmsghdr *cmsg) { struct page *page = NULL; struct rds_atomic_args *args; int ret = 0; if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args)) || rm->atomic.op_active) return -EINVAL; args = CMSG_DATA(cmsg); /* Nonmasked & masked cmsg ops converted to masked hw ops */ switch (cmsg->cmsg_type) { case RDS_CMSG_ATOMIC_FADD: rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD; rm->atomic.op_m_fadd.add = args->fadd.add; rm->atomic.op_m_fadd.nocarry_mask = 0; break; case RDS_CMSG_MASKED_ATOMIC_FADD: rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD; rm->atomic.op_m_fadd.add = args->m_fadd.add; rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask; break; case RDS_CMSG_ATOMIC_CSWP: rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP; rm->atomic.op_m_cswp.compare = args->cswp.compare; rm->atomic.op_m_cswp.swap = args->cswp.swap; rm->atomic.op_m_cswp.compare_mask = ~0; rm->atomic.op_m_cswp.swap_mask = ~0; break; case RDS_CMSG_MASKED_ATOMIC_CSWP: rm->atomic.op_type = 
RDS_ATOMIC_TYPE_CSWP; rm->atomic.op_m_cswp.compare = args->m_cswp.compare; rm->atomic.op_m_cswp.swap = args->m_cswp.swap; rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask; rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask; break; default: BUG(); /* should never happen */ } rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME); rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT); rm->atomic.op_active = 1; rm->atomic.op_recverr = rs->rs_recverr; rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1); if (!rm->atomic.op_sg) { ret = -ENOMEM; goto err; } /* verify 8 byte-aligned */ if (args->local_addr & 0x7) { ret = -EFAULT; goto err; } ret = rds_pin_pages(args->local_addr, 1, &page, 1); if (ret != 1) goto err; ret = 0; sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr)); if (rm->atomic.op_notify || rm->atomic.op_recverr) { /* We allocate an uninitialized notifier here, because * we don't want to do that in the completion handler. We * would have to use GFP_ATOMIC there, and don't want to deal * with failed allocations. */ rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL); if (!rm->atomic.op_notifier) { ret = -ENOMEM; goto err; } rm->atomic.op_notifier->n_user_token = args->user_token; rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS; } rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie); rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie); return ret; err: if (page) put_page(page); kfree(rm->atomic.op_notifier); return ret; }
gpl-2.0
danielkutik/android_kernel_google_msm
sound/soc/msm/qdsp6v2/q6asm.c
500
94135
/* * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * Author: Brian Swetland <swetland@google.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/fs.h> #include <linux/mutex.h> #include <linux/wait.h> #include <linux/miscdevice.h> #include <linux/uaccess.h> #include <linux/sched.h> #include <linux/dma-mapping.h> #include <linux/miscdevice.h> #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/msm_audio.h> #include <linux/android_pmem.h> #include <linux/memory_alloc.h> #include <linux/debugfs.h> #include <linux/time.h> #include <linux/atomic.h> #include <asm/ioctls.h> #include <mach/memory.h> #include <mach/debug_mm.h> #include <mach/peripheral-loader.h> #include <mach/qdsp6v2/audio_acdb.h> #include <mach/qdsp6v2/rtac.h> #include <mach/msm_subsystem_map.h> #include <sound/apr_audio-v2.h> #include <sound/q6asm-v2.h> #define TRUE 0x01 #define FALSE 0x00 #define READDONE_IDX_STATUS 0 #define READDONE_IDX_BUFADD_LSW 1 #define READDONE_IDX_BUFADD_MSW 2 #define READDONE_IDX_MEMMAP_HDL 3 #define READDONE_IDX_SIZE 4 #define READDONE_IDX_OFFSET 5 #define READDONE_IDX_LSW_TS 6 #define READDONE_IDX_MSW_TS 7 #define READDONE_IDX_FLAGS 8 #define READDONE_IDX_NUMFRAMES 9 #define READDONE_IDX_SEQ_ID 10 /* TODO, combine them together */ static DEFINE_MUTEX(session_lock); struct asm_mmap { atomic_t ref_cnt; void *apr; }; static struct asm_mmap this_mmap; /* session id: 0 reserved */ static struct audio_client *session[SESSION_MAX+1]; struct asm_buffer_node { struct list_head list; uint32_t buf_addr_lsw; 
uint32_t mmap_hdl; }; static int32_t q6asm_mmapcallback(struct apr_client_data *data, void *priv); static int32_t q6asm_callback(struct apr_client_data *data, void *priv); static void q6asm_add_hdr(struct audio_client *ac, struct apr_hdr *hdr, uint32_t pkt_size, uint32_t cmd_flg); static void q6asm_add_hdr_async(struct audio_client *ac, struct apr_hdr *hdr, uint32_t pkt_size, uint32_t cmd_flg); static int q6asm_memory_map_regions(struct audio_client *ac, int dir, uint32_t bufsz, uint32_t bufcnt); static int q6asm_memory_unmap_regions(struct audio_client *ac, int dir, uint32_t bufsz, uint32_t bufcnt); static void q6asm_reset_buf_state(struct audio_client *ac); static int q6asm_map_channels(u8 *channel_mapping, uint32_t channels); #ifdef CONFIG_DEBUG_FS #define OUT_BUFFER_SIZE 56 #define IN_BUFFER_SIZE 24 static struct timeval out_cold_tv; static struct timeval out_warm_tv; static struct timeval out_cont_tv; static struct timeval in_cont_tv; static long out_enable_flag; static long in_enable_flag; static struct dentry *out_dentry; static struct dentry *in_dentry; static int in_cont_index; /*This var is used to keep track of first write done for cold output latency */ static int out_cold_index; static char *out_buffer; static char *in_buffer; static int audio_output_latency_dbgfs_open(struct inode *inode, struct file *file) { file->private_data = inode->i_private; return 0; } static ssize_t audio_output_latency_dbgfs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { snprintf(out_buffer, OUT_BUFFER_SIZE, "%ld,%ld,%ld,%ld,%ld,%ld,",\ out_cold_tv.tv_sec, out_cold_tv.tv_usec, out_warm_tv.tv_sec,\ out_warm_tv.tv_usec, out_cont_tv.tv_sec, out_cont_tv.tv_usec); return simple_read_from_buffer(buf, OUT_BUFFER_SIZE, ppos, out_buffer, OUT_BUFFER_SIZE); } static ssize_t audio_output_latency_dbgfs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { char *temp; if (count > 2*sizeof(char)) return -EINVAL; else temp = 
kmalloc(2*sizeof(char), GFP_KERNEL); out_cold_index = 0; if (temp) { if (copy_from_user(temp, buf, 2*sizeof(char))) { kfree(temp); return -EFAULT; } if (!strict_strtol(temp, 10, &out_enable_flag)) { kfree(temp); return count; } kfree(temp); } return -EINVAL; } static const struct file_operations audio_output_latency_debug_fops = { .open = audio_output_latency_dbgfs_open, .read = audio_output_latency_dbgfs_read, .write = audio_output_latency_dbgfs_write }; static int audio_input_latency_dbgfs_open(struct inode *inode, struct file *file) { file->private_data = inode->i_private; return 0; } static ssize_t audio_input_latency_dbgfs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { snprintf(in_buffer, IN_BUFFER_SIZE, "%ld,%ld,",\ in_cont_tv.tv_sec, in_cont_tv.tv_usec); return simple_read_from_buffer(buf, IN_BUFFER_SIZE, ppos, in_buffer, IN_BUFFER_SIZE); } static ssize_t audio_input_latency_dbgfs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { char *temp; if (count > 2*sizeof(char)) return -EINVAL; else temp = kmalloc(2*sizeof(char), GFP_KERNEL); if (temp) { if (copy_from_user(temp, buf, 2*sizeof(char))) { kfree(temp); return -EFAULT; } if (!strict_strtol(temp, 10, &in_enable_flag)) { kfree(temp); return count; } kfree(temp); } return -EINVAL; } static const struct file_operations audio_input_latency_debug_fops = { .open = audio_input_latency_dbgfs_open, .read = audio_input_latency_dbgfs_read, .write = audio_input_latency_dbgfs_write }; static void config_debug_fs_write_cb(void) { if (out_enable_flag) { /* For first Write done log the time and reset out_cold_index*/ if (out_cold_index != 1) { do_gettimeofday(&out_cold_tv); pr_debug("COLD: apr_send_pkt at %ld sec %ld microsec\n", out_cold_tv.tv_sec,\ out_cold_tv.tv_usec); out_cold_index = 1; } pr_debug("out_enable_flag %ld",\ out_enable_flag); } } static void config_debug_fs_read_cb(void) { if (in_enable_flag) { /* when in_cont_index == 7, DSP would be * writing into 
the 8th 512 byte buffer and this * timestamp is tapped here.Once done it then writes * to 9th 512 byte buffer.These two buffers(8th, 9th) * reach the test application in 5th iteration and that * timestamp is tapped at user level. The difference * of these two timestamps gives us the time between * the time at which dsp started filling the sample * required and when it reached the test application. * Hence continuous input latency */ if (in_cont_index == 7) { do_gettimeofday(&in_cont_tv); pr_err("In_CONT:previous read buffer done at %ld sec %ld microsec\n", in_cont_tv.tv_sec, in_cont_tv.tv_usec); } in_cont_index++; } } static void config_debug_fs_reset_index(void) { in_cont_index = 0; } static void config_debug_fs_run(void) { if (out_enable_flag) { do_gettimeofday(&out_cold_tv); pr_debug("COLD: apr_send_pkt at %ld sec %ld microsec\n",\ out_cold_tv.tv_sec, out_cold_tv.tv_usec); } } static void config_debug_fs_write(struct audio_buffer *ab) { if (out_enable_flag) { char zero_pattern[2] = {0x00, 0x00}; /* If First two byte is non zero and last two byte is zero then it is warm output pattern */ if ((strncmp(((char *)ab->data), zero_pattern, 2)) && (!strncmp(((char *)ab->data + 2), zero_pattern, 2))) { do_gettimeofday(&out_warm_tv); pr_debug("WARM:apr_send_pkt at %ld sec %ld microsec\n", out_warm_tv.tv_sec,\ out_warm_tv.tv_usec); pr_debug("Warm Pattern Matched"); } /* If First two byte is zero and last two byte is non zero then it is cont ouput pattern */ else if ((!strncmp(((char *)ab->data), zero_pattern, 2)) && (strncmp(((char *)ab->data + 2), zero_pattern, 2))) { do_gettimeofday(&out_cont_tv); pr_debug("CONT:apr_send_pkt at %ld sec %ld microsec\n", out_cont_tv.tv_sec,\ out_cont_tv.tv_usec); pr_debug("Cont Pattern Matched"); } } } static void config_debug_fs_init(void) { out_buffer = kmalloc(OUT_BUFFER_SIZE, GFP_KERNEL); out_dentry = debugfs_create_file("audio_out_latency_measurement_node",\ S_IFREG | S_IRUGO | S_IWUGO,\ NULL, NULL, &audio_output_latency_debug_fops); 
if (IS_ERR(out_dentry)) pr_err("debugfs_create_file failed\n"); in_buffer = kmalloc(IN_BUFFER_SIZE, GFP_KERNEL); in_dentry = debugfs_create_file("audio_in_latency_measurement_node",\ S_IFREG | S_IRUGO | S_IWUGO,\ NULL, NULL, &audio_input_latency_debug_fops); if (IS_ERR(in_dentry)) pr_err("debugfs_create_file failed\n"); } #else static void config_debug_fs_write(struct audio_buffer *ab) { return; } static void config_debug_fs_run(void) { return; } static void config_debug_fs_reset_index(void) { return; } static void config_debug_fs_read_cb(void) { return; } static void config_debug_fs_write_cb(void) { return; } static void config_debug_fs_init(void) { return; } #endif static int q6asm_session_alloc(struct audio_client *ac) { int n; mutex_lock(&session_lock); for (n = 1; n <= SESSION_MAX; n++) { if (!session[n]) { session[n] = ac; mutex_unlock(&session_lock); return n; } } mutex_unlock(&session_lock); return -ENOMEM; } static void q6asm_session_free(struct audio_client *ac) { pr_debug("%s: sessionid[%d]\n", __func__, ac->session); rtac_remove_popp_from_adm_devices(ac->session); mutex_lock(&session_lock); session[ac->session] = 0; mutex_unlock(&session_lock); ac->session = 0; return; } int q6asm_audio_client_buf_free(unsigned int dir, struct audio_client *ac) { struct audio_port_data *port; int cnt = 0; int rc = 0; pr_debug("%s: Session id %d\n", __func__, ac->session); mutex_lock(&ac->cmd_lock); if (ac->io_mode == SYNC_IO_MODE) { port = &ac->port[dir]; if (!port->buf) { mutex_unlock(&ac->cmd_lock); return 0; } cnt = port->max_buf_cnt - 1; if (cnt >= 0) { rc = q6asm_memory_unmap_regions(ac, dir, port->buf[0].size, port->max_buf_cnt); if (rc < 0) pr_err("%s CMD Memory_unmap_regions failed\n", __func__); } while (cnt >= 0) { if (port->buf[cnt].data) { ion_unmap_kernel(port->buf[cnt].client, port->buf[cnt].handle); ion_free(port->buf[cnt].client, port->buf[cnt].handle); ion_client_destroy(port->buf[cnt].client); port->buf[cnt].data = NULL; port->buf[cnt].phys = 0; 
--(port->max_buf_cnt); } --cnt; } kfree(port->buf); port->buf = NULL; } mutex_unlock(&ac->cmd_lock); return 0; } int q6asm_audio_client_buf_free_contiguous(unsigned int dir, struct audio_client *ac) { struct audio_port_data *port; int cnt = 0; int rc = 0; pr_debug("%s: Session id %d\n", __func__, ac->session); mutex_lock(&ac->cmd_lock); port = &ac->port[dir]; if (!port->buf) { mutex_unlock(&ac->cmd_lock); return 0; } cnt = port->max_buf_cnt - 1; if (cnt >= 0) { rc = q6asm_memory_unmap(ac, port->buf[0].phys, dir); if (rc < 0) pr_err("%s CMD Memory_unmap_regions failed\n", __func__); } if (port->buf[0].data) { ion_unmap_kernel(port->buf[0].client, port->buf[0].handle); ion_free(port->buf[0].client, port->buf[0].handle); ion_client_destroy(port->buf[0].client); pr_debug("%s:data[%p]phys[%p][%p] , client[%p] handle[%p]\n", __func__, (void *)port->buf[0].data, (void *)port->buf[0].phys, (void *)&port->buf[0].phys, (void *)port->buf[0].client, (void *)port->buf[0].handle); } while (cnt >= 0) { port->buf[cnt].data = NULL; port->buf[cnt].phys = 0; cnt--; } port->max_buf_cnt = 0; kfree(port->buf); port->buf = NULL; mutex_unlock(&ac->cmd_lock); return 0; } int q6asm_mmap_apr_dereg(void) { if (atomic_read(&this_mmap.ref_cnt) <= 0) { pr_err("%s: APR Common Port Already Closed\n", __func__); goto done; } atomic_dec(&this_mmap.ref_cnt); if (atomic_read(&this_mmap.ref_cnt) == 0) { apr_deregister(this_mmap.apr); pr_debug("%s:APR De-Register common port\n", __func__); } done: return 0; } void q6asm_audio_client_free(struct audio_client *ac) { int loopcnt; struct audio_port_data *port; if (!ac || !ac->session) return; pr_debug("%s: Session id %d\n", __func__, ac->session); if (ac->io_mode == SYNC_IO_MODE) { for (loopcnt = 0; loopcnt <= OUT; loopcnt++) { port = &ac->port[loopcnt]; if (!port->buf) continue; pr_debug("%s:loopcnt = %d\n", __func__, loopcnt); q6asm_audio_client_buf_free(loopcnt, ac); } } apr_deregister(ac->apr); ac->mmap_apr = NULL; q6asm_session_free(ac); 
q6asm_mmap_apr_dereg(); pr_debug("%s: APR De-Register\n", __func__); /*done:*/ kfree(ac); return; } int q6asm_set_io_mode(struct audio_client *ac, uint32_t mode) { ac->io_mode &= 0xFF00; pr_debug("%s ac->mode after anding with FF00:0x[%x],\n", __func__, ac->io_mode); if (ac == NULL) { pr_err("%s APR handle NULL\n", __func__); return -EINVAL; } if ((mode == ASYNC_IO_MODE) || (mode == SYNC_IO_MODE)) { ac->io_mode |= mode; pr_debug("%s:Set Mode to 0x[%x]\n", __func__, ac->io_mode); return 0; } else { pr_err("%s:Not an valid IO Mode:%d\n", __func__, ac->io_mode); return -EINVAL; } } void *q6asm_mmap_apr_reg(void) { if (atomic_read(&this_mmap.ref_cnt) == 0) { this_mmap.apr = apr_register("ADSP", "ASM", \ (apr_fn)q6asm_mmapcallback,\ 0x0FFFFFFFF, &this_mmap); if (this_mmap.apr == NULL) { pr_debug("%s Unable to register APR ASM common port\n", __func__); goto fail; } } atomic_inc(&this_mmap.ref_cnt); return this_mmap.apr; fail: return NULL; } struct audio_client *q6asm_audio_client_alloc(app_cb cb, void *priv) { struct audio_client *ac; int n; int lcnt = 0; ac = kzalloc(sizeof(struct audio_client), GFP_KERNEL); if (!ac) return NULL; n = q6asm_session_alloc(ac); if (n <= 0) goto fail_session; ac->session = n; ac->cb = cb; ac->priv = priv; ac->io_mode = SYNC_IO_MODE; ac->apr = apr_register("ADSP", "ASM", \ (apr_fn)q6asm_callback,\ ((ac->session) << 8 | 0x0001),\ ac); if (ac->apr == NULL) { pr_err("%s Registration with APR failed\n", __func__); goto fail; } rtac_set_asm_handle(n, ac->apr); pr_debug("%s Registering the common port with APR\n", __func__); ac->mmap_apr = q6asm_mmap_apr_reg(); if (ac->mmap_apr == NULL) goto fail; init_waitqueue_head(&ac->cmd_wait); INIT_LIST_HEAD(&ac->port[0].mem_map_handle); INIT_LIST_HEAD(&ac->port[1].mem_map_handle); pr_debug("%s: mem_map_handle list init'ed\n", __func__); mutex_init(&ac->cmd_lock); for (lcnt = 0; lcnt <= OUT; lcnt++) { mutex_init(&ac->port[lcnt].lock); spin_lock_init(&ac->port[lcnt].dsp_lock); } atomic_set(&ac->cmd_state, 
0); pr_debug("%s: session[%d]\n", __func__, ac->session); return ac; fail: q6asm_audio_client_free(ac); return NULL; fail_session: kfree(ac); return NULL; } struct audio_client *q6asm_get_audio_client(int session_id) { if ((session_id <= 0) || (session_id > SESSION_MAX)) { pr_err("%s: invalid session: %d\n", __func__, session_id); goto err; } if (!session[session_id]) { pr_err("%s: session not active: %d\n", __func__, session_id); goto err; } return session[session_id]; err: return NULL; } int q6asm_audio_client_buf_alloc(unsigned int dir, struct audio_client *ac, unsigned int bufsz, unsigned int bufcnt) { int cnt = 0; int rc = 0; struct audio_buffer *buf; int len; if (!(ac) || ((dir != IN) && (dir != OUT))) return -EINVAL; pr_debug("%s: session[%d]bufsz[%d]bufcnt[%d]\n", __func__, ac->session, bufsz, bufcnt); if (ac->session <= 0 || ac->session > 8) goto fail; if (ac->io_mode == SYNC_IO_MODE) { if (ac->port[dir].buf) { pr_debug("%s: buffer already allocated\n", __func__); return 0; } mutex_lock(&ac->cmd_lock); buf = kzalloc(((sizeof(struct audio_buffer))*bufcnt), GFP_KERNEL); if (!buf) { mutex_unlock(&ac->cmd_lock); goto fail; } ac->port[dir].buf = buf; while (cnt < bufcnt) { if (bufsz > 0) { if (!buf[cnt].data) { buf[cnt].client = msm_ion_client_create (UINT_MAX, "audio_client"); if (IS_ERR_OR_NULL((void *) buf[cnt].client)) { pr_err("%s: ION create client for AUDIO failed\n", __func__); goto fail; } buf[cnt].handle = ion_alloc (buf[cnt].client, bufsz, SZ_4K, (0x1 << ION_AUDIO_HEAP_ID), 0); if (IS_ERR_OR_NULL((void *) buf[cnt].handle)) { pr_err("%s: ION memory allocation for AUDIO failed\n", __func__); goto fail; } rc = ion_phys(buf[cnt].client, buf[cnt].handle, (ion_phys_addr_t *) &buf[cnt].phys, (size_t *)&len); if (rc) { pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n", __func__, rc); goto fail; } buf[cnt].data = ion_map_kernel (buf[cnt].client, buf[cnt].handle); if (IS_ERR_OR_NULL((void *) buf[cnt].data)) { pr_err("%s: ION memory mapping for AUDIO 
failed\n", __func__); goto fail; } memset((void *)buf[cnt].data, 0, bufsz); buf[cnt].used = 1; buf[cnt].size = bufsz; buf[cnt].actual_size = bufsz; pr_debug("%s data[%p]phys[%p][%p]\n", __func__, (void *)buf[cnt].data, (void *)buf[cnt].phys, (void *)&buf[cnt].phys); cnt++; } } } ac->port[dir].max_buf_cnt = cnt; mutex_unlock(&ac->cmd_lock); rc = q6asm_memory_map_regions(ac, dir, bufsz, cnt); if (rc < 0) { pr_err("%s:CMD Memory_map_regions failed\n", __func__); goto fail; } } return 0; fail: q6asm_audio_client_buf_free(dir, ac); return -EINVAL; } int q6asm_audio_client_buf_alloc_contiguous(unsigned int dir, struct audio_client *ac, unsigned int bufsz, unsigned int bufcnt) { int cnt = 0; int rc = 0; struct audio_buffer *buf; int len; if (!(ac) || ((dir != IN) && (dir != OUT))) return -EINVAL; pr_debug("%s: session[%d]bufsz[%d]bufcnt[%d]\n", __func__, ac->session, bufsz, bufcnt); if (ac->session <= 0 || ac->session > 8) goto fail; if (ac->port[dir].buf) { pr_debug("%s: buffer already allocated\n", __func__); return 0; } mutex_lock(&ac->cmd_lock); buf = kzalloc(((sizeof(struct audio_buffer))*bufcnt), GFP_KERNEL); if (!buf) { mutex_unlock(&ac->cmd_lock); goto fail; } ac->port[dir].buf = buf; buf[0].client = msm_ion_client_create(UINT_MAX, "audio_client"); if (IS_ERR_OR_NULL((void *)buf[0].client)) { pr_err("%s: ION create client for AUDIO failed\n", __func__); goto fail; } buf[0].handle = ion_alloc(buf[0].client, bufsz * bufcnt, SZ_4K, (0x1 << ION_AUDIO_HEAP_ID), 0); if (IS_ERR_OR_NULL((void *) buf[0].handle)) { pr_err("%s: ION memory allocation for AUDIO failed\n", __func__); goto fail; } rc = ion_phys(buf[0].client, buf[0].handle, (ion_phys_addr_t *)&buf[0].phys, (size_t *)&len); if (rc) { pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n", __func__, rc); goto fail; } buf[0].data = ion_map_kernel(buf[0].client, buf[0].handle); if (IS_ERR_OR_NULL((void *) buf[0].data)) { pr_err("%s: ION memory mapping for AUDIO failed\n", __func__); goto fail; } memset((void 
*)buf[0].data, 0, (bufsz * bufcnt)); if (!buf[0].data) { pr_err("%s:invalid vaddr, iomap failed\n", __func__); mutex_unlock(&ac->cmd_lock); goto fail; } buf[0].used = dir ^ 1; buf[0].size = bufsz; buf[0].actual_size = bufsz; cnt = 1; while (cnt < bufcnt) { if (bufsz > 0) { buf[cnt].data = buf[0].data + (cnt * bufsz); buf[cnt].phys = buf[0].phys + (cnt * bufsz); if (!buf[cnt].data) { pr_err("%s Buf alloc failed\n", __func__); mutex_unlock(&ac->cmd_lock); goto fail; } buf[cnt].used = dir ^ 1; buf[cnt].size = bufsz; buf[cnt].actual_size = bufsz; pr_debug("%s data[%p]phys[%p][%p]\n", __func__, (void *)buf[cnt].data, (void *)buf[cnt].phys, (void *)&buf[cnt].phys); } cnt++; } ac->port[dir].max_buf_cnt = cnt; mutex_unlock(&ac->cmd_lock); rc = q6asm_memory_map_regions(ac, dir, bufsz, cnt); if (rc < 0) { pr_err("%s:CMD Memory_map_regions failed\n", __func__); goto fail; } return 0; fail: q6asm_audio_client_buf_free_contiguous(dir, ac); return -EINVAL; } static int32_t q6asm_mmapcallback(struct apr_client_data *data, void *priv) { uint32_t sid = 0; uint32_t dir = 0; uint32_t *payload = data->payload; unsigned long dsp_flags; struct audio_client *ac = NULL; struct audio_port_data *port; if (!data) { pr_err("%s: Invalid CB\n", __func__); return 0; } if (data->opcode == RESET_EVENTS) { pr_debug("%s: Reset event is received: %d %d apr[%p]\n", __func__, data->reset_event, data->reset_proc, this_mmap.apr); apr_reset(this_mmap.apr); atomic_set(&this_mmap.ref_cnt, 0); this_mmap.apr = NULL; return 0; } sid = (data->token >> 8) & 0x0F; ac = q6asm_get_audio_client(sid); pr_debug("%s:ptr0[0x%x]ptr1[0x%x]opcode[0x%x] token[0x%x]payload_s[%d] src[%d] dest[%d]sid[%d]dir[%d]\n", __func__, payload[0], payload[1], data->opcode, data->token, data->payload_size, data->src_port, data->dest_port, sid, dir); pr_debug("%s:Payload = [0x%x] status[0x%x]\n", __func__, payload[0], payload[1]); if (data->opcode == APR_BASIC_RSP_RESULT) { switch (payload[0]) { case ASM_CMD_SHARED_MEM_MAP_REGIONS: case 
ASM_CMD_SHARED_MEM_UNMAP_REGIONS: if (atomic_read(&ac->cmd_state)) { atomic_set(&ac->cmd_state, 0); wake_up(&ac->cmd_wait); } pr_debug("%s:Payload = [0x%x] status[0x%x]\n", __func__, payload[0], payload[1]); break; default: pr_debug("%s:command[0x%x] not expecting rsp\n", __func__, payload[0]); break; } return 0; } dir = (data->token & 0x0F); port = &ac->port[dir]; switch (data->opcode) { case ASM_CMDRSP_SHARED_MEM_MAP_REGIONS:{ pr_debug("%s:PL#0[0x%x]PL#1 [0x%x] dir=%x s_id=%x\n", __func__, payload[0], payload[1], dir, sid); spin_lock_irqsave(&port->dsp_lock, dsp_flags); if (atomic_read(&ac->cmd_state)) { ac->port[dir].tmp_hdl = payload[0]; atomic_set(&ac->cmd_state, 0); wake_up(&ac->cmd_wait); } spin_unlock_irqrestore(&port->dsp_lock, dsp_flags); break; } case ASM_CMD_SHARED_MEM_UNMAP_REGIONS:{ pr_debug("%s:PL#0[0x%x]PL#1 [0x%x]\n", __func__, payload[0], payload[1]); spin_lock_irqsave(&port->dsp_lock, dsp_flags); if (atomic_read(&ac->cmd_state)) { atomic_set(&ac->cmd_state, 0); wake_up(&ac->cmd_wait); } spin_unlock_irqrestore(&port->dsp_lock, dsp_flags); break; } default: pr_debug("%s:command[0x%x]success [0x%x]\n", __func__, payload[0], payload[1]); } if (ac->cb) ac->cb(data->opcode, data->token, data->payload, ac->priv); return 0; } static int32_t q6asm_callback(struct apr_client_data *data, void *priv) { int i = 0; struct audio_client *ac = (struct audio_client *)priv; uint32_t token; unsigned long dsp_flags; uint32_t *payload; if ((ac == NULL) || (data == NULL)) { pr_err("ac or priv NULL\n"); return -EINVAL; } if (ac->session <= 0 || ac->session > 8) { pr_err("%s:Session ID is invalid, session = %d\n", __func__, ac->session); return -EINVAL; } payload = data->payload; if (data->opcode == RESET_EVENTS) { pr_debug("q6asm_callback: Reset event is received: %d %d apr[%p]\n", data->reset_event, data->reset_proc, ac->apr); if (ac->cb) ac->cb(data->opcode, data->token, (uint32_t *)data->payload, ac->priv); apr_reset(ac->apr); return 0; } pr_debug("%s: 
session[%d]opcode[0x%x] token[0x%x]payload_s[%d] src[%d] dest[%d]\n", __func__, ac->session, data->opcode, data->token, data->payload_size, data->src_port, data->dest_port); if ((data->opcode != ASM_DATA_EVENT_RENDERED_EOS) && (data->opcode != ASM_DATA_EVENT_EOS)) pr_debug("%s:Payload = [0x%x] status[0x%x]\n", __func__, payload[0], payload[1]); if (data->opcode == APR_BASIC_RSP_RESULT) { token = data->token; switch (payload[0]) { case ASM_STREAM_CMD_SET_PP_PARAMS_V2: if (rtac_make_asm_callback(ac->session, payload, data->payload_size)) break; case ASM_SESSION_CMD_PAUSE: case ASM_DATA_CMD_EOS: case ASM_STREAM_CMD_CLOSE: case ASM_STREAM_CMD_FLUSH: case ASM_SESSION_CMD_RUN_V2: case ASM_SESSION_CMD_REGISTER_FORX_OVERFLOW_EVENTS: case ASM_STREAM_CMD_FLUSH_READBUFS: pr_debug("%s:Payload = [0x%x]\n", __func__, payload[0]); if (token != ac->session) { pr_err("%s:Invalid session[%d] rxed expected[%d]", __func__, token, ac->session); return -EINVAL; } case ASM_STREAM_CMD_OPEN_READ_V2: case ASM_STREAM_CMD_OPEN_WRITE_V2: case ASM_STREAM_CMD_OPEN_READWRITE_V2: case ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2: case ASM_STREAM_CMD_SET_ENCDEC_PARAM: pr_debug("%s:Payload = [0x%x]stat[0x%x]\n", __func__, payload[0], payload[1]); if (atomic_read(&ac->cmd_state)) { atomic_set(&ac->cmd_state, 0); wake_up(&ac->cmd_wait); } if (ac->cb) ac->cb(data->opcode, data->token, (uint32_t *)data->payload, ac->priv); break; default: pr_debug("%s:command[0x%x] not expecting rsp\n", __func__, payload[0]); break; } return 0; } switch (data->opcode) { case ASM_DATA_EVENT_WRITE_DONE_V2:{ struct audio_port_data *port = &ac->port[IN]; pr_debug("%s: Rxed opcode[0x%x] status[0x%x] token[%d]", __func__, payload[0], payload[1], data->token); if (ac->io_mode == SYNC_IO_MODE) { if (port->buf == NULL) { pr_err("%s: Unexpected Write Done\n", __func__); return -EINVAL; } spin_lock_irqsave(&port->dsp_lock, dsp_flags); if (port->buf[data->token].phys != payload[0]) { pr_err("Buf expected[%p]rxed[%p]\n",\ (void 
*)port->buf[data->token].phys,\ (void *)payload[0]); spin_unlock_irqrestore(&port->dsp_lock, dsp_flags); return -EINVAL; } token = data->token; port->buf[token].used = 1; spin_unlock_irqrestore(&port->dsp_lock, dsp_flags); config_debug_fs_write_cb(); for (i = 0; i < port->max_buf_cnt; i++) pr_debug("%d ", port->buf[i].used); } break; } case ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2: rtac_make_asm_callback(ac->session, payload, data->payload_size); break; case ASM_DATA_EVENT_READ_DONE_V2:{ struct audio_port_data *port = &ac->port[OUT]; config_debug_fs_read_cb(); pr_debug("%s:R-D: status=%d buff_add=%x act_size=%d offset=%d\n", __func__, payload[READDONE_IDX_STATUS], payload[READDONE_IDX_BUFADD_LSW], payload[READDONE_IDX_SIZE], payload[READDONE_IDX_OFFSET]); pr_debug("%s:R-D:msw_ts=%d lsw_ts=%d memmap_hdl=%x flags=%d id=%d num=%d\n", __func__, payload[READDONE_IDX_MSW_TS], payload[READDONE_IDX_LSW_TS], payload[READDONE_IDX_MEMMAP_HDL], payload[READDONE_IDX_FLAGS], payload[READDONE_IDX_SEQ_ID], payload[READDONE_IDX_NUMFRAMES]); if (ac->io_mode == SYNC_IO_MODE) { if (port->buf == NULL) { pr_err("%s: Unexpected Write Done\n", __func__); return -EINVAL; } spin_lock_irqsave(&port->dsp_lock, dsp_flags); token = data->token; port->buf[token].used = 0; if (port->buf[token].phys != payload[READDONE_IDX_BUFADD_LSW]) { pr_err("Buf expected[%p]rxed[%p]\n",\ (void *)port->buf[token].phys,\ (void *)payload[READDONE_IDX_BUFADD_LSW]); spin_unlock_irqrestore(&port->dsp_lock, dsp_flags); break; } port->buf[token].actual_size = payload[READDONE_IDX_SIZE]; spin_unlock_irqrestore(&port->dsp_lock, dsp_flags); } break; } case ASM_DATA_EVENT_EOS: case ASM_DATA_EVENT_RENDERED_EOS: pr_debug("%s:EOS ACK received: rxed opcode[0x%x]\n", __func__, data->opcode); break; case ASM_SESSION_EVENTX_OVERFLOW: pr_err("ASM_SESSION_EVENTX_OVERFLOW\n"); break; case ASM_SESSION_CMDRSP_GET_SESSIONTIME_V3: pr_debug("%s: ASM_SESSION_CMDRSP_GET_SESSIONTIME_V3, payload[0] = %d, payload[1] = %d, payload[2] = %d\n", 
__func__, payload[0], payload[1], payload[2]); ac->time_stamp = (uint64_t)(((uint64_t)payload[2] << 32) | payload[1]); if (atomic_read(&ac->cmd_state)) { atomic_set(&ac->cmd_state, 0); wake_up(&ac->cmd_wait); } break; case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY: case ASM_DATA_EVENT_ENC_SR_CM_CHANGE_NOTIFY: pr_debug("%s: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, payload[0] = %d, payload[1] = %d, payload[2] = %d, payload[3] = %d\n", __func__, payload[0], payload[1], payload[2], payload[3]); break; } if (ac->cb) ac->cb(data->opcode, data->token, data->payload, ac->priv); return 0; } void *q6asm_is_cpu_buf_avail(int dir, struct audio_client *ac, uint32_t *size, uint32_t *index) { void *data; unsigned char idx; struct audio_port_data *port; if (!ac || ((dir != IN) && (dir != OUT))) return NULL; if (ac->io_mode == SYNC_IO_MODE) { port = &ac->port[dir]; mutex_lock(&port->lock); idx = port->cpu_buf; if (port->buf == NULL) { pr_debug("%s:Buffer pointer null\n", __func__); mutex_unlock(&port->lock); return NULL; } /* dir 0: used = 0 means buf in use dir 1: used = 1 means buf in use */ if (port->buf[idx].used == dir) { /* To make it more robust, we could loop and get the next avail buf, its risky though */ pr_debug("%s:Next buf idx[0x%x] not available, dir[%d]\n", __func__, idx, dir); mutex_unlock(&port->lock); return NULL; } *size = port->buf[idx].actual_size; *index = port->cpu_buf; data = port->buf[idx].data; pr_debug("%s:session[%d]index[%d] data[%p]size[%d]\n", __func__, ac->session, port->cpu_buf, data, *size); /* By default increase the cpu_buf cnt user accesses this function,increase cpu buf(to avoid another api)*/ port->buf[idx].used = dir; port->cpu_buf = ((port->cpu_buf + 1) & (port->max_buf_cnt - 1)); mutex_unlock(&port->lock); return data; } return NULL; } void *q6asm_is_cpu_buf_avail_nolock(int dir, struct audio_client *ac, uint32_t *size, uint32_t *index) { void *data; unsigned char idx; struct audio_port_data *port; if (!ac || ((dir != IN) && (dir != OUT))) return NULL; 
port = &ac->port[dir]; idx = port->cpu_buf; if (port->buf == NULL) { pr_debug("%s:Buffer pointer null\n", __func__); return NULL; } /* * dir 0: used = 0 means buf in use * dir 1: used = 1 means buf in use */ if (port->buf[idx].used == dir) { /* * To make it more robust, we could loop and get the * next avail buf, its risky though */ pr_debug("%s:Next buf idx[0x%x] not available, dir[%d]\n", __func__, idx, dir); return NULL; } *size = port->buf[idx].actual_size; *index = port->cpu_buf; data = port->buf[idx].data; pr_debug("%s:session[%d]index[%d] data[%p]size[%d]\n", __func__, ac->session, port->cpu_buf, data, *size); /* * By default increase the cpu_buf cnt * user accesses this function,increase cpu * buf(to avoid another api) */ port->buf[idx].used = dir; port->cpu_buf = ((port->cpu_buf + 1) & (port->max_buf_cnt - 1)); return data; } int q6asm_is_dsp_buf_avail(int dir, struct audio_client *ac) { int ret = -1; struct audio_port_data *port; uint32_t idx; if (!ac || (dir != OUT)) return ret; if (ac->io_mode == SYNC_IO_MODE) { port = &ac->port[dir]; mutex_lock(&port->lock); idx = port->dsp_buf; if (port->buf[idx].used == (dir ^ 1)) { /* To make it more robust, we could loop and get the next avail buf, its risky though */ pr_err("Next buf idx[0x%x] not available, dir[%d]\n", idx, dir); mutex_unlock(&port->lock); return ret; } pr_debug("%s: session[%d]dsp_buf=%d cpu_buf=%d\n", __func__, ac->session, port->dsp_buf, port->cpu_buf); ret = ((port->dsp_buf != port->cpu_buf) ? 
0 : -1);
		mutex_unlock(&port->lock);
	}
	return ret;
}

/*
 * Fill in an APR command header for a per-session ASM command.
 * Source/destination ports encode the session id in the upper byte.
 * When cmd_flg is set the header token carries the session id and
 * cmd_state is armed to 1 so the caller can wait_event_timeout() for
 * the matching ack (the callback clears it back to 0).
 * Serialised under cmd_lock because it reads ac->apr and arms cmd_state.
 */
static void q6asm_add_hdr(struct audio_client *ac, struct apr_hdr *hdr,
			uint32_t pkt_size, uint32_t cmd_flg)
{
	pr_debug("%s:pkt_size=%d cmd_flg=%d session=%d\n", __func__, pkt_size,
		cmd_flg, ac->session);
	mutex_lock(&ac->cmd_lock);
	hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
				APR_HDR_LEN(sizeof(struct apr_hdr)),\
				APR_PKT_VER);
	/* Route: APPS/ASM client service -> ADSP/ASM service. */
	hdr->src_svc = ((struct apr_svc *)ac->apr)->id;
	hdr->src_domain = APR_DOMAIN_APPS;
	hdr->dest_svc = APR_SVC_ASM;
	hdr->dest_domain = APR_DOMAIN_ADSP;
	hdr->src_port = ((ac->session << 8) & 0xFF00) | 0x01;
	hdr->dest_port = ((ac->session << 8) & 0xFF00) | 0x01;
	if (cmd_flg) {
		hdr->token = ac->session;
		/* Armed here; cleared by q6asm_callback on the ack. */
		atomic_set(&ac->cmd_state, 1);
	}
	hdr->pkt_size  = pkt_size;
	mutex_unlock(&ac->cmd_lock);
	return;
}

/*
 * Lock-free variant of q6asm_add_hdr for callers that must not sleep
 * (async command paths); otherwise identical header stamping.
 */
static void q6asm_add_hdr_async(struct audio_client *ac, struct apr_hdr *hdr,
			uint32_t pkt_size, uint32_t cmd_flg)
{
	pr_debug("pkt_size = %d, cmd_flg = %d, session = %d\n",
			pkt_size, cmd_flg, ac->session);
	hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
				APR_HDR_LEN(sizeof(struct apr_hdr)),\
				APR_PKT_VER);
	hdr->src_svc = ((struct apr_svc *)ac->apr)->id;
	hdr->src_domain = APR_DOMAIN_APPS;
	hdr->dest_svc = APR_SVC_ASM;
	hdr->dest_domain = APR_DOMAIN_ADSP;
	hdr->src_port = ((ac->session << 8) & 0xFF00) | 0x01;
	hdr->dest_port = ((ac->session << 8) & 0xFF00) | 0x01;
	if (cmd_flg) {
		hdr->token = ac->session;
		atomic_set(&ac->cmd_state, 1);
	}
	hdr->pkt_size  = pkt_size;
	return;
}

/*
 * Header for commands on the shared memory-map port: ports are 0 and the
 * caller supplies the token (encodes session and direction for the
 * mmap callback to route the response).
 */
static void q6asm_add_mmaphdr(struct audio_client *ac, struct apr_hdr *hdr,
			u32 pkt_size, u32 cmd_flg, u32 token)
{
	pr_debug("%s:pkt size=%d cmd_flg=%d\n", __func__, pkt_size, cmd_flg);
	hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	hdr->src_port = 0;
	hdr->dest_port = 0;
	if (cmd_flg) {
		hdr->token = token;
		atomic_set(&ac->cmd_state, 1);
	}
	hdr->pkt_size  = pkt_size;
	return;
}

/*
 * Open an ASM read (capture) stream for the given encoder format and
 * wait up to 5 s for the DSP ack.  Returns 0 on success, -EINVAL on
 * bad arguments, send failure, or timeout.
 */
int q6asm_open_read(struct audio_client *ac,
		uint32_t format)
{
	int rc = 0x00;
	struct
asm_stream_cmd_open_read_v2 open; uint16_t bits_per_sample = 16; config_debug_fs_reset_index(); if ((ac == NULL) || (ac->apr == NULL)) { pr_err("%s: APR handle NULL\n", __func__); return -EINVAL; } pr_debug("%s:session[%d]", __func__, ac->session); q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE); open.hdr.opcode = ASM_STREAM_CMD_OPEN_READ_V2; /* Stream prio : High, provide meta info with encoded frames */ open.src_endpointype = ASM_END_POINT_DEVICE_MATRIX; open.preprocopo_id = get_asm_topology(); if (open.preprocopo_id == 0) open.preprocopo_id = ASM_STREAM_POSTPROC_TOPO_ID_DEFAULT; open.bits_per_sample = bits_per_sample; switch (format) { case FORMAT_LINEAR_PCM: open.mode_flags = 0x00; open.enc_cfg_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2; break; case FORMAT_MPEG4_AAC: open.mode_flags = BUFFER_META_ENABLE; open.enc_cfg_id = ASM_MEDIA_FMT_AAC_V2; break; case FORMAT_V13K: open.mode_flags = BUFFER_META_ENABLE; open.enc_cfg_id = ASM_MEDIA_FMT_V13K_FS; break; case FORMAT_EVRC: open.mode_flags = BUFFER_META_ENABLE; open.enc_cfg_id = ASM_MEDIA_FMT_EVRC_FS; break; case FORMAT_AMRNB: open.mode_flags = BUFFER_META_ENABLE ; open.enc_cfg_id = ASM_MEDIA_FMT_AMRNB_FS; break; case FORMAT_AMRWB: open.mode_flags = BUFFER_META_ENABLE ; open.enc_cfg_id = ASM_MEDIA_FMT_AMRWB_FS; break; default: pr_err("Invalid format[%d]\n", format); goto fail_cmd; } rc = apr_send_pkt(ac->apr, (uint32_t *) &open); if (rc < 0) { pr_err("open failed op[0x%x]rc[%d]\n", \ open.hdr.opcode, rc); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s: timeout. 
waited for open read rc[%d]\n", __func__, rc); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_open_write(struct audio_client *ac, uint32_t format) { int rc = 0x00; struct asm_stream_cmd_open_write_v2 open; if ((ac == NULL) || (ac->apr == NULL)) { pr_err("%s: APR handle NULL\n", __func__); return -EINVAL; } pr_debug("%s: session[%d] wr_format[0x%x]", __func__, ac->session, format); q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE); open.hdr.opcode = ASM_STREAM_CMD_OPEN_WRITE_V2; open.mode_flags = 0x00; /* source endpoint : matrix */ open.sink_endpointype = ASM_END_POINT_DEVICE_MATRIX; open.bits_per_sample = 16; open.postprocopo_id = get_asm_topology(); if (open.postprocopo_id == 0) open.postprocopo_id = ASM_STREAM_POSTPROC_TOPO_ID_DEFAULT; switch (format) { case FORMAT_LINEAR_PCM: open.dec_fmt_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2; break; case FORMAT_MPEG4_AAC: open.dec_fmt_id = ASM_MEDIA_FMT_AAC_V2; break; case FORMAT_MPEG4_MULTI_AAC: open.dec_fmt_id = ASM_MEDIA_FMT_DOLBY_AAC; break; case FORMAT_WMA_V9: open.dec_fmt_id = ASM_MEDIA_FMT_WMA_V9_V2; break; case FORMAT_WMA_V10PRO: open.dec_fmt_id = ASM_MEDIA_FMT_WMA_V10PRO_V2; break; case FORMAT_MP3: open.dec_fmt_id = ASM_MEDIA_FMT_MP3; break; default: pr_err("%s: Invalid format[%d]\n", __func__, format); goto fail_cmd; } rc = apr_send_pkt(ac->apr, (uint32_t *) &open); if (rc < 0) { pr_err("%s: open failed op[0x%x]rc[%d]\n", \ __func__, open.hdr.opcode, rc); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s: timeout. 
waited for open write rc[%d]\n", __func__, rc); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_open_read_write(struct audio_client *ac, uint32_t rd_format, uint32_t wr_format) { int rc = 0x00; struct asm_stream_cmd_open_readwrite_v2 open; if ((ac == NULL) || (ac->apr == NULL)) { pr_err("APR handle NULL\n"); return -EINVAL; } pr_debug("%s: session[%d]", __func__, ac->session); pr_debug("wr_format[0x%x]rd_format[0x%x]", wr_format, rd_format); ac->io_mode |= NT_MODE; q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE); open.hdr.opcode = ASM_STREAM_CMD_OPEN_READWRITE_V2; open.mode_flags = BUFFER_META_ENABLE; open.bits_per_sample = 16; /* source endpoint : matrix */ open.postprocopo_id = get_asm_topology(); if (open.postprocopo_id == 0) open.postprocopo_id = ASM_STREAM_POSTPROC_TOPO_ID_DEFAULT; switch (wr_format) { case FORMAT_LINEAR_PCM: open.dec_fmt_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2; break; case FORMAT_MPEG4_AAC: open.dec_fmt_id = ASM_MEDIA_FMT_AAC_V2; break; case FORMAT_MPEG4_MULTI_AAC: open.dec_fmt_id = ASM_MEDIA_FMT_DOLBY_AAC; break; case FORMAT_WMA_V9: open.dec_fmt_id = ASM_MEDIA_FMT_WMA_V9_V2; break; case FORMAT_WMA_V10PRO: open.dec_fmt_id = ASM_MEDIA_FMT_WMA_V10PRO_V2; break; case FORMAT_AMRNB: open.dec_fmt_id = ASM_MEDIA_FMT_AMRNB_FS; break; case FORMAT_AMRWB: open.dec_fmt_id = ASM_MEDIA_FMT_AMRWB_FS; break; case FORMAT_V13K: open.dec_fmt_id = ASM_MEDIA_FMT_V13K_FS; break; case FORMAT_EVRC: open.dec_fmt_id = ASM_MEDIA_FMT_EVRC_FS; break; case FORMAT_EVRCB: open.dec_fmt_id = ASM_MEDIA_FMT_EVRCB_FS; break; case FORMAT_EVRCWB: open.dec_fmt_id = ASM_MEDIA_FMT_EVRCWB_FS; break; case FORMAT_MP3: open.dec_fmt_id = ASM_MEDIA_FMT_MP3; break; default: pr_err("Invalid format[%d]\n", wr_format); goto fail_cmd; } switch (rd_format) { case FORMAT_LINEAR_PCM: open.enc_cfg_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2; break; case FORMAT_MPEG4_AAC: open.enc_cfg_id = ASM_MEDIA_FMT_AAC_V2; break; case FORMAT_V13K: open.enc_cfg_id = ASM_MEDIA_FMT_V13K_FS; break; 
case FORMAT_EVRC: open.enc_cfg_id = ASM_MEDIA_FMT_EVRC_FS; break; case FORMAT_AMRNB: open.enc_cfg_id = ASM_MEDIA_FMT_AMRNB_FS; break; case FORMAT_AMRWB: open.enc_cfg_id = ASM_MEDIA_FMT_AMRWB_FS; break; default: pr_err("Invalid format[%d]\n", rd_format); goto fail_cmd; } pr_debug("%s:rdformat[0x%x]wrformat[0x%x]\n", __func__, open.enc_cfg_id, open.dec_fmt_id); rc = apr_send_pkt(ac->apr, (uint32_t *) &open); if (rc < 0) { pr_err("open failed op[0x%x]rc[%d]\n", \ open.hdr.opcode, rc); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("timeout. waited for open read-write rc[%d]\n", rc); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_run(struct audio_client *ac, uint32_t flags, uint32_t msw_ts, uint32_t lsw_ts) { struct asm_session_cmd_run_v2 run; int rc; if (!ac || ac->apr == NULL) { pr_err("APR handle NULL\n"); return -EINVAL; } pr_debug("%s session[%d]", __func__, ac->session); q6asm_add_hdr(ac, &run.hdr, sizeof(run), TRUE); run.hdr.opcode = ASM_SESSION_CMD_RUN_V2; run.flags = flags; run.time_lsw = lsw_ts; run.time_msw = msw_ts; config_debug_fs_run(); rc = apr_send_pkt(ac->apr, (uint32_t *) &run); if (rc < 0) { pr_err("Commmand run failed[%d]", rc); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("timeout. 
waited for run success rc[%d]", rc); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_run_nowait(struct audio_client *ac, uint32_t flags, uint32_t msw_ts, uint32_t lsw_ts) { struct asm_session_cmd_run_v2 run; int rc; if (!ac || ac->apr == NULL) { pr_err("%s:APR handle NULL\n", __func__); return -EINVAL; } pr_debug("session[%d]", ac->session); q6asm_add_hdr_async(ac, &run.hdr, sizeof(run), TRUE); run.hdr.opcode = ASM_SESSION_CMD_RUN_V2; run.flags = flags; run.time_lsw = lsw_ts; run.time_msw = msw_ts; rc = apr_send_pkt(ac->apr, (uint32_t *) &run); if (rc < 0) { pr_err("%s:Commmand run failed[%d]", __func__, rc); return -EINVAL; } return 0; } int q6asm_enc_cfg_blk_aac(struct audio_client *ac, uint32_t frames_per_buf, uint32_t sample_rate, uint32_t channels, uint32_t bit_rate, uint32_t mode, uint32_t format) { struct asm_aac_enc_cfg_v2 enc_cfg; int rc = 0; pr_debug("%s:session[%d]frames[%d]SR[%d]ch[%d]bitrate[%d]mode[%d] format[%d]", __func__, ac->session, frames_per_buf, sample_rate, channels, bit_rate, mode, format); q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE); enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2; enc_cfg.encdec.param_size = sizeof(struct asm_aac_enc_cfg_v2) - sizeof(struct asm_stream_cmd_set_encdec_param); enc_cfg.encblk.frames_per_buf = frames_per_buf; enc_cfg.encblk.enc_cfg_blk_size = enc_cfg.encdec.param_size - sizeof(struct asm_enc_cfg_blk_param_v2); enc_cfg.bit_rate = bit_rate; enc_cfg.enc_mode = mode; enc_cfg.aac_fmt_flag = format; enc_cfg.channel_cfg = channels; enc_cfg.sample_rate = sample_rate; rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg); if (rc < 0) { pr_err("Comamnd %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("timeout. 
waited for FORMAT_UPDATE\n"); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_set_encdec_chan_map(struct audio_client *ac, uint32_t num_channels) { struct asm_dec_out_chan_map_param chan_map; u8 *channel_mapping; int rc = 0; pr_debug("%s: Session %d, num_channels = %d\n", __func__, ac->session, num_channels); q6asm_add_hdr(ac, &chan_map.hdr, sizeof(chan_map), TRUE); chan_map.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; chan_map.encdec.param_id = ASM_PARAM_ID_DEC_OUTPUT_CHAN_MAP; chan_map.encdec.param_size = sizeof(struct asm_dec_out_chan_map_param) - (sizeof(struct apr_hdr) + sizeof(struct asm_stream_cmd_set_encdec_param)); chan_map.num_channels = num_channels; channel_mapping = chan_map.channel_mapping; memset(channel_mapping, PCM_CHANNEL_NULL, MAX_CHAN_MAP_CHANNELS); if (q6asm_map_channels(channel_mapping, num_channels)) return -EINVAL; rc = apr_send_pkt(ac->apr, (uint32_t *) &chan_map); if (rc < 0) { pr_err("%s:Command opcode[0x%x]paramid[0x%x] failed\n", __func__, ASM_STREAM_CMD_SET_ENCDEC_PARAM, ASM_PARAM_ID_DEC_OUTPUT_CHAN_MAP); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s:timeout opcode[0x%x]\n", __func__, chan_map.hdr.opcode); rc = -ETIMEDOUT; goto fail_cmd; } return 0; fail_cmd: return rc; } int q6asm_enc_cfg_blk_pcm(struct audio_client *ac, uint32_t rate, uint32_t channels) { struct asm_multi_channel_pcm_enc_cfg_v2 enc_cfg; u8 *channel_mapping; u32 frames_per_buf = 0; int rc = 0; pr_debug("%s: Session %d, rate = %d, channels = %d\n", __func__, ac->session, rate, channels); q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE); enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2; enc_cfg.encdec.param_size = sizeof(enc_cfg) - sizeof(enc_cfg.hdr) - sizeof(enc_cfg.encdec); enc_cfg.encblk.frames_per_buf = frames_per_buf; enc_cfg.encblk.enc_cfg_blk_size = enc_cfg.encdec.param_size - sizeof(struct 
asm_enc_cfg_blk_param_v2); enc_cfg.num_channels = channels; enc_cfg.bits_per_sample = 16; enc_cfg.sample_rate = rate; enc_cfg.is_signed = 1; channel_mapping = enc_cfg.channel_mapping; /* ??? PHANI */ memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL); if (q6asm_map_channels(channel_mapping, channels)) return -EINVAL; rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg); if (rc < 0) { pr_err("Comamnd open failed\n"); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("timeout opcode[0x%x] ", enc_cfg.hdr.opcode); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_enc_cfg_blk_pcm_native(struct audio_client *ac, uint32_t rate, uint32_t channels) { struct asm_multi_channel_pcm_enc_cfg_v2 enc_cfg; u8 *channel_mapping; u32 frames_per_buf = 0; int rc = 0; pr_debug("%s: Session %d, rate = %d, channels = %d\n", __func__, ac->session, rate, channels); q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE); enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2; enc_cfg.encdec.param_size = sizeof(enc_cfg) - sizeof(enc_cfg.hdr) - sizeof(enc_cfg.encdec); enc_cfg.encblk.frames_per_buf = frames_per_buf; enc_cfg.encblk.enc_cfg_blk_size = enc_cfg.encdec.param_size - sizeof(struct asm_enc_cfg_blk_param_v2); enc_cfg.num_channels = 0;/*channels;*/ enc_cfg.bits_per_sample = 16; enc_cfg.sample_rate = 0;/*rate;*/ enc_cfg.is_signed = 1; channel_mapping = enc_cfg.channel_mapping; /* ??? 
PHANI */ memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL); if (q6asm_map_channels(channel_mapping, channels)) return -EINVAL; rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg); if (rc < 0) { pr_err("Comamnd open failed\n"); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("timeout opcode[0x%x] ", enc_cfg.hdr.opcode); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } static int q6asm_map_channels(u8 *channel_mapping, uint32_t channels) { u8 *lchannel_mapping; lchannel_mapping = channel_mapping; pr_debug("%s channels passed: %d\n", __func__, channels); if (channels == 1) { lchannel_mapping[0] = PCM_CHANNEL_FC; } else if (channels == 2) { lchannel_mapping[0] = PCM_CHANNEL_FL; lchannel_mapping[1] = PCM_CHANNEL_FR; } else if (channels == 3) { lchannel_mapping[0] = PCM_CHANNEL_FC; lchannel_mapping[1] = PCM_CHANNEL_FL; lchannel_mapping[2] = PCM_CHANNEL_FR; } else if (channels == 4) { lchannel_mapping[0] = PCM_CHANNEL_FC; lchannel_mapping[1] = PCM_CHANNEL_FL; lchannel_mapping[2] = PCM_CHANNEL_FR; lchannel_mapping[3] = PCM_CHANNEL_LB; } else if (channels == 5) { lchannel_mapping[0] = PCM_CHANNEL_FC; lchannel_mapping[1] = PCM_CHANNEL_FL; lchannel_mapping[2] = PCM_CHANNEL_FR; lchannel_mapping[3] = PCM_CHANNEL_LB; lchannel_mapping[4] = PCM_CHANNEL_RB; } else if (channels == 6) { lchannel_mapping[0] = PCM_CHANNEL_FC; lchannel_mapping[1] = PCM_CHANNEL_FL; lchannel_mapping[2] = PCM_CHANNEL_FR; lchannel_mapping[3] = PCM_CHANNEL_LB; lchannel_mapping[4] = PCM_CHANNEL_RB; lchannel_mapping[5] = PCM_CHANNEL_LFE; } else { pr_err("%s: ERROR.unsupported num_ch = %u\n", __func__, channels); return -EINVAL; } return 0; } int q6asm_enable_sbrps(struct audio_client *ac, uint32_t sbr_ps_enable) { struct asm_aac_sbr_ps_flag_param sbrps; u32 frames_per_buf = 0; int rc = 0; pr_debug("%s: Session %d\n", __func__, ac->session); q6asm_add_hdr(ac, &sbrps.hdr, sizeof(sbrps), TRUE); sbrps.hdr.opcode = 
ASM_STREAM_CMD_SET_ENCDEC_PARAM; sbrps.encdec.param_id = ASM_PARAM_ID_AAC_SBR_PS_FLAG; sbrps.encdec.param_size = sizeof(struct asm_aac_sbr_ps_flag_param) - sizeof(struct asm_stream_cmd_set_encdec_param); sbrps.encblk.frames_per_buf = frames_per_buf; sbrps.encblk.enc_cfg_blk_size = sbrps.encdec.param_size - sizeof(struct asm_enc_cfg_blk_param_v2); sbrps.sbr_ps_flag = sbr_ps_enable; rc = apr_send_pkt(ac->apr, (uint32_t *) &sbrps); if (rc < 0) { pr_err("Command opcode[0x%x]paramid[0x%x] failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM, ASM_PARAM_ID_AAC_SBR_PS_FLAG); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("timeout opcode[0x%x] ", sbrps.hdr.opcode); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_cfg_dual_mono_aac(struct audio_client *ac, uint16_t sce_left, uint16_t sce_right) { struct asm_aac_dual_mono_mapping_param dual_mono; u32 frames_per_buf = 0; int rc = 0; pr_debug("%s: Session %d, sce_left = %d, sce_right = %d\n", __func__, ac->session, sce_left, sce_right); q6asm_add_hdr(ac, &dual_mono.hdr, sizeof(dual_mono), TRUE); dual_mono.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; dual_mono.encdec.param_id = ASM_PARAM_ID_AAC_DUAL_MONO_MAPPING; dual_mono.encdec.param_size = sizeof(struct asm_aac_enc_cfg_v2) - sizeof(struct asm_stream_cmd_set_encdec_param); dual_mono.encblk.frames_per_buf = frames_per_buf; dual_mono.encblk.enc_cfg_blk_size = dual_mono.encdec.param_size - sizeof(struct asm_enc_cfg_blk_param_v2); dual_mono.left_channel_sce = sce_left; dual_mono.right_channel_sce = sce_right; rc = apr_send_pkt(ac->apr, (uint32_t *) &dual_mono); if (rc < 0) { pr_err("%s:Command opcode[0x%x]paramid[0x%x] failed\n", __func__, ASM_STREAM_CMD_SET_ENCDEC_PARAM, ASM_PARAM_ID_AAC_DUAL_MONO_MAPPING); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s:timeout opcode[0x%x]\n", __func__, 
dual_mono.hdr.opcode); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } /* Support for selecting stereo mixing coefficients for B family not done */ int q6asm_cfg_aac_sel_mix_coef(struct audio_client *ac, uint32_t mix_coeff) { /* To Be Done */ return 0; } int q6asm_enc_cfg_blk_qcelp(struct audio_client *ac, uint32_t frames_per_buf, uint16_t min_rate, uint16_t max_rate, uint16_t reduced_rate_level, uint16_t rate_modulation_cmd) { struct asm_v13k_enc_cfg enc_cfg; int rc = 0; pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] reduced_rate_level[0x%4x]rate_modulation_cmd[0x%4x]", __func__, ac->session, frames_per_buf, min_rate, max_rate, reduced_rate_level, rate_modulation_cmd); q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE); enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2; enc_cfg.encdec.param_size = sizeof(struct asm_v13k_enc_cfg) - sizeof(struct asm_stream_cmd_set_encdec_param); enc_cfg.encblk.frames_per_buf = frames_per_buf; enc_cfg.encblk.enc_cfg_blk_size = enc_cfg.encdec.param_size - sizeof(struct asm_enc_cfg_blk_param_v2); enc_cfg.min_rate = min_rate; enc_cfg.max_rate = max_rate; enc_cfg.reduced_rate_cmd = reduced_rate_level; enc_cfg.rate_mod_cmd = rate_modulation_cmd; rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg); if (rc < 0) { pr_err("Comamnd %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("timeout. 
waited for setencdec v13k resp\n"); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_enc_cfg_blk_evrc(struct audio_client *ac, uint32_t frames_per_buf, uint16_t min_rate, uint16_t max_rate, uint16_t rate_modulation_cmd) { struct asm_evrc_enc_cfg enc_cfg; int rc = 0; pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] rate_modulation_cmd[0x%4x]", __func__, ac->session, frames_per_buf, min_rate, max_rate, rate_modulation_cmd); q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE); enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2; enc_cfg.encdec.param_size = sizeof(struct asm_evrc_enc_cfg) - sizeof(struct asm_stream_cmd_set_encdec_param); enc_cfg.encblk.frames_per_buf = frames_per_buf; enc_cfg.encblk.enc_cfg_blk_size = enc_cfg.encdec.param_size - sizeof(struct asm_enc_cfg_blk_param_v2); enc_cfg.min_rate = min_rate; enc_cfg.max_rate = max_rate; enc_cfg.rate_mod_cmd = rate_modulation_cmd; enc_cfg.reserved = 0; rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg); if (rc < 0) { pr_err("Comamnd %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("timeout. 
waited for encdec evrc\n"); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_enc_cfg_blk_amrnb(struct audio_client *ac, uint32_t frames_per_buf, uint16_t band_mode, uint16_t dtx_enable) { struct asm_amrnb_enc_cfg enc_cfg; int rc = 0; pr_debug("%s:session[%d]frames[%d]band_mode[0x%4x]dtx_enable[0x%4x]", __func__, ac->session, frames_per_buf, band_mode, dtx_enable); q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE); enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2; enc_cfg.encdec.param_size = sizeof(struct asm_amrnb_enc_cfg) - sizeof(struct asm_stream_cmd_set_encdec_param); enc_cfg.encblk.frames_per_buf = frames_per_buf; enc_cfg.encblk.enc_cfg_blk_size = enc_cfg.encdec.param_size - sizeof(struct asm_enc_cfg_blk_param_v2); enc_cfg.enc_mode = band_mode; enc_cfg.dtx_mode = dtx_enable; rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg); if (rc < 0) { pr_err("Comamnd %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("timeout. 
waited for set encdec amrnb\n"); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_enc_cfg_blk_amrwb(struct audio_client *ac, uint32_t frames_per_buf, uint16_t band_mode, uint16_t dtx_enable) { struct asm_amrwb_enc_cfg enc_cfg; int rc = 0; pr_debug("%s:session[%d]frames[%d]band_mode[0x%4x]dtx_enable[0x%4x]", __func__, ac->session, frames_per_buf, band_mode, dtx_enable); q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE); enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2; enc_cfg.encdec.param_size = sizeof(struct asm_amrwb_enc_cfg) - sizeof(struct asm_stream_cmd_set_encdec_param); enc_cfg.encblk.frames_per_buf = frames_per_buf; enc_cfg.encblk.enc_cfg_blk_size = enc_cfg.encdec.param_size - sizeof(struct asm_enc_cfg_blk_param_v2); enc_cfg.enc_mode = band_mode; enc_cfg.dtx_mode = dtx_enable; rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg); if (rc < 0) { pr_err("Comamnd %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("timeout. 
waited for FORMAT_UPDATE\n"); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_media_format_block_aac(struct audio_client *ac, struct asm_aac_cfg *cfg) { return q6asm_media_format_block_multi_aac(ac, cfg); } int q6asm_media_format_block_pcm(struct audio_client *ac, uint32_t rate, uint32_t channels) { struct asm_multi_channel_pcm_fmt_blk_v2 fmt; u8 *channel_mapping; int rc = 0; pr_debug("%s:session[%d]rate[%d]ch[%d]\n", __func__, ac->session, rate, channels); q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE); fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2; fmt.fmt_blk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) - sizeof(fmt.fmt_blk); fmt.num_channels = channels; fmt.bits_per_sample = 16; fmt.sample_rate = rate; fmt.is_signed = 1; channel_mapping = fmt.channel_mapping; memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL); if (q6asm_map_channels(channel_mapping, channels)) return -EINVAL; rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt); if (rc < 0) { pr_err("%s:Comamnd open failed\n", __func__); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s:timeout. 
waited for format update\n", __func__); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_media_format_block_multi_aac(struct audio_client *ac, struct asm_aac_cfg *cfg) { struct asm_aac_fmt_blk_v2 fmt; int rc = 0; pr_debug("%s:session[%d]rate[%d]ch[%d]\n", __func__, ac->session, cfg->sample_rate, cfg->ch_cfg); q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE); fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2; fmt.fmt_blk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) - sizeof(fmt.fmt_blk); fmt.aac_fmt_flag = cfg->format; fmt.audio_objype = cfg->aot; /* If zero, PCE is assumed to be available in bitstream*/ fmt.total_size_of_PCE_bits = 0; fmt.channel_config = cfg->ch_cfg; fmt.sample_rate = cfg->sample_rate; pr_info("%s:format=%x cfg_size=%d aac-cfg=%x aot=%d ch=%d sr=%d\n", __func__, fmt.aac_fmt_flag, fmt.fmt_blk.fmt_blk_size, fmt.aac_fmt_flag, fmt.audio_objype, fmt.channel_config, fmt.sample_rate); rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt); if (rc < 0) { pr_err("%s:Comamnd open failed\n", __func__); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s:timeout. 
waited for FORMAT_UPDATE\n", __func__); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_media_format_block_wma(struct audio_client *ac, void *cfg) { struct asm_wmastdv9_fmt_blk_v2 fmt; struct asm_wma_cfg *wma_cfg = (struct asm_wma_cfg *)cfg; int rc = 0; pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d], balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x]\n", ac->session, wma_cfg->format_tag, wma_cfg->sample_rate, wma_cfg->ch_cfg, wma_cfg->avg_bytes_per_sec, wma_cfg->block_align, wma_cfg->valid_bits_per_sample, wma_cfg->ch_mask, wma_cfg->encode_opt); q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE); fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2; fmt.fmtblk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) - sizeof(fmt.fmtblk); fmt.fmtag = wma_cfg->format_tag; fmt.num_channels = wma_cfg->ch_cfg; fmt.sample_rate = wma_cfg->sample_rate; fmt.avg_bytes_per_sec = wma_cfg->avg_bytes_per_sec; fmt.blk_align = wma_cfg->block_align; fmt.bits_per_sample = wma_cfg->valid_bits_per_sample; fmt.channel_mask = wma_cfg->ch_mask; fmt.enc_options = wma_cfg->encode_opt; rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt); if (rc < 0) { pr_err("%s:Comamnd open failed\n", __func__); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s:timeout. 
waited for FORMAT_UPDATE\n", __func__); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_media_format_block_wmapro(struct audio_client *ac, void *cfg) { struct asm_wmaprov10_fmt_blk_v2 fmt; struct asm_wmapro_cfg *wmapro_cfg = (struct asm_wmapro_cfg *)cfg; int rc = 0; pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d], balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x], adv_enc_opt[0x%4x], adv_enc_opt2[0x%8x]\n", ac->session, wmapro_cfg->format_tag, wmapro_cfg->sample_rate, wmapro_cfg->ch_cfg, wmapro_cfg->avg_bytes_per_sec, wmapro_cfg->block_align, wmapro_cfg->valid_bits_per_sample, wmapro_cfg->ch_mask, wmapro_cfg->encode_opt, wmapro_cfg->adv_encode_opt, wmapro_cfg->adv_encode_opt2); q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE); fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2; fmt.fmtblk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) - sizeof(fmt.fmtblk); fmt.fmtag = wmapro_cfg->format_tag; fmt.num_channels = wmapro_cfg->ch_cfg; fmt.sample_rate = wmapro_cfg->sample_rate; fmt.avg_bytes_per_sec = wmapro_cfg->avg_bytes_per_sec; fmt.blk_align = wmapro_cfg->block_align; fmt.bits_per_sample = wmapro_cfg->valid_bits_per_sample; fmt.channel_mask = wmapro_cfg->ch_mask; fmt.enc_options = wmapro_cfg->encode_opt; fmt.usAdvancedEncodeOpt = wmapro_cfg->adv_encode_opt; fmt.advanced_enc_options2 = wmapro_cfg->adv_encode_opt2; rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt); if (rc < 0) { pr_err("%s:Comamnd open failed\n", __func__); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s:timeout. 
waited for FORMAT_UPDATE\n", __func__); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_memory_map(struct audio_client *ac, uint32_t buf_add, int dir, uint32_t bufsz, uint32_t bufcnt) { struct avs_cmd_shared_mem_map_regions *mmap_regions = NULL; struct avs_shared_map_region_payload *mregions = NULL; struct audio_port_data *port = NULL; void *mmap_region_cmd = NULL; void *payload = NULL; struct asm_buffer_node *buffer_node = NULL; int rc = 0; int cmd_size = 0; if (!ac || ac->apr == NULL || ac->mmap_apr == NULL) { pr_err("APR handle NULL\n"); return -EINVAL; } pr_debug("%s: Session[%d]\n", __func__, ac->session); buffer_node = kmalloc(sizeof(struct asm_buffer_node), GFP_KERNEL); if (!buffer_node) return -ENOMEM; cmd_size = sizeof(struct avs_cmd_shared_mem_map_regions) + sizeof(struct avs_shared_map_region_payload) * bufcnt; mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL); if (mmap_region_cmd == NULL) { pr_err("%s: Mem alloc failed\n", __func__); rc = -EINVAL; return rc; } mmap_regions = (struct avs_cmd_shared_mem_map_regions *) mmap_region_cmd; q6asm_add_mmaphdr(ac, &mmap_regions->hdr, cmd_size, TRUE, ((ac->session << 8) | dir)); mmap_regions->hdr.opcode = ASM_CMD_SHARED_MEM_MAP_REGIONS; mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL; mmap_regions->num_regions = bufcnt & 0x00ff; mmap_regions->property_flag = 0x00; payload = ((u8 *) mmap_region_cmd + sizeof(struct avs_cmd_shared_mem_map_regions)); mregions = (struct avs_shared_map_region_payload *)payload; ac->port[dir].tmp_hdl = 0; port = &ac->port[dir]; pr_debug("%s, buf_add 0x%x, bufsz: %d\n", __func__, buf_add, bufsz); mregions->shm_addr_lsw = buf_add; /* Using only 32 bit address */ mregions->shm_addr_msw = 0; mregions->mem_size_bytes = bufsz; ++mregions; rc = apr_send_pkt(ac->mmap_apr, (uint32_t *) mmap_region_cmd); if (rc < 0) { pr_err("mmap op[0x%x]rc[%d]\n", mmap_regions->hdr.opcode, rc); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, 
(atomic_read(&ac->cmd_state) == 0 && ac->port[dir].tmp_hdl), 5*HZ); if (!rc) { pr_err("timeout. waited for memory_map\n"); rc = -EINVAL; goto fail_cmd; } buffer_node->buf_addr_lsw = buf_add; buffer_node->mmap_hdl = ac->port[dir].tmp_hdl; list_add_tail(&buffer_node->list, &ac->port[dir].mem_map_handle); ac->port[dir].tmp_hdl = 0; rc = 0; fail_cmd: kfree(mmap_region_cmd); return rc; } int q6asm_memory_unmap(struct audio_client *ac, uint32_t buf_add, int dir) { struct avs_cmd_shared_mem_unmap_regions mem_unmap; struct asm_buffer_node *buf_node = NULL; struct list_head *ptr, *next; int rc = 0; if (!ac || ac->apr == NULL || this_mmap.apr == NULL) { pr_err("APR handle NULL\n"); return -EINVAL; } pr_debug("%s: Session[%d]\n", __func__, ac->session); q6asm_add_mmaphdr(ac, &mem_unmap.hdr, sizeof(struct avs_cmd_shared_mem_unmap_regions), TRUE, ((ac->session << 8) | dir)); mem_unmap.hdr.opcode = ASM_CMD_SHARED_MEM_UNMAP_REGIONS; list_for_each_safe(ptr, next, &ac->port[dir].mem_map_handle) { buf_node = list_entry(ptr, struct asm_buffer_node, list); if (buf_node->buf_addr_lsw == buf_add) { pr_info("%s: Found the element\n", __func__); mem_unmap.mem_map_handle = buf_node->mmap_hdl; break; } } pr_debug("%s: mem_unmap-mem_map_handle: 0x%x", __func__, mem_unmap.mem_map_handle); rc = apr_send_pkt(ac->mmap_apr, (uint32_t *) &mem_unmap); if (rc < 0) { pr_err("mem_unmap op[0x%x]rc[%d]\n", mem_unmap.hdr.opcode, rc); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5 * HZ); if (!rc) { pr_err("timeout. 
waited for memory_map\n"); rc = -EINVAL; goto fail_cmd; } list_for_each_safe(ptr, next, &ac->port[dir].mem_map_handle) { buf_node = list_entry(ptr, struct asm_buffer_node, list); if (buf_node->buf_addr_lsw == buf_add) { list_del(&buf_node->list); kfree(buf_node); } } rc = 0; fail_cmd: return rc; } static int q6asm_memory_map_regions(struct audio_client *ac, int dir, uint32_t bufsz, uint32_t bufcnt) { struct avs_cmd_shared_mem_map_regions *mmap_regions = NULL; struct avs_shared_map_region_payload *mregions = NULL; struct audio_port_data *port = NULL; struct audio_buffer *ab = NULL; void *mmap_region_cmd = NULL; void *payload = NULL; struct asm_buffer_node *buffer_node = NULL; int rc = 0; int i = 0; int cmd_size = 0; if (!ac || ac->apr == NULL || ac->mmap_apr == NULL) { pr_err("APR handle NULL\n"); return -EINVAL; } pr_debug("%s: Session[%d]\n", __func__, ac->session); cmd_size = sizeof(struct avs_cmd_shared_mem_map_regions) + (sizeof(struct avs_shared_map_region_payload)); buffer_node = kzalloc(sizeof(struct asm_buffer_node) * bufcnt, GFP_KERNEL); mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL); if ((mmap_region_cmd == NULL) || (buffer_node == NULL)) { pr_err("%s: Mem alloc failed\n", __func__); rc = -EINVAL; return rc; } mmap_regions = (struct avs_cmd_shared_mem_map_regions *) mmap_region_cmd; q6asm_add_mmaphdr(ac, &mmap_regions->hdr, cmd_size, TRUE, ((ac->session << 8) | dir)); pr_debug("mmap_region=0x%p token=0x%x\n", mmap_regions, ((ac->session << 8) | dir)); mmap_regions->hdr.opcode = ASM_CMD_SHARED_MEM_MAP_REGIONS; mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL; mmap_regions->num_regions = 1; /*bufcnt & 0x00ff; */ mmap_regions->property_flag = 0x00; pr_debug("map_regions->nregions = %d\n", mmap_regions->num_regions); payload = ((u8 *) mmap_region_cmd + sizeof(struct avs_cmd_shared_mem_map_regions)); mregions = (struct avs_shared_map_region_payload *)payload; ac->port[dir].tmp_hdl = 0; port = &ac->port[dir]; ab = &port->buf[0]; 
mregions->shm_addr_lsw = ab->phys; /* Using only 32 bit address */ mregions->shm_addr_msw = 0; mregions->mem_size_bytes = (bufsz * bufcnt); rc = apr_send_pkt(ac->mmap_apr, (uint32_t *) mmap_region_cmd); if (rc < 0) { pr_err("mmap_regions op[0x%x]rc[%d]\n", mmap_regions->hdr.opcode, rc); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0) , 5*HZ); if (!rc) { pr_err("timeout. waited for memory_map\n"); rc = -EINVAL; goto fail_cmd; } mutex_lock(&ac->cmd_lock); for (i = 0; i < bufcnt; i++) { ab = &port->buf[i]; buffer_node[i].buf_addr_lsw = ab->phys; buffer_node[i].mmap_hdl = ac->port[dir].tmp_hdl; list_add_tail(&buffer_node[i].list, &ac->port[dir].mem_map_handle); pr_debug("%s: i=%d, bufadd[i] = 0x%x, maphdl[i] = 0x%x\n", __func__, i, buffer_node[i].buf_addr_lsw, buffer_node[i].mmap_hdl); } ac->port[dir].tmp_hdl = 0; mutex_unlock(&ac->cmd_lock); rc = 0; pr_debug("%s: exit\n", __func__); fail_cmd: kfree(mmap_region_cmd); return rc; } static int q6asm_memory_unmap_regions(struct audio_client *ac, int dir, uint32_t bufsz, uint32_t bufcnt) { struct avs_cmd_shared_mem_unmap_regions mem_unmap; struct audio_port_data *port = NULL; struct asm_buffer_node *buf_node = NULL; struct list_head *ptr, *next; uint32_t buf_add; int rc = 0; int cmd_size = 0; if (!ac || ac->apr == NULL || ac->mmap_apr == NULL) { pr_err("APR handle NULL\n"); return -EINVAL; } pr_debug("%s: Session[%d]\n", __func__, ac->session); cmd_size = sizeof(struct avs_cmd_shared_mem_unmap_regions); q6asm_add_mmaphdr(ac, &mem_unmap.hdr, cmd_size, TRUE, ((ac->session << 8) | dir)); port = &ac->port[dir]; buf_add = (uint32_t)port->buf->phys; mem_unmap.hdr.opcode = ASM_CMD_SHARED_MEM_UNMAP_REGIONS; list_for_each_safe(ptr, next, &ac->port[dir].mem_map_handle) { buf_node = list_entry(ptr, struct asm_buffer_node, list); if (buf_node->buf_addr_lsw == buf_add) { pr_debug("%s: Found the element\n", __func__); mem_unmap.mem_map_handle = buf_node->mmap_hdl; break; } } 
pr_debug("%s: mem_unmap-mem_map_handle: 0x%x", __func__, mem_unmap.mem_map_handle); rc = apr_send_pkt(ac->mmap_apr, (uint32_t *) &mem_unmap); if (rc < 0) { pr_err("mmap_regions op[0x%x]rc[%d]\n", mem_unmap.hdr.opcode, rc); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("timeout. waited for memory_unmap\n"); goto fail_cmd; } list_for_each_safe(ptr, next, &ac->port[dir].mem_map_handle) { buf_node = list_entry(ptr, struct asm_buffer_node, list); if (buf_node->buf_addr_lsw == buf_add) { list_del(&buf_node->list); kfree(buf_node); } } rc = 0; fail_cmd: return rc; } int q6asm_set_lrgain(struct audio_client *ac, int left_gain, int right_gain) { struct asm_volume_ctrl_lr_chan_gain lrgain; int sz = 0; int rc = 0; sz = sizeof(struct asm_volume_ctrl_lr_chan_gain); q6asm_add_hdr_async(ac, &lrgain.hdr, sz, TRUE); lrgain.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2; lrgain.param.data_payload_addr_lsw = 0; lrgain.param.data_payload_addr_msw = 0; lrgain.param.mem_map_handle = 0; lrgain.param.data_payload_size = sizeof(lrgain) - sizeof(lrgain.hdr) - sizeof(lrgain.param); lrgain.data.module_id = ASM_MODULE_ID_VOL_CTRL; lrgain.data.param_id = ASM_PARAM_ID_VOL_CTRL_LR_CHANNEL_GAIN; lrgain.data.param_size = lrgain.param.data_payload_size - sizeof(lrgain.data); lrgain.data.reserved = 0; lrgain.l_chan_gain = left_gain; lrgain.r_chan_gain = right_gain; rc = apr_send_pkt(ac->apr, (uint32_t *) &lrgain); if (rc < 0) { pr_err("%s: set-params send failed paramid[0x%x]\n", __func__, lrgain.data.param_id); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__, lrgain.data.param_id); rc = -EINVAL; goto fail_cmd; } rc = 0; fail_cmd: return rc; } int q6asm_set_mute(struct audio_client *ac, int muteflag) { struct asm_volume_ctrl_mute_config mute; int sz = 0; int rc = 0; sz = sizeof(struct 
asm_volume_ctrl_mute_config); q6asm_add_hdr_async(ac, &mute.hdr, sz, TRUE); mute.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2; mute.param.data_payload_addr_lsw = 0; mute.param.data_payload_addr_msw = 0; mute.param.mem_map_handle = 0; mute.param.data_payload_size = sizeof(mute) - sizeof(mute.hdr) - sizeof(mute.param); mute.data.module_id = ASM_MODULE_ID_VOL_CTRL; mute.data.param_id = ASM_PARAM_ID_VOL_CTRL_MUTE_CONFIG; mute.data.param_size = mute.param.data_payload_size - sizeof(mute.data); mute.data.reserved = 0; mute.mute_flag = muteflag; rc = apr_send_pkt(ac->apr, (uint32_t *) &mute); if (rc < 0) { pr_err("%s: set-params send failed paramid[0x%x]\n", __func__, mute.data.param_id); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__, mute.data.param_id); rc = -EINVAL; goto fail_cmd; } rc = 0; fail_cmd: return rc; } int q6asm_set_volume(struct audio_client *ac, int volume) { struct asm_volume_ctrl_master_gain vol; int sz = 0; int rc = 0; sz = sizeof(struct asm_volume_ctrl_master_gain); q6asm_add_hdr_async(ac, &vol.hdr, sz, TRUE); vol.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2; vol.param.data_payload_addr_lsw = 0; vol.param.data_payload_addr_msw = 0; vol.param.mem_map_handle = 0; vol.param.data_payload_size = sizeof(vol) - sizeof(vol.hdr) - sizeof(vol.param); vol.data.module_id = ASM_MODULE_ID_VOL_CTRL; vol.data.param_id = ASM_PARAM_ID_VOL_CTRL_MASTER_GAIN; vol.data.param_size = vol.param.data_payload_size - sizeof(vol.data); vol.data.reserved = 0; vol.master_gain = volume; rc = apr_send_pkt(ac->apr, (uint32_t *) &vol); if (rc < 0) { pr_err("%s: set-params send failed paramid[0x%x]\n", __func__, vol.data.param_id); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__, vol.data.param_id); rc = -EINVAL; goto 
fail_cmd; } rc = 0; fail_cmd: return rc; } int q6asm_set_softpause(struct audio_client *ac, struct asm_softpause_params *pause_param) { struct asm_soft_pause_params softpause; int sz = 0; int rc = 0; sz = sizeof(struct asm_soft_pause_params); q6asm_add_hdr_async(ac, &softpause.hdr, sz, TRUE); softpause.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2; softpause.param.data_payload_addr_lsw = 0; softpause.param.data_payload_addr_msw = 0; softpause.param.mem_map_handle = 0; softpause.param.data_payload_size = sizeof(softpause) - sizeof(softpause.hdr) - sizeof(softpause.param); softpause.data.module_id = ASM_MODULE_ID_VOL_CTRL; softpause.data.param_id = ASM_PARAM_ID_SOFT_PAUSE_PARAMETERS; softpause.data.param_size = softpause.param.data_payload_size - sizeof(softpause.data); softpause.data.reserved = 0; softpause.enable_flag = pause_param->enable; softpause.period = pause_param->period; softpause.step = pause_param->step; softpause.ramping_curve = pause_param->rampingcurve; rc = apr_send_pkt(ac->apr, (uint32_t *) &softpause); if (rc < 0) { pr_err("%s: set-params send failed paramid[0x%x]\n", __func__, softpause.data.param_id); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__, softpause.data.param_id); rc = -EINVAL; goto fail_cmd; } rc = 0; fail_cmd: return rc; } int q6asm_set_softvolume(struct audio_client *ac, struct asm_softvolume_params *softvol_param) { struct asm_soft_step_volume_params softvol; int sz = 0; int rc = 0; sz = sizeof(struct asm_soft_step_volume_params); q6asm_add_hdr_async(ac, &softvol.hdr, sz, TRUE); softvol.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2; softvol.param.data_payload_addr_lsw = 0; softvol.param.data_payload_addr_msw = 0; softvol.param.mem_map_handle = 0; softvol.param.data_payload_size = sizeof(softvol) - sizeof(softvol.hdr) - sizeof(softvol.param); softvol.data.module_id = ASM_MODULE_ID_VOL_CTRL; 
softvol.data.param_id = ASM_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS; softvol.data.param_size = softvol.param.data_payload_size - sizeof(softvol.data); softvol.data.reserved = 0; softvol.period = softvol_param->period; softvol.step = softvol_param->step; softvol.ramping_curve = softvol_param->rampingcurve; rc = apr_send_pkt(ac->apr, (uint32_t *) &softvol); if (rc < 0) { pr_err("%s: set-params send failed paramid[0x%x]\n", __func__, softvol.data.param_id); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__, softvol.data.param_id); rc = -EINVAL; goto fail_cmd; } rc = 0; fail_cmd: return rc; } int q6asm_equalizer(struct audio_client *ac, void *eq_p) { struct asm_eq_params eq; struct msm_audio_eq_stream_config *eq_params = NULL; int i = 0; int sz = 0; int rc = 0; if (eq_p == NULL) { pr_err("%s[%d]: Invalid Eq param\n", __func__, ac->session); rc = -EINVAL; goto fail_cmd; } sz = sizeof(struct asm_eq_params); eq_params = (struct msm_audio_eq_stream_config *) eq_p; q6asm_add_hdr(ac, &eq.hdr, sz, TRUE); eq.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2; eq.param.data_payload_addr_lsw = 0; eq.param.data_payload_addr_msw = 0; eq.param.mem_map_handle = 0; eq.param.data_payload_size = sizeof(eq) - sizeof(eq.hdr) - sizeof(eq.param); eq.data.module_id = ASM_MODULE_ID_EQUALIZER; eq.data.param_id = ASM_PARAM_ID_EQUALIZER_PARAMETERS; eq.data.param_size = eq.param.data_payload_size - sizeof(eq.data); eq.enable_flag = eq_params->enable; eq.num_bands = eq_params->num_bands; pr_debug("%s: enable:%d numbands:%d\n", __func__, eq_params->enable, eq_params->num_bands); for (i = 0; i < eq_params->num_bands; i++) { eq.eq_bands[i].band_idx = eq_params->eq_bands[i].band_idx; eq.eq_bands[i].filterype = eq_params->eq_bands[i].filter_type; eq.eq_bands[i].center_freq_hz = eq_params->eq_bands[i].center_freq_hz; eq.eq_bands[i].filter_gain = eq_params->eq_bands[i].filter_gain; 
eq.eq_bands[i].q_factor = eq_params->eq_bands[i].q_factor; pr_debug("%s: filter_type:%u bandnum:%d\n", __func__, eq_params->eq_bands[i].filter_type, i); pr_debug("%s: center_freq_hz:%u bandnum:%d\n", __func__, eq_params->eq_bands[i].center_freq_hz, i); pr_debug("%s: filter_gain:%d bandnum:%d\n", __func__, eq_params->eq_bands[i].filter_gain, i); pr_debug("%s: q_factor:%d bandnum:%d\n", __func__, eq_params->eq_bands[i].q_factor, i); } rc = apr_send_pkt(ac->apr, (uint32_t *)&eq); if (rc < 0) { pr_err("%s: set-params send failed paramid[0x%x]\n", __func__, eq.data.param_id); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__, eq.data.param_id); rc = -EINVAL; goto fail_cmd; } rc = 0; fail_cmd: return rc; } int q6asm_read(struct audio_client *ac) { struct asm_data_cmd_read_v2 read; struct asm_buffer_node *buf_node = NULL; struct list_head *ptr, *next; struct audio_buffer *ab; int dsp_buf; struct audio_port_data *port; int rc; if (!ac || ac->apr == NULL) { pr_err("APR handle NULL\n"); return -EINVAL; } if (ac->io_mode == SYNC_IO_MODE) { port = &ac->port[OUT]; q6asm_add_hdr(ac, &read.hdr, sizeof(read), FALSE); mutex_lock(&port->lock); dsp_buf = port->dsp_buf; ab = &port->buf[dsp_buf]; pr_debug("%s:session[%d]dsp-buf[%d][%p]cpu_buf[%d][%p]\n", __func__, ac->session, dsp_buf, (void *)port->buf[dsp_buf].data, port->cpu_buf, (void *)port->buf[port->cpu_buf].phys); read.hdr.opcode = ASM_DATA_CMD_READ_V2; read.buf_addr_lsw = ab->phys; read.buf_addr_msw = 0; list_for_each_safe(ptr, next, &ac->port[OUT].mem_map_handle) { buf_node = list_entry(ptr, struct asm_buffer_node, list); if (buf_node->buf_addr_lsw == (uint32_t) ab->phys) read.mem_map_handle = buf_node->mmap_hdl; } pr_debug("memory_map handle in q6asm_read: [%0x]:", read.mem_map_handle); read.buf_size = ab->size; read.seq_id = port->dsp_buf; read.hdr.token = port->dsp_buf; port->dsp_buf = 
(port->dsp_buf + 1) & (port->max_buf_cnt - 1); mutex_unlock(&port->lock); pr_debug("%s:buf add[0x%x] token[%d] uid[%d]\n", __func__, read.buf_addr_lsw, read.hdr.token, read.seq_id); rc = apr_send_pkt(ac->apr, (uint32_t *) &read); if (rc < 0) { pr_err("read op[0x%x]rc[%d]\n", read.hdr.opcode, rc); goto fail_cmd; } return 0; } fail_cmd: return -EINVAL; } int q6asm_read_nolock(struct audio_client *ac) { struct asm_data_cmd_read_v2 read; struct asm_buffer_node *buf_node = NULL; struct list_head *ptr, *next; struct audio_buffer *ab; int dsp_buf; struct audio_port_data *port; int rc; if (!ac || ac->apr == NULL) { pr_err("APR handle NULL\n"); return -EINVAL; } if (ac->io_mode == SYNC_IO_MODE) { port = &ac->port[OUT]; q6asm_add_hdr_async(ac, &read.hdr, sizeof(read), FALSE); dsp_buf = port->dsp_buf; ab = &port->buf[dsp_buf]; pr_debug("%s:session[%d]dsp-buf[%d][%p]cpu_buf[%d][%p]\n", __func__, ac->session, dsp_buf, (void *)port->buf[dsp_buf].data, port->cpu_buf, (void *)port->buf[port->cpu_buf].phys); read.hdr.opcode = ASM_DATA_CMD_READ_V2; read.buf_addr_lsw = ab->phys; read.buf_addr_msw = 0; read.buf_size = ab->size; read.seq_id = port->dsp_buf; read.hdr.token = port->dsp_buf; list_for_each_safe(ptr, next, &ac->port[OUT].mem_map_handle) { buf_node = list_entry(ptr, struct asm_buffer_node, list); if (buf_node->buf_addr_lsw == (uint32_t)ab->phys) { read.mem_map_handle = buf_node->mmap_hdl; break; } } port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1); pr_debug("%s:buf add[0x%x] token[%d] uid[%d]\n", __func__, read.buf_addr_lsw, read.hdr.token, read.seq_id); rc = apr_send_pkt(ac->apr, (uint32_t *) &read); if (rc < 0) { pr_err("read op[0x%x]rc[%d]\n", read.hdr.opcode, rc); goto fail_cmd; } return 0; } fail_cmd: return -EINVAL; } int q6asm_async_write(struct audio_client *ac, struct audio_aio_write_param *param) { int rc = 0; struct asm_data_cmd_write_v2 write; struct asm_buffer_node *buf_node = NULL; struct list_head *ptr, *next; struct audio_buffer *ab; struct 
audio_port_data *port; u32 lbuf_addr_lsw; u32 liomode; if (!ac || ac->apr == NULL) { pr_err("%s: APR handle NULL\n", __func__); return -EINVAL; } q6asm_add_hdr_async(ac, &write.hdr, sizeof(write), FALSE); port = &ac->port[IN]; ab = &port->buf[port->dsp_buf]; /* Pass physical address as token for AIO scheme */ write.hdr.token = param->uid; write.hdr.opcode = ASM_DATA_CMD_WRITE_V2; write.buf_addr_lsw = param->paddr; write.buf_addr_msw = 0x00; write.buf_size = param->len; write.timestamp_msw = param->msw_ts; write.timestamp_lsw = param->lsw_ts; liomode = (ASYNC_IO_MODE | NT_MODE); if (ac->io_mode == liomode) { pr_info("%s: subtracting 32 for header\n", __func__); lbuf_addr_lsw = (write.buf_addr_lsw - 32); } else{ lbuf_addr_lsw = write.buf_addr_lsw; } pr_debug("%s: token[0x%x], buf_addr_lsw[0x%x], buf_size[0x%x], ts_msw[0x%x], ts_lsw[0x%x], lbuf_addr_lsw: 0x[%x]\n", __func__, write.hdr.token, write.buf_addr_lsw, write.buf_size, write.timestamp_msw, write.timestamp_lsw, lbuf_addr_lsw); /* Use 0xFF00 for disabling timestamps */ if (param->flags == 0xFF00) write.flags = (0x00000000 | (param->flags & 0x800000FF)); else write.flags = (0x80000000 | param->flags); write.seq_id = param->uid; list_for_each_safe(ptr, next, &ac->port[IN].mem_map_handle) { buf_node = list_entry(ptr, struct asm_buffer_node, list); if (buf_node->buf_addr_lsw == lbuf_addr_lsw) { write.mem_map_handle = buf_node->mmap_hdl; break; } } rc = apr_send_pkt(ac->apr, (uint32_t *) &write); if (rc < 0) { pr_debug("[%s] write op[0x%x]rc[%d]\n", __func__, write.hdr.opcode, rc); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_async_read(struct audio_client *ac, struct audio_aio_read_param *param) { int rc = 0; struct asm_data_cmd_read_v2 read; struct asm_buffer_node *buf_node = NULL; struct list_head *ptr, *next; u32 lbuf_addr_lsw; u32 liomode; if (!ac || ac->apr == NULL) { pr_err("%s: APR handle NULL\n", __func__); return -EINVAL; } q6asm_add_hdr_async(ac, &read.hdr, sizeof(read), FALSE); /* Pass 
physical address as token for AIO scheme */ read.hdr.token = param->paddr; read.hdr.opcode = ASM_DATA_CMD_READ_V2; read.buf_addr_lsw = param->paddr; read.buf_addr_msw = 0; read.buf_size = param->len; read.seq_id = param->uid; liomode = (NT_MODE | ASYNC_IO_MODE); if (ac->io_mode == liomode) { pr_info("%s: subtracting 32 for header\n", __func__); lbuf_addr_lsw = (read.buf_addr_lsw - 32); } else{ lbuf_addr_lsw = read.buf_addr_lsw; } list_for_each_safe(ptr, next, &ac->port[IN].mem_map_handle) { buf_node = list_entry(ptr, struct asm_buffer_node, list); if (buf_node->buf_addr_lsw == lbuf_addr_lsw) { read.mem_map_handle = buf_node->mmap_hdl; break; } } rc = apr_send_pkt(ac->apr, (uint32_t *) &read); if (rc < 0) { pr_debug("[%s] read op[0x%x]rc[%d]\n", __func__, read.hdr.opcode, rc); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_write(struct audio_client *ac, uint32_t len, uint32_t msw_ts, uint32_t lsw_ts, uint32_t flags) { int rc = 0; struct asm_data_cmd_write_v2 write; struct asm_buffer_node *buf_node = NULL; struct audio_port_data *port; struct audio_buffer *ab; int dsp_buf = 0; if (!ac || ac->apr == NULL) { pr_err("APR handle NULL\n"); return -EINVAL; } pr_debug("%s: session[%d] len=%d", __func__, ac->session, len); if (ac->io_mode == SYNC_IO_MODE) { port = &ac->port[IN]; q6asm_add_hdr(ac, &write.hdr, sizeof(write), FALSE); mutex_lock(&port->lock); dsp_buf = port->dsp_buf; ab = &port->buf[dsp_buf]; write.hdr.token = port->dsp_buf; write.hdr.opcode = ASM_DATA_CMD_WRITE_V2; write.buf_addr_lsw = ab->phys; write.buf_addr_msw = 0; write.buf_size = len; write.seq_id = port->dsp_buf; write.timestamp_lsw = lsw_ts; write.timestamp_msw = msw_ts; /* Use 0xFF00 for disabling timestamps */ if (flags == 0xFF00) write.flags = (0x00000000 | (flags & 0x800000FF)); else write.flags = (0x80000000 | flags); port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1); buf_node = list_first_entry(&ac->port[IN].mem_map_handle, struct asm_buffer_node, list); 
write.mem_map_handle = buf_node->mmap_hdl; pr_debug("%s:ab->phys[0x%x]bufadd[0x%x] token[0x%x]buf_id[0x%x]buf_size[0x%x]mmaphdl[0x%x]" , __func__, ab->phys, write.buf_addr_lsw, write.hdr.token, write.seq_id, write.buf_size, write.mem_map_handle); mutex_unlock(&port->lock); config_debug_fs_write(ab); rc = apr_send_pkt(ac->apr, (uint32_t *) &write); if (rc < 0) { pr_err("write op[0x%x]rc[%d]\n", write.hdr.opcode, rc); goto fail_cmd; } pr_debug("%s: WRITE SUCCESS\n", __func__); return 0; } fail_cmd: return -EINVAL; } int q6asm_write_nolock(struct audio_client *ac, uint32_t len, uint32_t msw_ts, uint32_t lsw_ts, uint32_t flags) { int rc = 0; struct asm_data_cmd_write_v2 write; struct asm_buffer_node *buf_node = NULL; struct audio_port_data *port; struct audio_buffer *ab; int dsp_buf = 0; if (!ac || ac->apr == NULL) { pr_err("APR handle NULL\n"); return -EINVAL; } pr_debug("%s: session[%d] len=%d", __func__, ac->session, len); if (ac->io_mode == SYNC_IO_MODE) { port = &ac->port[IN]; q6asm_add_hdr_async(ac, &write.hdr, sizeof(write), FALSE); dsp_buf = port->dsp_buf; ab = &port->buf[dsp_buf]; write.hdr.token = port->dsp_buf; write.hdr.opcode = ASM_DATA_CMD_WRITE_V2; write.buf_addr_lsw = ab->phys; write.buf_addr_msw = 0; write.buf_size = len; write.seq_id = port->dsp_buf; write.timestamp_lsw = lsw_ts; write.timestamp_msw = msw_ts; buf_node = list_first_entry(&ac->port[IN].mem_map_handle, struct asm_buffer_node, list); write.mem_map_handle = buf_node->mmap_hdl; /* Use 0xFF00 for disabling timestamps */ if (flags == 0xFF00) write.flags = (0x00000000 | (flags & 0x800000FF)); else write.flags = (0x80000000 | flags); port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1); pr_debug("%s:ab->phys[0x%x]bufadd[0x%x]token[0x%x] buf_id[0x%x]buf_size[0x%x]mmaphdl[0x%x]" , __func__, ab->phys, write.buf_addr_lsw, write.hdr.token, write.seq_id, write.buf_size, write.mem_map_handle); rc = apr_send_pkt(ac->apr, (uint32_t *) &write); if (rc < 0) { pr_err("write op[0x%x]rc[%d]\n", 
write.hdr.opcode, rc); goto fail_cmd; } pr_debug("%s: WRITE SUCCESS\n", __func__); return 0; } fail_cmd: return -EINVAL; } int q6asm_get_session_time(struct audio_client *ac, uint64_t *tstamp) { struct apr_hdr hdr; int rc; if (!ac || ac->apr == NULL || tstamp == NULL) { pr_err("APR handle NULL or tstamp NULL\n"); return -EINVAL; } q6asm_add_hdr(ac, &hdr, sizeof(hdr), TRUE); hdr.opcode = ASM_SESSION_CMD_GET_SESSIONTIME_V3; atomic_set(&ac->cmd_state, 1); pr_debug("%s: session[%d]opcode[0x%x]\n", __func__, ac->session, hdr.opcode); rc = apr_send_pkt(ac->apr, (uint32_t *) &hdr); if (rc < 0) { pr_err("Commmand 0x%x failed\n", hdr.opcode); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s: timeout in getting session time from DSP\n", __func__); goto fail_cmd; } *tstamp = ac->time_stamp; return 0; fail_cmd: return -EINVAL; } int q6asm_cmd(struct audio_client *ac, int cmd) { struct apr_hdr hdr; int rc; atomic_t *state; int cnt = 0; if (!ac || ac->apr == NULL) { pr_err("APR handle NULL\n"); return -EINVAL; } q6asm_add_hdr(ac, &hdr, sizeof(hdr), TRUE); switch (cmd) { case CMD_PAUSE: pr_debug("%s:CMD_PAUSE\n", __func__); hdr.opcode = ASM_SESSION_CMD_PAUSE; state = &ac->cmd_state; break; case CMD_FLUSH: pr_debug("%s:CMD_FLUSH\n", __func__); hdr.opcode = ASM_STREAM_CMD_FLUSH; state = &ac->cmd_state; break; case CMD_OUT_FLUSH: pr_debug("%s:CMD_OUT_FLUSH\n", __func__); hdr.opcode = ASM_STREAM_CMD_FLUSH_READBUFS; state = &ac->cmd_state; break; case CMD_EOS: pr_debug("%s:CMD_EOS\n", __func__); hdr.opcode = ASM_DATA_CMD_EOS; atomic_set(&ac->cmd_state, 0); state = &ac->cmd_state; break; case CMD_CLOSE: pr_debug("%s:CMD_CLOSE\n", __func__); hdr.opcode = ASM_STREAM_CMD_CLOSE; state = &ac->cmd_state; break; default: pr_err("Invalid format[%d]\n", cmd); goto fail_cmd; } pr_debug("%s:session[%d]opcode[0x%x] ", __func__, ac->session, hdr.opcode); rc = apr_send_pkt(ac->apr, (uint32_t *) &hdr); if (rc < 0) { 
pr_err("Commmand 0x%x failed\n", hdr.opcode); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(state) == 0), 5*HZ); if (!rc) { pr_err("timeout. waited for response opcode[0x%x]\n", hdr.opcode); goto fail_cmd; } if (cmd == CMD_FLUSH) q6asm_reset_buf_state(ac); if (cmd == CMD_CLOSE) { /* check if DSP return all buffers */ if (ac->port[IN].buf) { for (cnt = 0; cnt < ac->port[IN].max_buf_cnt; cnt++) { if (ac->port[IN].buf[cnt].used == IN) { pr_debug("Write Buf[%d] not returned\n", cnt); } } } if (ac->port[OUT].buf) { for (cnt = 0; cnt < ac->port[OUT].max_buf_cnt; cnt++) { if (ac->port[OUT].buf[cnt].used == OUT) { pr_debug("Read Buf[%d] not returned\n", cnt); } } } } return 0; fail_cmd: return -EINVAL; } int q6asm_cmd_nowait(struct audio_client *ac, int cmd) { struct apr_hdr hdr; int rc; if (!ac || ac->apr == NULL) { pr_err("%s:APR handle NULL\n", __func__); return -EINVAL; } q6asm_add_hdr_async(ac, &hdr, sizeof(hdr), TRUE); switch (cmd) { case CMD_PAUSE: pr_debug("%s:CMD_PAUSE\n", __func__); hdr.opcode = ASM_SESSION_CMD_PAUSE; break; case CMD_EOS: pr_debug("%s:CMD_EOS\n", __func__); hdr.opcode = ASM_DATA_CMD_EOS; break; default: pr_err("%s:Invalid format[%d]\n", __func__, cmd); goto fail_cmd; } pr_debug("%s:session[%d]opcode[0x%x] ", __func__, ac->session, hdr.opcode); rc = apr_send_pkt(ac->apr, (uint32_t *) &hdr); if (rc < 0) { pr_err("%s:Commmand 0x%x failed\n", __func__, hdr.opcode); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } static void q6asm_reset_buf_state(struct audio_client *ac) { int cnt = 0; int loopcnt = 0; struct audio_port_data *port = NULL; if (ac->io_mode == SYNC_IO_MODE) { mutex_lock(&ac->cmd_lock); for (loopcnt = 0; loopcnt <= OUT; loopcnt++) { port = &ac->port[loopcnt]; cnt = port->max_buf_cnt - 1; port->dsp_buf = 0; port->cpu_buf = 0; while (cnt >= 0) { if (!port->buf) continue; port->buf[cnt].used = 1; cnt--; } } mutex_unlock(&ac->cmd_lock); } } int q6asm_reg_tx_overflow(struct audio_client *ac, uint16_t enable) { 
struct asm_session_cmd_regx_overflow tx_overflow; int rc; if (!ac || ac->apr == NULL) { pr_err("APR handle NULL\n"); return -EINVAL; } pr_debug("%s:session[%d]enable[%d]\n", __func__, ac->session, enable); q6asm_add_hdr(ac, &tx_overflow.hdr, sizeof(tx_overflow), TRUE); tx_overflow.hdr.opcode = \ ASM_SESSION_CMD_REGISTER_FORX_OVERFLOW_EVENTS; /* tx overflow event: enable */ tx_overflow.enable_flag = enable; rc = apr_send_pkt(ac->apr, (uint32_t *) &tx_overflow); if (rc < 0) { pr_err("tx overflow op[0x%x]rc[%d]\n", \ tx_overflow.hdr.opcode, rc); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("timeout. waited for tx overflow\n"); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_get_apr_service_id(int session_id) { pr_debug("%s\n", __func__); if (session_id < 0 || session_id > SESSION_MAX) { pr_err("%s: invalid session_id = %d\n", __func__, session_id); return -EINVAL; } return ((struct apr_svc *)session[session_id]->apr)->id; } static int __init q6asm_init(void) { pr_debug("%s\n", __func__); memset(session, 0, sizeof(session)); config_debug_fs_init(); return 0; } device_initcall(q6asm_init);
gpl-2.0
faux123/kernel-moto-atrix4g
drivers/hwmon/ibmaem.c
500
27572
/* * A hwmon driver for the IBM System Director Active Energy Manager (AEM) * temperature/power/energy sensors and capping functionality. * Copyright (C) 2008 IBM * * Author: Darrick J. Wong <djwong@us.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/ipmi.h> #include <linux/module.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/jiffies.h> #include <linux/mutex.h> #include <linux/kdev_t.h> #include <linux/spinlock.h> #include <linux/idr.h> #include <linux/sched.h> #include <linux/platform_device.h> #include <linux/math64.h> #include <linux/time.h> #define REFRESH_INTERVAL (HZ) #define IPMI_TIMEOUT (30 * HZ) #define DRVNAME "aem" #define AEM_NETFN 0x2E #define AEM_FIND_FW_CMD 0x80 #define AEM_ELEMENT_CMD 0x81 #define AEM_FW_INSTANCE_CMD 0x82 #define AEM_READ_ELEMENT_CFG 0x80 #define AEM_READ_BUFFER 0x81 #define AEM_READ_REGISTER 0x82 #define AEM_WRITE_REGISTER 0x83 #define AEM_SET_REG_MASK 0x84 #define AEM_CLEAR_REG_MASK 0x85 #define AEM_READ_ELEMENT_CFG2 0x86 #define AEM_CONTROL_ELEMENT 0 #define AEM_ENERGY_ELEMENT 1 #define AEM_CLOCK_ELEMENT 4 #define AEM_POWER_CAP_ELEMENT 7 #define AEM_EXHAUST_ELEMENT 9 #define AEM_POWER_ELEMENT 10 #define AEM_MODULE_TYPE_ID 0x0001 #define AEM2_NUM_ENERGY_REGS 2 #define AEM2_NUM_PCAP_REGS 6 #define AEM2_NUM_TEMP_REGS 2 #define 
AEM2_NUM_SENSORS 14 #define AEM1_NUM_ENERGY_REGS 1 #define AEM1_NUM_SENSORS 3 /* AEM 2.x has more energy registers */ #define AEM_NUM_ENERGY_REGS AEM2_NUM_ENERGY_REGS /* AEM 2.x needs more sensor files */ #define AEM_NUM_SENSORS AEM2_NUM_SENSORS #define POWER_CAP 0 #define POWER_CAP_MAX_HOTPLUG 1 #define POWER_CAP_MAX 2 #define POWER_CAP_MIN_WARNING 3 #define POWER_CAP_MIN 4 #define POWER_AUX 5 #define AEM_DEFAULT_POWER_INTERVAL 1000 #define AEM_MIN_POWER_INTERVAL 200 #define UJ_PER_MJ 1000L static DEFINE_IDR(aem_idr); static DEFINE_SPINLOCK(aem_idr_lock); static struct platform_driver aem_driver = { .driver = { .name = DRVNAME, .bus = &platform_bus_type, } }; struct aem_ipmi_data { struct completion read_complete; struct ipmi_addr address; ipmi_user_t user; int interface; struct kernel_ipmi_msg tx_message; long tx_msgid; void *rx_msg_data; unsigned short rx_msg_len; unsigned char rx_result; int rx_recv_type; struct device *bmc_device; }; struct aem_ro_sensor_template { char *label; ssize_t (*show)(struct device *dev, struct device_attribute *devattr, char *buf); int index; }; struct aem_rw_sensor_template { char *label; ssize_t (*show)(struct device *dev, struct device_attribute *devattr, char *buf); ssize_t (*set)(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count); int index; }; struct aem_data { struct list_head list; struct device *hwmon_dev; struct platform_device *pdev; struct mutex lock; char valid; unsigned long last_updated; /* In jiffies */ u8 ver_major; u8 ver_minor; u8 module_handle; int id; struct aem_ipmi_data ipmi; /* Function to update sensors */ void (*update)(struct aem_data *data); /* * AEM 1.x sensors: * Available sensors: * Energy meter * Power meter * * AEM 2.x sensors: * Two energy meters * Two power meters * Two temperature sensors * Six power cap registers */ /* sysfs attrs */ struct sensor_device_attribute sensors[AEM_NUM_SENSORS]; /* energy use in mJ */ u64 energy[AEM_NUM_ENERGY_REGS]; /* power sampling 
interval in ms */ unsigned long power_period[AEM_NUM_ENERGY_REGS]; /* Everything past here is for AEM2 only */ /* power caps in dW */ u16 pcap[AEM2_NUM_PCAP_REGS]; /* exhaust temperature in C */ u8 temp[AEM2_NUM_TEMP_REGS]; }; /* Data structures returned by the AEM firmware */ struct aem_iana_id { u8 bytes[3]; }; static struct aem_iana_id system_x_id = { .bytes = {0x4D, 0x4F, 0x00} }; /* These are used to find AEM1 instances */ struct aem_find_firmware_req { struct aem_iana_id id; u8 rsvd; __be16 index; __be16 module_type_id; } __packed; struct aem_find_firmware_resp { struct aem_iana_id id; u8 num_instances; } __packed; /* These are used to find AEM2 instances */ struct aem_find_instance_req { struct aem_iana_id id; u8 instance_number; __be16 module_type_id; } __packed; struct aem_find_instance_resp { struct aem_iana_id id; u8 num_instances; u8 major; u8 minor; u8 module_handle; u16 record_id; } __packed; /* These are used to query sensors */ struct aem_read_sensor_req { struct aem_iana_id id; u8 module_handle; u8 element; u8 subcommand; u8 reg; u8 rx_buf_size; } __packed; struct aem_read_sensor_resp { struct aem_iana_id id; u8 bytes[0]; } __packed; /* Data structures to talk to the IPMI layer */ struct aem_driver_data { struct list_head aem_devices; struct ipmi_smi_watcher bmc_events; struct ipmi_user_hndl ipmi_hndlrs; }; static void aem_register_bmc(int iface, struct device *dev); static void aem_bmc_gone(int iface); static void aem_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data); static void aem_remove_sensors(struct aem_data *data); static int aem_init_aem1(struct aem_ipmi_data *probe); static int aem_init_aem2(struct aem_ipmi_data *probe); static int aem1_find_sensors(struct aem_data *data); static int aem2_find_sensors(struct aem_data *data); static void update_aem1_sensors(struct aem_data *data); static void update_aem2_sensors(struct aem_data *data); static struct aem_driver_data driver_data = { .aem_devices = 
LIST_HEAD_INIT(driver_data.aem_devices), .bmc_events = { .owner = THIS_MODULE, .new_smi = aem_register_bmc, .smi_gone = aem_bmc_gone, }, .ipmi_hndlrs = { .ipmi_recv_hndl = aem_msg_handler, }, }; /* Functions to talk to the IPMI layer */ /* Initialize IPMI address, message buffers and user data */ static int aem_init_ipmi_data(struct aem_ipmi_data *data, int iface, struct device *bmc) { int err; init_completion(&data->read_complete); data->bmc_device = bmc; /* Initialize IPMI address */ data->address.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; data->address.channel = IPMI_BMC_CHANNEL; data->address.data[0] = 0; data->interface = iface; /* Initialize message buffers */ data->tx_msgid = 0; data->tx_message.netfn = AEM_NETFN; /* Create IPMI messaging interface user */ err = ipmi_create_user(data->interface, &driver_data.ipmi_hndlrs, data, &data->user); if (err < 0) { dev_err(bmc, "Unable to register user with IPMI " "interface %d\n", data->interface); return -EACCES; } return 0; } /* Send an IPMI command */ static int aem_send_message(struct aem_ipmi_data *data) { int err; err = ipmi_validate_addr(&data->address, sizeof(data->address)); if (err) goto out; data->tx_msgid++; err = ipmi_request_settime(data->user, &data->address, data->tx_msgid, &data->tx_message, data, 0, 0, 0); if (err) goto out1; return 0; out1: dev_err(data->bmc_device, "request_settime=%x\n", err); return err; out: dev_err(data->bmc_device, "validate_addr=%x\n", err); return err; } /* Dispatch IPMI messages to callers */ static void aem_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) { unsigned short rx_len; struct aem_ipmi_data *data = user_msg_data; if (msg->msgid != data->tx_msgid) { dev_err(data->bmc_device, "Mismatch between received msgid " "(%02x) and transmitted msgid (%02x)!\n", (int)msg->msgid, (int)data->tx_msgid); ipmi_free_recv_msg(msg); return; } data->rx_recv_type = msg->recv_type; if (msg->msg.data_len > 0) data->rx_result = msg->msg.data[0]; else data->rx_result = 
IPMI_UNKNOWN_ERR_COMPLETION_CODE; if (msg->msg.data_len > 1) { rx_len = msg->msg.data_len - 1; if (data->rx_msg_len < rx_len) rx_len = data->rx_msg_len; data->rx_msg_len = rx_len; memcpy(data->rx_msg_data, msg->msg.data + 1, data->rx_msg_len); } else data->rx_msg_len = 0; ipmi_free_recv_msg(msg); complete(&data->read_complete); } /* ID functions */ /* Obtain an id */ static int aem_idr_get(int *id) { int i, err; again: if (unlikely(!idr_pre_get(&aem_idr, GFP_KERNEL))) return -ENOMEM; spin_lock(&aem_idr_lock); err = idr_get_new(&aem_idr, NULL, &i); spin_unlock(&aem_idr_lock); if (unlikely(err == -EAGAIN)) goto again; else if (unlikely(err)) return err; *id = i & MAX_ID_MASK; return 0; } /* Release an object ID */ static void aem_idr_put(int id) { spin_lock(&aem_idr_lock); idr_remove(&aem_idr, id); spin_unlock(&aem_idr_lock); } /* Sensor support functions */ /* Read a sensor value */ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg, void *buf, size_t size) { int rs_size, res; struct aem_read_sensor_req rs_req; struct aem_read_sensor_resp *rs_resp; struct aem_ipmi_data *ipmi = &data->ipmi; /* AEM registers are 1, 2, 4 or 8 bytes */ switch (size) { case 1: case 2: case 4: case 8: break; default: return -EINVAL; } rs_req.id = system_x_id; rs_req.module_handle = data->module_handle; rs_req.element = elt; rs_req.subcommand = AEM_READ_REGISTER; rs_req.reg = reg; rs_req.rx_buf_size = size; ipmi->tx_message.cmd = AEM_ELEMENT_CMD; ipmi->tx_message.data = (char *)&rs_req; ipmi->tx_message.data_len = sizeof(rs_req); rs_size = sizeof(*rs_resp) + size; rs_resp = kzalloc(rs_size, GFP_KERNEL); if (!rs_resp) return -ENOMEM; ipmi->rx_msg_data = rs_resp; ipmi->rx_msg_len = rs_size; aem_send_message(ipmi); res = wait_for_completion_timeout(&ipmi->read_complete, IPMI_TIMEOUT); if (!res) return -ETIMEDOUT; if (ipmi->rx_result || ipmi->rx_msg_len != rs_size || memcmp(&rs_resp->id, &system_x_id, sizeof(system_x_id))) { kfree(rs_resp); return -ENOENT; } switch (size) { case 
1: { u8 *x = buf; *x = rs_resp->bytes[0]; break; } case 2: { u16 *x = buf; *x = be16_to_cpup((__be16 *)rs_resp->bytes); break; } case 4: { u32 *x = buf; *x = be32_to_cpup((__be32 *)rs_resp->bytes); break; } case 8: { u64 *x = buf; *x = be64_to_cpup((__be64 *)rs_resp->bytes); break; } } return 0; } /* Update AEM energy registers */ static void update_aem_energy_one(struct aem_data *data, int which) { aem_read_sensor(data, AEM_ENERGY_ELEMENT, which, &data->energy[which], 8); } static void update_aem_energy(struct aem_data *data) { update_aem_energy_one(data, 0); if (data->ver_major < 2) return; update_aem_energy_one(data, 1); } /* Update all AEM1 sensors */ static void update_aem1_sensors(struct aem_data *data) { mutex_lock(&data->lock); if (time_before(jiffies, data->last_updated + REFRESH_INTERVAL) && data->valid) goto out; update_aem_energy(data); out: mutex_unlock(&data->lock); } /* Update all AEM2 sensors */ static void update_aem2_sensors(struct aem_data *data) { int i; mutex_lock(&data->lock); if (time_before(jiffies, data->last_updated + REFRESH_INTERVAL) && data->valid) goto out; update_aem_energy(data); aem_read_sensor(data, AEM_EXHAUST_ELEMENT, 0, &data->temp[0], 1); aem_read_sensor(data, AEM_EXHAUST_ELEMENT, 1, &data->temp[1], 1); for (i = POWER_CAP; i <= POWER_AUX; i++) aem_read_sensor(data, AEM_POWER_CAP_ELEMENT, i, &data->pcap[i], 2); out: mutex_unlock(&data->lock); } /* Delete an AEM instance */ static void aem_delete(struct aem_data *data) { list_del(&data->list); aem_remove_sensors(data); hwmon_device_unregister(data->hwmon_dev); ipmi_destroy_user(data->ipmi.user); dev_set_drvdata(&data->pdev->dev, NULL); platform_device_unregister(data->pdev); aem_idr_put(data->id); kfree(data); } /* Probe functions for AEM1 devices */ /* Retrieve version and module handle for an AEM1 instance */ static int aem_find_aem1_count(struct aem_ipmi_data *data) { int res; struct aem_find_firmware_req ff_req; struct aem_find_firmware_resp ff_resp; ff_req.id = system_x_id; 
ff_req.index = 0; ff_req.module_type_id = cpu_to_be16(AEM_MODULE_TYPE_ID); data->tx_message.cmd = AEM_FIND_FW_CMD; data->tx_message.data = (char *)&ff_req; data->tx_message.data_len = sizeof(ff_req); data->rx_msg_data = &ff_resp; data->rx_msg_len = sizeof(ff_resp); aem_send_message(data); res = wait_for_completion_timeout(&data->read_complete, IPMI_TIMEOUT); if (!res) return -ETIMEDOUT; if (data->rx_result || data->rx_msg_len != sizeof(ff_resp) || memcmp(&ff_resp.id, &system_x_id, sizeof(system_x_id))) return -ENOENT; return ff_resp.num_instances; } /* Find and initialize one AEM1 instance */ static int aem_init_aem1_inst(struct aem_ipmi_data *probe, u8 module_handle) { struct aem_data *data; int i; int res = -ENOMEM; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return res; mutex_init(&data->lock); /* Copy instance data */ data->ver_major = 1; data->ver_minor = 0; data->module_handle = module_handle; for (i = 0; i < AEM1_NUM_ENERGY_REGS; i++) data->power_period[i] = AEM_DEFAULT_POWER_INTERVAL; /* Create sub-device for this fw instance */ if (aem_idr_get(&data->id)) goto id_err; data->pdev = platform_device_alloc(DRVNAME, data->id); if (!data->pdev) goto dev_err; data->pdev->dev.driver = &aem_driver.driver; res = platform_device_add(data->pdev); if (res) goto ipmi_err; dev_set_drvdata(&data->pdev->dev, data); /* Set up IPMI interface */ if (aem_init_ipmi_data(&data->ipmi, probe->interface, probe->bmc_device)) goto ipmi_err; /* Register with hwmon */ data->hwmon_dev = hwmon_device_register(&data->pdev->dev); if (IS_ERR(data->hwmon_dev)) { dev_err(&data->pdev->dev, "Unable to register hwmon " "device for IPMI interface %d\n", probe->interface); goto hwmon_reg_err; } data->update = update_aem1_sensors; /* Find sensors */ if (aem1_find_sensors(data)) goto sensor_err; /* Add to our list of AEM devices */ list_add_tail(&data->list, &driver_data.aem_devices); dev_info(data->ipmi.bmc_device, "Found AEM v%d.%d at 0x%X\n", data->ver_major, data->ver_minor, 
/*
 * NOTE(review): this chunk begins mid-function.  The first lines below are
 * the tail (success return + error-unwind labels) of aem_init_aem1_inst(),
 * whose beginning is outside this view.  Code is unchanged; only
 * formatting and comments were added.
 */
		data->module_handle);
	return 0;

/* Error unwind: release resources in reverse order of acquisition. */
sensor_err:
	hwmon_device_unregister(data->hwmon_dev);
hwmon_reg_err:
	ipmi_destroy_user(data->ipmi.user);
ipmi_err:
	dev_set_drvdata(&data->pdev->dev, NULL);
	platform_device_unregister(data->pdev);
dev_err:
	aem_idr_put(data->id);
id_err:
	kfree(data);

	return res;
}

/* Find and initialize all AEM1 instances; stops on the first failure. */
static int aem_init_aem1(struct aem_ipmi_data *probe)
{
	int num, i, err;

	num = aem_find_aem1_count(probe);
	for (i = 0; i < num; i++) {
		err = aem_init_aem1_inst(probe, i);
		if (err) {
			dev_err(probe->bmc_device,
				"Error %d initializing AEM1 0x%X\n",
				err, i);
			return err;
		}
	}
	return 0;
}

/* Probe functions for AEM2 devices */

/*
 * Retrieve version and module handle for an AEM2 instance.
 * Sends an IPMI "find instance" request and waits (bounded by
 * IPMI_TIMEOUT) for the asynchronous reply filled in by the rx handler.
 * Returns 0 on success, -ETIMEDOUT on no reply, -ENOENT on a bad or
 * out-of-range reply.
 */
static int aem_find_aem2(struct aem_ipmi_data *data,
			 struct aem_find_instance_resp *fi_resp,
			 int instance_num)
{
	int res;
	struct aem_find_instance_req fi_req;

	fi_req.id = system_x_id;
	fi_req.instance_number = instance_num;
	fi_req.module_type_id = cpu_to_be16(AEM_MODULE_TYPE_ID);

	data->tx_message.cmd = AEM_FW_INSTANCE_CMD;
	data->tx_message.data = (char *)&fi_req;
	data->tx_message.data_len = sizeof(fi_req);

	data->rx_msg_data = fi_resp;
	data->rx_msg_len = sizeof(*fi_resp);

	aem_send_message(data);

	res = wait_for_completion_timeout(&data->read_complete, IPMI_TIMEOUT);
	if (!res)
		return -ETIMEDOUT;

	/* Reject short replies, wrong system id, or instance out of range. */
	if (data->rx_result || data->rx_msg_len != sizeof(*fi_resp) ||
	    memcmp(&fi_resp->id, &system_x_id, sizeof(system_x_id)) ||
	    fi_resp->num_instances <= instance_num)
		return -ENOENT;

	return 0;
}

/*
 * Find and initialize one AEM2 instance: allocate state, create the
 * platform sub-device, attach IPMI, register with hwmon, and discover
 * sensors.  On any failure, unwinds via the goto ladder at the bottom.
 */
static int aem_init_aem2_inst(struct aem_ipmi_data *probe,
			      struct aem_find_instance_resp *fi_resp)
{
	struct aem_data *data;
	int i;
	int res = -ENOMEM;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return res;
	mutex_init(&data->lock);

	/* Copy instance data */
	data->ver_major = fi_resp->major;
	data->ver_minor = fi_resp->minor;
	data->module_handle = fi_resp->module_handle;
	for (i = 0; i < AEM2_NUM_ENERGY_REGS; i++)
		data->power_period[i] = AEM_DEFAULT_POWER_INTERVAL;

	/* Create sub-device for this fw instance */
	if (aem_idr_get(&data->id))
		goto id_err;

	data->pdev = platform_device_alloc(DRVNAME, data->id);
	if (!data->pdev)
		goto dev_err;
	data->pdev->dev.driver = &aem_driver.driver;

	res = platform_device_add(data->pdev);
	if (res)
		goto ipmi_err;

	dev_set_drvdata(&data->pdev->dev, data);

	/* Set up IPMI interface */
	if (aem_init_ipmi_data(&data->ipmi, probe->interface,
			       probe->bmc_device))
		goto ipmi_err;

	/* Register with hwmon */
	data->hwmon_dev = hwmon_device_register(&data->pdev->dev);
	if (IS_ERR(data->hwmon_dev)) {
		dev_err(&data->pdev->dev, "Unable to register hwmon "
			"device for IPMI interface %d\n",
			probe->interface);
		goto hwmon_reg_err;
	}

	data->update = update_aem2_sensors;

	/* Find sensors */
	if (aem2_find_sensors(data))
		goto sensor_err;

	/* Add to our list of AEM devices */
	list_add_tail(&data->list, &driver_data.aem_devices);

	dev_info(data->ipmi.bmc_device, "Found AEM v%d.%d at 0x%X\n",
		 data->ver_major, data->ver_minor,
		 data->module_handle);
	return 0;

sensor_err:
	hwmon_device_unregister(data->hwmon_dev);
hwmon_reg_err:
	ipmi_destroy_user(data->ipmi.user);
ipmi_err:
	dev_set_drvdata(&data->pdev->dev, NULL);
	platform_device_unregister(data->pdev);
dev_err:
	aem_idr_put(data->id);
id_err:
	kfree(data);

	return res;
}

/*
 * Find and initialize all AEM2 instances.  Probes increasing instance
 * numbers until aem_find_aem2() fails; unknown major versions are
 * reported and skipped rather than treated as fatal.
 */
static int aem_init_aem2(struct aem_ipmi_data *probe)
{
	struct aem_find_instance_resp fi_resp;
	int err;
	int i = 0;

	while (!aem_find_aem2(probe, &fi_resp, i)) {
		if (fi_resp.major != 2) {
			dev_err(probe->bmc_device, "Unknown AEM v%d; please "
				"report this to the maintainer.\n",
				fi_resp.major);
			i++;
			continue;
		}
		err = aem_init_aem2_inst(probe, &fi_resp);
		if (err) {
			dev_err(probe->bmc_device,
				"Error %d initializing AEM2 0x%X\n",
				err, fi_resp.module_handle);
			return err;
		}
		i++;
	}

	return 0;
}

/* Probe a BMC for AEM firmware instances */
static void aem_register_bmc(int iface, struct device *dev)
{
	struct aem_ipmi_data probe;

	if (aem_init_ipmi_data(&probe, iface, dev))
		return;

	/* Ignore probe errors; they won't cause problems */
	aem_init_aem1(&probe);
	aem_init_aem2(&probe);

	ipmi_destroy_user(probe.user);
}

/* Handle BMC deletion: tear down every instance bound to that interface. */
static void aem_bmc_gone(int iface)
{
	struct aem_data *p1, *next1;

	list_for_each_entry_safe(p1, next1, &driver_data.aem_devices, list)
		if (p1->ipmi.interface == iface)
			aem_delete(p1);
}

/* sysfs support functions */

/* AEM device name */
static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
			 char *buf)
{
	struct aem_data *data = dev_get_drvdata(dev);

	return sprintf(buf, "%s%d\n", DRVNAME, data->ver_major);
}
static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);

/* AEM device version */
static ssize_t show_version(struct device *dev,
			    struct device_attribute *devattr,
			    char *buf)
{
	struct aem_data *data = dev_get_drvdata(dev);

	return sprintf(buf, "%d.%d\n", data->ver_major, data->ver_minor);
}
static SENSOR_DEVICE_ATTR(version, S_IRUGO, show_version, NULL, 0);

/*
 * Display power use: derived from two energy readings taken
 * power_period[index] ms apart.
 *
 * NOTE(review): the sleep happens while holding data->lock, so a slow
 * read here blocks every other sysfs access to this instance — verify
 * this is intended before changing.
 */
static ssize_t aem_show_power(struct device *dev,
			      struct device_attribute *devattr,
			      char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct aem_data *data = dev_get_drvdata(dev);
	u64 before, after, delta, time;
	signed long leftover;
	struct timespec b, a;

	mutex_lock(&data->lock);
	update_aem_energy_one(data, attr->index);
	getnstimeofday(&b);
	before = data->energy[attr->index];

	leftover = schedule_timeout_interruptible(
			msecs_to_jiffies(data->power_period[attr->index])
		   );
	if (leftover) {
		/* interrupted by a signal: report nothing */
		mutex_unlock(&data->lock);
		return 0;
	}

	update_aem_energy_one(data, attr->index);
	getnstimeofday(&a);
	after = data->energy[attr->index];
	mutex_unlock(&data->lock);

	time = timespec_to_ns(&a) - timespec_to_ns(&b);
	delta = (after - before) * UJ_PER_MJ;

	return sprintf(buf, "%llu\n",
		(unsigned long long)div64_u64(delta * NSEC_PER_SEC, time));
}

/* Display energy use (scaled x1000, i.e. millijoule counter to microjoules) */
static ssize_t aem_show_energy(struct device *dev,
			       struct device_attribute *devattr,
			       char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct aem_data *a = dev_get_drvdata(dev);
	mutex_lock(&a->lock);
	update_aem_energy_one(a, attr->index);
	mutex_unlock(&a->lock);

	return sprintf(buf, "%llu\n",
		       (unsigned long long)a->energy[attr->index] * 1000);
}

/* Display power interval registers */
static ssize_t aem_show_power_period(struct device *dev,
				     struct device_attribute *devattr,
				     char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct aem_data *a = dev_get_drvdata(dev);
	a->update(a);

	return sprintf(buf, "%lu\n", a->power_period[attr->index]);
}

/* Set power interval registers (minimum enforced; value is in ms) */
static ssize_t aem_set_power_period(struct device *dev,
				    struct device_attribute *devattr,
				    const char *buf, size_t count)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct aem_data *a = dev_get_drvdata(dev);
	unsigned long temp;
	int res;

	res = strict_strtoul(buf, 10, &temp);
	if (res)
		return res;

	if (temp < AEM_MIN_POWER_INTERVAL)
		return -EINVAL;

	mutex_lock(&a->lock);
	a->power_period[attr->index] = temp;
	mutex_unlock(&a->lock);

	return count;
}

/*
 * Discover sensors on an AEM device.  Creates one sysfs file per entry
 * of the ro/rw template tables (terminated by a NULL label); on failure
 * the partially created set is removed via aem_remove_sensors(), which
 * relies on attr.name being NULL for never-created/failed entries.
 */
static int aem_register_sensors(struct aem_data *data,
				struct aem_ro_sensor_template *ro,
				struct aem_rw_sensor_template *rw)
{
	struct device *dev = &data->pdev->dev;
	struct sensor_device_attribute *sensors = data->sensors;
	int err;

	/* Set up read-only sensors */
	while (ro->label) {
		sensors->dev_attr.attr.name = ro->label;
		sensors->dev_attr.attr.mode = S_IRUGO;
		sensors->dev_attr.show = ro->show;
		sensors->index = ro->index;

		err = device_create_file(dev, &sensors->dev_attr);
		if (err) {
			sensors->dev_attr.attr.name = NULL;
			goto error;
		}
		sensors++;
		ro++;
	}

	/* Set up read-write sensors */
	while (rw->label) {
		sensors->dev_attr.attr.name = rw->label;
		sensors->dev_attr.attr.mode = S_IRUGO | S_IWUSR;
		sensors->dev_attr.show = rw->show;
		sensors->dev_attr.store = rw->set;
		sensors->index = rw->index;

		err = device_create_file(dev, &sensors->dev_attr);
		if (err) {
			sensors->dev_attr.attr.name = NULL;
			goto error;
		}
		sensors++;
		rw++;
	}

	err = device_create_file(dev, &sensor_dev_attr_name.dev_attr);
	if (err)
		goto error;
	err = device_create_file(dev, &sensor_dev_attr_version.dev_attr);
	return err;

error:
	aem_remove_sensors(data);
	return err;
}

/* sysfs support functions for AEM2 sensors */

/* Display temperature use (degrees to millidegrees) */
static ssize_t aem2_show_temp(struct device *dev,
			      struct device_attribute *devattr,
			      char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct aem_data *a = dev_get_drvdata(dev);
	a->update(a);

	return sprintf(buf, "%u\n", a->temp[attr->index] * 1000);
}

/* Display power-capping registers (scaled x100000) */
static ssize_t aem2_show_pcap_value(struct device *dev,
				    struct device_attribute *devattr,
				    char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct aem_data *a = dev_get_drvdata(dev);
	a->update(a);

	return sprintf(buf, "%u\n", a->pcap[attr->index] * 100000);
}

/*
 * Remove sensors attached to an AEM device.  Entries whose attr.name is
 * NULL were never (successfully) created and are skipped — this is the
 * invariant aem_register_sensors() maintains on its error path.
 */
static void aem_remove_sensors(struct aem_data *data)
{
	int i;

	for (i = 0; i < AEM_NUM_SENSORS; i++) {
		if (!data->sensors[i].dev_attr.attr.name)
			continue;
		device_remove_file(&data->pdev->dev,
				   &data->sensors[i].dev_attr);
	}

	device_remove_file(&data->pdev->dev,
			   &sensor_dev_attr_name.dev_attr);
	device_remove_file(&data->pdev->dev,
			   &sensor_dev_attr_version.dev_attr);
}

/* Sensor probe functions */

/* Description of AEM1 sensors (NULL-label terminated) */
static struct aem_ro_sensor_template aem1_ro_sensors[] = {
{"energy1_input", aem_show_energy, 0},
{"power1_average", aem_show_power, 0},
{NULL, NULL, 0},
};

static struct aem_rw_sensor_template aem1_rw_sensors[] = {
{"power1_average_interval", aem_show_power_period, aem_set_power_period, 0},
{NULL, NULL, NULL, 0},
};

/* Description of AEM2 sensors (NULL-label terminated) */
static struct aem_ro_sensor_template aem2_ro_sensors[] = {
{"energy1_input", aem_show_energy, 0},
{"energy2_input", aem_show_energy, 1},
{"power1_average", aem_show_power, 0},
{"power2_average", aem_show_power, 1},
{"temp1_input", aem2_show_temp, 0},
{"temp2_input", aem2_show_temp, 1},
{"power4_average", aem2_show_pcap_value, POWER_CAP_MAX_HOTPLUG},
{"power5_average", aem2_show_pcap_value, POWER_CAP_MAX},
{"power6_average", aem2_show_pcap_value, POWER_CAP_MIN_WARNING},
{"power7_average", aem2_show_pcap_value, POWER_CAP_MIN},
{"power3_average", aem2_show_pcap_value, POWER_AUX},
{"power_cap", aem2_show_pcap_value, POWER_CAP},
{NULL, NULL, 0},
};

static struct aem_rw_sensor_template aem2_rw_sensors[] = {
{"power1_average_interval", aem_show_power_period, aem_set_power_period, 0},
{"power2_average_interval", aem_show_power_period, aem_set_power_period, 1},
{NULL, NULL, NULL, 0},
};

/* Set up AEM1 sensor attrs */
static int aem1_find_sensors(struct aem_data *data)
{
	return aem_register_sensors(data, aem1_ro_sensors, aem1_rw_sensors);
}

/* Set up AEM2 sensor attrs */
static int aem2_find_sensors(struct aem_data *data)
{
	return aem_register_sensors(data, aem2_ro_sensors, aem2_rw_sensors);
}

/* Module init/exit routines */

static int __init aem_init(void)
{
	int res;

	res = driver_register(&aem_driver.driver);
	if (res) {
		printk(KERN_ERR "Can't register aem driver\n");
		return res;
	}

	res = ipmi_smi_watcher_register(&driver_data.bmc_events);
	if (res)
		goto ipmi_reg_err;
	return 0;

ipmi_reg_err:
	driver_unregister(&aem_driver.driver);
	return res;
}

static void __exit aem_exit(void)
{
	struct aem_data *p1, *next1;

	ipmi_smi_watcher_unregister(&driver_data.bmc_events);
	driver_unregister(&aem_driver.driver);

	list_for_each_entry_safe(p1, next1, &driver_data.aem_devices, list)
		aem_delete(p1);
}

MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>");
MODULE_DESCRIPTION("IBM AEM power/temp/energy sensor driver");
MODULE_LICENSE("GPL");
module_init(aem_init);
module_exit(aem_exit);

MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3350-*");
MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3550-*");
MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3650-*");
MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3655-*");
MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3755-*");
MODULE_ALIAS("dmi:bvnIBM:*:pnIBM3850M2/x3950M2-*");
MODULE_ALIAS("dmi:bvnIBM:*:pnIBMBladeHC10-*");
gpl-2.0
faux123/pyramid-2.6.35_sense
drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
1012
23287
/******************************************************************************

  Copyright(c) 2004 Intel Corporation. All rights reserved.

  Portions of this file are based on the WEP enablement code provided by the
  Host AP project hostap-drivers v0.1.3
  Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
  <jkmaline@cc.hut.fi>
  Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>

  This program is free software; you can redistribute it and/or modify it
  under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc., 59
  Temple Place - Suite 330, Boston, MA  02111-1307, USA.

  The full GNU General Public License is included in this distribution in the
  file called LICENSE.

  Contact Information:
  James P. Ketrenos <ipw2100-admin@linux.intel.com>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

******************************************************************************/

#include <linux/wireless.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/module.h>

#include "ieee80211.h"

static const char *ieee80211_modes[] = {
	"?", "a", "b", "ab", "g", "ag", "bg", "abg"
};

#define MAX_CUSTOM_LEN 64

/*
 * Serialize one scanned network into the wireless-extensions event
 * stream between start and stop; returns the new stream position.
 */
static inline char *rtl818x_translate_scan(struct ieee80211_device *ieee,
					   char *start, char *stop,
					   struct ieee80211_network *network,
					   struct iw_request_info *info)
{
	char custom[MAX_CUSTOM_LEN];
	char *p;
	struct iw_event iwe;
	int i, j;
	u8 max_rate, rate;

	/* First entry *MUST* be the AP MAC address */
	iwe.cmd = SIOCGIWAP;
	iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
	memcpy(iwe.u.ap_addr.sa_data, network->bssid, ETH_ALEN);
	start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN);
	/* Remaining entries will be displayed in the order we provide them */

	/* Add the ESSID */
	iwe.cmd = SIOCGIWESSID;
	iwe.u.data.flags = 1;
//YJ,modified,080903,for hidden ap
	//if (network->flags & NETWORK_EMPTY_ESSID) {
	if (network->ssid_len == 0) {
//YJ,modified,080903,end
		iwe.u.data.length = sizeof("<hidden>");
		start = iwe_stream_add_point(info, start, stop, &iwe, "<hidden>");
	} else {
		iwe.u.data.length = min(network->ssid_len, (u8)32);
		start = iwe_stream_add_point(info, start, stop, &iwe, network->ssid);
	}
	//printk("ESSID: %s\n",network->ssid);

	/* Add the protocol name */
	iwe.cmd = SIOCGIWNAME;
	snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11%s",
		 ieee80211_modes[network->mode]);
	start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_CHAR_LEN);

	/* Add mode */
	iwe.cmd = SIOCGIWMODE;
	if (network->capability &
	    (WLAN_CAPABILITY_BSS | WLAN_CAPABILITY_IBSS)) {
		if (network->capability & WLAN_CAPABILITY_BSS)
			iwe.u.mode = IW_MODE_MASTER;
		else
			iwe.u.mode = IW_MODE_ADHOC;

		start = iwe_stream_add_event(info, start, stop, &iwe,
					     IW_EV_UINT_LEN);
	}

	/* Add frequency/channel: reported as a raw channel number (e = 0),
	 * not a frequency in Hz */
	iwe.cmd = SIOCGIWFREQ;
/*	iwe.u.freq.m = ieee80211_frequency(network->channel, network->mode);
	iwe.u.freq.e = 3; */
	iwe.u.freq.m = network->channel;
	iwe.u.freq.e = 0;
	iwe.u.freq.i = 0;
	start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_FREQ_LEN);

	/* Add encryption capability */
	iwe.cmd = SIOCGIWENCODE;
	if (network->capability & WLAN_CAPABILITY_PRIVACY)
		iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
	else
		iwe.u.data.flags = IW_ENCODE_DISABLED;
	iwe.u.data.length = 0;
	start = iwe_stream_add_point(info, start, stop, &iwe, network->ssid);

	/* Add basic and extended rates: merge the two sorted rate lists */
	max_rate = 0;
	p = custom;
	p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
	for (i = 0, j = 0; i < network->rates_len; ) {
		if (j < network->rates_ex_len &&
		    ((network->rates_ex[j] & 0x7F) < (network->rates[i] & 0x7F)))
			rate = network->rates_ex[j++] & 0x7F;
		else
			rate = network->rates[i++] & 0x7F;
		if (rate > max_rate)
			max_rate = rate;
		/* rates are in 500 kb/s units: >>1 gives Mb/s, low bit = .5 */
		p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
			      "%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
	}
	for (; j < network->rates_ex_len; j++) {
		rate = network->rates_ex[j] & 0x7F;
		p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
			      "%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
		if (rate > max_rate)
			max_rate = rate;
	}

	iwe.cmd = SIOCGIWRATE;
	iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
	iwe.u.bitrate.value = max_rate * 500000;
	start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_PARAM_LEN);

	iwe.cmd = IWEVCUSTOM;
	iwe.u.data.length = p - custom;
	if (iwe.u.data.length)
		start = iwe_stream_add_point(info, start, stop, &iwe, custom);

	/* Add quality statistics */
	/* TODO: Fix these values... */
	if (network->stats.signal == 0 || network->stats.rssi == 0)
		printk("========>signal:%d, rssi:%d\n", network->stats.signal,
		       network->stats.rssi);
	iwe.cmd = IWEVQUAL;
//	printk("SIGNAL: %d,RSSI: %d,NOISE: %d\n",network->stats.signal,network->stats.rssi,network->stats.noise);
	iwe.u.qual.qual = network->stats.signalstrength;
	iwe.u.qual.level = network->stats.signal;
	iwe.u.qual.noise = network->stats.noise;
	iwe.u.qual.updated = network->stats.mask & IEEE80211_STATMASK_WEMASK;
	if (!(network->stats.mask & IEEE80211_STATMASK_RSSI))
		iwe.u.qual.updated |= IW_QUAL_LEVEL_INVALID;
	if (!(network->stats.mask & IEEE80211_STATMASK_NOISE))
		iwe.u.qual.updated |= IW_QUAL_NOISE_INVALID;
	if (!(network->stats.mask & IEEE80211_STATMASK_SIGNAL))
		iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID;
	/* NOTE(review): this unconditionally overwrites the mask/invalid
	 * flags computed just above — presumably a debug override left in;
	 * confirm before relying on the invalid-flag logic. */
	iwe.u.qual.updated = 7;

	start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_QUAL_LEN);

	/* Empty custom event: length is always 0 here, so nothing is added */
	iwe.cmd = IWEVCUSTOM;
	p = custom;
	iwe.u.data.length = p - custom;
	if (iwe.u.data.length)
		start = iwe_stream_add_point(info, start, stop, &iwe, custom);

	memset(&iwe, 0, sizeof(iwe));
	if (network->wpa_ie_len) {
//	printk("wpa_ie_len:%d\n", network->wpa_ie_len);
		char buf[MAX_WPA_IE_LEN];
		memcpy(buf, network->wpa_ie, network->wpa_ie_len);
		iwe.cmd = IWEVGENIE;
		iwe.u.data.length = network->wpa_ie_len;
		start = iwe_stream_add_point(info, start, stop, &iwe, buf);
	}

	memset(&iwe, 0, sizeof(iwe));
	if (network->rsn_ie_len) {
//	printk("=====>rsn_ie_len:\n", network->rsn_ie_len);
		char buf[MAX_WPA_IE_LEN];
		memcpy(buf, network->rsn_ie, network->rsn_ie_len);
		iwe.cmd = IWEVGENIE;
		iwe.u.data.length = network->rsn_ie_len;
		start = iwe_stream_add_point(info, start, stop, &iwe, buf);
	}

	/* Add EXTRA: Age to display seconds since last beacon/probe response
	 * for given network.
	 * NOTE(review): the divisor (HZ / 100) yields 10ms units, but the
	 * string claims "ms" — the reported age looks off by 10x; verify. */
	iwe.cmd = IWEVCUSTOM;
	p = custom;
	p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
		      " Last beacon: %lums ago",
		      (jiffies - network->last_scanned) / (HZ / 100));
	iwe.u.data.length = p - custom;
	if (iwe.u.data.length)
		start = iwe_stream_add_point(info, start, stop, &iwe, custom);

	return start;
}

/*
 * SIOCGIWSCAN handler: walk the scanned-network list under ieee->lock
 * and serialize each fresh-enough entry into the caller's buffer.
 * Returns -E2BIG when the remaining buffer is too small (<200 bytes).
 */
int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
			  struct iw_request_info *info,
			  union iwreq_data *wrqu, char *extra)
{
	struct ieee80211_network *network;
	unsigned long flags;
	int err = 0;
	char *ev = extra;
	char *stop = ev + wrqu->data.length;//IW_SCAN_MAX_DATA;
	//char *stop = ev + IW_SCAN_MAX_DATA;
	int i = 0;

	IEEE80211_DEBUG_WX("Getting scan\n");
	down(&ieee->wx_sem);
	spin_lock_irqsave(&ieee->lock, flags);

	if(!ieee->bHwRadioOff)
	{
		list_for_each_entry(network, &ieee->network_list, list) {
			i++;

			if((stop-ev)<200)
			{
				err = -E2BIG;
				break;
			}
			if (ieee->scan_age == 0 ||
			    time_after(network->last_scanned + ieee->scan_age,
				       jiffies))
			{
				ev = rtl818x_translate_scan(ieee, ev, stop,
							    network, info);
			}
			else
				IEEE80211_DEBUG_SCAN(
					"Not showing network '%s ("
					"%pM)' due to age (%lums).\n",
					escape_essid(network->ssid,
						     network->ssid_len),
					network->bssid,
					(jiffies - network->last_scanned) / (HZ / 100));
		}
	}

	spin_unlock_irqrestore(&ieee->lock, flags);
	up(&ieee->wx_sem);
	wrqu->data.length = ev - extra;
	wrqu->data.flags = 0;

	IEEE80211_DEBUG_WX("exit: %d networks returned.\n", i);

	return err;
}

/*
 * SIOCSIWENCODE handler: configure/disable a WEP key (WEP only — WPA
 * goes through ieee80211_wx_set_encode_ext).
 */
int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *keybuf)
{
	struct iw_point *erq = &(wrqu->encoding);
	struct net_device *dev = ieee->dev;
	struct ieee80211_security sec = {
		.flags = 0
	};
	int i, key, key_provided, len;
	struct ieee80211_crypt_data **crypt;

	IEEE80211_DEBUG_WX("SET_ENCODE\n");

	/* IW_ENCODE_INDEX is 1-based; 0 means "use current TX key" */
	key = erq->flags & IW_ENCODE_INDEX;
	if (key) {
		if (key > WEP_KEYS)
			return -EINVAL;
		key--;
		key_provided = 1;
	} else {
		key_provided = 0;
		key = ieee->tx_keyidx;
	}

	IEEE80211_DEBUG_WX("Key: %d [%s]\n", key, key_provided ?
			   "provided" : "default");
	crypt = &ieee->crypt[key];

	if (erq->flags & IW_ENCODE_DISABLED) {
		if (key_provided && *crypt) {
			IEEE80211_DEBUG_WX("Disabling encryption on key %d.\n",
					   key);
			ieee80211_crypt_delayed_deinit(ieee, crypt);
		} else
			IEEE80211_DEBUG_WX("Disabling encryption.\n");

		/* Check all the keys to see if any are still configured,
		 * and if no key index was provided, de-init them all */
		for (i = 0; i < WEP_KEYS; i++) {
			if (ieee->crypt[i] != NULL) {
				if (key_provided)
					break;
				ieee80211_crypt_delayed_deinit(
					ieee, &ieee->crypt[i]);
			}
		}

		if (i == WEP_KEYS) {
			sec.enabled = 0;
			sec.level = SEC_LEVEL_0;
			sec.flags |= SEC_ENABLED | SEC_LEVEL;
		}

		goto done;
	}

	sec.enabled = 1;
	sec.flags |= SEC_ENABLED;

	if (*crypt != NULL && (*crypt)->ops != NULL &&
	    strcmp((*crypt)->ops->name, "WEP") != 0) {
		/* changing to use WEP; deinit previously used algorithm
		 * on this key */
		ieee80211_crypt_delayed_deinit(ieee, crypt);
	}

	if (*crypt == NULL) {
		struct ieee80211_crypt_data *new_crypt;

		/* take WEP into use */
		new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data),
				    GFP_KERNEL);
		if (new_crypt == NULL)
			return -ENOMEM;
		new_crypt->ops = ieee80211_get_crypto_ops("WEP");
		/* NOTE(review): retrying the identical lookup is a no-op;
		 * upstream loads the ieee80211_crypt_wep module between
		 * these two calls — the request_module step appears to
		 * have been dropped here. */
		if (!new_crypt->ops)
			new_crypt->ops = ieee80211_get_crypto_ops("WEP");
		if (new_crypt->ops)
			new_crypt->priv = new_crypt->ops->init(key);

		if (!new_crypt->ops || !new_crypt->priv) {
			kfree(new_crypt);
			new_crypt = NULL;

			printk(KERN_WARNING "%s: could not initialize WEP: "
			       "load module ieee80211_crypt_wep\n",
			       dev->name);
			return -EOPNOTSUPP;
		}
		*crypt = new_crypt;
	}

	/* If a new key was provided, set it up */
	if (erq->length > 0) {
		/* WEP-40 (5 bytes) or WEP-104 (13 bytes); zero-pad short keys */
		len = erq->length <= 5 ? 5 : 13;
		memcpy(sec.keys[key], keybuf, erq->length);
		if (len > erq->length)
			memset(sec.keys[key] + erq->length, 0,
			       len - erq->length);
		IEEE80211_DEBUG_WX("Setting key %d to '%s' (%d:%d bytes)\n",
				   key, escape_essid(sec.keys[key], len),
				   erq->length, len);
		sec.key_sizes[key] = len;
		(*crypt)->ops->set_key(sec.keys[key], len, NULL,
				       (*crypt)->priv);
		sec.flags |= (1 << key);
		/* This ensures a key will be activated if no key is
		 * explicitely set */
		if (key == sec.active_key)
			sec.flags |= SEC_ACTIVE_KEY;
		ieee->tx_keyidx = key;//by wb 080312

	} else {
		len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN,
					     NULL, (*crypt)->priv);
		if (len == 0) {
			/* Set a default key of all 0 */
			IEEE80211_DEBUG_WX("Setting key %d to all zero.\n",
					   key);
			memset(sec.keys[key], 0, 13);
			(*crypt)->ops->set_key(sec.keys[key], 13, NULL,
					       (*crypt)->priv);
			sec.key_sizes[key] = 13;
			sec.flags |= (1 << key);
		}

		/* No key data - just set the default TX key index */
		if (key_provided) {
			IEEE80211_DEBUG_WX(
				"Setting key %d to default Tx key.\n", key);
			ieee->tx_keyidx = key;
			sec.active_key = key;
			sec.flags |= SEC_ACTIVE_KEY;
		}
	}

done:
	ieee->open_wep = !(erq->flags & IW_ENCODE_RESTRICTED);
	sec.auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY;
	sec.flags |= SEC_AUTH_MODE;
	IEEE80211_DEBUG_WX("Auth: %s\n", sec.auth_mode == WLAN_AUTH_OPEN ?
			   "OPEN" : "SHARED KEY");

	/* For now we just support WEP, so only set that security level...
	 * TODO: When WPA is added this is one place that needs to change */
	sec.flags |= SEC_LEVEL;
	sec.level = SEC_LEVEL_1; /* 40 and 104 bit WEP */

	if (ieee->set_security)
		ieee->set_security(dev, &sec);

	/* Do not reset port if card is in Managed mode since resetting will
	 * generate new IEEE 802.11 authentication which may end up in looping
	 * with IEEE 802.1X. If your hardware requires a reset after WEP
	 * configuration (for example... Prism2), implement the reset_port in
	 * the callbacks structures used to initialize the 802.11 stack. */
	if (ieee->reset_on_keychange &&
	    ieee->iw_mode != IW_MODE_INFRA &&
	    ieee->reset_port && ieee->reset_port(dev)) {
		printk(KERN_DEBUG "%s: reset_port failed\n", dev->name);
		return -EINVAL;
	}
	return 0;
}

/* SIOCGIWENCODE handler: report the current WEP key/state to userspace. */
int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *keybuf)
{
	struct iw_point *erq = &(wrqu->encoding);
	int len, key;
	struct ieee80211_crypt_data *crypt;

	IEEE80211_DEBUG_WX("GET_ENCODE\n");

	if(ieee->iw_mode == IW_MODE_MONITOR)
		return -1;

	key = erq->flags & IW_ENCODE_INDEX;
	if (key) {
		if (key > WEP_KEYS)
			return -EINVAL;
		key--;
	} else
		key = ieee->tx_keyidx;

	crypt = ieee->crypt[key];
	erq->flags = key + 1;

	if (crypt == NULL || crypt->ops == NULL) {
		erq->length = 0;
		erq->flags |= IW_ENCODE_DISABLED;
		return 0;
	}

	if (strcmp(crypt->ops->name, "WEP") != 0) {
		/* only WEP is supported with wireless extensions, so just
		 * report that encryption is used */
		erq->length = 0;
		erq->flags |= IW_ENCODE_ENABLED;
		return 0;
	}

	len = crypt->ops->get_key(keybuf, WEP_KEY_LEN, NULL, crypt->priv);
	erq->length = (len >= 0 ? len : 0);

	erq->flags |= IW_ENCODE_ENABLED;

	if (ieee->open_wep)
		erq->flags |= IW_ENCODE_OPEN;
	else
		erq->flags |= IW_ENCODE_RESTRICTED;

	return 0;
}

/*
 * SIOCSIWENCODEEXT handler: configure WEP/TKIP/CCMP keys (pairwise or
 * group) via the extended encoding interface.
 */
int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
				struct iw_request_info *info,
				union iwreq_data *wrqu, char *extra)
{
	struct net_device *dev = ieee->dev;
	struct iw_point *encoding = &wrqu->encoding;
	struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
	int i, idx, ret = 0;
	int group_key = 0;
	const char *alg;
	struct ieee80211_crypto_ops *ops;
	struct ieee80211_crypt_data **crypt;

	struct ieee80211_security sec = {
		.flags = 0,
	};
	//printk("======>encoding flag:%x,ext flag:%x, ext alg:%d\n", encoding->flags,ext->ext_flags, ext->alg);
	idx = encoding->flags & IW_ENCODE_INDEX;
	if (idx) {
		if (idx < 1 || idx > WEP_KEYS)
			return -EINVAL;
		idx--;
	} else
		idx = ieee->tx_keyidx;

	if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
		crypt = &ieee->crypt[idx];
		group_key = 1;
	} else {
		/* some Cisco APs use idx>0 for unicast in dynamic WEP */
		//printk("not group key, flags:%x, ext->alg:%d\n", ext->ext_flags, ext->alg);
		if (idx != 0 && ext->alg != IW_ENCODE_ALG_WEP)
			return -EINVAL;
		if (ieee->iw_mode == IW_MODE_INFRA)
			crypt = &ieee->crypt[idx];
		else
			return -EINVAL;
	}

	sec.flags |= SEC_ENABLED;// | SEC_ENCRYPT;
	if ((encoding->flags & IW_ENCODE_DISABLED) ||
	    ext->alg == IW_ENCODE_ALG_NONE) {
		if (*crypt)
			ieee80211_crypt_delayed_deinit(ieee, crypt);

		for (i = 0; i < WEP_KEYS; i++)
			if (ieee->crypt[i] != NULL)
				break;

		if (i == WEP_KEYS) {
			sec.enabled = 0;
		//	sec.encrypt = 0;
			sec.level = SEC_LEVEL_0;
			sec.flags |= SEC_LEVEL;
		}
		//printk("disabled: flag:%x\n", encoding->flags);
		goto done;
	}

	sec.enabled = 1;
	//	sec.encrypt = 1;

	switch (ext->alg) {
	case IW_ENCODE_ALG_WEP:
		alg = "WEP";
		break;
	case IW_ENCODE_ALG_TKIP:
		alg = "TKIP";
		break;
	case IW_ENCODE_ALG_CCMP:
		alg = "CCMP";
		break;
	default:
		IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
				   dev->name, ext->alg);
		ret = -EINVAL;
		goto done;
	}
//	printk("8-09-08-9=====>%s, alg name:%s\n",__func__, alg);

	ops = ieee80211_get_crypto_ops(alg);
	/* NOTE(review): identical retry with no request_module() between —
	 * same dropped-module-load pattern as in ieee80211_wx_set_encode. */
	if (ops == NULL)
		ops = ieee80211_get_crypto_ops(alg);
	if (ops == NULL) {
		IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
				   dev->name, ext->alg);
		printk("========>unknown crypto alg %d\n", ext->alg);
		ret = -EINVAL;
		goto done;
	}

	if (*crypt == NULL || (*crypt)->ops != ops) {
		struct ieee80211_crypt_data *new_crypt;

		ieee80211_crypt_delayed_deinit(ieee, crypt);
		new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL);
		if (new_crypt == NULL) {
			ret = -ENOMEM;
			goto done;
		}
		new_crypt->ops = ops;
		if (new_crypt->ops)
			new_crypt->priv = new_crypt->ops->init(idx);
		if (new_crypt->priv == NULL) {
			kfree(new_crypt);
			ret = -EINVAL;
			goto done;
		}
		*crypt = new_crypt;
	}

	if (ext->key_len > 0 && (*crypt)->ops->set_key &&
	    (*crypt)->ops->set_key(ext->key, ext->key_len, ext->rx_seq,
				   (*crypt)->priv) < 0) {
		IEEE80211_DEBUG_WX("%s: key setting failed\n", dev->name);
		printk("key setting failed\n");
		ret = -EINVAL;
		goto done;
	}
#if 1
 //skip_host_crypt:
	//printk("skip_host_crypt:ext_flags:%x\n", ext->ext_flags);
	if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
		ieee->tx_keyidx = idx;
		sec.active_key = idx;
		sec.flags |= SEC_ACTIVE_KEY;
	}

	if (ext->alg != IW_ENCODE_ALG_NONE) {
		memcpy(sec.keys[idx], ext->key, ext->key_len);
		sec.key_sizes[idx] = ext->key_len;
		sec.flags |= (1 << idx);
		if (ext->alg == IW_ENCODE_ALG_WEP) {
		//	sec.encode_alg[idx] = SEC_ALG_WEP;
			sec.flags |= SEC_LEVEL;
			sec.level = SEC_LEVEL_1;
		} else if (ext->alg == IW_ENCODE_ALG_TKIP) {
		//	sec.encode_alg[idx] = SEC_ALG_TKIP;
			sec.flags |= SEC_LEVEL;
			sec.level = SEC_LEVEL_2;
		} else if (ext->alg == IW_ENCODE_ALG_CCMP) {
		//	sec.encode_alg[idx] = SEC_ALG_CCMP;
			sec.flags |= SEC_LEVEL;
			sec.level = SEC_LEVEL_3;
		}
		/* Don't set sec level for group keys. */
		if (group_key)
			sec.flags &= ~SEC_LEVEL;
	}
#endif
done:
	if (ieee->set_security)
		ieee->set_security(ieee->dev, &sec);

	if (ieee->reset_on_keychange &&
	    ieee->iw_mode != IW_MODE_INFRA &&
	    ieee->reset_port && ieee->reset_port(dev)) {
		IEEE80211_DEBUG_WX("%s: reset_port failed\n", dev->name);
		return -EINVAL;
	}
	return ret;
}

/* SIOCSIWMLME handler: deauth/disassoc requests trigger a disassociate. */
int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
			  struct iw_request_info *info,
			  union iwreq_data *wrqu, char *extra)
{
	struct iw_mlme *mlme = (struct iw_mlme *) extra;
//	printk("\ndkgadfslkdjgalskdf===============>%s(), cmd:%x\n", __func__, mlme->cmd);
#if 1
	switch (mlme->cmd) {
	case IW_MLME_DEAUTH:
	case IW_MLME_DISASSOC:
	//	printk("disassoc now\n");
		ieee80211_disassociate(ieee);
		break;
	default:
		return -EOPNOTSUPP;
	}
#endif
	return 0;
}

/* SIOCSIWAUTH handler: record auth parameters set by wpa_supplicant. */
int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
			  struct iw_request_info *info,
			  struct iw_param *data, char *extra)
{
/*
	struct ieee80211_security sec = {
		.flags = SEC_AUTH_MODE,
	}
*/
	//printk("set auth:flag:%x, data value:%x\n", data->flags, data->value);
	switch (data->flags & IW_AUTH_INDEX) {
	case IW_AUTH_WPA_VERSION:
	     /*need to support wpa2 here*/
		//printk("wpa version:%x\n", data->value);
		break;
	case IW_AUTH_CIPHER_PAIRWISE:
	case IW_AUTH_CIPHER_GROUP:
	case IW_AUTH_KEY_MGMT:
		/* Host AP driver does not use these parameters and allows
		 * wpa_supplicant to control them internally. */
		break;
	case IW_AUTH_TKIP_COUNTERMEASURES:
		ieee->tkip_countermeasures = data->value;
		break;
	case IW_AUTH_DROP_UNENCRYPTED:
		ieee->drop_unencrypted = data->value;
		break;
	case IW_AUTH_80211_AUTH_ALG:
		ieee->open_wep = (data->value&IW_AUTH_ALG_OPEN_SYSTEM)?1:0;
		//printk("open_wep:%d\n", ieee->open_wep);
		break;
#if 1
	case IW_AUTH_WPA_ENABLED:
		ieee->wpa_enabled = (data->value)?1:0;
		//printk("enable wpa:%d\n", ieee->wpa_enabled);
		break;
#endif
	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
		ieee->ieee802_1x = data->value;
		break;
	case IW_AUTH_PRIVACY_INVOKED:
		ieee->privacy_invoked = data->value;
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

#if 1
/*
 * Store the WPA IE supplied by userspace.  A zero length clears the
 * stored IE; otherwise the buffer must be self-consistent (declared IE
 * length ie[1] + 2-byte header == len) and is copied with kmemdup.
 */
int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len)
{
	u8 *buf = NULL;

	if (len>MAX_WPA_IE_LEN || (len && ie == NULL))
	{
		printk("return error out, len:%zu\n", len);
		return -EINVAL;
	}

	if (len)
	{
		if (len != ie[1]+2)
		{
			printk("len:%zu, ie:%d\n", len, ie[1]);
			return -EINVAL;
		}
		buf = kmemdup(ie, len, GFP_KERNEL);
		if (buf == NULL)
			return -ENOMEM;
		kfree(ieee->wpa_ie);
		ieee->wpa_ie = buf;
		ieee->wpa_ie_len = len;
	}
	else
	{
		if (ieee->wpa_ie)
			kfree(ieee->wpa_ie);
		ieee->wpa_ie = NULL;
		ieee->wpa_ie_len = 0;
	}
//	printk("<=====out %s()\n", __func__);

	return 0;
}
#endif
gpl-2.0
olicmoon/linux
drivers/w1/slaves/w1_ds2406.c
1524
3645
/*
 * w1_ds2406.c - w1 family 12 (DS2406) driver
 * based on w1_ds2413.c by Mariusz Bialonczyk <manio@skyboo.net>
 *
 * Copyright (c) 2014 Scott Alfter <scott@alfter.us>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/crc16.h>

#include "../w1.h"
#include "../w1_int.h"
#include "../w1_family.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Scott Alfter <scott@alfter.us>");
MODULE_DESCRIPTION("w1 family 12 driver for DS2406 2 Pin IO");

/* DS2406 on-wire commands */
#define W1_F12_FUNC_READ_STATUS 0xAA
#define W1_F12_FUNC_WRITE_STATUS 0x55

/*
 * sysfs "state" read: issue READ STATUS and return the two PIO channel
 * bits as an ASCII digit '0'..'3' in buf[0].  Returns 1 (bytes read) on
 * success, -EIO on bus or CRC failure.
 */
static ssize_t w1_f12_read_state(
	struct file *filp, struct kobject *kobj,
	struct bin_attribute *bin_attr,
	char *buf, loff_t off, size_t count)
{
	u8 w1_buf[6]={W1_F12_FUNC_READ_STATUS, 7, 0, 0, 0, 0};
	struct w1_slave *sl = kobj_to_w1_slave(kobj);
	u16 crc=0;
	int i;
	ssize_t rtnval=1;

	if (off != 0)
		return 0;
	if (!buf)
		return -EINVAL;

	mutex_lock(&sl->master->bus_mutex);

	if (w1_reset_select_slave(sl)) {
		mutex_unlock(&sl->master->bus_mutex);
		return -EIO;
	}

	w1_write_block(sl->master, w1_buf, 3);
	w1_read_block(sl->master, w1_buf+3, 3);
	/* CRC-16 over command+address+data; 0xb001 appears to be the
	 * expected residue for a good transfer (matches the check in
	 * w1_f12_write_output below) */
	for (i=0; i<6; i++)
		crc=crc16_byte(crc, w1_buf[i]);
	if (crc==0xb001) /* good read? */
		*buf=((w1_buf[3]>>5)&3)|0x30;
	else
		rtnval=-EIO;

	mutex_unlock(&sl->master->bus_mutex);

	return rtnval;
}

/*
 * sysfs "output" write: issue WRITE STATUS with the low two bits of
 * buf[0] as the PIO output latch state.  Returns 1 on success, -EIO on
 * bus or CRC failure.
 * NOTE(review): -EFAULT for a bad count/offset is unusual; -EINVAL is
 * the conventional errno for this — confirm before changing userspace-
 * visible behavior.
 */
static ssize_t w1_f12_write_output(
	struct file *filp, struct kobject *kobj,
	struct bin_attribute *bin_attr,
	char *buf, loff_t off, size_t count)
{
	struct w1_slave *sl = kobj_to_w1_slave(kobj);
	u8 w1_buf[6]={W1_F12_FUNC_WRITE_STATUS, 7, 0, 0, 0, 0};
	u16 crc=0;
	int i;
	ssize_t rtnval=1;

	if (count != 1 || off != 0)
		return -EFAULT;

	mutex_lock(&sl->master->bus_mutex);

	if (w1_reset_select_slave(sl)) {
		mutex_unlock(&sl->master->bus_mutex);
		return -EIO;
	}

	w1_buf[3] = (((*buf)&3)<<5)|0x1F;
	w1_write_block(sl->master, w1_buf, 4);
	w1_read_block(sl->master, w1_buf+4, 2);
	for (i=0; i<6; i++)
		crc=crc16_byte(crc, w1_buf[i]);
	if (crc==0xb001) /* good read? */
		w1_write_8(sl->master, 0xFF); /* confirm byte commits the write */
	else
		rtnval=-EIO;

	mutex_unlock(&sl->master->bus_mutex);

	return rtnval;
}

#define NB_SYSFS_BIN_FILES 2
static struct bin_attribute w1_f12_sysfs_bin_files[NB_SYSFS_BIN_FILES] = {
	{
		.attr = {
			.name = "state",
			.mode = S_IRUGO,
		},
		.size = 1,
		.read = w1_f12_read_state,
	},
	{
		.attr = {
			.name = "output",
			.mode = S_IRUGO | S_IWUSR | S_IWGRP,
		},
		.size = 1,
		.write = w1_f12_write_output,
	}
};

/* Create both sysfs files; on failure remove whatever was created. */
static int w1_f12_add_slave(struct w1_slave *sl)
{
	int err = 0;
	int i;

	for (i = 0; i < NB_SYSFS_BIN_FILES && !err; ++i)
		err = sysfs_create_bin_file(
			&sl->dev.kobj,
			&(w1_f12_sysfs_bin_files[i]));
	if (err)
		while (--i >= 0)
			sysfs_remove_bin_file(&sl->dev.kobj,
				&(w1_f12_sysfs_bin_files[i]));
	return err;
}

static void w1_f12_remove_slave(struct w1_slave *sl)
{
	int i;
	for (i = NB_SYSFS_BIN_FILES - 1; i >= 0; --i)
		sysfs_remove_bin_file(&sl->dev.kobj,
			&(w1_f12_sysfs_bin_files[i]));
}

static struct w1_family_ops w1_f12_fops = {
	.add_slave      = w1_f12_add_slave,
	.remove_slave   = w1_f12_remove_slave,
};

static struct w1_family w1_family_12 = {
	.fid = W1_FAMILY_DS2406,
	.fops = &w1_f12_fops,
};

static int __init w1_f12_init(void)
{
	return w1_register_family(&w1_family_12);
}

static void __exit w1_f12_exit(void)
{
	w1_unregister_family(&w1_family_12);
}

module_init(w1_f12_init);
module_exit(w1_f12_exit);
gpl-2.0
GenetICS/lge_kernel_msm7x27
arch/arm/mach-at91/board-sam9-l9260.c
2292
4989
/*
 * linux/arch/arm/mach-at91/board-sam9-l9260.c
 *
 * Copyright (C) 2005 SAN People
 * Copyright (C) 2006 Atmel
 * Copyright (C) 2007 Olimex Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

#include <mach/hardware.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/irq.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>

#include <mach/board.h>
#include <mach/gpio.h>
#include <mach/at91sam9_smc.h>

#include "sam9_smc.h"
#include "generic.h"

/* Early board setup: clocks, LEDs, and UART/console wiring. */
static void __init ek_init_early(void)
{
	/* Initialize processor: 18.432 MHz crystal */
	at91sam9260_initialize(18432000);

	/* Setup the LEDs */
	at91_init_leds(AT91_PIN_PA9, AT91_PIN_PA6);

	/* DBGU on ttyS0. (Rx & Tx only) */
	at91_register_uart(0, 0, 0);

	/* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
	at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS
			   | ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD
			   | ATMEL_UART_RI);

	/* USART1 on ttyS2. (Rx, Tx, CTS, RTS) */
	at91_register_uart(AT91SAM9260_ID_US1, 2, ATMEL_UART_CTS | ATMEL_UART_RTS);

	/* set serial console to ttyS0 (ie, DBGU) */
	at91_set_serial_console(0);
}

static void __init ek_init_irq(void)
{
	at91sam9260_init_interrupts(NULL);
}

/*
 * USB Host port
 */
static struct at91_usbh_data __initdata ek_usbh_data = {
	.ports		= 2,
};

/*
 * USB Device port
 */
static struct at91_udc_data __initdata ek_udc_data = {
	.vbus_pin	= AT91_PIN_PC5,
	.pullup_pin	= 0,		/* pull-up driven by UDC */
};

/*
 * SPI devices.
 * Note: the DataFlash entries are compiled out when the AT91 MMC driver
 * is enabled (CONFIG_MMC_AT91) — the two peripherals share pins.
 */
static struct spi_board_info ek_spi_devices[] = {
#if !defined(CONFIG_MMC_AT91)
	{	/* DataFlash chip */
		.modalias	= "mtd_dataflash",
		.chip_select	= 1,
		.max_speed_hz	= 15 * 1000 * 1000,
		.bus_num	= 0,
	},
#if defined(CONFIG_MTD_AT91_DATAFLASH_CARD)
	{	/* DataFlash card */
		.modalias	= "mtd_dataflash",
		.chip_select	= 0,
		.max_speed_hz	= 15 * 1000 * 1000,
		.bus_num	= 0,
	},
#endif
#endif
};

/*
 * MACB Ethernet device
 */
static struct at91_eth_data __initdata ek_macb_data = {
	.phy_irq_pin	= AT91_PIN_PA7,
	.is_rmii	= 0,
};

/*
 * NAND flash
 */
static struct mtd_partition __initdata ek_nand_partition[] = {
	{
		.name	= "Bootloader Area",
		.offset	= 0,
		.size	= 10 * SZ_1M,
	},
	{
		.name	= "User Area",
		.offset	= MTDPART_OFS_NXTBLK,
		.size	= MTDPART_SIZ_FULL,
	},
};

static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
{
	*num_partitions = ARRAY_SIZE(ek_nand_partition);
	return ek_nand_partition;
}

static struct atmel_nand_data __initdata ek_nand_data = {
	.ale		= 21,
	.cle		= 22,
//	.det_pin	= ... not connected
	.rdy_pin	= AT91_PIN_PC13,
	.enable_pin	= AT91_PIN_PC14,
	.partition_info	= nand_partitions,
};

/* Static memory controller timings for the NAND chip select. */
static struct sam9_smc_config __initdata ek_nand_smc_config = {
	.ncs_read_setup		= 0,
	.nrd_setup		= 1,
	.ncs_write_setup	= 0,
	.nwe_setup		= 1,

	.ncs_read_pulse		= 3,
	.nrd_pulse		= 3,
	.ncs_write_pulse	= 3,
	.nwe_pulse		= 3,

	.read_cycle		= 5,
	.write_cycle		= 5,

	.mode			= AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_DBW_8,
	.tdf_cycles		= 2,
};

static void __init ek_add_device_nand(void)
{
	/* configure chip-select 3 (NAND) */
	sam9_smc_configure(3, &ek_nand_smc_config);

	at91_add_device_nand(&ek_nand_data);
}

/*
 * MCI (SD/MMC)
 */
static struct at91_mmc_data __initdata ek_mmc_data = {
	.slot_b		= 1,
	.wire4		= 1,
	.det_pin	= AT91_PIN_PC8,
	.wp_pin		= AT91_PIN_PC4,
//	.vcc_pin	= ... not connected
};

/* Register all on-board devices with the AT91 platform core. */
static void __init ek_board_init(void)
{
	/* Serial */
	at91_add_device_serial();
	/* USB Host */
	at91_add_device_usbh(&ek_usbh_data);
	/* USB Device */
	at91_add_device_udc(&ek_udc_data);
	/* SPI */
	at91_add_device_spi(ek_spi_devices, ARRAY_SIZE(ek_spi_devices));
	/* NAND */
	ek_add_device_nand();
	/* Ethernet */
	at91_add_device_eth(&ek_macb_data);
	/* MMC */
	at91_add_device_mmc(0, &ek_mmc_data);
	/* I2C */
	at91_add_device_i2c(NULL, 0);
}

MACHINE_START(SAM9_L9260, "Olimex SAM9-L9260")
	/* Maintainer: Olimex */
	.timer		= &at91sam926x_timer,
	.map_io		= at91sam9260_map_io,
	.init_early	= ek_init_early,
	.init_irq	= ek_init_irq,
	.init_machine	= ek_board_init,
MACHINE_END
gpl-2.0
rogrady/lin_imx6
arch/arm/mach-at91/board-eb9200.c
2292
3368
/* * linux/arch/arm/mach-at91/board-eb9200.c * * Copyright (C) 2005 SAN People, adapted for ATEB9200 from Embest * by Andrew Patrikalakis * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/types.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/device.h> #include <mach/hardware.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/board.h> #include <mach/gpio.h> #include "generic.h" static void __init eb9200_init_early(void) { /* Initialize processor: 18.432 MHz crystal */ at91rm9200_initialize(18432000); /* DBGU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* USART1 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */ at91_register_uart(AT91RM9200_ID_US1, 1, ATMEL_UART_CTS | ATMEL_UART_RTS | ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD | ATMEL_UART_RI); /* USART2 on ttyS2. 
(Rx, Tx) - IRDA */ at91_register_uart(AT91RM9200_ID_US2, 2, 0); /* set serial console to ttyS0 (ie, DBGU) */ at91_set_serial_console(0); } static void __init eb9200_init_irq(void) { at91rm9200_init_interrupts(NULL); } static struct at91_eth_data __initdata eb9200_eth_data = { .phy_irq_pin = AT91_PIN_PC4, .is_rmii = 1, }; static struct at91_usbh_data __initdata eb9200_usbh_data = { .ports = 2, }; static struct at91_udc_data __initdata eb9200_udc_data = { .vbus_pin = AT91_PIN_PD4, .pullup_pin = AT91_PIN_PD5, }; static struct at91_cf_data __initdata eb9200_cf_data = { .det_pin = AT91_PIN_PB0, .rst_pin = AT91_PIN_PC5, // .irq_pin = ... not connected // .vcc_pin = ... always powered }; static struct at91_mmc_data __initdata eb9200_mmc_data = { .slot_b = 0, .wire4 = 1, }; static struct i2c_board_info __initdata eb9200_i2c_devices[] = { { I2C_BOARD_INFO("24c512", 0x50), }, }; static void __init eb9200_board_init(void) { /* Serial */ at91_add_device_serial(); /* Ethernet */ at91_add_device_eth(&eb9200_eth_data); /* USB Host */ at91_add_device_usbh(&eb9200_usbh_data); /* USB Device */ at91_add_device_udc(&eb9200_udc_data); /* I2C */ at91_add_device_i2c(eb9200_i2c_devices, ARRAY_SIZE(eb9200_i2c_devices)); /* Compact Flash */ at91_add_device_cf(&eb9200_cf_data); /* SPI */ at91_add_device_spi(NULL, 0); /* MMC */ /* only supports 1 or 4 bit interface, not wired through to SPI */ at91_add_device_mmc(0, &eb9200_mmc_data); } MACHINE_START(ATEB9200, "Embest ATEB9200") .timer = &at91rm9200_timer, .map_io = at91rm9200_map_io, .init_early = eb9200_init_early, .init_irq = eb9200_init_irq, .init_machine = eb9200_board_init, MACHINE_END
gpl-2.0
intervigilium/android_kernel_samsung_klte
drivers/scsi/mvsas/mv_sas.c
3316
57197
/* * Marvell 88SE64xx/88SE94xx main function * * Copyright 2007 Red Hat, Inc. * Copyright 2008 Marvell. <kewei@marvell.com> * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com> * * This file is licensed under GPLv2. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; version 2 of the * License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA */ #include "mv_sas.h" static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag) { if (task->lldd_task) { struct mvs_slot_info *slot; slot = task->lldd_task; *tag = slot->slot_tag; return 1; } return 0; } void mvs_tag_clear(struct mvs_info *mvi, u32 tag) { void *bitmap = mvi->tags; clear_bit(tag, bitmap); } void mvs_tag_free(struct mvs_info *mvi, u32 tag) { mvs_tag_clear(mvi, tag); } void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) { void *bitmap = mvi->tags; set_bit(tag, bitmap); } inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) { unsigned int index, tag; void *bitmap = mvi->tags; index = find_first_zero_bit(bitmap, mvi->tags_num); tag = index; if (tag >= mvi->tags_num) return -SAS_QUEUE_FULL; mvs_tag_set(mvi, tag); *tag_out = tag; return 0; } void mvs_tag_init(struct mvs_info *mvi) { int i; for (i = 0; i < mvi->tags_num; ++i) mvs_tag_clear(mvi, i); } struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev) { unsigned long i = 0, j = 0, hi = 0; struct sas_ha_struct *sha = dev->port->ha; struct mvs_info *mvi = NULL; struct asd_sas_phy *phy; while (sha->sas_port[i]) 
{ if (sha->sas_port[i] == dev->port) { phy = container_of(sha->sas_port[i]->phy_list.next, struct asd_sas_phy, port_phy_el); j = 0; while (sha->sas_phy[j]) { if (sha->sas_phy[j] == phy) break; j++; } break; } i++; } hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy; mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi]; return mvi; } int mvs_find_dev_phyno(struct domain_device *dev, int *phyno) { unsigned long i = 0, j = 0, n = 0, num = 0; struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; struct mvs_info *mvi = mvi_dev->mvi_info; struct sas_ha_struct *sha = dev->port->ha; while (sha->sas_port[i]) { if (sha->sas_port[i] == dev->port) { struct asd_sas_phy *phy; list_for_each_entry(phy, &sha->sas_port[i]->phy_list, port_phy_el) { j = 0; while (sha->sas_phy[j]) { if (sha->sas_phy[j] == phy) break; j++; } phyno[n] = (j >= mvi->chip->n_phy) ? (j - mvi->chip->n_phy) : j; num++; n++; } break; } i++; } return num; } struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi, u8 reg_set) { u32 dev_no; for (dev_no = 0; dev_no < MVS_MAX_DEVICES; dev_no++) { if (mvi->devices[dev_no].taskfileset == MVS_ID_NOT_MAPPED) continue; if (mvi->devices[dev_no].taskfileset == reg_set) return &mvi->devices[dev_no]; } return NULL; } static inline void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_device *dev) { if (!dev) { mv_printk("device has been free.\n"); return; } if (dev->taskfileset == MVS_ID_NOT_MAPPED) return; MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset); } static inline u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_device *dev) { if (dev->taskfileset != MVS_ID_NOT_MAPPED) return 0; return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset); } void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard) { u32 no; for_each_phy(phy_mask, phy_mask, no) { if (!(phy_mask & 1)) continue; MVS_CHIP_DISP->phy_reset(mvi, no, hard); } } int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, void *funcdata) { int rc = 0, phy_id = 
sas_phy->id; u32 tmp, i = 0, hi; struct sas_ha_struct *sha = sas_phy->ha; struct mvs_info *mvi = NULL; while (sha->sas_phy[i]) { if (sha->sas_phy[i] == sas_phy) break; i++; } hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy; mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi]; switch (func) { case PHY_FUNC_SET_LINK_RATE: MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata); break; case PHY_FUNC_HARD_RESET: tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id); if (tmp & PHY_RST_HARD) break; MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_HARD_RESET); break; case PHY_FUNC_LINK_RESET: MVS_CHIP_DISP->phy_enable(mvi, phy_id); MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_SOFT_RESET); break; case PHY_FUNC_DISABLE: MVS_CHIP_DISP->phy_disable(mvi, phy_id); break; case PHY_FUNC_RELEASE_SPINUP_HOLD: default: rc = -ENOSYS; } msleep(200); return rc; } void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo, u32 off_hi, u64 sas_addr) { u32 lo = (u32)sas_addr; u32 hi = (u32)(sas_addr>>32); MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo); MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo); MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi); MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi); } static void mvs_bytes_dmaed(struct mvs_info *mvi, int i) { struct mvs_phy *phy = &mvi->phy[i]; struct asd_sas_phy *sas_phy = &phy->sas_phy; struct sas_ha_struct *sas_ha; if (!phy->phy_attached) return; if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK) && phy->phy_type & PORT_TYPE_SAS) { return; } sas_ha = mvi->sas; sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE); if (sas_phy->phy) { struct sas_phy *sphy = sas_phy->phy; sphy->negotiated_linkrate = sas_phy->linkrate; sphy->minimum_linkrate = phy->minimum_linkrate; sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; sphy->maximum_linkrate = phy->maximum_linkrate; sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate(); } if (phy->phy_type & PORT_TYPE_SAS) { struct sas_identify_frame *id; id = (struct 
sas_identify_frame *)phy->frame_rcvd; id->dev_type = phy->identify.device_type; id->initiator_bits = SAS_PROTOCOL_ALL; id->target_bits = phy->identify.target_port_protocols; /* direct attached SAS device */ if (phy->att_dev_info & PORT_SSP_TRGT_MASK) { MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT); MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x00); } } else if (phy->phy_type & PORT_TYPE_SATA) { /*Nothing*/ } mv_dprintk("phy %d byte dmaded.\n", i + mvi->id * mvi->chip->n_phy); sas_phy->frame_rcvd_size = phy->frame_rcvd_size; mvi->sas->notify_port_event(sas_phy, PORTE_BYTES_DMAED); } void mvs_scan_start(struct Scsi_Host *shost) { int i, j; unsigned short core_nr; struct mvs_info *mvi; struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct mvs_prv_info *mvs_prv = sha->lldd_ha; core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; for (j = 0; j < core_nr; j++) { mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j]; for (i = 0; i < mvi->chip->n_phy; ++i) mvs_bytes_dmaed(mvi, i); } mvs_prv->scan_finished = 1; } int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time) { struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct mvs_prv_info *mvs_prv = sha->lldd_ha; if (mvs_prv->scan_finished == 0) return 0; sas_drain_work(sha); return 1; } static int mvs_task_prep_smp(struct mvs_info *mvi, struct mvs_task_exec_info *tei) { int elem, rc, i; struct sas_task *task = tei->task; struct mvs_cmd_hdr *hdr = tei->hdr; struct domain_device *dev = task->dev; struct asd_sas_port *sas_port = dev->port; struct scatterlist *sg_req, *sg_resp; u32 req_len, resp_len, tag = tei->tag; void *buf_tmp; u8 *buf_oaf; dma_addr_t buf_tmp_dma; void *buf_prd; struct mvs_slot_info *slot = &mvi->slot_info[tag]; u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); /* * DMA-map SMP request, response buffers */ sg_req = &task->smp_task.smp_req; elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE); if (!elem) return -ENOMEM; req_len = sg_dma_len(sg_req); sg_resp = 
&task->smp_task.smp_resp; elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE); if (!elem) { rc = -ENOMEM; goto err_out; } resp_len = SB_RFB_MAX; /* must be in dwords */ if ((req_len & 0x3) || (resp_len & 0x3)) { rc = -EINVAL; goto err_out_2; } /* * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs */ /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */ buf_tmp = slot->buf; buf_tmp_dma = slot->buf_dma; hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req)); /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ buf_oaf = buf_tmp; hdr->open_frame = cpu_to_le64(buf_tmp_dma); buf_tmp += MVS_OAF_SZ; buf_tmp_dma += MVS_OAF_SZ; /* region 3: PRD table *********************************** */ buf_prd = buf_tmp; if (tei->n_elem) hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); else hdr->prd_tbl = 0; i = MVS_CHIP_DISP->prd_size() * tei->n_elem; buf_tmp += i; buf_tmp_dma += i; /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ slot->response = buf_tmp; hdr->status_buf = cpu_to_le64(buf_tmp_dma); if (mvi->flags & MVF_FLAG_SOC) hdr->reserved[0] = 0; /* * Fill in TX ring and command slot header */ slot->tx = mvi->tx_prod; mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) | TXQ_MODE_I | tag | (sas_port->phy_mask << TXQ_PHY_SHIFT)); hdr->flags |= flags; hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4)); hdr->tags = cpu_to_le32(tag); hdr->data_len = 0; /* generate open address frame hdr (first 12 bytes) */ /* initiator, SMP, ftype 1h */ buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01; buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf; *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */ memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); /* fill in PRD (scatter/gather) table, if any */ MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); return 0; err_out_2: dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1, PCI_DMA_FROMDEVICE); err_out: dma_unmap_sg(mvi->dev, 
&tei->task->smp_task.smp_req, 1, PCI_DMA_TODEVICE); return rc; } static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag) { struct ata_queued_cmd *qc = task->uldd_task; if (qc) { if (qc->tf.command == ATA_CMD_FPDMA_WRITE || qc->tf.command == ATA_CMD_FPDMA_READ) { *tag = qc->tag; return 1; } } return 0; } static int mvs_task_prep_ata(struct mvs_info *mvi, struct mvs_task_exec_info *tei) { struct sas_task *task = tei->task; struct domain_device *dev = task->dev; struct mvs_device *mvi_dev = dev->lldd_dev; struct mvs_cmd_hdr *hdr = tei->hdr; struct asd_sas_port *sas_port = dev->port; struct mvs_slot_info *slot; void *buf_prd; u32 tag = tei->tag, hdr_tag; u32 flags, del_q; void *buf_tmp; u8 *buf_cmd, *buf_oaf; dma_addr_t buf_tmp_dma; u32 i, req_len, resp_len; const u32 max_resp_len = SB_RFB_MAX; if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) { mv_dprintk("Have not enough regiset for dev %d.\n", mvi_dev->device_id); return -EBUSY; } slot = &mvi->slot_info[tag]; slot->tx = mvi->tx_prod; del_q = TXQ_MODE_I | tag | (TXQ_CMD_STP << TXQ_CMD_SHIFT) | (sas_port->phy_mask << TXQ_PHY_SHIFT) | (mvi_dev->taskfileset << TXQ_SRS_SHIFT); mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q); if (task->data_dir == DMA_FROM_DEVICE) flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT); else flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); if (task->ata_task.use_ncq) flags |= MCH_FPDMA; if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) { if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI) flags |= MCH_ATAPI; } hdr->flags = cpu_to_le32(flags); if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag)) task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); else hdr_tag = tag; hdr->tags = cpu_to_le32(hdr_tag); hdr->data_len = cpu_to_le32(task->total_xfer_len); /* * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs */ /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */ buf_cmd = buf_tmp = slot->buf; buf_tmp_dma = slot->buf_dma; hdr->cmd_tbl = 
cpu_to_le64(buf_tmp_dma); buf_tmp += MVS_ATA_CMD_SZ; buf_tmp_dma += MVS_ATA_CMD_SZ; /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ /* used for STP. unused for SATA? */ buf_oaf = buf_tmp; hdr->open_frame = cpu_to_le64(buf_tmp_dma); buf_tmp += MVS_OAF_SZ; buf_tmp_dma += MVS_OAF_SZ; /* region 3: PRD table ********************************************* */ buf_prd = buf_tmp; if (tei->n_elem) hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); else hdr->prd_tbl = 0; i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count(); buf_tmp += i; buf_tmp_dma += i; /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ slot->response = buf_tmp; hdr->status_buf = cpu_to_le64(buf_tmp_dma); if (mvi->flags & MVF_FLAG_SOC) hdr->reserved[0] = 0; req_len = sizeof(struct host_to_dev_fis); resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ - sizeof(struct mvs_err_info) - i; /* request, response lengths */ resp_len = min(resp_len, max_resp_len); hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); if (likely(!task->ata_task.device_control_reg_update)) task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ /* fill in command FIS and ATAPI CDB */ memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) memcpy(buf_cmd + STP_ATAPI_CMD, task->ata_task.atapi_packet, 16); /* generate open address frame hdr (first 12 bytes) */ /* initiator, STP, ftype 1h */ buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1; buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf; *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1); memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); /* fill in PRD (scatter/gather) table, if any */ MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); if (task->data_dir == DMA_FROM_DEVICE) MVS_CHIP_DISP->dma_fix(mvi, sas_port->phy_mask, TRASH_BUCKET_SIZE, tei->n_elem, buf_prd); return 0; } static int mvs_task_prep_ssp(struct mvs_info *mvi, struct 
mvs_task_exec_info *tei, int is_tmf, struct mvs_tmf_task *tmf) { struct sas_task *task = tei->task; struct mvs_cmd_hdr *hdr = tei->hdr; struct mvs_port *port = tei->port; struct domain_device *dev = task->dev; struct mvs_device *mvi_dev = dev->lldd_dev; struct asd_sas_port *sas_port = dev->port; struct mvs_slot_info *slot; void *buf_prd; struct ssp_frame_hdr *ssp_hdr; void *buf_tmp; u8 *buf_cmd, *buf_oaf, fburst = 0; dma_addr_t buf_tmp_dma; u32 flags; u32 resp_len, req_len, i, tag = tei->tag; const u32 max_resp_len = SB_RFB_MAX; u32 phy_mask; slot = &mvi->slot_info[tag]; phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap : sas_port->phy_mask) & TXQ_PHY_MASK; slot->tx = mvi->tx_prod; mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | (TXQ_CMD_SSP << TXQ_CMD_SHIFT) | (phy_mask << TXQ_PHY_SHIFT)); flags = MCH_RETRY; if (task->ssp_task.enable_first_burst) { flags |= MCH_FBURST; fburst = (1 << 7); } if (is_tmf) flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT); else flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT); hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT)); hdr->tags = cpu_to_le32(tag); hdr->data_len = cpu_to_le32(task->total_xfer_len); /* * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs */ /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */ buf_cmd = buf_tmp = slot->buf; buf_tmp_dma = slot->buf_dma; hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); buf_tmp += MVS_SSP_CMD_SZ; buf_tmp_dma += MVS_SSP_CMD_SZ; /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ buf_oaf = buf_tmp; hdr->open_frame = cpu_to_le64(buf_tmp_dma); buf_tmp += MVS_OAF_SZ; buf_tmp_dma += MVS_OAF_SZ; /* region 3: PRD table ********************************************* */ buf_prd = buf_tmp; if (tei->n_elem) hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); else hdr->prd_tbl = 0; i = MVS_CHIP_DISP->prd_size() * tei->n_elem; buf_tmp += i; buf_tmp_dma += i; /* region 4: status buffer (larger the PRD, smaller this buf) ****** 
*/ slot->response = buf_tmp; hdr->status_buf = cpu_to_le64(buf_tmp_dma); if (mvi->flags & MVF_FLAG_SOC) hdr->reserved[0] = 0; resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ - sizeof(struct mvs_err_info) - i; resp_len = min(resp_len, max_resp_len); req_len = sizeof(struct ssp_frame_hdr) + 28; /* request, response lengths */ hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); /* generate open address frame hdr (first 12 bytes) */ /* initiator, SSP, ftype 1h */ buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1; buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf; *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1); memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); /* fill in SSP frame header (Command Table.SSP frame header) */ ssp_hdr = (struct ssp_frame_hdr *)buf_cmd; if (is_tmf) ssp_hdr->frame_type = SSP_TASK; else ssp_hdr->frame_type = SSP_COMMAND; memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); memcpy(ssp_hdr->hashed_src_addr, dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); ssp_hdr->tag = cpu_to_be16(tag); /* fill in IU for TASK and Command Frame */ buf_cmd += sizeof(*ssp_hdr); memcpy(buf_cmd, &task->ssp_task.LUN, 8); if (ssp_hdr->frame_type != SSP_TASK) { buf_cmd[9] = fburst | task->ssp_task.task_attr | (task->ssp_task.task_prio << 3); memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16); } else{ buf_cmd[10] = tmf->tmf; switch (tmf->tmf) { case TMF_ABORT_TASK: case TMF_QUERY_TASK: buf_cmd[12] = (tmf->tag_of_task_to_be_managed >> 8) & 0xff; buf_cmd[13] = tmf->tag_of_task_to_be_managed & 0xff; break; default: break; } } /* fill in PRD (scatter/gather) table, if any */ MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); return 0; } #define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE))) static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf, struct mvs_tmf_task *tmf, int *pass) { struct domain_device *dev = task->dev; struct mvs_device *mvi_dev = 
dev->lldd_dev; struct mvs_task_exec_info tei; struct mvs_slot_info *slot; u32 tag = 0xdeadbeef, n_elem = 0; int rc = 0; if (!dev->port) { struct task_status_struct *tsm = &task->task_status; tsm->resp = SAS_TASK_UNDELIVERED; tsm->stat = SAS_PHY_DOWN; /* * libsas will use dev->port, should * not call task_done for sata */ if (dev->dev_type != SATA_DEV) task->task_done(task); return rc; } if (DEV_IS_GONE(mvi_dev)) { if (mvi_dev) mv_dprintk("device %d not ready.\n", mvi_dev->device_id); else mv_dprintk("device %016llx not ready.\n", SAS_ADDR(dev->sas_addr)); rc = SAS_PHY_DOWN; return rc; } tei.port = dev->port->lldd_port; if (tei.port && !tei.port->port_attached && !tmf) { if (sas_protocol_ata(task->task_proto)) { struct task_status_struct *ts = &task->task_status; mv_dprintk("SATA/STP port %d does not attach" "device.\n", dev->port->id); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_PHY_DOWN; task->task_done(task); } else { struct task_status_struct *ts = &task->task_status; mv_dprintk("SAS port %d does not attach" "device.\n", dev->port->id); ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_PHY_DOWN; task->task_done(task); } return rc; } if (!sas_protocol_ata(task->task_proto)) { if (task->num_scatter) { n_elem = dma_map_sg(mvi->dev, task->scatter, task->num_scatter, task->data_dir); if (!n_elem) { rc = -ENOMEM; goto prep_out; } } } else { n_elem = task->num_scatter; } rc = mvs_tag_alloc(mvi, &tag); if (rc) goto err_out; slot = &mvi->slot_info[tag]; task->lldd_task = NULL; slot->n_elem = n_elem; slot->slot_tag = tag; slot->buf = pci_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma); if (!slot->buf) goto err_out_tag; memset(slot->buf, 0, MVS_SLOT_BUF_SZ); tei.task = task; tei.hdr = &mvi->slot[tag]; tei.tag = tag; tei.n_elem = n_elem; switch (task->task_proto) { case SAS_PROTOCOL_SMP: rc = mvs_task_prep_smp(mvi, &tei); break; case SAS_PROTOCOL_SSP: rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf); break; case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: case 
SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: rc = mvs_task_prep_ata(mvi, &tei); break; default: dev_printk(KERN_ERR, mvi->dev, "unknown sas_task proto: 0x%x\n", task->task_proto); rc = -EINVAL; break; } if (rc) { mv_dprintk("rc is %x\n", rc); goto err_out_slot_buf; } slot->task = task; slot->port = tei.port; task->lldd_task = slot; list_add_tail(&slot->entry, &tei.port->list); spin_lock(&task->task_state_lock); task->task_state_flags |= SAS_TASK_AT_INITIATOR; spin_unlock(&task->task_state_lock); mvi_dev->running_req++; ++(*pass); mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); return rc; err_out_slot_buf: pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma); err_out_tag: mvs_tag_free(mvi, tag); err_out: dev_printk(KERN_ERR, mvi->dev, "mvsas prep failed[%d]!\n", rc); if (!sas_protocol_ata(task->task_proto)) if (n_elem) dma_unmap_sg(mvi->dev, task->scatter, n_elem, task->data_dir); prep_out: return rc; } static struct mvs_task_list *mvs_task_alloc_list(int *num, gfp_t gfp_flags) { struct mvs_task_list *first = NULL; for (; *num > 0; --*num) { struct mvs_task_list *mvs_list = kmem_cache_zalloc(mvs_task_list_cache, gfp_flags); if (!mvs_list) break; INIT_LIST_HEAD(&mvs_list->list); if (!first) first = mvs_list; else list_add_tail(&mvs_list->list, &first->list); } return first; } static inline void mvs_task_free_list(struct mvs_task_list *mvs_list) { LIST_HEAD(list); struct list_head *pos, *a; struct mvs_task_list *mlist = NULL; __list_add(&list, mvs_list->list.prev, &mvs_list->list); list_for_each_safe(pos, a, &list) { list_del_init(pos); mlist = list_entry(pos, struct mvs_task_list, list); kmem_cache_free(mvs_task_list_cache, mlist); } } static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, struct completion *completion, int is_tmf, struct mvs_tmf_task *tmf) { struct domain_device *dev = task->dev; struct mvs_info *mvi = NULL; u32 rc = 0; u32 pass = 0; unsigned long flags = 0; mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info; 
spin_lock_irqsave(&mvi->lock, flags); rc = mvs_task_prep(task, mvi, is_tmf, tmf, &pass); if (rc) dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc); if (likely(pass)) MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); spin_unlock_irqrestore(&mvi->lock, flags); return rc; } static int mvs_collector_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, struct completion *completion, int is_tmf, struct mvs_tmf_task *tmf) { struct domain_device *dev = task->dev; struct mvs_prv_info *mpi = dev->port->ha->lldd_ha; struct mvs_info *mvi = NULL; struct sas_task *t = task; struct mvs_task_list *mvs_list = NULL, *a; LIST_HEAD(q); int pass[2] = {0}; u32 rc = 0; u32 n = num; unsigned long flags = 0; mvs_list = mvs_task_alloc_list(&n, gfp_flags); if (n) { printk(KERN_ERR "%s: mvs alloc list failed.\n", __func__); rc = -ENOMEM; goto free_list; } __list_add(&q, mvs_list->list.prev, &mvs_list->list); list_for_each_entry(a, &q, list) { a->task = t; t = list_entry(t->list.next, struct sas_task, list); } list_for_each_entry(a, &q , list) { t = a->task; mvi = ((struct mvs_device *)t->dev->lldd_dev)->mvi_info; spin_lock_irqsave(&mvi->lock, flags); rc = mvs_task_prep(t, mvi, is_tmf, tmf, &pass[mvi->id]); if (rc) dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc); spin_unlock_irqrestore(&mvi->lock, flags); } if (likely(pass[0])) MVS_CHIP_DISP->start_delivery(mpi->mvi[0], (mpi->mvi[0]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); if (likely(pass[1])) MVS_CHIP_DISP->start_delivery(mpi->mvi[1], (mpi->mvi[1]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); list_del_init(&q); free_list: if (mvs_list) mvs_task_free_list(mvs_list); return rc; } int mvs_queue_command(struct sas_task *task, const int num, gfp_t gfp_flags) { struct mvs_device *mvi_dev = task->dev->lldd_dev; struct sas_ha_struct *sas = mvi_dev->mvi_info->sas; if (sas->lldd_max_execute_num < 2) return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL); else return 
mvs_collector_task_exec(task, num, gfp_flags, NULL, 0, NULL); } static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) { u32 slot_idx = rx_desc & RXQ_SLOT_MASK; mvs_tag_clear(mvi, slot_idx); } static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, struct mvs_slot_info *slot, u32 slot_idx) { if (!slot->task) return; if (!sas_protocol_ata(task->task_proto)) if (slot->n_elem) dma_unmap_sg(mvi->dev, task->scatter, slot->n_elem, task->data_dir); switch (task->task_proto) { case SAS_PROTOCOL_SMP: dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1, PCI_DMA_FROMDEVICE); dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1, PCI_DMA_TODEVICE); break; case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: case SAS_PROTOCOL_SSP: default: /* do nothing */ break; } if (slot->buf) { pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma); slot->buf = NULL; } list_del_init(&slot->entry); task->lldd_task = NULL; slot->task = NULL; slot->port = NULL; slot->slot_tag = 0xFFFFFFFF; mvs_slot_free(mvi, slot_idx); } static void mvs_update_wideport(struct mvs_info *mvi, int phy_no) { struct mvs_phy *phy = &mvi->phy[phy_no]; struct mvs_port *port = phy->port; int j, no; for_each_phy(port->wide_port_phymap, j, no) { if (j & 1) { MVS_CHIP_DISP->write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT); MVS_CHIP_DISP->write_port_cfg_data(mvi, no, port->wide_port_phymap); } else { MVS_CHIP_DISP->write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT); MVS_CHIP_DISP->write_port_cfg_data(mvi, no, 0); } } } static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i) { u32 tmp; struct mvs_phy *phy = &mvi->phy[i]; struct mvs_port *port = phy->port; tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i); if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) { if (!port) phy->phy_attached = 1; return tmp; } if (port) { if (phy->phy_type & PORT_TYPE_SAS) { port->wide_port_phymap &= ~(1U << i); if (!port->wide_port_phymap) port->port_attached = 0; mvs_update_wideport(mvi, i); } else if (phy->phy_type & PORT_TYPE_SATA) 
port->port_attached = 0; phy->port = NULL; phy->phy_attached = 0; phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); } return 0; } static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf) { u32 *s = (u32 *) buf; if (!s) return NULL; MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); s[3] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i)); MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); s[2] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i)); MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); s[1] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i)); MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); s[0] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i)); if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01)) s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10); return s; } static u32 mvs_is_sig_fis_received(u32 irq_status) { return irq_status & PHYEV_SIG_FIS; } static void mvs_sig_remove_timer(struct mvs_phy *phy) { if (phy->timer.function) del_timer(&phy->timer); phy->timer.function = NULL; } void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st) { struct mvs_phy *phy = &mvi->phy[i]; struct sas_identify_frame *id; id = (struct sas_identify_frame *)phy->frame_rcvd; if (get_st) { phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i); phy->phy_status = mvs_is_phy_ready(mvi, i); } if (phy->phy_status) { int oob_done = 0; struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy; oob_done = MVS_CHIP_DISP->oob_done(mvi, i); MVS_CHIP_DISP->fix_phy_info(mvi, i, id); if (phy->phy_type & PORT_TYPE_SATA) { phy->identify.target_port_protocols = SAS_PROTOCOL_STP; if (mvs_is_sig_fis_received(phy->irq_status)) { mvs_sig_remove_timer(phy); phy->phy_attached = 1; phy->att_dev_sas_addr = i + mvi->id * mvi->chip->n_phy; if (oob_done) sas_phy->oob_mode = SATA_OOB_MODE; phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); mvs_get_d2h_reg(mvi, i, id); } else { u32 tmp; dev_printk(KERN_DEBUG, 
mvi->dev, "Phy%d : No sig fis\n", i); tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i); MVS_CHIP_DISP->write_port_irq_mask(mvi, i, tmp | PHYEV_SIG_FIS); phy->phy_attached = 0; phy->phy_type &= ~PORT_TYPE_SATA; goto out_done; } } else if (phy->phy_type & PORT_TYPE_SAS || phy->att_dev_info & PORT_SSP_INIT_MASK) { phy->phy_attached = 1; phy->identify.device_type = phy->att_dev_info & PORT_DEV_TYPE_MASK; if (phy->identify.device_type == SAS_END_DEV) phy->identify.target_port_protocols = SAS_PROTOCOL_SSP; else if (phy->identify.device_type != NO_DEVICE) phy->identify.target_port_protocols = SAS_PROTOCOL_SMP; if (oob_done) sas_phy->oob_mode = SAS_OOB_MODE; phy->frame_rcvd_size = sizeof(struct sas_identify_frame); } memcpy(sas_phy->attached_sas_addr, &phy->att_dev_sas_addr, SAS_ADDR_SIZE); if (MVS_CHIP_DISP->phy_work_around) MVS_CHIP_DISP->phy_work_around(mvi, i); } mv_dprintk("phy %d attach dev info is %x\n", i + mvi->id * mvi->chip->n_phy, phy->att_dev_info); mv_dprintk("phy %d attach sas addr is %llx\n", i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr); out_done: if (get_st) MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status); } static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock) { struct sas_ha_struct *sas_ha = sas_phy->ha; struct mvs_info *mvi = NULL; int i = 0, hi; struct mvs_phy *phy = sas_phy->lldd_phy; struct asd_sas_port *sas_port = sas_phy->port; struct mvs_port *port; unsigned long flags = 0; if (!sas_port) return; while (sas_ha->sas_phy[i]) { if (sas_ha->sas_phy[i] == sas_phy) break; i++; } hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy; mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi]; if (i >= mvi->chip->n_phy) port = &mvi->port[i - mvi->chip->n_phy]; else port = &mvi->port[i]; if (lock) spin_lock_irqsave(&mvi->lock, flags); port->port_attached = 1; phy->port = port; sas_port->lldd_port = port; if (phy->phy_type & PORT_TYPE_SAS) { port->wide_port_phymap = sas_port->phy_mask; mv_printk("set wide port phy map 
%x\n", sas_port->phy_mask); mvs_update_wideport(mvi, sas_phy->id); /* direct attached SAS device */ if (phy->att_dev_info & PORT_SSP_TRGT_MASK) { MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT); MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x04); } } if (lock) spin_unlock_irqrestore(&mvi->lock, flags); } static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock) { struct domain_device *dev; struct mvs_phy *phy = sas_phy->lldd_phy; struct mvs_info *mvi = phy->mvi; struct asd_sas_port *port = sas_phy->port; int phy_no = 0; while (phy != &mvi->phy[phy_no]) { phy_no++; if (phy_no >= MVS_MAX_PHYS) return; } list_for_each_entry(dev, &port->dev_list, dev_list_node) mvs_do_release_task(phy->mvi, phy_no, dev); } void mvs_port_formed(struct asd_sas_phy *sas_phy) { mvs_port_notify_formed(sas_phy, 1); } void mvs_port_deformed(struct asd_sas_phy *sas_phy) { mvs_port_notify_deformed(sas_phy, 1); } struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi) { u32 dev; for (dev = 0; dev < MVS_MAX_DEVICES; dev++) { if (mvi->devices[dev].dev_type == NO_DEVICE) { mvi->devices[dev].device_id = dev; return &mvi->devices[dev]; } } if (dev == MVS_MAX_DEVICES) mv_printk("max support %d devices, ignore ..\n", MVS_MAX_DEVICES); return NULL; } void mvs_free_dev(struct mvs_device *mvi_dev) { u32 id = mvi_dev->device_id; memset(mvi_dev, 0, sizeof(*mvi_dev)); mvi_dev->device_id = id; mvi_dev->dev_type = NO_DEVICE; mvi_dev->dev_status = MVS_DEV_NORMAL; mvi_dev->taskfileset = MVS_ID_NOT_MAPPED; } int mvs_dev_found_notify(struct domain_device *dev, int lock) { unsigned long flags = 0; int res = 0; struct mvs_info *mvi = NULL; struct domain_device *parent_dev = dev->parent; struct mvs_device *mvi_device; mvi = mvs_find_dev_mvi(dev); if (lock) spin_lock_irqsave(&mvi->lock, flags); mvi_device = mvs_alloc_dev(mvi); if (!mvi_device) { res = -1; goto found_out; } dev->lldd_dev = mvi_device; mvi_device->dev_status = MVS_DEV_NORMAL; mvi_device->dev_type = dev->dev_type; 
mvi_device->mvi_info = mvi; mvi_device->sas_device = dev; if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) { int phy_id; u8 phy_num = parent_dev->ex_dev.num_phys; struct ex_phy *phy; for (phy_id = 0; phy_id < phy_num; phy_id++) { phy = &parent_dev->ex_dev.ex_phy[phy_id]; if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(dev->sas_addr)) { mvi_device->attached_phy = phy_id; break; } } if (phy_id == phy_num) { mv_printk("Error: no attached dev:%016llx" "at ex:%016llx.\n", SAS_ADDR(dev->sas_addr), SAS_ADDR(parent_dev->sas_addr)); res = -1; } } found_out: if (lock) spin_unlock_irqrestore(&mvi->lock, flags); return res; } int mvs_dev_found(struct domain_device *dev) { return mvs_dev_found_notify(dev, 1); } void mvs_dev_gone_notify(struct domain_device *dev) { unsigned long flags = 0; struct mvs_device *mvi_dev = dev->lldd_dev; struct mvs_info *mvi = mvi_dev->mvi_info; spin_lock_irqsave(&mvi->lock, flags); if (mvi_dev) { mv_dprintk("found dev[%d:%x] is gone.\n", mvi_dev->device_id, mvi_dev->dev_type); mvs_release_task(mvi, dev); mvs_free_reg_set(mvi, mvi_dev); mvs_free_dev(mvi_dev); } else { mv_dprintk("found dev has gone.\n"); } dev->lldd_dev = NULL; mvi_dev->sas_device = NULL; spin_unlock_irqrestore(&mvi->lock, flags); } void mvs_dev_gone(struct domain_device *dev) { mvs_dev_gone_notify(dev); } static void mvs_task_done(struct sas_task *task) { if (!del_timer(&task->timer)) return; complete(&task->completion); } static void mvs_tmf_timedout(unsigned long data) { struct sas_task *task = (struct sas_task *)data; task->task_state_flags |= SAS_TASK_STATE_ABORTED; complete(&task->completion); } #define MVS_TASK_TIMEOUT 20 static int mvs_exec_internal_tmf_task(struct domain_device *dev, void *parameter, u32 para_len, struct mvs_tmf_task *tmf) { int res, retry; struct sas_task *task = NULL; for (retry = 0; retry < 3; retry++) { task = sas_alloc_task(GFP_KERNEL); if (!task) return -ENOMEM; task->dev = dev; task->task_proto = dev->tproto; memcpy(&task->ssp_task, 
parameter, para_len); task->task_done = mvs_task_done; task->timer.data = (unsigned long) task; task->timer.function = mvs_tmf_timedout; task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ; add_timer(&task->timer); res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf); if (res) { del_timer(&task->timer); mv_printk("executing internel task failed:%d\n", res); goto ex_err; } wait_for_completion(&task->completion); res = TMF_RESP_FUNC_FAILED; /* Even TMF timed out, return direct. */ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { mv_printk("TMF task[%x] timeout.\n", tmf->tmf); goto ex_err; } } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAM_STAT_GOOD) { res = TMF_RESP_FUNC_COMPLETE; break; } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAS_DATA_UNDERRUN) { /* no error, but return the number of bytes of * underrun */ res = task->task_status.residual; break; } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAS_DATA_OVERRUN) { mv_dprintk("blocked task error.\n"); res = -EMSGSIZE; break; } else { mv_dprintk(" task to dev %016llx response: 0x%x " "status 0x%x\n", SAS_ADDR(dev->sas_addr), task->task_status.resp, task->task_status.stat); sas_free_task(task); task = NULL; } } ex_err: BUG_ON(retry == 3 && task != NULL); sas_free_task(task); return res; } static int mvs_debug_issue_ssp_tmf(struct domain_device *dev, u8 *lun, struct mvs_tmf_task *tmf) { struct sas_ssp_task ssp_task; if (!(dev->tproto & SAS_PROTOCOL_SSP)) return TMF_RESP_FUNC_ESUPP; memcpy(ssp_task.LUN, lun, 8); return mvs_exec_internal_tmf_task(dev, &ssp_task, sizeof(ssp_task), tmf); } /* Standard mandates link reset for ATA (type 0) and hard reset for SSP (type 1) , only for RECOVERY */ static int mvs_debug_I_T_nexus_reset(struct domain_device *dev) { int rc; struct sas_phy *phy = sas_get_local_phy(dev); int reset_type = (dev->dev_type == SATA_DEV || 
(dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1; rc = sas_phy_reset(phy, reset_type); sas_put_local_phy(phy); msleep(2000); return rc; } /* mandatory SAM-3 */ int mvs_lu_reset(struct domain_device *dev, u8 *lun) { unsigned long flags; int rc = TMF_RESP_FUNC_FAILED; struct mvs_tmf_task tmf_task; struct mvs_device * mvi_dev = dev->lldd_dev; struct mvs_info *mvi = mvi_dev->mvi_info; tmf_task.tmf = TMF_LU_RESET; mvi_dev->dev_status = MVS_DEV_EH; rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); if (rc == TMF_RESP_FUNC_COMPLETE) { spin_lock_irqsave(&mvi->lock, flags); mvs_release_task(mvi, dev); spin_unlock_irqrestore(&mvi->lock, flags); } /* If failed, fall-through I_T_Nexus reset */ mv_printk("%s for device[%x]:rc= %d\n", __func__, mvi_dev->device_id, rc); return rc; } int mvs_I_T_nexus_reset(struct domain_device *dev) { unsigned long flags; int rc = TMF_RESP_FUNC_FAILED; struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev; struct mvs_info *mvi = mvi_dev->mvi_info; if (mvi_dev->dev_status != MVS_DEV_EH) return TMF_RESP_FUNC_COMPLETE; else mvi_dev->dev_status = MVS_DEV_NORMAL; rc = mvs_debug_I_T_nexus_reset(dev); mv_printk("%s for device[%x]:rc= %d\n", __func__, mvi_dev->device_id, rc); spin_lock_irqsave(&mvi->lock, flags); mvs_release_task(mvi, dev); spin_unlock_irqrestore(&mvi->lock, flags); return rc; } /* optional SAM-3 */ int mvs_query_task(struct sas_task *task) { u32 tag; struct scsi_lun lun; struct mvs_tmf_task tmf_task; int rc = TMF_RESP_FUNC_FAILED; if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task; struct domain_device *dev = task->dev; struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; struct mvs_info *mvi = mvi_dev->mvi_info; int_to_scsilun(cmnd->device->lun, &lun); rc = mvs_find_tag(mvi, task, &tag); if (rc == 0) { rc = TMF_RESP_FUNC_FAILED; return rc; } tmf_task.tmf = TMF_QUERY_TASK; tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag); rc = 
mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task); switch (rc) { /* The task is still in Lun, release it then */ case TMF_RESP_FUNC_SUCC: /* The task is not in Lun or failed, reset the phy */ case TMF_RESP_FUNC_FAILED: case TMF_RESP_FUNC_COMPLETE: break; } } mv_printk("%s:rc= %d\n", __func__, rc); return rc; } /* mandatory SAM-3, still need free task/slot info */ int mvs_abort_task(struct sas_task *task) { struct scsi_lun lun; struct mvs_tmf_task tmf_task; struct domain_device *dev = task->dev; struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; struct mvs_info *mvi; int rc = TMF_RESP_FUNC_FAILED; unsigned long flags; u32 tag; if (!mvi_dev) { mv_printk("Device has removed\n"); return TMF_RESP_FUNC_FAILED; } mvi = mvi_dev->mvi_info; spin_lock_irqsave(&task->task_state_lock, flags); if (task->task_state_flags & SAS_TASK_STATE_DONE) { spin_unlock_irqrestore(&task->task_state_lock, flags); rc = TMF_RESP_FUNC_COMPLETE; goto out; } spin_unlock_irqrestore(&task->task_state_lock, flags); mvi_dev->dev_status = MVS_DEV_EH; if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task; int_to_scsilun(cmnd->device->lun, &lun); rc = mvs_find_tag(mvi, task, &tag); if (rc == 0) { mv_printk("No such tag in %s\n", __func__); rc = TMF_RESP_FUNC_FAILED; return rc; } tmf_task.tmf = TMF_ABORT_TASK; tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag); rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task); /* if successful, clear the task and callback forwards.*/ if (rc == TMF_RESP_FUNC_COMPLETE) { u32 slot_no; struct mvs_slot_info *slot; if (task->lldd_task) { slot = task->lldd_task; slot_no = (u32) (slot - mvi->slot_info); spin_lock_irqsave(&mvi->lock, flags); mvs_slot_complete(mvi, slot_no, 1); spin_unlock_irqrestore(&mvi->lock, flags); } } } else if (task->task_proto & SAS_PROTOCOL_SATA || task->task_proto & SAS_PROTOCOL_STP) { if (SATA_DEV == dev->dev_type) { struct mvs_slot_info *slot = 
task->lldd_task; u32 slot_idx = (u32)(slot - mvi->slot_info); mv_dprintk("mvs_abort_task() mvi=%p task=%p " "slot=%p slot_idx=x%x\n", mvi, task, slot, slot_idx); mvs_tmf_timedout((unsigned long)task); mvs_slot_task_free(mvi, task, slot, slot_idx); rc = TMF_RESP_FUNC_COMPLETE; goto out; } } out: if (rc != TMF_RESP_FUNC_COMPLETE) mv_printk("%s:rc= %d\n", __func__, rc); return rc; } int mvs_abort_task_set(struct domain_device *dev, u8 *lun) { int rc = TMF_RESP_FUNC_FAILED; struct mvs_tmf_task tmf_task; tmf_task.tmf = TMF_ABORT_TASK_SET; rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); return rc; } int mvs_clear_aca(struct domain_device *dev, u8 *lun) { int rc = TMF_RESP_FUNC_FAILED; struct mvs_tmf_task tmf_task; tmf_task.tmf = TMF_CLEAR_ACA; rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); return rc; } int mvs_clear_task_set(struct domain_device *dev, u8 *lun) { int rc = TMF_RESP_FUNC_FAILED; struct mvs_tmf_task tmf_task; tmf_task.tmf = TMF_CLEAR_TASK_SET; rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); return rc; } static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task, u32 slot_idx, int err) { struct mvs_device *mvi_dev = task->dev->lldd_dev; struct task_status_struct *tstat = &task->task_status; struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf; int stat = SAM_STAT_GOOD; resp->frame_len = sizeof(struct dev_to_host_fis); memcpy(&resp->ending_fis[0], SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset), sizeof(struct dev_to_host_fis)); tstat->buf_valid_size = sizeof(*resp); if (unlikely(err)) { if (unlikely(err & CMD_ISS_STPD)) stat = SAS_OPEN_REJECT; else stat = SAS_PROTO_RESPONSE; } return stat; } void mvs_set_sense(u8 *buffer, int len, int d_sense, int key, int asc, int ascq) { memset(buffer, 0, len); if (d_sense) { /* Descriptor format */ if (len < 4) { mv_printk("Length %d of sense buffer too small to " "fit sense %x:%x:%x", len, key, asc, ascq); } buffer[0] = 0x72; /* Response Code */ if (len > 1) buffer[1] = key; /* Sense Key */ if 
(len > 2) buffer[2] = asc; /* ASC */ if (len > 3) buffer[3] = ascq; /* ASCQ */ } else { if (len < 14) { mv_printk("Length %d of sense buffer too small to " "fit sense %x:%x:%x", len, key, asc, ascq); } buffer[0] = 0x70; /* Response Code */ if (len > 2) buffer[2] = key; /* Sense Key */ if (len > 7) buffer[7] = 0x0a; /* Additional Sense Length */ if (len > 12) buffer[12] = asc; /* ASC */ if (len > 13) buffer[13] = ascq; /* ASCQ */ } return; } void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu, u8 key, u8 asc, u8 asc_q) { iu->datapres = 2; iu->response_data_len = 0; iu->sense_data_len = 17; iu->status = 02; mvs_set_sense(iu->sense_data, 17, 0, key, asc, asc_q); } static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, u32 slot_idx) { struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; int stat; u32 err_dw0 = le32_to_cpu(*(u32 *)slot->response); u32 err_dw1 = le32_to_cpu(*((u32 *)slot->response + 1)); u32 tfs = 0; enum mvs_port_type type = PORT_TYPE_SAS; if (err_dw0 & CMD_ISS_STPD) MVS_CHIP_DISP->issue_stop(mvi, type, tfs); MVS_CHIP_DISP->command_active(mvi, slot_idx); stat = SAM_STAT_CHECK_CONDITION; switch (task->task_proto) { case SAS_PROTOCOL_SSP: { stat = SAS_ABORTED_TASK; if ((err_dw0 & NO_DEST) || err_dw1 & bit(31)) { struct ssp_response_iu *iu = slot->response + sizeof(struct mvs_err_info); mvs_fill_ssp_resp_iu(iu, NOT_READY, 0x04, 01); sas_ssp_task_response(mvi->dev, task, iu); stat = SAM_STAT_CHECK_CONDITION; } if (err_dw1 & bit(31)) mv_printk("reuse same slot, retry command.\n"); break; } case SAS_PROTOCOL_SMP: stat = SAM_STAT_CHECK_CONDITION; break; case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: { task->ata_task.use_ncq = 0; stat = SAS_PROTO_RESPONSE; mvs_sata_done(mvi, task, slot_idx, err_dw0); } break; default: break; } return stat; } int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) { u32 slot_idx = rx_desc & RXQ_SLOT_MASK; struct mvs_slot_info *slot = 
&mvi->slot_info[slot_idx]; struct sas_task *task = slot->task; struct mvs_device *mvi_dev = NULL; struct task_status_struct *tstat; struct domain_device *dev; u32 aborted; void *to; enum exec_status sts; if (unlikely(!task || !task->lldd_task || !task->dev)) return -1; tstat = &task->task_status; dev = task->dev; mvi_dev = dev->lldd_dev; spin_lock(&task->task_state_lock); task->task_state_flags &= ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); task->task_state_flags |= SAS_TASK_STATE_DONE; /* race condition*/ aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; spin_unlock(&task->task_state_lock); memset(tstat, 0, sizeof(*tstat)); tstat->resp = SAS_TASK_COMPLETE; if (unlikely(aborted)) { tstat->stat = SAS_ABORTED_TASK; if (mvi_dev && mvi_dev->running_req) mvi_dev->running_req--; if (sas_protocol_ata(task->task_proto)) mvs_free_reg_set(mvi, mvi_dev); mvs_slot_task_free(mvi, task, slot, slot_idx); return -1; } /* when no device attaching, go ahead and complete by error handling*/ if (unlikely(!mvi_dev || flags)) { if (!mvi_dev) mv_dprintk("port has not device.\n"); tstat->stat = SAS_PHY_DOWN; goto out; } /* error info record present */ if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { mv_dprintk("port %d slot %d rx_desc %X has error info" "%016llX.\n", slot->port->sas_port.id, slot_idx, rx_desc, (u64)(*(u64 *)slot->response)); tstat->stat = mvs_slot_err(mvi, task, slot_idx); tstat->resp = SAS_TASK_COMPLETE; goto out; } switch (task->task_proto) { case SAS_PROTOCOL_SSP: /* hw says status == 0, datapres == 0 */ if (rx_desc & RXQ_GOOD) { tstat->stat = SAM_STAT_GOOD; tstat->resp = SAS_TASK_COMPLETE; } /* response frame present */ else if (rx_desc & RXQ_RSP) { struct ssp_response_iu *iu = slot->response + sizeof(struct mvs_err_info); sas_ssp_task_response(mvi->dev, task, iu); } else tstat->stat = SAM_STAT_CHECK_CONDITION; break; case SAS_PROTOCOL_SMP: { struct scatterlist *sg_resp = &task->smp_task.smp_resp; tstat->stat = SAM_STAT_GOOD; to = 
kmap_atomic(sg_page(sg_resp)); memcpy(to + sg_resp->offset, slot->response + sizeof(struct mvs_err_info), sg_dma_len(sg_resp)); kunmap_atomic(to); break; } case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: { tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0); break; } default: tstat->stat = SAM_STAT_CHECK_CONDITION; break; } if (!slot->port->port_attached) { mv_dprintk("port %d has removed.\n", slot->port->sas_port.id); tstat->stat = SAS_PHY_DOWN; } out: if (mvi_dev && mvi_dev->running_req) { mvi_dev->running_req--; if (sas_protocol_ata(task->task_proto) && !mvi_dev->running_req) mvs_free_reg_set(mvi, mvi_dev); } mvs_slot_task_free(mvi, task, slot, slot_idx); sts = tstat->stat; spin_unlock(&mvi->lock); if (task->task_done) task->task_done(task); spin_lock(&mvi->lock); return sts; } void mvs_do_release_task(struct mvs_info *mvi, int phy_no, struct domain_device *dev) { u32 slot_idx; struct mvs_phy *phy; struct mvs_port *port; struct mvs_slot_info *slot, *slot2; phy = &mvi->phy[phy_no]; port = phy->port; if (!port) return; /* clean cmpl queue in case request is already finished */ mvs_int_rx(mvi, false); list_for_each_entry_safe(slot, slot2, &port->list, entry) { struct sas_task *task; slot_idx = (u32) (slot - mvi->slot_info); task = slot->task; if (dev && task->dev != dev) continue; mv_printk("Release slot [%x] tag[%x], task [%p]:\n", slot_idx, slot->slot_tag, task); MVS_CHIP_DISP->command_active(mvi, slot_idx); mvs_slot_complete(mvi, slot_idx, 1); } } void mvs_release_task(struct mvs_info *mvi, struct domain_device *dev) { int i, phyno[WIDE_PORT_MAX_PHY], num; num = mvs_find_dev_phyno(dev, phyno); for (i = 0; i < num; i++) mvs_do_release_task(mvi, phyno[i], dev); } static void mvs_phy_disconnected(struct mvs_phy *phy) { phy->phy_attached = 0; phy->att_dev_info = 0; phy->att_dev_sas_addr = 0; } static void mvs_work_queue(struct work_struct *work) { struct delayed_work *dw = container_of(work, struct delayed_work, work); 
struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q); struct mvs_info *mvi = mwq->mvi; unsigned long flags; u32 phy_no = (unsigned long) mwq->data; struct sas_ha_struct *sas_ha = mvi->sas; struct mvs_phy *phy = &mvi->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; spin_lock_irqsave(&mvi->lock, flags); if (mwq->handler & PHY_PLUG_EVENT) { if (phy->phy_event & PHY_PLUG_OUT) { u32 tmp; struct sas_identify_frame *id; id = (struct sas_identify_frame *)phy->frame_rcvd; tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no); phy->phy_event &= ~PHY_PLUG_OUT; if (!(tmp & PHY_READY_MASK)) { sas_phy_disconnected(sas_phy); mvs_phy_disconnected(phy); sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL); mv_dprintk("phy%d Removed Device\n", phy_no); } else { MVS_CHIP_DISP->detect_porttype(mvi, phy_no); mvs_update_phyinfo(mvi, phy_no, 1); mvs_bytes_dmaed(mvi, phy_no); mvs_port_notify_formed(sas_phy, 0); mv_dprintk("phy%d Attached Device\n", phy_no); } } } else if (mwq->handler & EXP_BRCT_CHG) { phy->phy_event &= ~EXP_BRCT_CHG; sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); mv_dprintk("phy%d Got Broadcast Change\n", phy_no); } list_del(&mwq->entry); spin_unlock_irqrestore(&mvi->lock, flags); kfree(mwq); } static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler) { struct mvs_wq *mwq; int ret = 0; mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC); if (mwq) { mwq->mvi = mvi; mwq->data = data; mwq->handler = handler; MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq); list_add_tail(&mwq->entry, &mvi->wq_list); schedule_delayed_work(&mwq->work_q, HZ * 2); } else ret = -ENOMEM; return ret; } static void mvs_sig_time_out(unsigned long tphy) { struct mvs_phy *phy = (struct mvs_phy *)tphy; struct mvs_info *mvi = phy->mvi; u8 phy_no; for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) { if (&mvi->phy[phy_no] == phy) { mv_dprintk("Get signature time out, reset phy %d\n", phy_no+mvi->id*mvi->chip->n_phy); MVS_CHIP_DISP->phy_reset(mvi, phy_no, 
MVS_HARD_RESET); } } } void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) { u32 tmp; struct mvs_phy *phy = &mvi->phy[phy_no]; phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no); MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status); mv_dprintk("phy %d ctrl sts=0x%08X.\n", phy_no+mvi->id*mvi->chip->n_phy, MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no)); mv_dprintk("phy %d irq sts = 0x%08X\n", phy_no+mvi->id*mvi->chip->n_phy, phy->irq_status); /* * events is port event now , * we need check the interrupt status which belongs to per port. */ if (phy->irq_status & PHYEV_DCDR_ERR) { mv_dprintk("phy %d STP decoding error.\n", phy_no + mvi->id*mvi->chip->n_phy); } if (phy->irq_status & PHYEV_POOF) { mdelay(500); if (!(phy->phy_event & PHY_PLUG_OUT)) { int dev_sata = phy->phy_type & PORT_TYPE_SATA; int ready; mvs_do_release_task(mvi, phy_no, NULL); phy->phy_event |= PHY_PLUG_OUT; MVS_CHIP_DISP->clear_srs_irq(mvi, 0, 1); mvs_handle_event(mvi, (void *)(unsigned long)phy_no, PHY_PLUG_EVENT); ready = mvs_is_phy_ready(mvi, phy_no); if (ready || dev_sata) { if (MVS_CHIP_DISP->stp_reset) MVS_CHIP_DISP->stp_reset(mvi, phy_no); else MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_SOFT_RESET); return; } } } if (phy->irq_status & PHYEV_COMWAKE) { tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no); MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no, tmp | PHYEV_SIG_FIS); if (phy->timer.function == NULL) { phy->timer.data = (unsigned long)phy; phy->timer.function = mvs_sig_time_out; phy->timer.expires = jiffies + 5*HZ; add_timer(&phy->timer); } } if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { phy->phy_status = mvs_is_phy_ready(mvi, phy_no); mv_dprintk("notify plug in on phy[%d]\n", phy_no); if (phy->phy_status) { mdelay(10); MVS_CHIP_DISP->detect_porttype(mvi, phy_no); if (phy->phy_type & PORT_TYPE_SATA) { tmp = MVS_CHIP_DISP->read_port_irq_mask( mvi, phy_no); tmp &= ~PHYEV_SIG_FIS; MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no, tmp); } 
mvs_update_phyinfo(mvi, phy_no, 0); if (phy->phy_type & PORT_TYPE_SAS) { MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_PHY_TUNE); mdelay(10); } mvs_bytes_dmaed(mvi, phy_no); /* whether driver is going to handle hot plug */ if (phy->phy_event & PHY_PLUG_OUT) { mvs_port_notify_formed(&phy->sas_phy, 0); phy->phy_event &= ~PHY_PLUG_OUT; } } else { mv_dprintk("plugin interrupt but phy%d is gone\n", phy_no + mvi->id*mvi->chip->n_phy); } } else if (phy->irq_status & PHYEV_BROAD_CH) { mv_dprintk("phy %d broadcast change.\n", phy_no + mvi->id*mvi->chip->n_phy); mvs_handle_event(mvi, (void *)(unsigned long)phy_no, EXP_BRCT_CHG); } } int mvs_int_rx(struct mvs_info *mvi, bool self_clear) { u32 rx_prod_idx, rx_desc; bool attn = false; /* the first dword in the RX ring is special: it contains * a mirror of the hardware's RX producer index, so that * we don't have to stall the CPU reading that register. * The actual RX ring is offset by one dword, due to this. */ rx_prod_idx = mvi->rx_cons; mvi->rx_cons = le32_to_cpu(mvi->rx[0]); if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */ return 0; /* The CMPL_Q may come late, read from register and try again * note: if coalescing is enabled, * it will need to read from register every time for sure */ if (unlikely(mvi->rx_cons == rx_prod_idx)) mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK; if (mvi->rx_cons == rx_prod_idx) return 0; while (mvi->rx_cons != rx_prod_idx) { /* increment our internal RX consumer pointer */ rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1); rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]); if (likely(rx_desc & RXQ_DONE)) mvs_slot_complete(mvi, rx_desc, 0); if (rx_desc & RXQ_ATTN) { attn = true; } else if (rx_desc & RXQ_ERR) { if (!(rx_desc & RXQ_DONE)) mvs_slot_complete(mvi, rx_desc, 0); } else if (rx_desc & RXQ_SLOT_RESET) { mvs_slot_free(mvi, rx_desc); } } if (attn && self_clear) MVS_CHIP_DISP->int_full(mvi); return 0; }
gpl-2.0
chasmodo/android_kernel_oneplus_msm8974
drivers/media/rc/lirc_dev.c
4596
18578
/* * LIRC base driver * * by Artur Lipowski <alipowski@interia.pl> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/ioctl.h> #include <linux/fs.h> #include <linux/poll.h> #include <linux/completion.h> #include <linux/mutex.h> #include <linux/wait.h> #include <linux/unistd.h> #include <linux/kthread.h> #include <linux/bitops.h> #include <linux/device.h> #include <linux/cdev.h> #include <media/lirc.h> #include <media/lirc_dev.h> static bool debug; #define IRCTL_DEV_NAME "BaseRemoteCtl" #define NOPLUG -1 #define LOGHEAD "lirc_dev (%s[%d]): " static dev_t lirc_base_dev; struct irctl { struct lirc_driver d; int attached; int open; struct mutex irctl_lock; struct lirc_buffer *buf; unsigned int chunk_size; struct cdev *cdev; struct task_struct *task; long jiffies_to_wait; }; static DEFINE_MUTEX(lirc_dev_lock); static struct irctl *irctls[MAX_IRCTL_DEVICES]; /* Only used for sysfs but defined to void otherwise */ static struct class *lirc_class; /* helper function * initializes the irctl structure */ static void lirc_irctl_init(struct irctl *ir) { mutex_init(&ir->irctl_lock); ir->d.minor = NOPLUG; } static void lirc_irctl_cleanup(struct irctl *ir) { dev_dbg(ir->d.dev, LOGHEAD "cleaning up\n", ir->d.name, 
ir->d.minor); device_destroy(lirc_class, MKDEV(MAJOR(lirc_base_dev), ir->d.minor)); if (ir->buf != ir->d.rbuf) { lirc_buffer_free(ir->buf); kfree(ir->buf); } ir->buf = NULL; } /* helper function * reads key codes from driver and puts them into buffer * returns 0 on success */ static int lirc_add_to_buf(struct irctl *ir) { if (ir->d.add_to_buf) { int res = -ENODATA; int got_data = 0; /* * service the device as long as it is returning * data and we have space */ get_data: res = ir->d.add_to_buf(ir->d.data, ir->buf); if (res == 0) { got_data++; goto get_data; } if (res == -ENODEV) kthread_stop(ir->task); return got_data ? 0 : res; } return 0; } /* main function of the polling thread */ static int lirc_thread(void *irctl) { struct irctl *ir = irctl; dev_dbg(ir->d.dev, LOGHEAD "poll thread started\n", ir->d.name, ir->d.minor); do { if (ir->open) { if (ir->jiffies_to_wait) { set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(ir->jiffies_to_wait); } if (kthread_should_stop()) break; if (!lirc_add_to_buf(ir)) wake_up_interruptible(&ir->buf->wait_poll); } else { set_current_state(TASK_INTERRUPTIBLE); schedule(); } } while (!kthread_should_stop()); dev_dbg(ir->d.dev, LOGHEAD "poll thread ended\n", ir->d.name, ir->d.minor); return 0; } static struct file_operations lirc_dev_fops = { .owner = THIS_MODULE, .read = lirc_dev_fop_read, .write = lirc_dev_fop_write, .poll = lirc_dev_fop_poll, .unlocked_ioctl = lirc_dev_fop_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = lirc_dev_fop_ioctl, #endif .open = lirc_dev_fop_open, .release = lirc_dev_fop_close, .llseek = noop_llseek, }; static int lirc_cdev_add(struct irctl *ir) { int retval = -ENOMEM; struct lirc_driver *d = &ir->d; struct cdev *cdev; cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); if (!cdev) goto err_out; if (d->fops) { cdev_init(cdev, d->fops); cdev->owner = d->owner; } else { cdev_init(cdev, &lirc_dev_fops); cdev->owner = THIS_MODULE; } retval = kobject_set_name(&cdev->kobj, "lirc%d", d->minor); if (retval) goto err_out; 
retval = cdev_add(cdev, MKDEV(MAJOR(lirc_base_dev), d->minor), 1); if (retval) { kobject_put(&cdev->kobj); goto err_out; } ir->cdev = cdev; return 0; err_out: kfree(cdev); return retval; } int lirc_register_driver(struct lirc_driver *d) { struct irctl *ir; int minor; int bytes_in_key; unsigned int chunk_size; unsigned int buffer_size; int err; if (!d) { printk(KERN_ERR "lirc_dev: lirc_register_driver: " "driver pointer must be not NULL!\n"); err = -EBADRQC; goto out; } if (!d->dev) { printk(KERN_ERR "%s: dev pointer not filled in!\n", __func__); err = -EINVAL; goto out; } if (MAX_IRCTL_DEVICES <= d->minor) { dev_err(d->dev, "lirc_dev: lirc_register_driver: " "\"minor\" must be between 0 and %d (%d)!\n", MAX_IRCTL_DEVICES - 1, d->minor); err = -EBADRQC; goto out; } if (1 > d->code_length || (BUFLEN * 8) < d->code_length) { dev_err(d->dev, "lirc_dev: lirc_register_driver: " "code length in bits for minor (%d) " "must be less than %d!\n", d->minor, BUFLEN * 8); err = -EBADRQC; goto out; } dev_dbg(d->dev, "lirc_dev: lirc_register_driver: sample_rate: %d\n", d->sample_rate); if (d->sample_rate) { if (2 > d->sample_rate || HZ < d->sample_rate) { dev_err(d->dev, "lirc_dev: lirc_register_driver: " "sample_rate must be between 2 and %d!\n", HZ); err = -EBADRQC; goto out; } if (!d->add_to_buf) { dev_err(d->dev, "lirc_dev: lirc_register_driver: " "add_to_buf cannot be NULL when " "sample_rate is set\n"); err = -EBADRQC; goto out; } } else if (!(d->fops && d->fops->read) && !d->rbuf) { dev_err(d->dev, "lirc_dev: lirc_register_driver: " "fops->read and rbuf cannot all be NULL!\n"); err = -EBADRQC; goto out; } else if (!d->rbuf) { if (!(d->fops && d->fops->read && d->fops->poll && d->fops->unlocked_ioctl)) { dev_err(d->dev, "lirc_dev: lirc_register_driver: " "neither read, poll nor unlocked_ioctl can be NULL!\n"); err = -EBADRQC; goto out; } } mutex_lock(&lirc_dev_lock); minor = d->minor; if (minor < 0) { /* find first free slot for driver */ for (minor = 0; minor < 
MAX_IRCTL_DEVICES; minor++) if (!irctls[minor]) break; if (MAX_IRCTL_DEVICES == minor) { dev_err(d->dev, "lirc_dev: lirc_register_driver: " "no free slots for drivers!\n"); err = -ENOMEM; goto out_lock; } } else if (irctls[minor]) { dev_err(d->dev, "lirc_dev: lirc_register_driver: " "minor (%d) just registered!\n", minor); err = -EBUSY; goto out_lock; } ir = kzalloc(sizeof(struct irctl), GFP_KERNEL); if (!ir) { err = -ENOMEM; goto out_lock; } lirc_irctl_init(ir); irctls[minor] = ir; d->minor = minor; if (d->sample_rate) { ir->jiffies_to_wait = HZ / d->sample_rate; } else { /* it means - wait for external event in task queue */ ir->jiffies_to_wait = 0; } /* some safety check 8-) */ d->name[sizeof(d->name)-1] = '\0'; bytes_in_key = BITS_TO_LONGS(d->code_length) + (d->code_length % 8 ? 1 : 0); buffer_size = d->buffer_size ? d->buffer_size : BUFLEN / bytes_in_key; chunk_size = d->chunk_size ? d->chunk_size : bytes_in_key; if (d->rbuf) { ir->buf = d->rbuf; } else { ir->buf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL); if (!ir->buf) { err = -ENOMEM; goto out_lock; } err = lirc_buffer_init(ir->buf, chunk_size, buffer_size); if (err) { kfree(ir->buf); goto out_lock; } } ir->chunk_size = ir->buf->chunk_size; if (d->features == 0) d->features = LIRC_CAN_REC_LIRCCODE; ir->d = *d; device_create(lirc_class, ir->d.dev, MKDEV(MAJOR(lirc_base_dev), ir->d.minor), NULL, "lirc%u", ir->d.minor); if (d->sample_rate) { /* try to fire up polling thread */ ir->task = kthread_run(lirc_thread, (void *)ir, "lirc_dev"); if (IS_ERR(ir->task)) { dev_err(d->dev, "lirc_dev: lirc_register_driver: " "cannot run poll thread for minor = %d\n", d->minor); err = -ECHILD; goto out_sysfs; } } err = lirc_cdev_add(ir); if (err) goto out_sysfs; ir->attached = 1; mutex_unlock(&lirc_dev_lock); dev_info(ir->d.dev, "lirc_dev: driver %s registered at minor = %d\n", ir->d.name, ir->d.minor); return minor; out_sysfs: device_destroy(lirc_class, MKDEV(MAJOR(lirc_base_dev), ir->d.minor)); out_lock: 
mutex_unlock(&lirc_dev_lock); out: return err; } EXPORT_SYMBOL(lirc_register_driver); int lirc_unregister_driver(int minor) { struct irctl *ir; struct cdev *cdev; if (minor < 0 || minor >= MAX_IRCTL_DEVICES) { printk(KERN_ERR "lirc_dev: %s: minor (%d) must be between " "0 and %d!\n", __func__, minor, MAX_IRCTL_DEVICES - 1); return -EBADRQC; } ir = irctls[minor]; if (!ir) { printk(KERN_ERR "lirc_dev: %s: failed to get irctl struct " "for minor %d!\n", __func__, minor); return -ENOENT; } cdev = ir->cdev; mutex_lock(&lirc_dev_lock); if (ir->d.minor != minor) { printk(KERN_ERR "lirc_dev: %s: minor (%d) device not " "registered!\n", __func__, minor); mutex_unlock(&lirc_dev_lock); return -ENOENT; } /* end up polling thread */ if (ir->task) kthread_stop(ir->task); dev_dbg(ir->d.dev, "lirc_dev: driver %s unregistered from minor = %d\n", ir->d.name, ir->d.minor); ir->attached = 0; if (ir->open) { dev_dbg(ir->d.dev, LOGHEAD "releasing opened driver\n", ir->d.name, ir->d.minor); wake_up_interruptible(&ir->buf->wait_poll); mutex_lock(&ir->irctl_lock); ir->d.set_use_dec(ir->d.data); module_put(cdev->owner); mutex_unlock(&ir->irctl_lock); } else { lirc_irctl_cleanup(ir); cdev_del(cdev); kfree(cdev); kfree(ir); irctls[minor] = NULL; } mutex_unlock(&lirc_dev_lock); return 0; } EXPORT_SYMBOL(lirc_unregister_driver); int lirc_dev_fop_open(struct inode *inode, struct file *file) { struct irctl *ir; struct cdev *cdev; int retval = 0; if (iminor(inode) >= MAX_IRCTL_DEVICES) { printk(KERN_WARNING "lirc_dev [%d]: open result = -ENODEV\n", iminor(inode)); return -ENODEV; } if (mutex_lock_interruptible(&lirc_dev_lock)) return -ERESTARTSYS; ir = irctls[iminor(inode)]; if (!ir) { retval = -ENODEV; goto error; } dev_dbg(ir->d.dev, LOGHEAD "open called\n", ir->d.name, ir->d.minor); if (ir->d.minor == NOPLUG) { retval = -ENODEV; goto error; } if (ir->open) { retval = -EBUSY; goto error; } cdev = ir->cdev; if (try_module_get(cdev->owner)) { ir->open++; retval = ir->d.set_use_inc(ir->d.data); if 
(retval) { module_put(cdev->owner); ir->open--; } else { lirc_buffer_clear(ir->buf); } if (ir->task) wake_up_process(ir->task); } error: if (ir) dev_dbg(ir->d.dev, LOGHEAD "open result = %d\n", ir->d.name, ir->d.minor, retval); mutex_unlock(&lirc_dev_lock); nonseekable_open(inode, file); return retval; } EXPORT_SYMBOL(lirc_dev_fop_open); int lirc_dev_fop_close(struct inode *inode, struct file *file) { struct irctl *ir = irctls[iminor(inode)]; struct cdev *cdev; if (!ir) { printk(KERN_ERR "%s: called with invalid irctl\n", __func__); return -EINVAL; } cdev = ir->cdev; dev_dbg(ir->d.dev, LOGHEAD "close called\n", ir->d.name, ir->d.minor); WARN_ON(mutex_lock_killable(&lirc_dev_lock)); ir->open--; if (ir->attached) { ir->d.set_use_dec(ir->d.data); module_put(cdev->owner); } else { lirc_irctl_cleanup(ir); cdev_del(cdev); irctls[ir->d.minor] = NULL; kfree(cdev); kfree(ir); } mutex_unlock(&lirc_dev_lock); return 0; } EXPORT_SYMBOL(lirc_dev_fop_close); unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait) { struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)]; unsigned int ret; if (!ir) { printk(KERN_ERR "%s: called with invalid irctl\n", __func__); return POLLERR; } dev_dbg(ir->d.dev, LOGHEAD "poll called\n", ir->d.name, ir->d.minor); if (!ir->attached) return POLLERR; poll_wait(file, &ir->buf->wait_poll, wait); if (ir->buf) if (lirc_buffer_empty(ir->buf)) ret = 0; else ret = POLLIN | POLLRDNORM; else ret = POLLERR; dev_dbg(ir->d.dev, LOGHEAD "poll result = %d\n", ir->d.name, ir->d.minor, ret); return ret; } EXPORT_SYMBOL(lirc_dev_fop_poll); long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { __u32 mode; int result = 0; struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)]; if (!ir) { printk(KERN_ERR "lirc_dev: %s: no irctl found!\n", __func__); return -ENODEV; } dev_dbg(ir->d.dev, LOGHEAD "ioctl called (0x%x)\n", ir->d.name, ir->d.minor, cmd); if (ir->d.minor == NOPLUG || !ir->attached) { dev_dbg(ir->d.dev, LOGHEAD 
"ioctl result = -ENODEV\n", ir->d.name, ir->d.minor); return -ENODEV; } mutex_lock(&ir->irctl_lock); switch (cmd) { case LIRC_GET_FEATURES: result = put_user(ir->d.features, (__u32 *)arg); break; case LIRC_GET_REC_MODE: if (!(ir->d.features & LIRC_CAN_REC_MASK)) { result = -ENOSYS; break; } result = put_user(LIRC_REC2MODE (ir->d.features & LIRC_CAN_REC_MASK), (__u32 *)arg); break; case LIRC_SET_REC_MODE: if (!(ir->d.features & LIRC_CAN_REC_MASK)) { result = -ENOSYS; break; } result = get_user(mode, (__u32 *)arg); if (!result && !(LIRC_MODE2REC(mode) & ir->d.features)) result = -EINVAL; /* * FIXME: We should actually set the mode somehow but * for now, lirc_serial doesn't support mode changing either */ break; case LIRC_GET_LENGTH: result = put_user(ir->d.code_length, (__u32 *)arg); break; case LIRC_GET_MIN_TIMEOUT: if (!(ir->d.features & LIRC_CAN_SET_REC_TIMEOUT) || ir->d.min_timeout == 0) { result = -ENOSYS; break; } result = put_user(ir->d.min_timeout, (__u32 *)arg); break; case LIRC_GET_MAX_TIMEOUT: if (!(ir->d.features & LIRC_CAN_SET_REC_TIMEOUT) || ir->d.max_timeout == 0) { result = -ENOSYS; break; } result = put_user(ir->d.max_timeout, (__u32 *)arg); break; default: result = -EINVAL; } dev_dbg(ir->d.dev, LOGHEAD "ioctl result = %d\n", ir->d.name, ir->d.minor, result); mutex_unlock(&ir->irctl_lock); return result; } EXPORT_SYMBOL(lirc_dev_fop_ioctl); ssize_t lirc_dev_fop_read(struct file *file, char __user *buffer, size_t length, loff_t *ppos) { struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)]; unsigned char *buf; int ret = 0, written = 0; DECLARE_WAITQUEUE(wait, current); if (!ir) { printk(KERN_ERR "%s: called with invalid irctl\n", __func__); return -ENODEV; } dev_dbg(ir->d.dev, LOGHEAD "read called\n", ir->d.name, ir->d.minor); buf = kzalloc(ir->chunk_size, GFP_KERNEL); if (!buf) return -ENOMEM; if (mutex_lock_interruptible(&ir->irctl_lock)) { ret = -ERESTARTSYS; goto out_unlocked; } if (!ir->attached) { ret = -ENODEV; goto out_locked; } if 
(length % ir->chunk_size) { ret = -EINVAL; goto out_locked; } /* * we add ourselves to the task queue before buffer check * to avoid losing scan code (in case when queue is awaken somewhere * between while condition checking and scheduling) */ add_wait_queue(&ir->buf->wait_poll, &wait); set_current_state(TASK_INTERRUPTIBLE); /* * while we didn't provide 'length' bytes, device is opened in blocking * mode and 'copy_to_user' is happy, wait for data. */ while (written < length && ret == 0) { if (lirc_buffer_empty(ir->buf)) { /* According to the read(2) man page, 'written' can be * returned as less than 'length', instead of blocking * again, returning -EWOULDBLOCK, or returning * -ERESTARTSYS */ if (written) break; if (file->f_flags & O_NONBLOCK) { ret = -EWOULDBLOCK; break; } if (signal_pending(current)) { ret = -ERESTARTSYS; break; } mutex_unlock(&ir->irctl_lock); schedule(); set_current_state(TASK_INTERRUPTIBLE); if (mutex_lock_interruptible(&ir->irctl_lock)) { ret = -ERESTARTSYS; remove_wait_queue(&ir->buf->wait_poll, &wait); set_current_state(TASK_RUNNING); goto out_unlocked; } if (!ir->attached) { ret = -ENODEV; break; } } else { lirc_buffer_read(ir->buf, buf); ret = copy_to_user((void *)buffer+written, buf, ir->buf->chunk_size); if (!ret) written += ir->buf->chunk_size; else ret = -EFAULT; } } remove_wait_queue(&ir->buf->wait_poll, &wait); set_current_state(TASK_RUNNING); out_locked: mutex_unlock(&ir->irctl_lock); out_unlocked: kfree(buf); dev_dbg(ir->d.dev, LOGHEAD "read result = %s (%d)\n", ir->d.name, ir->d.minor, ret ? "<fail>" : "<ok>", ret); return ret ? 
ret : written; } EXPORT_SYMBOL(lirc_dev_fop_read); void *lirc_get_pdata(struct file *file) { void *data = NULL; if (file && file->f_dentry && file->f_dentry->d_inode && file->f_dentry->d_inode->i_rdev) { struct irctl *ir; ir = irctls[iminor(file->f_dentry->d_inode)]; data = ir->d.data; } return data; } EXPORT_SYMBOL(lirc_get_pdata); ssize_t lirc_dev_fop_write(struct file *file, const char __user *buffer, size_t length, loff_t *ppos) { struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)]; if (!ir) { printk(KERN_ERR "%s: called with invalid irctl\n", __func__); return -ENODEV; } dev_dbg(ir->d.dev, LOGHEAD "write called\n", ir->d.name, ir->d.minor); if (!ir->attached) return -ENODEV; return -EINVAL; } EXPORT_SYMBOL(lirc_dev_fop_write); static int __init lirc_dev_init(void) { int retval; lirc_class = class_create(THIS_MODULE, "lirc"); if (IS_ERR(lirc_class)) { retval = PTR_ERR(lirc_class); printk(KERN_ERR "lirc_dev: class_create failed\n"); goto error; } retval = alloc_chrdev_region(&lirc_base_dev, 0, MAX_IRCTL_DEVICES, IRCTL_DEV_NAME); if (retval) { class_destroy(lirc_class); printk(KERN_ERR "lirc_dev: alloc_chrdev_region failed\n"); goto error; } printk(KERN_INFO "lirc_dev: IR Remote Control driver registered, " "major %d \n", MAJOR(lirc_base_dev)); error: return retval; } static void __exit lirc_dev_exit(void) { class_destroy(lirc_class); unregister_chrdev_region(lirc_base_dev, MAX_IRCTL_DEVICES); printk(KERN_INFO "lirc_dev: module unloaded\n"); } module_init(lirc_dev_init); module_exit(lirc_dev_exit); MODULE_DESCRIPTION("LIRC base driver module"); MODULE_AUTHOR("Artur Lipowski"); MODULE_LICENSE("GPL"); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Enable debugging messages");
gpl-2.0
robcore/machinex_kernelv2
drivers/acpi/acpica/evglock.c
4852
10676
/****************************************************************************** * * Module Name: evglock - Global Lock support * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acevents.h" #include "acinterp.h" #define _COMPONENT ACPI_EVENTS ACPI_MODULE_NAME("evglock") #if (!ACPI_REDUCED_HARDWARE) /* Entire module */ /* Local prototypes */ static u32 acpi_ev_global_lock_handler(void *context); /******************************************************************************* * * FUNCTION: acpi_ev_init_global_lock_handler * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Install a handler for the global lock release event * ******************************************************************************/ acpi_status acpi_ev_init_global_lock_handler(void) { acpi_status status; ACPI_FUNCTION_TRACE(ev_init_global_lock_handler); /* If Hardware Reduced flag is set, there is no global lock */ if (acpi_gbl_reduced_hardware) { return_ACPI_STATUS(AE_OK); } /* Attempt installation of the global lock handler */ status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL, acpi_ev_global_lock_handler, NULL); /* * If the global lock does not exist on this platform, the attempt to * enable GBL_STATUS will fail (the GBL_ENABLE bit will not stick). * Map to AE_OK, but mark global lock as not present. Any attempt to * actually use the global lock will be flagged with an error. 
*/ acpi_gbl_global_lock_present = FALSE; if (status == AE_NO_HARDWARE_RESPONSE) { ACPI_ERROR((AE_INFO, "No response from Global Lock hardware, disabling lock")); return_ACPI_STATUS(AE_OK); } status = acpi_os_create_lock(&acpi_gbl_global_lock_pending_lock); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } acpi_gbl_global_lock_pending = FALSE; acpi_gbl_global_lock_present = TRUE; return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_remove_global_lock_handler * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Remove the handler for the Global Lock * ******************************************************************************/ acpi_status acpi_ev_remove_global_lock_handler(void) { acpi_status status; ACPI_FUNCTION_TRACE(ev_remove_global_lock_handler); acpi_gbl_global_lock_present = FALSE; status = acpi_remove_fixed_event_handler(ACPI_EVENT_GLOBAL, acpi_ev_global_lock_handler); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_global_lock_handler * * PARAMETERS: Context - From thread interface, not used * * RETURN: ACPI_INTERRUPT_HANDLED * * DESCRIPTION: Invoked directly from the SCI handler when a global lock * release interrupt occurs. If there is actually a pending * request for the lock, signal the waiting thread. * ******************************************************************************/ static u32 acpi_ev_global_lock_handler(void *context) { acpi_status status; acpi_cpu_flags flags; flags = acpi_os_acquire_lock(acpi_gbl_global_lock_pending_lock); /* * If a request for the global lock is not actually pending, * we are done. This handles "spurious" global lock interrupts * which are possible (and have been seen) with bad BIOSs. */ if (!acpi_gbl_global_lock_pending) { goto cleanup_and_exit; } /* * Send a unit to the global lock semaphore. 
The actual acquisition * of the global lock will be performed by the waiting thread. */ status = acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1); if (ACPI_FAILURE(status)) { ACPI_ERROR((AE_INFO, "Could not signal Global Lock semaphore")); } acpi_gbl_global_lock_pending = FALSE; cleanup_and_exit: acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags); return (ACPI_INTERRUPT_HANDLED); } /****************************************************************************** * * FUNCTION: acpi_ev_acquire_global_lock * * PARAMETERS: Timeout - Max time to wait for the lock, in millisec. * * RETURN: Status * * DESCRIPTION: Attempt to gain ownership of the Global Lock. * * MUTEX: Interpreter must be locked * * Note: The original implementation allowed multiple threads to "acquire" the * Global Lock, and the OS would hold the lock until the last thread had * released it. However, this could potentially starve the BIOS out of the * lock, especially in the case where there is a tight handshake between the * Embedded Controller driver and the BIOS. Therefore, this implementation * allows only one thread to acquire the HW Global Lock at a time, and makes * the global lock appear as a standard mutex on the OS side. * *****************************************************************************/ acpi_status acpi_ev_acquire_global_lock(u16 timeout) { acpi_cpu_flags flags; acpi_status status; u8 acquired = FALSE; ACPI_FUNCTION_TRACE(ev_acquire_global_lock); /* * Only one thread can acquire the GL at a time, the global_lock_mutex * enforces this. This interface releases the interpreter if we must wait. */ status = acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex->mutex. os_mutex, timeout); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* * Update the global lock handle and check for wraparound. 
The handle is * only used for the external global lock interfaces, but it is updated * here to properly handle the case where a single thread may acquire the * lock via both the AML and the acpi_acquire_global_lock interfaces. The * handle is therefore updated on the first acquire from a given thread * regardless of where the acquisition request originated. */ acpi_gbl_global_lock_handle++; if (acpi_gbl_global_lock_handle == 0) { acpi_gbl_global_lock_handle = 1; } /* * Make sure that a global lock actually exists. If not, just * treat the lock as a standard mutex. */ if (!acpi_gbl_global_lock_present) { acpi_gbl_global_lock_acquired = TRUE; return_ACPI_STATUS(AE_OK); } flags = acpi_os_acquire_lock(acpi_gbl_global_lock_pending_lock); do { /* Attempt to acquire the actual hardware lock */ ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired); if (acquired) { acpi_gbl_global_lock_acquired = TRUE; ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Acquired hardware Global Lock\n")); break; } /* * Did not get the lock. The pending bit was set above, and * we must now wait until we receive the global lock * released interrupt. */ acpi_gbl_global_lock_pending = TRUE; acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags); ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for hardware Global Lock\n")); /* * Wait for handshake with the global lock interrupt handler. * This interface releases the interpreter if we must wait. */ status = acpi_ex_system_wait_semaphore (acpi_gbl_global_lock_semaphore, ACPI_WAIT_FOREVER); flags = acpi_os_acquire_lock(acpi_gbl_global_lock_pending_lock); } while (ACPI_SUCCESS(status)); acpi_gbl_global_lock_pending = FALSE; acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_release_global_lock * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Releases ownership of the Global Lock. 
* ******************************************************************************/ acpi_status acpi_ev_release_global_lock(void) { u8 pending = FALSE; acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(ev_release_global_lock); /* Lock must be already acquired */ if (!acpi_gbl_global_lock_acquired) { ACPI_WARNING((AE_INFO, "Cannot release the ACPI Global Lock, it has not been acquired")); return_ACPI_STATUS(AE_NOT_ACQUIRED); } if (acpi_gbl_global_lock_present) { /* Allow any thread to release the lock */ ACPI_RELEASE_GLOBAL_LOCK(acpi_gbl_FACS, pending); /* * If the pending bit was set, we must write GBL_RLS to the control * register */ if (pending) { status = acpi_write_bit_register (ACPI_BITREG_GLOBAL_LOCK_RELEASE, ACPI_ENABLE_EVENT); } ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Released hardware Global Lock\n")); } acpi_gbl_global_lock_acquired = FALSE; /* Release the local GL mutex */ acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex); return_ACPI_STATUS(status); } #endif /* !ACPI_REDUCED_HARDWARE */
gpl-2.0
vocoderism/kernel_falcon_umts_kk
drivers/acpi/acpica/nsutils.c
4852
21068
/****************************************************************************** * * Module Name: nsutils - Utilities for accessing ACPI namespace, accessing * parents and siblings and Scope manipulation * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #include "amlcode.h" #include "actables.h" #define _COMPONENT ACPI_NAMESPACE ACPI_MODULE_NAME("nsutils") /* Local prototypes */ static u8 acpi_ns_valid_path_separator(char sep); #ifdef ACPI_OBSOLETE_FUNCTIONS acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node *node_to_search); #endif /******************************************************************************* * * FUNCTION: acpi_ns_print_node_pathname * * PARAMETERS: Node - Object * Message - Prefix message * * DESCRIPTION: Print an object's full namespace pathname * Manages allocation/freeing of a pathname buffer * ******************************************************************************/ void acpi_ns_print_node_pathname(struct acpi_namespace_node *node, const char *message) { struct acpi_buffer buffer; acpi_status status; if (!node) { acpi_os_printf("[NULL NAME]"); return; } /* Convert handle to full pathname and print it (with supplied message) */ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; status = acpi_ns_handle_to_pathname(node, &buffer); if (ACPI_SUCCESS(status)) { if (message) { acpi_os_printf("%s ", message); } acpi_os_printf("[%s] (Node %p)", (char *)buffer.pointer, node); ACPI_FREE(buffer.pointer); } } /******************************************************************************* * * FUNCTION: acpi_ns_valid_root_prefix * * PARAMETERS: Prefix - Character to be checked * * RETURN: TRUE if a valid prefix * * DESCRIPTION: Check if 
a character is a valid ACPI Root prefix * ******************************************************************************/ u8 acpi_ns_valid_root_prefix(char prefix) { return ((u8) (prefix == '\\')); } /******************************************************************************* * * FUNCTION: acpi_ns_valid_path_separator * * PARAMETERS: Sep - Character to be checked * * RETURN: TRUE if a valid path separator * * DESCRIPTION: Check if a character is a valid ACPI path separator * ******************************************************************************/ static u8 acpi_ns_valid_path_separator(char sep) { return ((u8) (sep == '.')); } /******************************************************************************* * * FUNCTION: acpi_ns_get_type * * PARAMETERS: Node - Parent Node to be examined * * RETURN: Type field from Node whose handle is passed * * DESCRIPTION: Return the type of a Namespace node * ******************************************************************************/ acpi_object_type acpi_ns_get_type(struct acpi_namespace_node * node) { ACPI_FUNCTION_TRACE(ns_get_type); if (!node) { ACPI_WARNING((AE_INFO, "Null Node parameter")); return_UINT32(ACPI_TYPE_ANY); } return_UINT32((acpi_object_type) node->type); } /******************************************************************************* * * FUNCTION: acpi_ns_local * * PARAMETERS: Type - A namespace object type * * RETURN: LOCAL if names must be found locally in objects of the * passed type, 0 if enclosing scopes should be searched * * DESCRIPTION: Returns scope rule for the given object type. 
* ******************************************************************************/ u32 acpi_ns_local(acpi_object_type type) { ACPI_FUNCTION_TRACE(ns_local); if (!acpi_ut_valid_object_type(type)) { /* Type code out of range */ ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type)); return_UINT32(ACPI_NS_NORMAL); } return_UINT32((u32) acpi_gbl_ns_properties[type] & ACPI_NS_LOCAL); } /******************************************************************************* * * FUNCTION: acpi_ns_get_internal_name_length * * PARAMETERS: Info - Info struct initialized with the * external name pointer. * * RETURN: None * * DESCRIPTION: Calculate the length of the internal (AML) namestring * corresponding to the external (ASL) namestring. * ******************************************************************************/ void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info) { const char *next_external_char; u32 i; ACPI_FUNCTION_ENTRY(); next_external_char = info->external_name; info->num_carats = 0; info->num_segments = 0; info->fully_qualified = FALSE; /* * For the internal name, the required length is 4 bytes per segment, plus * 1 each for root_prefix, multi_name_prefix_op, segment count, trailing null * (which is not really needed, but no there's harm in putting it there) * * strlen() + 1 covers the first name_seg, which has no path separator */ if (acpi_ns_valid_root_prefix(*next_external_char)) { info->fully_qualified = TRUE; next_external_char++; /* Skip redundant root_prefix, like \\_SB.PCI0.SBRG.EC0 */ while (acpi_ns_valid_root_prefix(*next_external_char)) { next_external_char++; } } else { /* Handle Carat prefixes */ while (*next_external_char == '^') { info->num_carats++; next_external_char++; } } /* * Determine the number of ACPI name "segments" by counting the number of * path separators within the string. Start with one segment since the * segment count is [(# separators) + 1], and zero separators is ok. 
*/ if (*next_external_char) { info->num_segments = 1; for (i = 0; next_external_char[i]; i++) { if (acpi_ns_valid_path_separator(next_external_char[i])) { info->num_segments++; } } } info->length = (ACPI_NAME_SIZE * info->num_segments) + 4 + info->num_carats; info->next_external_char = next_external_char; } /******************************************************************************* * * FUNCTION: acpi_ns_build_internal_name * * PARAMETERS: Info - Info struct fully initialized * * RETURN: Status * * DESCRIPTION: Construct the internal (AML) namestring * corresponding to the external (ASL) namestring. * ******************************************************************************/ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info) { u32 num_segments = info->num_segments; char *internal_name = info->internal_name; const char *external_name = info->next_external_char; char *result = NULL; u32 i; ACPI_FUNCTION_TRACE(ns_build_internal_name); /* Setup the correct prefixes, counts, and pointers */ if (info->fully_qualified) { internal_name[0] = '\\'; if (num_segments <= 1) { result = &internal_name[1]; } else if (num_segments == 2) { internal_name[1] = AML_DUAL_NAME_PREFIX; result = &internal_name[2]; } else { internal_name[1] = AML_MULTI_NAME_PREFIX_OP; internal_name[2] = (char)num_segments; result = &internal_name[3]; } } else { /* * Not fully qualified. 
* Handle Carats first, then append the name segments */ i = 0; if (info->num_carats) { for (i = 0; i < info->num_carats; i++) { internal_name[i] = '^'; } } if (num_segments <= 1) { result = &internal_name[i]; } else if (num_segments == 2) { internal_name[i] = AML_DUAL_NAME_PREFIX; result = &internal_name[(acpi_size) i + 1]; } else { internal_name[i] = AML_MULTI_NAME_PREFIX_OP; internal_name[(acpi_size) i + 1] = (char)num_segments; result = &internal_name[(acpi_size) i + 2]; } } /* Build the name (minus path separators) */ for (; num_segments; num_segments--) { for (i = 0; i < ACPI_NAME_SIZE; i++) { if (acpi_ns_valid_path_separator(*external_name) || (*external_name == 0)) { /* Pad the segment with underscore(s) if segment is short */ result[i] = '_'; } else { /* Convert the character to uppercase and save it */ result[i] = (char)ACPI_TOUPPER((int)*external_name); external_name++; } } /* Now we must have a path separator, or the pathname is bad */ if (!acpi_ns_valid_path_separator(*external_name) && (*external_name != 0)) { return_ACPI_STATUS(AE_BAD_PATHNAME); } /* Move on the next segment */ external_name++; result += ACPI_NAME_SIZE; } /* Terminate the string */ *result = 0; if (info->fully_qualified) { ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Returning [%p] (abs) \"\\%s\"\n", internal_name, internal_name)); } else { ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Returning [%p] (rel) \"%s\"\n", internal_name, internal_name)); } return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ns_internalize_name * * PARAMETERS: *external_name - External representation of name * **Converted Name - Where to return the resulting * internal represention of the name * * RETURN: Status * * DESCRIPTION: Convert an external representation (e.g. "\_PR_.CPU0") * to internal form (e.g. 
5c 2f 02 5f 50 52 5f 43 50 55 30) * *******************************************************************************/ acpi_status acpi_ns_internalize_name(const char *external_name, char **converted_name) { char *internal_name; struct acpi_namestring_info info; acpi_status status; ACPI_FUNCTION_TRACE(ns_internalize_name); if ((!external_name) || (*external_name == 0) || (!converted_name)) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* Get the length of the new internal name */ info.external_name = external_name; acpi_ns_get_internal_name_length(&info); /* We need a segment to store the internal name */ internal_name = ACPI_ALLOCATE_ZEROED(info.length); if (!internal_name) { return_ACPI_STATUS(AE_NO_MEMORY); } /* Build the name */ info.internal_name = internal_name; status = acpi_ns_build_internal_name(&info); if (ACPI_FAILURE(status)) { ACPI_FREE(internal_name); return_ACPI_STATUS(status); } *converted_name = internal_name; return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ns_externalize_name * * PARAMETERS: internal_name_length - Lenth of the internal name below * internal_name - Internal representation of name * converted_name_length - Where the length is returned * converted_name - Where the resulting external name * is returned * * RETURN: Status * * DESCRIPTION: Convert internal name (e.g. 5c 2f 02 5f 50 52 5f 43 50 55 30) * to its external (printable) form (e.g. 
"\_PR_.CPU0")
 *
 ******************************************************************************/

acpi_status
acpi_ns_externalize_name(u32 internal_name_length,
			 const char *internal_name,
			 u32 * converted_name_length, char **converted_name)
{
	u32 names_index = 0;	/* Offset of the packed 4-byte name segments */
	u32 num_segments = 0;	/* Count of 4-byte name segments */
	u32 required_length;	/* Bytes needed for the external string */
	u32 prefix_length = 0;	/* Length of the leading '\' or '^' run */
	u32 i = 0;
	u32 j = 0;

	ACPI_FUNCTION_TRACE(ns_externalize_name);

	/* Parameter validation: all three inputs are mandatory */

	if (!internal_name_length || !internal_name || !converted_name) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/* Check for a prefix (one '\' | one or more '^') */

	switch (internal_name[0]) {
	case '\\':
		prefix_length = 1;
		break;

	case '^':
		/* Count consecutive leading carats (parent-scope prefixes) */

		for (i = 0; i < internal_name_length; i++) {
			if (internal_name[i] == '^') {
				prefix_length = i + 1;
			} else {
				break;
			}
		}

		if (i == internal_name_length) {
			prefix_length = i;
		}

		break;

	default:
		break;
	}

	/*
	 * Check for object names. Note that there could be 0-255 of these
	 * 4-byte elements.
	 */
	if (prefix_length < internal_name_length) {
		switch (internal_name[prefix_length]) {
		case AML_MULTI_NAME_PREFIX_OP:	/* <count> 4-byte names */

			/*
			 * NOTE(review): the count byte at prefix_length + 1 is
			 * read before the required_length bounds check below;
			 * if internal_name_length == prefix_length + 1 this
			 * reads one byte past the declared length. Confirm
			 * callers always pass a correctly sized buffer.
			 */
			names_index = prefix_length + 2;
			num_segments = (u8)
			    internal_name[(acpi_size) prefix_length + 1];
			break;

		case AML_DUAL_NAME_PREFIX:	/* Two 4-byte names */

			names_index = prefix_length + 1;
			num_segments = 2;
			break;

		case 0:	/* null_name */

			names_index = 0;
			num_segments = 0;
			break;

		default:	/* one 4-byte name */

			names_index = prefix_length;
			num_segments = 1;
			break;
		}
	}

	/*
	 * Calculate the length of converted_name, which equals the length
	 * of the prefix, length of all object names, length of any required
	 * punctuation ('.') between object names, plus the NULL terminator.
	 */
	required_length = prefix_length + (4 * num_segments) +
	    ((num_segments > 0) ? (num_segments - 1) : 0) + 1;

	/*
	 * Check to see if we're still in bounds. If not, there's a problem
	 * with internal_name (invalid format).
	 */
	if (required_length > internal_name_length) {
		ACPI_ERROR((AE_INFO, "Invalid internal name"));
		return_ACPI_STATUS(AE_BAD_PATHNAME);
	}

	/* Build the converted_name (caller owns the allocation) */

	*converted_name = ACPI_ALLOCATE_ZEROED(required_length);
	if (!(*converted_name)) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	j = 0;

	/* Copy the '\' / '^' prefix verbatim */

	for (i = 0; i < prefix_length; i++) {
		(*converted_name)[j++] = internal_name[i];
	}

	/* Emit each 4-character segment, '.'-separated */

	if (num_segments > 0) {
		for (i = 0; i < num_segments; i++) {
			if (i > 0) {
				(*converted_name)[j++] = '.';
			}

			(*converted_name)[j++] = internal_name[names_index++];
			(*converted_name)[j++] = internal_name[names_index++];
			(*converted_name)[j++] = internal_name[names_index++];
			(*converted_name)[j++] = internal_name[names_index++];
		}
	}

	/* Output length is optional */

	if (converted_name_length) {
		*converted_name_length = (u32) required_length;
	}

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_validate_handle
 *
 * PARAMETERS:  Handle          - Handle to be validated and typecast to a
 *                                namespace node.
 *
 * RETURN:      A pointer to a namespace node
 *
 * DESCRIPTION: Convert a namespace handle to a namespace node. Handles special
 *              cases for the root node.
 *
 * NOTE: Real integer handles would allow for more verification
 *       and keep all pointers within this subsystem - however this introduces
 *       more overhead and has not been necessary to this point. Drivers
 *       holding handles are typically notified before a node becomes invalid
 *       due to a table unload.
 *
 ******************************************************************************/

struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle)
{

	ACPI_FUNCTION_ENTRY();

	/* Parameter validation: NULL and the root token both map to root */

	if ((!handle) || (handle == ACPI_ROOT_OBJECT)) {
		return (acpi_gbl_root_node);
	}

	/* We can at least attempt to verify the handle */

	if (ACPI_GET_DESCRIPTOR_TYPE(handle) != ACPI_DESC_TYPE_NAMED) {
		return (NULL);
	}

	return (ACPI_CAST_PTR(struct acpi_namespace_node, handle));
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_terminate
 *
 * PARAMETERS:  none
 *
 * RETURN:      none
 *
 * DESCRIPTION: free memory allocated for namespace and ACPI table storage.
 *
 ******************************************************************************/

void acpi_ns_terminate(void)
{
	union acpi_operand_object *obj_desc;

	ACPI_FUNCTION_TRACE(ns_terminate);

	/*
	 * 1) Free the entire namespace -- all nodes and objects
	 *
	 * Delete all object descriptors attached to namespace nodes
	 */
	acpi_ns_delete_namespace_subtree(acpi_gbl_root_node);

	/* Detach any objects attached to the root */

	obj_desc = acpi_ns_get_attached_object(acpi_gbl_root_node);
	if (obj_desc) {
		acpi_ns_detach_object(acpi_gbl_root_node);
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace freed\n"));
	return_VOID;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_opens_scope
 *
 * PARAMETERS:  Type        - A valid namespace type
 *
 * RETURN:      NEWSCOPE if the passed type "opens a name scope" according
 *              to the ACPI specification, else 0
 *
 ******************************************************************************/

u32 acpi_ns_opens_scope(acpi_object_type type)
{
	ACPI_FUNCTION_TRACE_STR(ns_opens_scope, acpi_ut_get_type_name(type));

	if (!acpi_ut_valid_object_type(type)) {

		/* type code out of range */

		ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
		return_UINT32(ACPI_NS_NORMAL);
	}

	/* Mask the per-type properties table down to the NEWSCOPE bit */

	return_UINT32(((u32) acpi_gbl_ns_properties[type]) & ACPI_NS_NEWSCOPE);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_get_node
 *
 * PARAMETERS:  *Pathname   - Name to be found, in external (ASL) format. The
 *                            \ (backslash) and ^ (carat) prefixes, and the
 *                            . (period) to separate segments are supported.
 *              prefix_node  - Root of subtree to be searched, or NS_ALL for the
 *                            root of the name space. If Name is fully
 *                            qualified (first s8 is '\'), the passed value
 *                            of Scope will not be accessed.
 *              Flags       - Used to indicate whether to perform upsearch or
 *                            not.
 *              return_node - Where the Node is returned
 *
 * DESCRIPTION: Look up a name relative to a given scope and return the
 *              corresponding Node. NOTE: Scope can be null.
 *
 * MUTEX:       Locks namespace
 *
 ******************************************************************************/

acpi_status
acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
		 const char *pathname,
		 u32 flags, struct acpi_namespace_node **return_node)
{
	union acpi_generic_state scope_info;
	acpi_status status;
	char *internal_path;

	ACPI_FUNCTION_TRACE_PTR(ns_get_node, ACPI_CAST_PTR(char, pathname));

	/* A NULL pathname means "return the prefix node (or root)" */

	if (!pathname) {
		*return_node = prefix_node;
		if (!prefix_node) {
			*return_node = acpi_gbl_root_node;
		}

		return_ACPI_STATUS(AE_OK);
	}

	/* Convert path to internal representation */

	status = acpi_ns_internalize_name(pathname, &internal_path);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Must lock namespace during lookup */

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		/* Lock failed: free the internal path and bail out */
		goto cleanup;
	}

	/* Setup lookup scope (search starting point) */

	scope_info.scope.node = prefix_node;

	/* Lookup the name in the namespace */

	status = acpi_ns_lookup(&scope_info, internal_path, ACPI_TYPE_ANY,
				ACPI_IMODE_EXECUTE,
				(flags | ACPI_NS_DONT_OPEN_SCOPE), NULL,
				return_node);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s, %s\n",
				  pathname, acpi_format_exception(status)));
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);

cleanup:
	ACPI_FREE(internal_path);
	return_ACPI_STATUS(status);
}
gpl-2.0
xXminiWHOOPERxX/MSM8974-Mini-Reloaded-Sense-Kernel
drivers/acpi/acpica/evmisc.c
4852
10158
/******************************************************************************
 *
 * Module Name: evmisc - Miscellaneous event manager support functions
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2012, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evmisc")

/* Local prototypes */
static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_is_notify_object
 *
 * PARAMETERS:  Node            - Node to check
 *
 * RETURN:      TRUE if notifies allowed on this object
 *
 * DESCRIPTION: Check type of node for a object that supports notifies.
 *
 * TBD: This could be replaced by a flag bit in the node.
 *
 ******************************************************************************/

u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node)
{
	switch (node->type) {
	case ACPI_TYPE_DEVICE:
	case ACPI_TYPE_PROCESSOR:
	case ACPI_TYPE_THERMAL:
		/*
		 * These are the ONLY objects that can receive ACPI
		 * notifications
		 */
		return (TRUE);

	default:
		/* All other namespace object types reject notifies */
		return (FALSE);
	}
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_queue_notify_request
 *
 * PARAMETERS:  Node            - NS node for the notified object
 *              notify_value    - Value from the Notify() request
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Dispatch a device notification event to a previously
 *              installed handler.
 *
 ******************************************************************************/

acpi_status
acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
			     u32 notify_value)
{
	union acpi_operand_object *obj_desc;
	union acpi_operand_object *handler_obj = NULL;
	union acpi_generic_state *notify_info;
	acpi_status status = AE_OK;

	ACPI_FUNCTION_NAME(ev_queue_notify_request);

	/*
	 * For value 0x03 (Ejection Request), may need to run a device method.
	 * For value 0x02 (Device Wake), if _PRW exists, may need to run
	 * the _PS0 method.
	 * For value 0x80 (Status Change) on the power button or sleep button,
	 * initiate soft-off or sleep operation.
	 *
	 * For all cases, simply dispatch the notify to the handler.
	 */
	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Dispatching Notify on [%4.4s] (%s) Value 0x%2.2X (%s) Node %p\n",
			  acpi_ut_get_node_name(node),
			  acpi_ut_get_type_name(node->type), notify_value,
			  acpi_ut_get_notify_name(notify_value), node));

	/* Get the notify object attached to the NS Node */

	obj_desc = acpi_ns_get_attached_object(node);
	if (obj_desc) {

		/* We have the notify object, Get the correct handler */

		switch (node->type) {

			/* Notify is allowed only on these types */

		case ACPI_TYPE_DEVICE:
		case ACPI_TYPE_THERMAL:
		case ACPI_TYPE_PROCESSOR:

			/*
			 * System notifies (<= 0x7F) and device notifies
			 * (> 0x7F) have separate per-object handlers
			 */
			if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
				handler_obj =
				    obj_desc->common_notify.system_notify;
			} else {
				handler_obj =
				    obj_desc->common_notify.device_notify;
			}
			break;

		default:

			/* All other types are not supported */

			return (AE_TYPE);
		}
	}

	/*
	 * If there is a handler to run, schedule the dispatcher.
	 * Check for:
	 * 1) Global system notify handler
	 * 2) Global device notify handler
	 * 3) Per-device notify handler
	 */
	if ((acpi_gbl_system_notify.handler &&
	     (notify_value <= ACPI_MAX_SYS_NOTIFY)) ||
	    (acpi_gbl_device_notify.handler &&
	     (notify_value > ACPI_MAX_SYS_NOTIFY)) || handler_obj) {

		/* State object carries the notify info to the deferred task */

		notify_info = acpi_ut_create_generic_state();
		if (!notify_info) {
			return (AE_NO_MEMORY);
		}

		if (!handler_obj) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Executing system notify handler for Notify (%4.4s, %X) "
					  "node %p\n",
					  acpi_ut_get_node_name(node),
					  notify_value, node));
		}

		notify_info->common.descriptor_type =
		    ACPI_DESC_TYPE_STATE_NOTIFY;
		notify_info->notify.node = node;
		notify_info->notify.value = (u16) notify_value;
		notify_info->notify.handler_obj = handler_obj;

		/*
		 * Queue for deferred execution; the dispatcher owns (and
		 * frees) notify_info from here on unless queuing fails
		 */
		status =
		    acpi_os_execute(OSL_NOTIFY_HANDLER,
				    acpi_ev_notify_dispatch, notify_info);
		if (ACPI_FAILURE(status)) {
			acpi_ut_delete_generic_state(notify_info);
		}
	} else {
		/* There is no notify handler (per-device or system) for this device */

		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "No notify handler for Notify (%4.4s, %X) node %p\n",
				  acpi_ut_get_node_name(node), notify_value,
				  node));
	}

	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_notify_dispatch
 *
 * PARAMETERS:  Context         - To be passed to the notify handler
 *
 * RETURN:      None.
 *
 * DESCRIPTION: Dispatch a device notification event to a previously
 *              installed handler.
 *
 ******************************************************************************/

static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
{
	union acpi_generic_state *notify_info =
	    (union acpi_generic_state *)context;
	acpi_notify_handler global_handler = NULL;
	void *global_context = NULL;
	union acpi_operand_object *handler_obj;

	ACPI_FUNCTION_ENTRY();

	/*
	 * We will invoke a global notify handler if installed. This is done
	 * _before_ we invoke the per-device handler attached to the device.
	 */
	if (notify_info->notify.value <= ACPI_MAX_SYS_NOTIFY) {

		/* Global system notification handler */

		if (acpi_gbl_system_notify.handler) {
			global_handler = acpi_gbl_system_notify.handler;
			global_context = acpi_gbl_system_notify.context;
		}
	} else {
		/* Global driver notification handler */

		if (acpi_gbl_device_notify.handler) {
			global_handler = acpi_gbl_device_notify.handler;
			global_context = acpi_gbl_device_notify.context;
		}
	}

	/* Invoke the system handler first, if present */

	if (global_handler) {
		global_handler(notify_info->notify.node,
			       notify_info->notify.value, global_context);
	}

	/* Now invoke the per-device handler, if present */

	handler_obj = notify_info->notify.handler_obj;
	if (handler_obj) {
		struct acpi_object_notify_handler *notifier;

		/* Walk the chained per-device notifier list */

		notifier = &handler_obj->notify;
		while (notifier) {
			notifier->handler(notify_info->notify.node,
					  notify_info->notify.value,
					  notifier->context);
			notifier = notifier->next;
		}
	}

	/* All done with the info object */

	acpi_ut_delete_generic_state(notify_info);
}

#if (!ACPI_REDUCED_HARDWARE)
/******************************************************************************
 *
 * FUNCTION:    acpi_ev_terminate
 *
 * PARAMETERS:  none
 *
 * RETURN:      none
 *
 * DESCRIPTION: Disable events and free memory allocated for table storage.
 *
 ******************************************************************************/

void acpi_ev_terminate(void)
{
	u32 i;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_terminate);

	if (acpi_gbl_events_initialized) {
		/*
		 * Disable all event-related functionality. In all cases, on
		 * error, print a message but obviously we don't abort.
		 */

		/* Disable all fixed events */

		for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
			status = acpi_disable_event(i, 0);
			if (ACPI_FAILURE(status)) {
				ACPI_ERROR((AE_INFO,
					    "Could not disable fixed event %u",
					    (u32) i));
			}
		}

		/* Disable all GPEs in all GPE blocks */

		status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block, NULL);

		/* Remove SCI handler */

		status = acpi_ev_remove_sci_handler();
		if (ACPI_FAILURE(status)) {
			ACPI_ERROR((AE_INFO, "Could not remove SCI handler"));
		}

		status = acpi_ev_remove_global_lock_handler();
		if (ACPI_FAILURE(status)) {
			ACPI_ERROR((AE_INFO,
				    "Could not remove Global Lock handler"));
		}
	}

	/* Deallocate all handler objects installed within GPE info structs */

	status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL);

	/* Return to original mode if necessary */

	if (acpi_gbl_original_mode == ACPI_SYS_MODE_LEGACY) {
		status = acpi_disable();
		if (ACPI_FAILURE(status)) {
			ACPI_WARNING((AE_INFO, "AcpiDisable failed"));
		}
	}
	return_VOID;
}

#endif				/* !ACPI_REDUCED_HARDWARE */
gpl-2.0
nobooya/e975-kk-kernel
drivers/acpi/acpica/evgpe.c
4852
22405
/******************************************************************************
 *
 * Module Name: evgpe - General Purpose Event handling and dispatch
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2012, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpe")
#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
/* Local prototypes */
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_update_gpe_enable_mask
 *
 * PARAMETERS:  gpe_event_info          - GPE to update
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Updates GPE register enable mask based upon whether there are
 *              runtime references to this GPE
 *
 ******************************************************************************/

acpi_status
acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
{
	struct acpi_gpe_register_info *gpe_register_info;
	u32 register_bit;

	ACPI_FUNCTION_TRACE(ev_update_gpe_enable_mask);

	gpe_register_info = gpe_event_info->register_info;
	if (!gpe_register_info) {
		return_ACPI_STATUS(AE_NOT_EXIST);
	}

	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
						    gpe_register_info);

	/* Clear the run bit up front */

	ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);

	/* Set the mask bit only if there are references to this GPE */

	if (gpe_event_info->runtime_count) {
		ACPI_SET_BIT(gpe_register_info->enable_for_run,
			     (u8)register_bit);
	}

	return_ACPI_STATUS(AE_OK);
}
/******************************************************************************* * * FUNCTION: acpi_ev_enable_gpe * * PARAMETERS: gpe_event_info - GPE to enable * * RETURN: Status * * DESCRIPTION: Clear a GPE of stale events and enable it. * ******************************************************************************/ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) { acpi_status status; ACPI_FUNCTION_TRACE(ev_enable_gpe); /* * We will only allow a GPE to be enabled if it has either an associated * method (_Lxx/_Exx) or a handler, or is using the implicit notify * feature. Otherwise, the GPE will be immediately disabled by * acpi_ev_gpe_dispatch the first time it fires. */ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == ACPI_GPE_DISPATCH_NONE) { return_ACPI_STATUS(AE_NO_HANDLER); } /* Clear the GPE (of stale events) */ status = acpi_hw_clear_gpe(gpe_event_info); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Enable the requested GPE */ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_add_gpe_reference * * PARAMETERS: gpe_event_info - Add a reference to this GPE * * RETURN: Status * * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is * hardware-enabled. 
* ******************************************************************************/ acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) { acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(ev_add_gpe_reference); if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) { return_ACPI_STATUS(AE_LIMIT); } gpe_event_info->runtime_count++; if (gpe_event_info->runtime_count == 1) { /* Enable on first reference */ status = acpi_ev_update_gpe_enable_mask(gpe_event_info); if (ACPI_SUCCESS(status)) { status = acpi_ev_enable_gpe(gpe_event_info); } if (ACPI_FAILURE(status)) { gpe_event_info->runtime_count--; } } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_remove_gpe_reference * * PARAMETERS: gpe_event_info - Remove a reference to this GPE * * RETURN: Status * * DESCRIPTION: Remove a reference to a GPE. When the last reference is * removed, the GPE is hardware-disabled. * ******************************************************************************/ acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) { acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(ev_remove_gpe_reference); if (!gpe_event_info->runtime_count) { return_ACPI_STATUS(AE_LIMIT); } gpe_event_info->runtime_count--; if (!gpe_event_info->runtime_count) { /* Disable on last reference */ status = acpi_ev_update_gpe_enable_mask(gpe_event_info); if (ACPI_SUCCESS(status)) { status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); } if (ACPI_FAILURE(status)) { gpe_event_info->runtime_count++; } } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_low_get_gpe_info * * PARAMETERS: gpe_number - Raw GPE number * gpe_block - A GPE info block * * RETURN: A GPE event_info struct. 
NULL if not a valid GPE (The gpe_number
 *              is not within the specified GPE block)
 *
 * DESCRIPTION: Returns the event_info struct associated with this GPE. This is
 *              the low-level implementation of ev_get_gpe_event_info.
 *
 ******************************************************************************/

struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
						     struct acpi_gpe_block_info
						     *gpe_block)
{
	u32 offset;

	/* No block at all? Then the number cannot be valid */

	if (!gpe_block) {
		return (NULL);
	}

	/* The number must not fall below the block's base */

	if (gpe_number < gpe_block->block_base_number) {
		return (NULL);
	}

	/* Translate to a block-relative index and range-check it */

	offset = gpe_number - gpe_block->block_base_number;
	if (offset >= gpe_block->gpe_count) {
		return (NULL);
	}

	return (&gpe_block->event_info[offset]);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_event_info
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 *              gpe_number          - Raw GPE number
 *
 * RETURN:      A GPE event_info struct. NULL if not a valid GPE
 *
 * DESCRIPTION: Returns the event_info struct associated with this GPE.
 *              Validates the gpe_block and the gpe_number
 *
 *              Should be called only when the GPE lists are semaphore locked
 *              and not subject to change.
* ******************************************************************************/ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, u32 gpe_number) { union acpi_operand_object *obj_desc; struct acpi_gpe_event_info *gpe_info; u32 i; ACPI_FUNCTION_ENTRY(); /* A NULL gpe_device means use the FADT-defined GPE block(s) */ if (!gpe_device) { /* Examine GPE Block 0 and 1 (These blocks are permanent) */ for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) { gpe_info = acpi_ev_low_get_gpe_info(gpe_number, acpi_gbl_gpe_fadt_blocks [i]); if (gpe_info) { return (gpe_info); } } /* The gpe_number was not in the range of either FADT GPE block */ return (NULL); } /* A Non-NULL gpe_device means this is a GPE Block Device */ obj_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *) gpe_device); if (!obj_desc || !obj_desc->device.gpe_block) { return (NULL); } return (acpi_ev_low_get_gpe_info (gpe_number, obj_desc->device.gpe_block)); } /******************************************************************************* * * FUNCTION: acpi_ev_gpe_detect * * PARAMETERS: gpe_xrupt_list - Interrupt block for this interrupt. * Can have multiple GPE blocks attached. * * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED * * DESCRIPTION: Detect if any GP events have occurred. This function is * executed at interrupt level. 
 *
 ******************************************************************************/

u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
{
	acpi_status status;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_register_info *gpe_register_info;
	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
	u8 enabled_status_byte;	/* Status AND Enable: the actionable GPEs */
	u32 status_reg;
	u32 enable_reg;
	acpi_cpu_flags flags;
	u32 i;
	u32 j;

	ACPI_FUNCTION_NAME(ev_gpe_detect);

	/* Check for the case where there are no GPEs */

	if (!gpe_xrupt_list) {
		return (int_status);
	}

	/*
	 * We need to obtain the GPE lock for both the data structs and registers
	 * Note: Not necessary to obtain the hardware lock, since the GPE
	 * registers are owned by the gpe_lock.
	 */
	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Examine all GPE blocks attached to this interrupt level */

	gpe_block = gpe_xrupt_list->gpe_block_list_head;
	while (gpe_block) {
		/*
		 * Read all of the 8-bit GPE status and enable registers in this GPE
		 * block, saving all of them. Find all currently active GP events.
		 */
		for (i = 0; i < gpe_block->register_count; i++) {

			/* Get the next status/enable pair */

			gpe_register_info = &gpe_block->register_info[i];

			/*
			 * Optimization: If there are no GPEs enabled within this
			 * register, we can safely ignore the entire register.
			 */
			if (!(gpe_register_info->enable_for_run |
			      gpe_register_info->enable_for_wake)) {
				continue;
			}

			/* Read the Status Register */

			status =
			    acpi_hw_read(&status_reg,
					 &gpe_register_info->status_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			/* Read the Enable Register */

			status =
			    acpi_hw_read(&enable_reg,
					 &gpe_register_info->enable_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
					  "Read GPE Register at GPE%02X: Status=%02X, Enable=%02X\n",
					  gpe_register_info->base_gpe_number,
					  status_reg, enable_reg));

			/* Check if there is anything active at all in this register */

			enabled_status_byte = (u8) (status_reg & enable_reg);
			if (!enabled_status_byte) {

				/* No active GPEs in this register, move on */

				continue;
			}

			/* Now look at the individual GPEs in this byte register */

			for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {

				/* Examine one GPE bit */

				if (enabled_status_byte & (1 << j)) {
					/*
					 * Found an active GPE. Dispatch the event to a handler
					 * or method.
					 */
					int_status |=
					    acpi_ev_gpe_dispatch(gpe_block->
								 node,
								 &gpe_block->
								 event_info
								 [((acpi_size)
								   i *
								   ACPI_GPE_REGISTER_WIDTH)
								  + j],
								 j +
								 gpe_register_info->
								 base_gpe_number);
				}
			}
		}

		gpe_block = gpe_block->next;
	}

unlock_and_exit:

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return (int_status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_asynch_execute_gpe_method
 *
 * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
 *
 * RETURN:      None
 *
 * DESCRIPTION: Perform the actual execution of a GPE control method. This
 *              function is called from an invocation of acpi_os_execute and
 *              therefore does NOT execute at interrupt level - so that
 *              the control method itself is not executed in the context of
 *              an interrupt handler.
 *
 ******************************************************************************/

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = context;
	acpi_status status;
	struct acpi_gpe_event_info *local_gpe_event_info;
	struct acpi_evaluate_info *info;
	struct acpi_gpe_notify_object *notify_object;

	ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);

	/* Allocate a local GPE block */

	local_gpe_event_info =
	    ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_event_info));
	if (!local_gpe_event_info) {
		ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
				"while handling a GPE"));
		return_VOID;
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(local_gpe_event_info);
		return_VOID;
	}

	/* Must revalidate the gpe_number/gpe_block */

	if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
		status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
		ACPI_FREE(local_gpe_event_info);
		return_VOID;
	}

	/*
	 * Take a snapshot of the GPE info for this level - we copy the info to
	 * prevent a race condition with remove_handler/remove_block.
	 */
	ACPI_MEMCPY(local_gpe_event_info, gpe_event_info,
		    sizeof(struct acpi_gpe_event_info));

	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		/*
		 * NOTE(review): local_gpe_event_info is not freed on this
		 * path -- confirm whether the mutex release can actually
		 * fail here, or whether this leaks.
		 */
		return_VOID;
	}

	/* Do the correct dispatch - normal method or implicit notify */

	switch (local_gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
	case ACPI_GPE_DISPATCH_NOTIFY:

		/*
		 * Implicit notify.
		 * Dispatch a DEVICE_WAKE notify to the appropriate handler.
		 * NOTE: the request is queued for execution after this method
		 * completes. The notify handlers are NOT invoked synchronously
		 * from this thread -- because handlers may in turn run other
		 * control methods.
		 */
		status = acpi_ev_queue_notify_request(
				local_gpe_event_info->dispatch.device.node,
				ACPI_NOTIFY_DEVICE_WAKE);

		/* Also notify every chained device on this GPE */

		notify_object = local_gpe_event_info->dispatch.device.next;
		while (ACPI_SUCCESS(status) && notify_object) {
			status = acpi_ev_queue_notify_request(
					notify_object->node,
					ACPI_NOTIFY_DEVICE_WAKE);
			notify_object = notify_object->next;
		}

		break;

	case ACPI_GPE_DISPATCH_METHOD:

		/* Allocate the evaluation information block */

		info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
		if (!info) {
			status = AE_NO_MEMORY;
		} else {
			/*
			 * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the
			 * _Lxx/_Exx control method that corresponds to this GPE
			 */
			info->prefix_node =
			    local_gpe_event_info->dispatch.method_node;
			info->flags = ACPI_IGNORE_RETURN_VALUE;

			status = acpi_ns_evaluate(info);
			ACPI_FREE(info);
		}

		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"while evaluating GPE method [%4.4s]",
					acpi_ut_get_node_name
					(local_gpe_event_info->dispatch.
					 method_node)));
		}

		break;

	default:
		return_VOID;	/* Should never happen */
	}

	/* Defer enabling of GPE until all notify handlers are done */

	status = acpi_os_execute(OSL_NOTIFY_HANDLER,
				 acpi_ev_asynch_enable_gpe,
				 local_gpe_event_info);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(local_gpe_event_info);
	}
	return_VOID;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_asynch_enable_gpe
 *
 * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
 *              Callback from acpi_os_execute
 *
 * RETURN:      None
 *
 * DESCRIPTION: Asynchronous clear/enable for GPE.
This allows the GPE to * complete (i.e., finish execution of Notify) * ******************************************************************************/ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context) { struct acpi_gpe_event_info *gpe_event_info = context; (void)acpi_ev_finish_gpe(gpe_event_info); ACPI_FREE(gpe_event_info); return; } /******************************************************************************* * * FUNCTION: acpi_ev_finish_gpe * * PARAMETERS: gpe_event_info - Info for this GPE * * RETURN: Status * * DESCRIPTION: Clear/Enable a GPE. Common code that is used after execution * of a GPE method or a synchronous or asynchronous GPE handler. * ******************************************************************************/ acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info) { acpi_status status; if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == ACPI_GPE_LEVEL_TRIGGERED) { /* * GPE is level-triggered, we clear the GPE status bit after * handling the event. */ status = acpi_hw_clear_gpe(gpe_event_info); if (ACPI_FAILURE(status)) { return (status); } } /* * Enable this GPE, conditionally. This means that the GPE will * only be physically enabled if the enable_for_run bit is set * in the event_info. */ (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE); return (AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ev_gpe_dispatch * * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1 * gpe_event_info - Info for this GPE * gpe_number - Number relative to the parent GPE block * * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED * * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC) * or method (e.g. _Lxx/_Exx) handler. * * This function executes at interrupt level. 
* ******************************************************************************/ u32 acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device, struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) { acpi_status status; u32 return_value; ACPI_FUNCTION_TRACE(ev_gpe_dispatch); /* Invoke global event handler if present */ acpi_gpe_count++; if (acpi_gbl_global_event_handler) { acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE, gpe_device, gpe_number, acpi_gbl_global_event_handler_context); } /* * If edge-triggered, clear the GPE status bit now. Note that * level-triggered events are cleared after the GPE is serviced. */ if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == ACPI_GPE_EDGE_TRIGGERED) { status = acpi_hw_clear_gpe(gpe_event_info); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Unable to clear GPE%02X", gpe_number)); return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); } } /* * Always disable the GPE so that it does not keep firing before * any asynchronous activity completes (either from the execution * of a GPE method or an asynchronous GPE handler.) * * If there is no handler or method to run, just disable the * GPE and leave it disabled permanently to prevent further such * pointless events from firing. */ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Unable to disable GPE%02X", gpe_number)); return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); } /* * Dispatch the GPE to either an installed handler or the control * method associated with this GPE (_Lxx or _Exx). If a handler * exists, we invoke it and do not attempt to run the method. * If there is neither a handler nor a method, leave the GPE * disabled. 
*/ switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { case ACPI_GPE_DISPATCH_HANDLER: /* Invoke the installed handler (at interrupt level) */ return_value = gpe_event_info->dispatch.handler->address(gpe_device, gpe_number, gpe_event_info-> dispatch.handler-> context); /* If requested, clear (if level-triggered) and reenable the GPE */ if (return_value & ACPI_REENABLE_GPE) { (void)acpi_ev_finish_gpe(gpe_event_info); } break; case ACPI_GPE_DISPATCH_METHOD: case ACPI_GPE_DISPATCH_NOTIFY: /* * Execute the method associated with the GPE * NOTE: Level-triggered GPEs are cleared after the method completes. */ status = acpi_os_execute(OSL_GPE_HANDLER, acpi_ev_asynch_execute_gpe_method, gpe_event_info); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Unable to queue handler for GPE%2X - event disabled", gpe_number)); } break; default: /* * No handler or method to run! * 03/2010: This case should no longer be possible. We will not allow * a GPE to be enabled if it has no handler or method. */ ACPI_ERROR((AE_INFO, "No handler or method for GPE%02X, disabling event", gpe_number)); break; } return_UINT32(ACPI_INTERRUPT_HANDLED); } #endif /* !ACPI_REDUCED_HARDWARE */
gpl-2.0
Renzo-Olivares/android_43_kernel_htc_monarudo
drivers/pcmcia/sa1100_shannon.c
4852
2482
/* * drivers/pcmcia/sa1100_shannon.c * * PCMCIA implementation routines for Shannon * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/init.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <mach/shannon.h> #include <asm/irq.h> #include "sa1100_generic.h" static int shannon_pcmcia_hw_init(struct soc_pcmcia_socket *skt) { /* All those are inputs */ GAFR &= ~(GPIO_GPIO(SHANNON_GPIO_EJECT_0) | GPIO_GPIO(SHANNON_GPIO_EJECT_1) | GPIO_GPIO(SHANNON_GPIO_RDY_0) | GPIO_GPIO(SHANNON_GPIO_RDY_1)); if (skt->nr == 0) { skt->stat[SOC_STAT_CD].gpio = SHANNON_GPIO_EJECT_0; skt->stat[SOC_STAT_CD].name = "PCMCIA_CD_0"; skt->stat[SOC_STAT_RDY].gpio = SHANNON_GPIO_RDY_0; skt->stat[SOC_STAT_RDY].name = "PCMCIA_RDY_0"; } else { skt->stat[SOC_STAT_CD].gpio = SHANNON_GPIO_EJECT_1; skt->stat[SOC_STAT_CD].name = "PCMCIA_CD_1"; skt->stat[SOC_STAT_RDY].gpio = SHANNON_GPIO_RDY_1; skt->stat[SOC_STAT_RDY].name = "PCMCIA_RDY_1"; } return 0; } static void shannon_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state) { switch (skt->nr) { case 0: state->bvd1 = 1; state->bvd2 = 1; state->vs_3v = 1; /* FIXME Can only apply 3.3V on Shannon. */ state->vs_Xv = 0; break; case 1: state->bvd1 = 1; state->bvd2 = 1; state->vs_3v = 1; /* FIXME Can only apply 3.3V on Shannon. */ state->vs_Xv = 0; break; } } static int shannon_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) { switch (state->Vcc) { case 0: /* power off */ printk(KERN_WARNING "%s(): CS asked for 0V, still applying 3.3V..\n", __func__); break; case 50: printk(KERN_WARNING "%s(): CS asked for 5V, applying 3.3V..\n", __func__); case 33: break; default: printk(KERN_ERR "%s(): unrecognized Vcc %u\n", __func__, state->Vcc); return -1; } printk(KERN_WARNING "%s(): Warning, Can't perform reset\n", __func__); /* Silently ignore Vpp, output enable, speaker enable. 
*/ return 0; } static struct pcmcia_low_level shannon_pcmcia_ops = { .owner = THIS_MODULE, .hw_init = shannon_pcmcia_hw_init, .socket_state = shannon_pcmcia_socket_state, .configure_socket = shannon_pcmcia_configure_socket, }; int __devinit pcmcia_shannon_init(struct device *dev) { int ret = -ENODEV; if (machine_is_shannon()) ret = sa11xx_drv_pcmcia_probe(dev, &shannon_pcmcia_ops, 0, 2); return ret; }
gpl-2.0
drewx2/Find5-Kernel-Source
drivers/acpi/acpica/evevent.c
4852
8774
/******************************************************************************
 *
 * Module Name: evevent - Fixed Event handling and dispatch
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2012, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evevent")
#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
/* Local prototypes */
static acpi_status acpi_ev_fixed_event_initialize(void);

static u32 acpi_ev_fixed_event_dispatch(u32 event);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_initialize_events
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Initialize global data structures for ACPI events (Fixed, GPE)
 *
 ******************************************************************************/

acpi_status acpi_ev_initialize_events(void)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_initialize_events);

	/* If Hardware Reduced flag is set, there are no fixed events */

	if (acpi_gbl_reduced_hardware) {
		return_ACPI_STATUS(AE_OK);
	}

	/*
	 * Initialize the Fixed and General Purpose Events. This is done prior to
	 * enabling SCIs to prevent interrupts from occurring before the handlers
	 * are installed.
	 */
	status = acpi_ev_fixed_event_initialize();
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"Unable to initialize fixed events"));
		return_ACPI_STATUS(status);
	}

	status = acpi_ev_gpe_initialize();
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"Unable to initialize general purpose events"));
		return_ACPI_STATUS(status);
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_install_xrupt_handlers
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Install interrupt handlers for the SCI and Global Lock
 *
 ******************************************************************************/

acpi_status acpi_ev_install_xrupt_handlers(void)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_install_xrupt_handlers);

	/* If Hardware Reduced flag is set, there is no ACPI h/w */

	if (acpi_gbl_reduced_hardware) {
		return_ACPI_STATUS(AE_OK);
	}

	/* Install the SCI handler */

	status = acpi_ev_install_sci_handler();
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"Unable to install System Control Interrupt handler"));
		return_ACPI_STATUS(status);
	}

	/* Install the handler for the Global Lock */

	status = acpi_ev_init_global_lock_handler();
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"Unable to initialize Global Lock handler"));
		return_ACPI_STATUS(status);
	}

	acpi_gbl_events_initialized = TRUE;
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_fixed_event_initialize
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Install the fixed event handlers and disable all fixed events.
 *
 ******************************************************************************/

static acpi_status acpi_ev_fixed_event_initialize(void)
{
	u32 i;
	acpi_status status;

	/*
	 * Initialize the structure that keeps track of fixed event handlers and
	 * disable the fixed events. (The old comment said "enable", but the
	 * loop below explicitly writes ACPI_DISABLE_EVENT for each event that
	 * has an enable register.)
	 */
	for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
		acpi_gbl_fixed_event_handlers[i].handler = NULL;
		acpi_gbl_fixed_event_handlers[i].context = NULL;

		/* Disable the fixed event */

		/* 0xFF enable_register_id marks an event with no enable bit */
		if (acpi_gbl_fixed_event_info[i].enable_register_id != 0xFF) {
			status =
			    acpi_write_bit_register(acpi_gbl_fixed_event_info
						    [i].enable_register_id,
						    ACPI_DISABLE_EVENT);
			if (ACPI_FAILURE(status)) {
				return (status);
			}
		}
	}

	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_fixed_event_detect
 *
 * PARAMETERS:  None
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Checks the PM status register for active fixed events
 *
 ******************************************************************************/

u32 acpi_ev_fixed_event_detect(void)
{
	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
	u32 fixed_status;
	u32 fixed_enable;
	u32 i;

	ACPI_FUNCTION_NAME(ev_fixed_event_detect);

	/*
	 * Read the fixed feature status and enable registers, as all the cases
	 * depend on their values. Ignore errors here.
	 */
	(void)acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status);
	(void)acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable);

	ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
			  "Fixed Event Block: Enable %08X Status %08X\n",
			  fixed_enable, fixed_status));

	/*
	 * Check for all possible Fixed Events and dispatch those that are active
	 */
	for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {

		/* Both the status and enable bits must be on for this event */

		if ((fixed_status & acpi_gbl_fixed_event_info[i].
		     status_bit_mask)
		    && (fixed_enable & acpi_gbl_fixed_event_info[i].
			enable_bit_mask)) {
			/*
			 * Found an active (signalled) event. Invoke global event
			 * handler if present.
			 */
			acpi_fixed_event_count[i]++;
			if (acpi_gbl_global_event_handler) {
				acpi_gbl_global_event_handler
				    (ACPI_EVENT_TYPE_FIXED, NULL, i,
				     acpi_gbl_global_event_handler_context);
			}

			int_status |= acpi_ev_fixed_event_dispatch(i);
		}
	}

	return (int_status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_fixed_event_dispatch
 *
 * PARAMETERS:  Event               - Event type
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Clears the status bit for the requested event, calls the
 *              handler that previously registered for the event.
 *
 ******************************************************************************/

static u32 acpi_ev_fixed_event_dispatch(u32 event)
{

	ACPI_FUNCTION_ENTRY();

	/* Clear the status bit */

	(void)acpi_write_bit_register(acpi_gbl_fixed_event_info[event].
				      status_register_id, ACPI_CLEAR_STATUS);

	/*
	 * Make sure we've got a handler. If not, report an error. The event is
	 * disabled to prevent further interrupts.
	 */
	if (NULL == acpi_gbl_fixed_event_handlers[event].handler) {
		(void)acpi_write_bit_register(acpi_gbl_fixed_event_info[event].
					      enable_register_id,
					      ACPI_DISABLE_EVENT);

		ACPI_ERROR((AE_INFO,
			    "No installed handler for fixed event [0x%08X]",
			    event));

		return (ACPI_INTERRUPT_NOT_HANDLED);
	}

	/* Invoke the Fixed Event handler */

	return ((acpi_gbl_fixed_event_handlers[event].
		 handler) (acpi_gbl_fixed_event_handlers[event].context));
}

#endif				/* !ACPI_REDUCED_HARDWARE */
gpl-2.0
GruesomeWolf/Slippery_Sloth
arch/microblaze/lib/memmove.c
7668
5442
/* * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> * Copyright (C) 2008-2009 PetaLogix * Copyright (C) 2007 John Williams * * Reasonably optimised generic C-code for memcpy on Microblaze * This is generic C code to do efficient, alignment-aware memmove. * * It is based on demo code originally Copyright 2001 by Intel Corp, taken from * http://www.embedded.com/showArticle.jhtml?articleID=19205567 * * Attempts were made, unsuccessfully, to contact the original * author of this code (Michael Morrow, Intel). Below is the original * copyright notice. * * This software has been developed by Intel Corporation. * Intel specifically disclaims all warranties, express or * implied, and all liability, including consequential and * other indirect damages, for the use of this program, including * liability for infringement of any proprietary rights, * and including the warranties of merchantability and fitness * for a particular purpose. Intel does not assume any * responsibility for and errors which may appear in this program * not any responsibility to update it. */ #include <linux/types.h> #include <linux/stddef.h> #include <linux/compiler.h> #include <linux/module.h> #include <linux/string.h> #ifdef __HAVE_ARCH_MEMMOVE #ifndef CONFIG_OPT_LIB_FUNCTION void *memmove(void *v_dst, const void *v_src, __kernel_size_t c) { const char *src = v_src; char *dst = v_dst; if (!c) return v_dst; /* Use memcpy when source is higher than dest */ if (v_dst <= v_src) return memcpy(v_dst, v_src, c); /* copy backwards, from end to beginning */ src += c; dst += c; /* Simple, byte oriented memmove. 
*/ while (c--) *--dst = *--src; return v_dst; } #else /* CONFIG_OPT_LIB_FUNCTION */ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c) { const char *src = v_src; char *dst = v_dst; const uint32_t *i_src; uint32_t *i_dst; if (!c) return v_dst; /* Use memcpy when source is higher than dest */ if (v_dst <= v_src) return memcpy(v_dst, v_src, c); /* The following code tries to optimize the copy by using unsigned * alignment. This will work fine if both source and destination are * aligned on the same boundary. However, if they are aligned on * different boundaries shifts will be necessary. This might result in * bad performance on MicroBlaze systems without a barrel shifter. */ /* FIXME this part needs more test */ /* Do a descending copy - this is a bit trickier! */ dst += c; src += c; if (c >= 4) { unsigned value, buf_hold; /* Align the destination to a word boundary. */ /* This is done in an endian independent manner. */ switch ((unsigned long)dst & 3) { case 3: *--dst = *--src; --c; case 2: *--dst = *--src; --c; case 1: *--dst = *--src; --c; } i_dst = (void *)dst; /* Choose a copy scheme based on the source */ /* alignment relative to dstination. 
*/ switch ((unsigned long)src & 3) { case 0x0: /* Both byte offsets are aligned */ i_src = (const void *)src; for (; c >= 4; c -= 4) *--i_dst = *--i_src; src = (const void *)i_src; break; case 0x1: /* Unaligned - Off by 1 */ /* Word align the source */ i_src = (const void *) (((unsigned)src + 4) & ~3); #ifndef __MICROBLAZEEL__ /* Load the holding buffer */ buf_hold = *--i_src >> 24; for (; c >= 4; c -= 4) { value = *--i_src; *--i_dst = buf_hold << 8 | value; buf_hold = value >> 24; } #else /* Load the holding buffer */ buf_hold = (*--i_src & 0xFF) << 24; for (; c >= 4; c -= 4) { value = *--i_src; *--i_dst = buf_hold | ((value & 0xFFFFFF00)>>8); buf_hold = (value & 0xFF) << 24; } #endif /* Realign the source */ src = (const void *)i_src; src += 1; break; case 0x2: /* Unaligned - Off by 2 */ /* Word align the source */ i_src = (const void *) (((unsigned)src + 4) & ~3); #ifndef __MICROBLAZEEL__ /* Load the holding buffer */ buf_hold = *--i_src >> 16; for (; c >= 4; c -= 4) { value = *--i_src; *--i_dst = buf_hold << 16 | value; buf_hold = value >> 16; } #else /* Load the holding buffer */ buf_hold = (*--i_src & 0xFFFF) << 16; for (; c >= 4; c -= 4) { value = *--i_src; *--i_dst = buf_hold | ((value & 0xFFFF0000)>>16); buf_hold = (value & 0xFFFF) << 16; } #endif /* Realign the source */ src = (const void *)i_src; src += 2; break; case 0x3: /* Unaligned - Off by 3 */ /* Word align the source */ i_src = (const void *) (((unsigned)src + 4) & ~3); #ifndef __MICROBLAZEEL__ /* Load the holding buffer */ buf_hold = *--i_src >> 8; for (; c >= 4; c -= 4) { value = *--i_src; *--i_dst = buf_hold << 24 | value; buf_hold = value >> 8; } #else /* Load the holding buffer */ buf_hold = (*--i_src & 0xFFFFFF) << 8; for (; c >= 4; c -= 4) { value = *--i_src; *--i_dst = buf_hold | ((value & 0xFF000000)>> 24); buf_hold = (value & 0xFFFFFF) << 8; } #endif /* Realign the source */ src = (const void *)i_src; src += 3; break; } dst = (void *)i_dst; } /* simple fast copy, ... 
unless a cache boundary is crossed */ /* Finish off any remaining bytes */ switch (c) { case 4: *--dst = *--src; case 3: *--dst = *--src; case 2: *--dst = *--src; case 1: *--dst = *--src; } return v_dst; } #endif /* CONFIG_OPT_LIB_FUNCTION */ EXPORT_SYMBOL(memmove); #endif /* __HAVE_ARCH_MEMMOVE */
gpl-2.0
friedrich420/Sprint-Note-4-Android-5.1.1-Kernel
KernelN910P-5_1_1GIT/drivers/w1/slaves/w1_smem.c
7924
1784
/* * w1_smem.c * * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> * * * This program is free software; you can redistribute it and/or modify * it under the smems of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <asm/types.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/device.h> #include <linux/types.h> #include "../w1.h" #include "../w1_int.h" #include "../w1_family.h" MODULE_LICENSE("GPL"); MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, 64bit memory family."); static struct w1_family w1_smem_family_01 = { .fid = W1_FAMILY_SMEM_01, }; static struct w1_family w1_smem_family_81 = { .fid = W1_FAMILY_SMEM_81, }; static int __init w1_smem_init(void) { int err; err = w1_register_family(&w1_smem_family_01); if (err) return err; err = w1_register_family(&w1_smem_family_81); if (err) { w1_unregister_family(&w1_smem_family_01); return err; } return 0; } static void __exit w1_smem_fini(void) { w1_unregister_family(&w1_smem_family_01); w1_unregister_family(&w1_smem_family_81); } module_init(w1_smem_init); module_exit(w1_smem_fini);
gpl-2.0
ivecera/net-next
sound/pci/asihpi/hpios.c
9716
2680
/****************************************************************************** AudioScience HPI driver Copyright (C) 1997-2012 AudioScience Inc. <support@audioscience.com> This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as published by the Free Software Foundation; This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA HPI Operating System function implementation for Linux (C) Copyright AudioScience Inc. 1997-2003 ******************************************************************************/ #define SOURCEFILE_NAME "hpios.c" #include "hpi_internal.h" #include "hpidebug.h" #include <linux/delay.h> #include <linux/sched.h> void hpios_delay_micro_seconds(u32 num_micro_sec) { if ((usecs_to_jiffies(num_micro_sec) > 1) && !in_interrupt()) { /* MUST NOT SCHEDULE IN INTERRUPT CONTEXT! */ schedule_timeout_uninterruptible(usecs_to_jiffies (num_micro_sec)); } else if (num_micro_sec <= 2000) udelay(num_micro_sec); else mdelay(num_micro_sec / 1000); } /** Allocate an area of locked memory for bus master DMA operations. If allocation fails, return 1, and *pMemArea.size = 0 */ u16 hpios_locked_mem_alloc(struct consistent_dma_area *p_mem_area, u32 size, struct pci_dev *pdev) { /*?? any benefit in using managed dmam_alloc_coherent? 
*/ p_mem_area->vaddr = dma_alloc_coherent(&pdev->dev, size, &p_mem_area->dma_handle, GFP_DMA32 | GFP_KERNEL); if (p_mem_area->vaddr) { HPI_DEBUG_LOG(DEBUG, "allocated %d bytes, dma 0x%x vma %p\n", size, (unsigned int)p_mem_area->dma_handle, p_mem_area->vaddr); p_mem_area->pdev = &pdev->dev; p_mem_area->size = size; return 0; } else { HPI_DEBUG_LOG(WARNING, "failed to allocate %d bytes locked memory\n", size); p_mem_area->size = 0; return 1; } } u16 hpios_locked_mem_free(struct consistent_dma_area *p_mem_area) { if (p_mem_area->size) { dma_free_coherent(p_mem_area->pdev, p_mem_area->size, p_mem_area->vaddr, p_mem_area->dma_handle); HPI_DEBUG_LOG(DEBUG, "freed %lu bytes, dma 0x%x vma %p\n", (unsigned long)p_mem_area->size, (unsigned int)p_mem_area->dma_handle, p_mem_area->vaddr); p_mem_area->size = 0; return 0; } else { return 1; } }
gpl-2.0
bonsaijem/ProteusX
drivers/infiniband/hw/amso1100/c2_alloc.c
13300
4082
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/bitmap.h>

#include "c2.h"

/*
 * Allocate one page-sized chunk of coherent DMA memory and initialize it
 * as a pool of u16 "shared pointer" slots. The free list is threaded
 * through shared_ptr[] itself: each entry holds the index of the next
 * free slot, head->head is the first free index, and 0xFFFF terminates
 * the list.
 */
static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
			       struct sp_chunk **head)
{
	int i;
	struct sp_chunk *new_head;
	dma_addr_t dma_addr;

	new_head = dma_alloc_coherent(&c2dev->pcidev->dev, PAGE_SIZE,
				      &dma_addr, gfp_mask);
	if (new_head == NULL)
		return -ENOMEM;

	new_head->dma_addr = dma_addr;
	dma_unmap_addr_set(new_head, mapping, new_head->dma_addr);

	new_head->next = NULL;
	new_head->head = 0;

	/* build list where each index is the next free slot */
	for (i = 0; i < (PAGE_SIZE - sizeof(struct sp_chunk) -
			 sizeof(u16)) / sizeof(u16) - 1; i++) {
		new_head->shared_ptr[i] = i + 1;
	}
	/* terminate list */
	new_head->shared_ptr[i] = 0xFFFF;

	*head = new_head;
	return 0;
}

/* Create the pool: just allocate the first chunk as the list root. */
int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
		      struct sp_chunk **root)
{
	return c2_alloc_mqsp_chunk(c2dev, gfp_mask, root);
}

/* Walk the chunk list and return every page to the DMA allocator. */
void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
{
	struct sp_chunk *next;

	while (root) {
		next = root->next;
		dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root,
				  dma_unmap_addr(root, mapping));
		root = next;
	}
}

/*
 * Pop one free slot from the pool, growing the pool with a new chunk when
 * every existing chunk is full (head == 0xFFFF). Returns the kernel
 * address of the slot and stores its bus address in *dma_addr, or NULL
 * if a new chunk could not be allocated.
 */
__be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
		      dma_addr_t *dma_addr, gfp_t gfp_mask)
{
	u16 mqsp;

	while (head) {
		mqsp = head->head;
		if (mqsp != 0xFFFF) {
			/* This chunk has a free slot: unlink it */
			head->head = head->shared_ptr[mqsp];
			break;
		} else if (head->next == NULL) {
			/* Pool exhausted: append a fresh chunk and use it */
			if (c2_alloc_mqsp_chunk(c2dev, gfp_mask, &head->next) ==
			    0) {
				head = head->next;
				mqsp = head->head;
				head->head = head->shared_ptr[mqsp];
				break;
			} else
				return NULL;
		} else
			head = head->next;
	}
	if (head) {
		/* Bus address = chunk bus address + slot offset in page */
		*dma_addr = head->dma_addr +
			    ((unsigned long) &(head->shared_ptr[mqsp]) -
			     (unsigned long) head);
		pr_debug("%s addr %p dma_addr %llx\n", __func__,
			 &(head->shared_ptr[mqsp]),
			 (unsigned long long) *dma_addr);
		return (__force __be16 *) &(head->shared_ptr[mqsp]);
	}
	return NULL;
}

/*
 * Return a slot to its chunk's free list. The owning chunk is recovered
 * from the slot address (chunks are page-aligned), and the slot index is
 * derived from the offset of mqsp within the shared_ptr[] array.
 */
void c2_free_mqsp(__be16 *mqsp)
{
	struct sp_chunk *head;
	u16 idx;

	/* The chunk containing this ptr begins at the page boundary */
	head = (struct sp_chunk *) ((unsigned long) mqsp & PAGE_MASK);

	/* Link head to new mqsp */
	*mqsp = (__force __be16) head->head;

	/* Compute the shared_ptr index */
	idx = ((unsigned long) mqsp & ~PAGE_MASK) >> 1;
	idx -= (unsigned long) &(((struct sp_chunk *) 0)->shared_ptr[0]) >> 1;

	/* Point this index at the head */
	head->shared_ptr[idx] = head->head;

	/* Point head at this index */
	head->head = idx;
}
gpl-2.0
Fusion-Devices/android_kernel_asus_flo
fs/nfsd/nfs4xdr.c
501
92730
/* * Server-side XDR for NFSv4 * * Copyright (c) 2002 The Regents of the University of Michigan. * All rights reserved. * * Kendrick Smith <kmsmith@umich.edu> * Andy Adamson <andros@umich.edu> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * TODO: Neil Brown made the following observation: We currently * initially reserve NFSD_BUFSIZE space on the transmit queue and * never release any of that until the request is complete. * It would be good to calculate a new maximum response size while * decoding the COMPOUND, and call svc_reserve with this number * at the end of nfs4svc_decode_compoundargs. 
 */

#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/statfs.h>
#include <linux/utsname.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/svcauth_gss.h>
#include "idmap.h"
#include "acl.h"
#include "xdr4.h"
#include "vfs.h"
#include "state.h"
#include "cache.h"

#define NFSDDBG_FACILITY		NFSDDBG_XDR

/*
 * As per referral draft, the fsid for a referral MUST be different from
 * the fsid of the containing directory in order to indicate to the
 * client that a filesystem boundary is present.  We use a fixed fsid
 * for a referral.
 */
#define NFS4_REFERRAL_FSID_MAJOR	0x8000000ULL
#define NFS4_REFERRAL_FSID_MINOR	0x8000000ULL

/*
 * Validate a filename decoded from the wire: it must be non-empty,
 * must not be "." or "..", and must not contain '/'.  Returns 0 on
 * success, nfserr_inval for an empty name, or @err for the other
 * failure cases (caller chooses nfserr_inval vs nfserr_noent).
 */
static __be32
check_filename(char *str, int len, __be32 err)
{
	int i;

	if (len == 0)
		return nfserr_inval;
	if (isdotent(str, len))
		return err;
	for (i = 0; i < len; i++)
		if (str[i] == '/')
			return err;
	return 0;
}

/*
 * Shared prologue/epilogue for the decode functions below.  DECODE_HEAD
 * declares the cursor 'p' and 'status'; DECODE_TAIL supplies the common
 * success path plus the 'out'/'xdr_error' labels that READ_BUF and
 * SAVEMEM jump to on short or malformed input.
 */
#define DECODE_HEAD				\
	__be32 *p;				\
	__be32 status
#define DECODE_TAIL				\
	status = 0;				\
out:						\
	return status;				\
xdr_error:					\
	dprintk("NFSD: xdr error (%s:%d)\n",	\
			__FILE__, __LINE__);	\
	status = nfserr_bad_xdr;		\
	goto out

#define READ32(x)	(x) = ntohl(*p++)
#define READ64(x)	do {			\
	(x) = (u64)ntohl(*p++) << 32;		\
	(x) |= ntohl(*p++);			\
} while (0)
/* Takes the middle 32 bits of a 3-word nfstime4; the first and last
 * words (high seconds and nseconds) are skipped. */
#define READTIME(x)	do {			\
	p++;					\
	(x) = ntohl(*p++);			\
	p++;					\
} while (0)
/* Point x at the data in place (no copy) and advance the cursor. */
#define READMEM(x,nbytes) do {			\
	x = (char *)p;				\
	p += XDR_QUADLEN(nbytes);		\
} while (0)
/* Like READMEM, but if the data was staged in the temporary buffers
 * (i.e. it straddled a page boundary) make a stable copy via savemem()
 * so the pointer outlives the decode pass. */
#define SAVEMEM(x,nbytes) do {			\
	if (!(x = (p==argp->tmp || p == argp->tmpp) ? \
		savemem(argp, p, nbytes) :	\
		(char *)p)) {			\
		dprintk("NFSD: xdr error (%s:%d)\n", \
				__FILE__, __LINE__); \
		goto xdr_error;			\
	}					\
	p += XDR_QUADLEN(nbytes);		\
} while (0)
#define COPYMEM(x,nbytes) do {			\
	memcpy((x), p, nbytes);			\
	p += XDR_QUADLEN(nbytes);		\
} while (0)

/* READ_BUF, read_buf(): nbytes must be <= PAGE_SIZE */
#define READ_BUF(nbytes)  do {			\
	if (nbytes <= (u32)((char *)argp->end - (char *)argp->p)) {	\
		p = argp->p;			\
		argp->p += XDR_QUADLEN(nbytes);	\
	} else if (!(p = read_buf(argp, nbytes))) { \
		dprintk("NFSD: xdr error (%s:%d)\n", \
				__FILE__, __LINE__); \
		goto xdr_error;			\
	}					\
} while (0)

/*
 * Slow path for READ_BUF: the requested bytes straddle the end of the
 * current page.  Copies the tail of the current page plus the head of
 * the next into argp->tmp (small requests) or a kmalloc'd argp->tmpp,
 * advances argp->p/argp->end/argp->pagelen onto the next page, and
 * returns a pointer to the contiguous copy, or NULL on failure.
 */
static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 nbytes)
{
	/* We want more bytes than seem to be available.
	 * Maybe we need a new page, maybe we have just run out
	 */
	unsigned int avail = (char *)argp->end - (char *)argp->p;
	__be32 *p;
	if (avail + argp->pagelen < nbytes)
		return NULL;
	if (avail + PAGE_SIZE < nbytes) /* need more than a page !! */
		return NULL;
	/* ok, we can do it with the current plus the next page */
	if (nbytes <= sizeof(argp->tmp))
		p = argp->tmp;
	else {
		/* any previous tmpp was handed off via savemem/defer_free
		 * or is stale; replace it */
		kfree(argp->tmpp);
		p = argp->tmpp = kmalloc(nbytes, GFP_KERNEL);
		if (!p)
			return NULL;
	}
	/*
	 * The following memcpy is safe because read_buf is always
	 * called with nbytes > avail, and the two cases above both
	 * guarantee p points to at least nbytes bytes.
	 */
	memcpy(p, argp->p, avail);
	/* step to next page */
	argp->p = page_address(argp->pagelist[0]);
	argp->pagelist++;
	if (argp->pagelen < PAGE_SIZE) {
		argp->end = argp->p + (argp->pagelen>>2);
		argp->pagelen = 0;
	} else {
		argp->end = argp->p + (PAGE_SIZE>>2);
		argp->pagelen -= PAGE_SIZE;
	}
	memcpy(((char*)p)+avail, argp->p, (nbytes - avail));
	argp->p += XDR_QUADLEN(nbytes - avail);
	return p;
}

/* True iff both halves of the clientid are zero. */
static int zero_clientid(clientid_t *clid)
{
	return (clid->cl_boot == 0) && (clid->cl_id == 0);
}

/*
 * Register @p to be released via @release when the compound args are
 * torn down.  Returns 0 on success or -ENOMEM; on failure the caller
 * keeps ownership of @p.
 */
static int
defer_free(struct nfsd4_compoundargs *argp,
		void (*release)(const void *), void *p)
{
	struct tmpbuf *tb;

	tb = kmalloc(sizeof(*tb), GFP_KERNEL);
	if (!tb)
		return -ENOMEM;
	tb->buf = p;
	tb->release = release;
	tb->next = argp->to_free;
	argp->to_free = tb;
	return 0;
}

/*
 * Turn data staged in argp->tmp/argp->tmpp into a stable, deferred-free
 * allocation: tmp is duplicated (it is reused by the next read_buf),
 * while ownership of the kmalloc'd tmpp is simply transferred.
 * Returns NULL on allocation failure.
 */
static char *savemem(struct nfsd4_compoundargs *argp, __be32 *p, int nbytes)
{
	if (p == argp->tmp) {
		p = kmemdup(argp->tmp, nbytes, GFP_KERNEL);
		if (!p)
			return NULL;
	} else {
		BUG_ON(p != argp->tmpp);
		argp->tmpp = NULL;
	}
	if (defer_free(argp, kfree, p)) {
		kfree(p);
		return NULL;
	} else
		return (char *)p;
}

/*
 * Decode a bitmap4 into up to three words of @bmval (extra words are
 * consumed but discarded).  The 1000-word cap bounds how much a client
 * can make us read.
 */
static __be32
nfsd4_decode_bitmap(struct nfsd4_compoundargs *argp, u32 *bmval)
{
	u32 bmlen;
	DECODE_HEAD;

	bmval[0] = 0;
	bmval[1] = 0;
	bmval[2] = 0;

	READ_BUF(4);
	READ32(bmlen);
	if (bmlen > 1000)
		goto xdr_error;

	READ_BUF(bmlen << 2);
	if (bmlen > 0)
		READ32(bmval[0]);
	if (bmlen > 1)
		READ32(bmval[1]);
	if (bmlen > 2)
		READ32(bmval[2]);

	DECODE_TAIL;
}

/*
 * Decode an fattr4 (bitmap + length-prefixed attribute data) into an
 * iattr and optional NFSv4 ACL.  Tracks 'len' of consumed attribute
 * bytes; unwriteable attributes in the bitmap are skipped wholesale at
 * the end, otherwise a length mismatch is a bad_xdr error.
 */
static __be32
nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
		   struct iattr *iattr, struct nfs4_acl **acl)
{
	int expected_len, len = 0;
	u32 dummy32;
	char *buf;
	int host_err;

	DECODE_HEAD;
	iattr->ia_valid = 0;
	if ((status = nfsd4_decode_bitmap(argp, bmval)))
		return status;

	READ_BUF(4);
	READ32(expected_len);

	if (bmval[0] & FATTR4_WORD0_SIZE) {
		READ_BUF(8);
		len += 8;
		READ64(iattr->ia_size);
		iattr->ia_valid |= ATTR_SIZE;
	}
	if (bmval[0] & FATTR4_WORD0_ACL) {
		u32 nace;
		struct nfs4_ace *ace;

		READ_BUF(4); len += 4;
		READ32(nace);

		if (nace > NFS4_ACL_MAX)
			return nfserr_resource;

		*acl = nfs4_acl_new(nace);
		if (*acl == NULL) {
			host_err = -ENOMEM;
			goto out_nfserr;
		}
		defer_free(argp, kfree, *acl);

		(*acl)->naces = nace;
		for (ace = (*acl)->aces; ace < (*acl)->aces + nace; ace++) {
			READ_BUF(16); len += 16;
			READ32(ace->type);
			READ32(ace->flag);
			READ32(ace->access_mask);
			READ32(dummy32);
			READ_BUF(dummy32);
			len += XDR_QUADLEN(dummy32) << 2;
			READMEM(buf, dummy32);
			ace->whotype = nfs4_acl_get_whotype(buf, dummy32);
			status = nfs_ok;
			/* named principals need an idmap lookup; the
			 * special OWNER@/GROUP@/EVERYONE@ forms do not */
			if (ace->whotype != NFS4_ACL_WHO_NAMED)
				ace->who = 0;
			else if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP)
				status = nfsd_map_name_to_gid(argp->rqstp,
						buf, dummy32, &ace->who);
			else
				status = nfsd_map_name_to_uid(argp->rqstp,
						buf, dummy32, &ace->who);
			if (status)
				return status;
		}
	} else
		*acl = NULL;
	if (bmval[1] & FATTR4_WORD1_MODE) {
		READ_BUF(4);
		len += 4;
		READ32(iattr->ia_mode);
		iattr->ia_mode &= (S_IFMT | S_IALLUGO);
		iattr->ia_valid |= ATTR_MODE;
	}
	if (bmval[1] & FATTR4_WORD1_OWNER) {
		READ_BUF(4);
		len += 4;
		READ32(dummy32);
		READ_BUF(dummy32);
		len += (XDR_QUADLEN(dummy32) << 2);
		READMEM(buf, dummy32);
		if ((status = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid)))
			return status;
		iattr->ia_valid |= ATTR_UID;
	}
	if (bmval[1] & FATTR4_WORD1_OWNER_GROUP) {
		READ_BUF(4);
		len += 4;
		READ32(dummy32);
		READ_BUF(dummy32);
		len += (XDR_QUADLEN(dummy32) << 2);
		READMEM(buf, dummy32);
		if ((status = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid)))
			return status;
		iattr->ia_valid |= ATTR_GID;
	}
	if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) {
		READ_BUF(4);
		len += 4;
		READ32(dummy32);
		switch (dummy32) {
		case NFS4_SET_TO_CLIENT_TIME:
			/* We require the high 32 bits of 'seconds' to be 0,
			 * and we ignore all 32 bits of 'nseconds'. */
			READ_BUF(12);
			len += 12;
			READ64(iattr->ia_atime.tv_sec);
			READ32(iattr->ia_atime.tv_nsec);
			if (iattr->ia_atime.tv_nsec >= (u32)1000000000)
				return nfserr_inval;
			iattr->ia_valid |= (ATTR_ATIME | ATTR_ATIME_SET);
			break;
		case NFS4_SET_TO_SERVER_TIME:
			iattr->ia_valid |= ATTR_ATIME;
			break;
		default:
			goto xdr_error;
		}
	}
	if (bmval[1] & FATTR4_WORD1_TIME_MODIFY_SET) {
		READ_BUF(4);
		len += 4;
		READ32(dummy32);
		switch (dummy32) {
		case NFS4_SET_TO_CLIENT_TIME:
			/* We require the high 32 bits of 'seconds' to be 0,
			 * and we ignore all 32 bits of 'nseconds'. */
			READ_BUF(12);
			len += 12;
			READ64(iattr->ia_mtime.tv_sec);
			READ32(iattr->ia_mtime.tv_nsec);
			if (iattr->ia_mtime.tv_nsec >= (u32)1000000000)
				return nfserr_inval;
			iattr->ia_valid |= (ATTR_MTIME | ATTR_MTIME_SET);
			break;
		case NFS4_SET_TO_SERVER_TIME:
			iattr->ia_valid |= ATTR_MTIME;
			break;
		default:
			goto xdr_error;
		}
	}
	/* If the client also set attributes we don't handle, just skip
	 * over whatever remains of the attribute blob. */
	if (bmval[0] & ~NFSD_WRITEABLE_ATTRS_WORD0
	    || bmval[1] & ~NFSD_WRITEABLE_ATTRS_WORD1
	    || bmval[2] & ~NFSD_WRITEABLE_ATTRS_WORD2)
		READ_BUF(expected_len - len);
	else if (len != expected_len)
		goto xdr_error;

	DECODE_TAIL;

out_nfserr:
	status = nfserrno(host_err);
	goto out;
}

/* Decode a stateid4: 32-bit generation plus opaque remainder. */
static __be32
nfsd4_decode_stateid(struct nfsd4_compoundargs *argp, stateid_t *sid)
{
	DECODE_HEAD;

	READ_BUF(sizeof(stateid_t));
	READ32(sid->si_generation);
	COPYMEM(&sid->si_opaque, sizeof(stateid_opaque_t));

	DECODE_TAIL;
}

/* Decode ACCESS args: a single access-request bitmask. */
static __be32
nfsd4_decode_access(struct nfsd4_compoundargs *argp, struct nfsd4_access *access)
{
	DECODE_HEAD;

	READ_BUF(4);
	READ32(access->ac_req_access);

	DECODE_TAIL;
}

/* Decode BIND_CONN_TO_SESSION args: sessionid + channel direction. */
static __be32 nfsd4_decode_bind_conn_to_session(struct nfsd4_compoundargs *argp, struct nfsd4_bind_conn_to_session *bcts)
{
	DECODE_HEAD;

	READ_BUF(NFS4_MAX_SESSIONID_LEN + 8);
	COPYMEM(bcts->sessionid.data, NFS4_MAX_SESSIONID_LEN);
	READ32(bcts->dir);
	/* XXX: skipping ctsa_use_conn_in_rdma_mode.  Perhaps Tom Tucker
	 * could help us figure out we should be using it.
	 */
	DECODE_TAIL;
}

/*
 * Decode CLOSE args: seqid followed by the open stateid.  Note the
 * direct return; DECODE_TAIL below is reached only via READ_BUF's
 * goto xdr_error.
 */
static __be32
nfsd4_decode_close(struct nfsd4_compoundargs *argp, struct nfsd4_close *close)
{
	DECODE_HEAD;

	READ_BUF(4);
	READ32(close->cl_seqid);
	return nfsd4_decode_stateid(argp, &close->cl_stateid);

	DECODE_TAIL;
}

/* Decode COMMIT args: 64-bit offset and 32-bit count. */
static __be32
nfsd4_decode_commit(struct nfsd4_compoundargs *argp, struct nfsd4_commit *commit)
{
	DECODE_HEAD;

	READ_BUF(12);
	READ64(commit->co_offset);
	READ32(commit->co_count);

	DECODE_TAIL;
}

/*
 * Decode CREATE args: object type (with type-specific data for
 * symlinks and devices), the new name, and the initial attributes.
 */
static __be32
nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create)
{
	DECODE_HEAD;

	READ_BUF(4);
	READ32(create->cr_type);
	switch (create->cr_type) {
	case NF4LNK:
		READ_BUF(4);
		READ32(create->cr_linklen);
		READ_BUF(create->cr_linklen);
		/*
		 * The VFS will want a null-terminated string, and
		 * null-terminating in place isn't safe since this might
		 * end on a page boundary:
		 */
		create->cr_linkname =
				kmalloc(create->cr_linklen + 1, GFP_KERNEL);
		if (!create->cr_linkname)
			return nfserr_jukebox;
		memcpy(create->cr_linkname, p, create->cr_linklen);
		create->cr_linkname[create->cr_linklen] = '\0';
		defer_free(argp, kfree, create->cr_linkname);
		break;
	case NF4BLK:
	case NF4CHR:
		READ_BUF(8);
		READ32(create->cr_specdata1);
		READ32(create->cr_specdata2);
		break;
	case NF4SOCK:
	case NF4FIFO:
	case NF4DIR:
	default:
		break;
	}

	READ_BUF(4);
	READ32(create->cr_namelen);
	READ_BUF(create->cr_namelen);
	SAVEMEM(create->cr_name, create->cr_namelen);
	if ((status = check_filename(create->cr_name, create->cr_namelen, nfserr_inval)))
		return status;

	status = nfsd4_decode_fattr(argp, create->cr_bmval, &create->cr_iattr,
				    &create->cr_acl);
	if (status)
		goto out;

	DECODE_TAIL;
}

/* Decode DELEGRETURN args: just the delegation stateid. */
static inline __be32
nfsd4_decode_delegreturn(struct nfsd4_compoundargs *argp, struct nfsd4_delegreturn *dr)
{
	return nfsd4_decode_stateid(argp, &dr->dr_stateid);
}

/* Decode GETATTR args: the requested attribute bitmap. */
static inline __be32
nfsd4_decode_getattr(struct nfsd4_compoundargs *argp, struct nfsd4_getattr *getattr)
{
	return nfsd4_decode_bitmap(argp, getattr->ga_bmval);
}

/* Decode LINK args: the new link's name (validated). */
static __be32
nfsd4_decode_link(struct nfsd4_compoundargs *argp, struct nfsd4_link *link)
{
	DECODE_HEAD;

	READ_BUF(4);
	READ32(link->li_namelen);
	READ_BUF(link->li_namelen);
	SAVEMEM(link->li_name, link->li_namelen);
	if ((status = check_filename(link->li_name, link->li_namelen, nfserr_inval)))
		return status;

	DECODE_TAIL;
}

/*
 * Decode LOCK args.  The trailing locker4 union is either a new lock
 * owner (open seqid + open stateid + lock seqid + clientid + owner
 * name) or an existing one (lock stateid + lock seqid).
 */
static __be32
nfsd4_decode_lock(struct nfsd4_compoundargs *argp, struct nfsd4_lock *lock)
{
	DECODE_HEAD;

	/*
	 * type, reclaim(boolean), offset, length, new_lock_owner(boolean)
	 */
	READ_BUF(28);
	READ32(lock->lk_type);
	if ((lock->lk_type < NFS4_READ_LT) || (lock->lk_type > NFS4_WRITEW_LT))
		goto xdr_error;
	READ32(lock->lk_reclaim);
	READ64(lock->lk_offset);
	READ64(lock->lk_length);
	READ32(lock->lk_is_new);

	if (lock->lk_is_new) {
		READ_BUF(4);
		READ32(lock->lk_new_open_seqid);
		status = nfsd4_decode_stateid(argp, &lock->lk_new_open_stateid);
		if (status)
			return status;
		READ_BUF(8 + sizeof(clientid_t));
		READ32(lock->lk_new_lock_seqid);
		COPYMEM(&lock->lk_new_clientid, sizeof(clientid_t));
		READ32(lock->lk_new_owner.len);
		READ_BUF(lock->lk_new_owner.len);
		READMEM(lock->lk_new_owner.data, lock->lk_new_owner.len);
	} else {
		status = nfsd4_decode_stateid(argp, &lock->lk_old_lock_stateid);
		if (status)
			return status;
		READ_BUF(4);
		READ32(lock->lk_old_lock_seqid);
	}

	DECODE_TAIL;
}

/* Decode LOCKT args: lock type/range plus clientid and owner name. */
static __be32
nfsd4_decode_lockt(struct nfsd4_compoundargs *argp, struct nfsd4_lockt *lockt)
{
	DECODE_HEAD;

	READ_BUF(32);
	READ32(lockt->lt_type);
	if((lockt->lt_type < NFS4_READ_LT) || (lockt->lt_type > NFS4_WRITEW_LT))
		goto xdr_error;
	READ64(lockt->lt_offset);
	READ64(lockt->lt_length);
	COPYMEM(&lockt->lt_clientid, 8);
	READ32(lockt->lt_owner.len);
	READ_BUF(lockt->lt_owner.len);
	READMEM(lockt->lt_owner.data, lockt->lt_owner.len);

	DECODE_TAIL;
}

/* Decode LOCKU args: type, seqid, lock stateid and range. */
static __be32
nfsd4_decode_locku(struct nfsd4_compoundargs *argp, struct nfsd4_locku *locku)
{
	DECODE_HEAD;

	READ_BUF(8);
	READ32(locku->lu_type);
	if ((locku->lu_type < NFS4_READ_LT) || (locku->lu_type > NFS4_WRITEW_LT))
		goto xdr_error;
	READ32(locku->lu_seqid);
	status = nfsd4_decode_stateid(argp, &locku->lu_stateid);
	if (status)
		return status;
	READ_BUF(16);
	READ64(locku->lu_offset);
	READ64(locku->lu_length);

	DECODE_TAIL;
}

/* Decode LOOKUP args: the component name (noent on "."/".."). */
static __be32
nfsd4_decode_lookup(struct nfsd4_compoundargs *argp, struct nfsd4_lookup *lookup)
{
	DECODE_HEAD;

	READ_BUF(4);
	READ32(lookup->lo_len);
	READ_BUF(lookup->lo_len);
	SAVEMEM(lookup->lo_name, lookup->lo_len);
	if ((status = check_filename(lookup->lo_name, lookup->lo_len, nfserr_noent)))
		return status;

	DECODE_TAIL;
}

/*
 * Decode and validate the combined share_access word: splits it into
 * access / want / when fields and rejects any bit combination the
 * protocol (or our minorversion) does not allow.  deleg_when may be
 * NULL for OPEN_DOWNGRADE, where "when" bits are invalid.
 */
static __be32
nfsd4_decode_share_access(struct nfsd4_compoundargs *argp, u32 *share_access, u32 *deleg_want, u32 *deleg_when)
{
	__be32 *p;
	u32 w;

	READ_BUF(4);
	READ32(w);
	*share_access = w & NFS4_SHARE_ACCESS_MASK;
	*deleg_want = w & NFS4_SHARE_WANT_MASK;
	if (deleg_when)
		*deleg_when = w & NFS4_SHARE_WHEN_MASK;

	switch (w & NFS4_SHARE_ACCESS_MASK) {
	case NFS4_SHARE_ACCESS_READ:
	case NFS4_SHARE_ACCESS_WRITE:
	case NFS4_SHARE_ACCESS_BOTH:
		break;
	default:
		return nfserr_bad_xdr;
	}
	w &= ~NFS4_SHARE_ACCESS_MASK;
	if (!w)
		return nfs_ok;
	/* want/when bits are NFSv4.1+ only */
	if (!argp->minorversion)
		return nfserr_bad_xdr;
	switch (w & NFS4_SHARE_WANT_MASK) {
	case NFS4_SHARE_WANT_NO_PREFERENCE:
	case NFS4_SHARE_WANT_READ_DELEG:
	case NFS4_SHARE_WANT_WRITE_DELEG:
	case NFS4_SHARE_WANT_ANY_DELEG:
	case NFS4_SHARE_WANT_NO_DELEG:
	case NFS4_SHARE_WANT_CANCEL:
		break;
	default:
		return nfserr_bad_xdr;
	}
	w &= ~NFS4_SHARE_WANT_MASK;
	if (!w)
		return nfs_ok;
	if (!deleg_when)	/* open_downgrade */
		return nfserr_inval;
	switch (w) {
	case NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL:
	case NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED:
	case (NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL |
	      NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED):
		return nfs_ok;
	}
xdr_error:
	return nfserr_bad_xdr;
}

/* Decode and validate a share_deny word. */
static __be32 nfsd4_decode_share_deny(struct nfsd4_compoundargs *argp, u32 *x)
{
	__be32 *p;

	READ_BUF(4);
	READ32(*x);
	/* Note: unlike access bits, deny bits may be zero.
	 */
	if (*x & ~NFS4_SHARE_DENY_BOTH)
		return nfserr_bad_xdr;
	return nfs_ok;
xdr_error:
	return nfserr_bad_xdr;
}

/*
 * Decode a non-empty opaque (e.g. an owner name), bounded by
 * NFS4_OPAQUE_LIMIT, into a stable buffer.
 */
static __be32 nfsd4_decode_opaque(struct nfsd4_compoundargs *argp, struct xdr_netobj *o)
{
	__be32 *p;

	READ_BUF(4);
	READ32(o->len);

	if (o->len == 0 || o->len > NFS4_OPAQUE_LIMIT)
		return nfserr_bad_xdr;

	READ_BUF(o->len);
	SAVEMEM(o->data, o->len);
	return nfs_ok;
xdr_error:
	return nfserr_bad_xdr;
}

/*
 * Decode OPEN args: seqid, share access/deny, clientid, owner, the
 * openflag4 union (create mode and attributes/verifier), and the
 * open_claim4 union.  Minorversion-1-only claim types and create
 * modes are rejected for v4.0 requests.
 */
static __be32
nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
{
	DECODE_HEAD;
	u32 dummy;

	memset(open->op_bmval, 0, sizeof(open->op_bmval));
	open->op_iattr.ia_valid = 0;
	open->op_openowner = NULL;

	/* seqid, share_access, share_deny, clientid, ownerlen */
	READ_BUF(4);
	READ32(open->op_seqid);
	/* decode, yet ignore deleg_when until supported */
	status = nfsd4_decode_share_access(argp, &open->op_share_access,
					   &open->op_deleg_want, &dummy);
	if (status)
		goto xdr_error;
	status = nfsd4_decode_share_deny(argp, &open->op_share_deny);
	if (status)
		goto xdr_error;
	READ_BUF(sizeof(clientid_t));
	COPYMEM(&open->op_clientid, sizeof(clientid_t));
	status = nfsd4_decode_opaque(argp, &open->op_owner);
	if (status)
		goto xdr_error;
	READ_BUF(4);
	READ32(open->op_create);
	switch (open->op_create) {
	case NFS4_OPEN_NOCREATE:
		break;
	case NFS4_OPEN_CREATE:
		READ_BUF(4);
		READ32(open->op_createmode);
		switch (open->op_createmode) {
		case NFS4_CREATE_UNCHECKED:
		case NFS4_CREATE_GUARDED:
			status = nfsd4_decode_fattr(argp, open->op_bmval,
				&open->op_iattr, &open->op_acl);
			if (status)
				goto out;
			break;
		case NFS4_CREATE_EXCLUSIVE:
			READ_BUF(NFS4_VERIFIER_SIZE);
			COPYMEM(open->op_verf.data, NFS4_VERIFIER_SIZE);
			break;
		case NFS4_CREATE_EXCLUSIVE4_1:
			if (argp->minorversion < 1)
				goto xdr_error;
			READ_BUF(NFS4_VERIFIER_SIZE);
			COPYMEM(open->op_verf.data, NFS4_VERIFIER_SIZE);
			status = nfsd4_decode_fattr(argp, open->op_bmval,
				&open->op_iattr, &open->op_acl);
			if (status)
				goto out;
			break;
		default:
			goto xdr_error;
		}
		break;
	default:
		goto xdr_error;
	}

	/* open_claim */
	READ_BUF(4);
	READ32(open->op_claim_type);
	switch (open->op_claim_type) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_DELEGATE_PREV:
		READ_BUF(4);
		READ32(open->op_fname.len);
		READ_BUF(open->op_fname.len);
		SAVEMEM(open->op_fname.data, open->op_fname.len);
		if ((status = check_filename(open->op_fname.data, open->op_fname.len, nfserr_inval)))
			return status;
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
		READ_BUF(4);
		READ32(open->op_delegate_type);
		break;
	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
		status = nfsd4_decode_stateid(argp, &open->op_delegate_stateid);
		if (status)
			return status;
		READ_BUF(4);
		READ32(open->op_fname.len);
		READ_BUF(open->op_fname.len);
		SAVEMEM(open->op_fname.data, open->op_fname.len);
		if ((status = check_filename(open->op_fname.data, open->op_fname.len, nfserr_inval)))
			return status;
		break;
	case NFS4_OPEN_CLAIM_FH:
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		if (argp->minorversion < 1)
			goto xdr_error;
		/* void */
		break;
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
		if (argp->minorversion < 1)
			goto xdr_error;
		status = nfsd4_decode_stateid(argp, &open->op_delegate_stateid);
		if (status)
			return status;
		break;
	default:
		goto xdr_error;
	}

	DECODE_TAIL;
}

/* Decode OPEN_CONFIRM args: the open stateid and seqid. */
static __be32
nfsd4_decode_open_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_open_confirm *open_conf)
{
	DECODE_HEAD;

	status = nfsd4_decode_stateid(argp, &open_conf->oc_req_stateid);
	if (status)
		return status;
	READ_BUF(4);
	READ32(open_conf->oc_seqid);

	DECODE_TAIL;
}

/* Decode OPEN_DOWNGRADE args: stateid, seqid, new access/deny. */
static __be32
nfsd4_decode_open_downgrade(struct nfsd4_compoundargs *argp, struct nfsd4_open_downgrade *open_down)
{
	DECODE_HEAD;

	status = nfsd4_decode_stateid(argp, &open_down->od_stateid);
	if (status)
		return status;
	READ_BUF(4);
	READ32(open_down->od_seqid);
	status = nfsd4_decode_share_access(argp, &open_down->od_share_access,
					   &open_down->od_deleg_want, NULL);
	if (status)
		return status;
	status = nfsd4_decode_share_deny(argp, &open_down->od_share_deny);
	if (status)
		return status;

	DECODE_TAIL;
}

/* Decode PUTFH args: the raw filehandle, capped at NFS4_FHSIZE. */
static __be32
nfsd4_decode_putfh(struct nfsd4_compoundargs *argp, struct nfsd4_putfh *putfh)
{
	DECODE_HEAD;

	READ_BUF(4);
	READ32(putfh->pf_fhlen);
	if (putfh->pf_fhlen > NFS4_FHSIZE)
		goto xdr_error;
	READ_BUF(putfh->pf_fhlen);
	SAVEMEM(putfh->pf_fhval, putfh->pf_fhlen);

	DECODE_TAIL;
}

/* Decode READ args: stateid, 64-bit offset, 32-bit length. */
static __be32
nfsd4_decode_read(struct nfsd4_compoundargs *argp, struct nfsd4_read *read)
{
	DECODE_HEAD;

	status = nfsd4_decode_stateid(argp, &read->rd_stateid);
	if (status)
		return status;
	READ_BUF(12);
	READ64(read->rd_offset);
	READ32(read->rd_length);

	DECODE_TAIL;
}

/* Decode READDIR args: cookie, verifier, counts, attr bitmap. */
static __be32
nfsd4_decode_readdir(struct nfsd4_compoundargs *argp, struct nfsd4_readdir *readdir)
{
	DECODE_HEAD;

	READ_BUF(24);
	READ64(readdir->rd_cookie);
	COPYMEM(readdir->rd_verf.data, sizeof(readdir->rd_verf.data));
	READ32(readdir->rd_dircount);    /* just in case you needed a useless field... */
	READ32(readdir->rd_maxcount);
	if ((status = nfsd4_decode_bitmap(argp, readdir->rd_bmval)))
		goto out;

	DECODE_TAIL;
}

/* Decode REMOVE args: the target name (noent on "."/".."). */
static __be32
nfsd4_decode_remove(struct nfsd4_compoundargs *argp, struct nfsd4_remove *remove)
{
	DECODE_HEAD;

	READ_BUF(4);
	READ32(remove->rm_namelen);
	READ_BUF(remove->rm_namelen);
	SAVEMEM(remove->rm_name, remove->rm_namelen);
	if ((status = check_filename(remove->rm_name, remove->rm_namelen, nfserr_noent)))
		return status;

	DECODE_TAIL;
}

/*
 * Decode RENAME args: source and target names.  The first READ_BUF
 * pulls in the source name plus the 4-byte target-name length in one
 * go, which is why it reads rn_snamelen + 4.
 */
static __be32
nfsd4_decode_rename(struct nfsd4_compoundargs *argp, struct nfsd4_rename *rename)
{
	DECODE_HEAD;

	READ_BUF(4);
	READ32(rename->rn_snamelen);
	READ_BUF(rename->rn_snamelen + 4);
	SAVEMEM(rename->rn_sname, rename->rn_snamelen);
	READ32(rename->rn_tnamelen);
	READ_BUF(rename->rn_tnamelen);
	SAVEMEM(rename->rn_tname, rename->rn_tnamelen);
	if ((status = check_filename(rename->rn_sname, rename->rn_snamelen, nfserr_noent)))
		return status;
	if ((status = check_filename(rename->rn_tname, rename->rn_tnamelen, nfserr_inval)))
		return status;

	DECODE_TAIL;
}

/* Decode RENEW args: the clientid to be renewed. */
static __be32
nfsd4_decode_renew(struct nfsd4_compoundargs *argp, clientid_t *clientid)
{
	DECODE_HEAD;

	READ_BUF(sizeof(clientid_t));
	COPYMEM(clientid, sizeof(clientid_t));

	DECODE_TAIL;
}

/* Decode SECINFO args: the component name to query. */
static __be32
nfsd4_decode_secinfo(struct nfsd4_compoundargs *argp,
		     struct nfsd4_secinfo *secinfo)
{
	DECODE_HEAD;

	READ_BUF(4);
	READ32(secinfo->si_namelen);
	READ_BUF(secinfo->si_namelen);
	SAVEMEM(secinfo->si_name, secinfo->si_namelen);
	status = check_filename(secinfo->si_name, secinfo->si_namelen,
								nfserr_noent);
	if (status)
		return status;

	DECODE_TAIL;
}

/* Decode SECINFO_NO_NAME args: just the style selector. */
static __be32
nfsd4_decode_secinfo_no_name(struct nfsd4_compoundargs *argp,
		     struct nfsd4_secinfo_no_name *sin)
{
	DECODE_HEAD;

	READ_BUF(4);
	READ32(sin->sin_style);

	DECODE_TAIL;
}

/* Decode SETATTR args: stateid followed by the fattr4 block. */
static __be32
nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, struct nfsd4_setattr *setattr)
{
	__be32 status;

	status = nfsd4_decode_stateid(argp, &setattr->sa_stateid);
	if (status)
		return status;
	return nfsd4_decode_fattr(argp, setattr->sa_bmval, &setattr->sa_iattr,
				  &setattr->sa_acl);
}

/*
 * Decode SETCLIENTID args: client verifier and name, then the
 * callback program, netid, address and ident.  As in rename, the
 * "+ 4" READ_BUFs pull the following length word along with the
 * string data.
 */
static __be32
nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclientid *setclientid)
{
	DECODE_HEAD;

	READ_BUF(NFS4_VERIFIER_SIZE);
	COPYMEM(setclientid->se_verf.data, NFS4_VERIFIER_SIZE);

	status = nfsd4_decode_opaque(argp, &setclientid->se_name);
	if (status)
		return nfserr_bad_xdr;
	READ_BUF(8);
	READ32(setclientid->se_callback_prog);
	READ32(setclientid->se_callback_netid_len);

	READ_BUF(setclientid->se_callback_netid_len + 4);
	SAVEMEM(setclientid->se_callback_netid_val, setclientid->se_callback_netid_len);
	READ32(setclientid->se_callback_addr_len);

	READ_BUF(setclientid->se_callback_addr_len + 4);
	SAVEMEM(setclientid->se_callback_addr_val, setclientid->se_callback_addr_len);
	READ32(setclientid->se_callback_ident);

	DECODE_TAIL;
}

/* Decode SETCLIENTID_CONFIRM args: clientid + confirm verifier. */
static __be32
nfsd4_decode_setclientid_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_setclientid_confirm *scd_c)
{
	DECODE_HEAD;

	READ_BUF(8 + NFS4_VERIFIER_SIZE);
	COPYMEM(&scd_c->sc_clientid, 8);
	COPYMEM(&scd_c->sc_confirm, NFS4_VERIFIER_SIZE);

	DECODE_TAIL;
}

/* Also used for NVERIFY */
static __be32
nfsd4_decode_verify(struct nfsd4_compoundargs *argp, struct nfsd4_verify *verify)
{
#if 0
	struct nfsd4_compoundargs save = {
		.p = argp->p,
		.end = argp->end,
		.rqstp = argp->rqstp,
	};
	u32             ve_bmval[2];
	struct iattr    ve_iattr;           /* request */
	struct nfs4_acl *ve_acl;            /* request */
#endif
	DECODE_HEAD;

	if ((status = nfsd4_decode_bitmap(argp, verify->ve_bmval)))
		goto out;

	/* For convenience's sake, we compare raw xdr'd attributes in
	 * nfsd4_proc_verify; however we still decode here just to return
	 * correct error in case of bad xdr. */
#if 0
	status = nfsd4_decode_fattr(ve_bmval, &ve_iattr, &ve_acl);
	if (status == nfserr_inval) {
		status = nfserrno(status);
		goto out;
	}
#endif
	READ_BUF(4);
	READ32(verify->ve_attrlen);
	READ_BUF(verify->ve_attrlen);
	SAVEMEM(verify->ve_attrval, verify->ve_attrlen);

	DECODE_TAIL;
}

/*
 * Decode WRITE args: stateid, offset, stability, then map the write
 * payload directly into rq_vec without copying.
 */
static __be32
nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
{
	int avail;
	int v;
	int len;
	DECODE_HEAD;

	status = nfsd4_decode_stateid(argp, &write->wr_stateid);
	if (status)
		return status;
	READ_BUF(16);
	READ64(write->wr_offset);
	READ32(write->wr_stable_how);
	if (write->wr_stable_how > 2)
		goto xdr_error;
	READ32(write->wr_buflen);

	/* Sorry .. no magic macros for this..
	 *
	 * READ_BUF(write->wr_buflen);
	 * SAVEMEM(write->wr_buf, write->wr_buflen);
	 */
	avail = (char*)argp->end - (char*)argp->p;
	if (avail + argp->pagelen < write->wr_buflen) {
		dprintk("NFSD: xdr error (%s:%d)\n",
				__FILE__, __LINE__);
		goto xdr_error;
	}
	/* Build an iovec chain over the remaining head data plus as many
	 * request pages as the payload needs; no data is copied. */
	argp->rqstp->rq_vec[0].iov_base = p;
	argp->rqstp->rq_vec[0].iov_len = avail;
	v = 0;
	len = write->wr_buflen;
	while (len > argp->rqstp->rq_vec[v].iov_len) {
		len -= argp->rqstp->rq_vec[v].iov_len;
		v++;
		argp->rqstp->rq_vec[v].iov_base = page_address(argp->pagelist[0]);
		argp->pagelist++;
		if (argp->pagelen >= PAGE_SIZE) {
			argp->rqstp->rq_vec[v].iov_len = PAGE_SIZE;
			argp->pagelen -= PAGE_SIZE;
		} else {
			argp->rqstp->rq_vec[v].iov_len = argp->pagelen;
			/* NOTE(review): subtracting 'len' (the still-unmapped
			 * payload) rather than the bytes consumed from this
			 * page looks suspicious -- confirm against upstream
			 * history before relying on pagelen afterwards. */
			argp->pagelen -= len;
		}
	}
	argp->end = (__be32*) (argp->rqstp->rq_vec[v].iov_base + argp->rqstp->rq_vec[v].iov_len);
	argp->p = (__be32*)  (argp->rqstp->rq_vec[v].iov_base + (XDR_QUADLEN(len) << 2));
	argp->rqstp->rq_vec[v].iov_len = len;
	write->wr_vlen = v+1;

	DECODE_TAIL;
}

/* Decode RELEASE_LOCKOWNER args: clientid + owner (4.0 only; a 4.1
 * request must carry a zero clientid). */
static __be32
nfsd4_decode_release_lockowner(struct nfsd4_compoundargs *argp, struct nfsd4_release_lockowner *rlockowner)
{
	DECODE_HEAD;

	READ_BUF(12);
	COPYMEM(&rlockowner->rl_clientid, sizeof(clientid_t));
	READ32(rlockowner->rl_owner.len);
	READ_BUF(rlockowner->rl_owner.len);
	READMEM(rlockowner->rl_owner.data, rlockowner->rl_owner.len);

	if (argp->minorversion && !zero_clientid(&rlockowner->rl_clientid))
		return nfserr_inval;
	DECODE_TAIL;
}

/*
 * Decode EXCHANGE_ID args.  Only the verifier, client name, flags and
 * state-protect discriminant are kept; the state-protect payloads and
 * the optional implementation ID are parsed for framing and discarded.
 */
static __be32
nfsd4_decode_exchange_id(struct nfsd4_compoundargs *argp,
			 struct nfsd4_exchange_id *exid)
{
	int dummy, tmp;
	DECODE_HEAD;

	READ_BUF(NFS4_VERIFIER_SIZE);
	COPYMEM(exid->verifier.data, NFS4_VERIFIER_SIZE);

	status = nfsd4_decode_opaque(argp, &exid->clname);
	if (status)
		return nfserr_bad_xdr;

	READ_BUF(4);
	READ32(exid->flags);

	/* Ignore state_protect4_a */
	READ_BUF(4);
	READ32(exid->spa_how);
	switch (exid->spa_how) {
	case SP4_NONE:
		break;
	case SP4_MACH_CRED:
		/* spo_must_enforce */
		READ_BUF(4);
		READ32(dummy);
		READ_BUF(dummy * 4);
		p += dummy;

		/* spo_must_allow */
		READ_BUF(4);
		READ32(dummy);
		READ_BUF(dummy * 4);
		p += dummy;
		break;
	case SP4_SSV:
		/* ssp_ops */
		READ_BUF(4);
		READ32(dummy);
		READ_BUF(dummy * 4);
		p += dummy;

		READ_BUF(4);
		READ32(dummy);
		READ_BUF(dummy * 4);
		p += dummy;

		/* ssp_hash_algs<> */
		READ_BUF(4);
		READ32(tmp);
		while (tmp--) {
			READ_BUF(4);
			READ32(dummy);
			READ_BUF(dummy);
			p += XDR_QUADLEN(dummy);
		}

		/* ssp_encr_algs<> */
		READ_BUF(4);
		READ32(tmp);
		while (tmp--) {
			READ_BUF(4);
			READ32(dummy);
			READ_BUF(dummy);
			p += XDR_QUADLEN(dummy);
		}

		/* ssp_window and ssp_num_gss_handles */
		READ_BUF(8);
		READ32(dummy);
		READ32(dummy);
		break;
	default:
		goto xdr_error;
	}

	/* Ignore Implementation ID */
	READ_BUF(4);    /* nfs_impl_id4 array length */
	READ32(dummy);

	if (dummy > 1)
		goto xdr_error;

	if (dummy == 1) {
		/* nii_domain */
		READ_BUF(4);
		READ32(dummy);
		READ_BUF(dummy);
		p += XDR_QUADLEN(dummy);

		/* nii_name */
		READ_BUF(4);
		READ32(dummy);
		READ_BUF(dummy);
		p += XDR_QUADLEN(dummy);

		/* nii_date */
		READ_BUF(12);
		p += 3;
	}
	DECODE_TAIL;
}

/*
 * Decode CREATE_SESSION args: clientid/seqid/flags, fore and back
 * channel attributes, the callback program, and the callback security
 * parameter list (only AUTH_UNIX is actually used; GSS flavors are
 * parsed and skipped).
 */
static __be32
nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
			    struct nfsd4_create_session *sess)
{
	DECODE_HEAD;

	u32 dummy;
	char *machine_name;
	int i;
	int nr_secflavs;

	READ_BUF(16);
	COPYMEM(&sess->clientid, 8);
	READ32(sess->seqid);
	READ32(sess->flags);

	/* Fore channel attrs */
	READ_BUF(28);
	READ32(dummy); /* headerpadsz is always 0 */
	READ32(sess->fore_channel.maxreq_sz);
	READ32(sess->fore_channel.maxresp_sz);
	READ32(sess->fore_channel.maxresp_cached);
	READ32(sess->fore_channel.maxops);
	READ32(sess->fore_channel.maxreqs);
	READ32(sess->fore_channel.nr_rdma_attrs);
	if (sess->fore_channel.nr_rdma_attrs == 1) {
		READ_BUF(4);
		READ32(sess->fore_channel.rdma_attrs);
	} else if (sess->fore_channel.nr_rdma_attrs > 1) {
		dprintk("Too many fore channel attr bitmaps!\n");
		goto xdr_error;
	}

	/* Back channel attrs */
	READ_BUF(28);
	READ32(dummy); /* headerpadsz is always 0 */
	READ32(sess->back_channel.maxreq_sz);
	READ32(sess->back_channel.maxresp_sz);
	READ32(sess->back_channel.maxresp_cached);
	READ32(sess->back_channel.maxops);
	READ32(sess->back_channel.maxreqs);
	READ32(sess->back_channel.nr_rdma_attrs);
	if (sess->back_channel.nr_rdma_attrs == 1) {
		READ_BUF(4);
		READ32(sess->back_channel.rdma_attrs);
	} else if (sess->back_channel.nr_rdma_attrs > 1) {
		dprintk("Too many back channel attr bitmaps!\n");
		goto xdr_error;
	}

	READ_BUF(8);
	READ32(sess->callback_prog);

	/* callback_sec_params4 */
	READ32(nr_secflavs);
	/* NOTE(review): nr_secflavs comes straight off the wire and the
	 * loop below is unbounded -- confirm whether an upper bound
	 * should be enforced here. */
	for (i = 0; i < nr_secflavs; ++i) {
		READ_BUF(4);
		READ32(dummy);
		switch (dummy) {
		case RPC_AUTH_NULL:
			/* Nothing to read */
			break;
		case RPC_AUTH_UNIX:
			READ_BUF(8);
			/* stamp */
			READ32(dummy);

			/* machine name */
			READ32(dummy);
			READ_BUF(dummy);
			SAVEMEM(machine_name, dummy);

			/* uid, gid */
			READ_BUF(8);
			READ32(sess->uid);
			READ32(sess->gid);

			/* more gids */
			READ_BUF(4);
			READ32(dummy);
			READ_BUF(dummy * 4);
			break;
		case RPC_AUTH_GSS:
			dprintk("RPC_AUTH_GSS callback secflavor "
				"not supported!\n");
			READ_BUF(8);
			/* gcbp_service */
			READ32(dummy);
			/* gcbp_handle_from_server */
			READ32(dummy);
			READ_BUF(dummy);
			p += XDR_QUADLEN(dummy);
			/* gcbp_handle_from_client */
			READ_BUF(4);
			READ32(dummy);
			READ_BUF(dummy);
			break;
		default:
			dprintk("Illegal callback secflavor\n");
			return nfserr_inval;
		}
	}
	DECODE_TAIL;
}

/* Decode DESTROY_SESSION args: the sessionid. */
static __be32
nfsd4_decode_destroy_session(struct nfsd4_compoundargs *argp,
			     struct nfsd4_destroy_session *destroy_session)
{
	DECODE_HEAD;
	READ_BUF(NFS4_MAX_SESSIONID_LEN);
	COPYMEM(destroy_session->sessionid.data, NFS4_MAX_SESSIONID_LEN);

	DECODE_TAIL;
}

/* Decode FREE_STATEID args: the stateid to free. */
static __be32
nfsd4_decode_free_stateid(struct nfsd4_compoundargs *argp,
			  struct nfsd4_free_stateid *free_stateid)
{
	DECODE_HEAD;

	READ_BUF(sizeof(stateid_t));
	READ32(free_stateid->fr_stateid.si_generation);
	COPYMEM(&free_stateid->fr_stateid.si_opaque, sizeof(stateid_opaque_t));

	DECODE_TAIL;
}

/* Decode SEQUENCE args: sessionid, seqid, slot info, cachethis. */
static __be32
nfsd4_decode_sequence(struct nfsd4_compoundargs *argp,
		      struct nfsd4_sequence *seq)
{
	DECODE_HEAD;

	READ_BUF(NFS4_MAX_SESSIONID_LEN + 16);
	COPYMEM(seq->sessionid.data, NFS4_MAX_SESSIONID_LEN);
	READ32(seq->seqid);
	READ32(seq->slotid);
	READ32(seq->maxslots);
	READ32(seq->cachethis);

	DECODE_TAIL;
}

/*
 * Decode TEST_STATEID args: a counted list of stateids, each copied
 * into a deferred-free list node on test_stateid->ts_stateid_list.
 * Hand-rolled out/xdr_error labels because DECODE_TAIL's success path
 * doesn't fit the loop structure here.
 */
static __be32
nfsd4_decode_test_stateid(struct nfsd4_compoundargs *argp, struct nfsd4_test_stateid *test_stateid)
{
	int i;
	__be32 *p, status;
	struct nfsd4_test_stateid_id *stateid;

	READ_BUF(4);
	test_stateid->ts_num_ids = ntohl(*p++);

	INIT_LIST_HEAD(&test_stateid->ts_stateid_list);

	for (i = 0; i < test_stateid->ts_num_ids; i++) {
		stateid = kmalloc(sizeof(struct nfsd4_test_stateid_id), GFP_KERNEL);
		if (!stateid) {
			status = nfserrno(-ENOMEM);
			goto out;
		}

		defer_free(argp, kfree, stateid);
		INIT_LIST_HEAD(&stateid->ts_id_list);
		list_add_tail(&stateid->ts_id_list, &test_stateid->ts_stateid_list);

		status = nfsd4_decode_stateid(argp, &stateid->ts_id_stateid);
		if (status)
			goto out;
	}

	status = 0;
out:
	return status;
xdr_error:
	dprintk("NFSD: xdr error (%s:%d)\n", __FILE__, __LINE__);
	status = nfserr_bad_xdr;
	goto out;
}

/* Decode DESTROY_CLIENTID args: the clientid to destroy. */
static __be32 nfsd4_decode_destroy_clientid(struct nfsd4_compoundargs *argp, struct nfsd4_destroy_clientid *dc)
{
	DECODE_HEAD;

	READ_BUF(8);
	COPYMEM(&dc->clientid, 8);

	DECODE_TAIL;
}

/* Decode RECLAIM_COMPLETE args: the rca_one_fs boolean. */
static __be32 nfsd4_decode_reclaim_complete(struct nfsd4_compoundargs *argp, struct nfsd4_reclaim_complete *rc)
{
	DECODE_HEAD;

	READ_BUF(4);
	READ32(rc->rca_one_fs);

	DECODE_TAIL;
}

/* Decoder for ops that carry no arguments. */
static __be32
nfsd4_decode_noop(struct nfsd4_compoundargs *argp, void *p)
{
	return nfs_ok;
}

/* Decoder stub for ops this server does not support. */
static __be32
nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
{
	return nfserr_notsupp;
}

typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);

/* Per-opcode decoder dispatch table for NFSv4.0 compounds. */
static nfsd4_dec nfsd4_dec_ops[] = {
	[OP_ACCESS]		= (nfsd4_dec)nfsd4_decode_access,
	[OP_CLOSE]		= (nfsd4_dec)nfsd4_decode_close,
	[OP_COMMIT]		= (nfsd4_dec)nfsd4_decode_commit,
	[OP_CREATE]		= (nfsd4_dec)nfsd4_decode_create,
	[OP_DELEGPURGE]		= (nfsd4_dec)nfsd4_decode_notsupp,
	[OP_DELEGRETURN]	= (nfsd4_dec)nfsd4_decode_delegreturn,
	[OP_GETATTR]		= (nfsd4_dec)nfsd4_decode_getattr,
	[OP_GETFH]		= (nfsd4_dec)nfsd4_decode_noop,
	[OP_LINK]		= (nfsd4_dec)nfsd4_decode_link,
	[OP_LOCK]		= (nfsd4_dec)nfsd4_decode_lock,
	[OP_LOCKT]		= (nfsd4_dec)nfsd4_decode_lockt,
	[OP_LOCKU]		= (nfsd4_dec)nfsd4_decode_locku,
	[OP_LOOKUP]		= (nfsd4_dec)nfsd4_decode_lookup,
	[OP_LOOKUPP]		= (nfsd4_dec)nfsd4_decode_noop,
	[OP_NVERIFY]		= (nfsd4_dec)nfsd4_decode_verify,
	[OP_OPEN]		= (nfsd4_dec)nfsd4_decode_open,
	[OP_OPENATTR]		= (nfsd4_dec)nfsd4_decode_notsupp,
	[OP_OPEN_CONFIRM]	= (nfsd4_dec)nfsd4_decode_open_confirm,
	[OP_OPEN_DOWNGRADE]	= (nfsd4_dec)nfsd4_decode_open_downgrade,
	[OP_PUTFH]		= (nfsd4_dec)nfsd4_decode_putfh,
	[OP_PUTPUBFH]		= (nfsd4_dec)nfsd4_decode_noop,
	[OP_PUTROOTFH]		= (nfsd4_dec)nfsd4_decode_noop,
	[OP_READ]		= (nfsd4_dec)nfsd4_decode_read,
	[OP_READDIR]		= (nfsd4_dec)nfsd4_decode_readdir,
	[OP_READLINK]		= (nfsd4_dec)nfsd4_decode_noop,
	[OP_REMOVE]		= (nfsd4_dec)nfsd4_decode_remove,
	[OP_RENAME]		= (nfsd4_dec)nfsd4_decode_rename,
	[OP_RENEW]		= (nfsd4_dec)nfsd4_decode_renew,
	[OP_RESTOREFH]		= (nfsd4_dec)nfsd4_decode_noop,
	[OP_SAVEFH]		= (nfsd4_dec)nfsd4_decode_noop,
	[OP_SECINFO]		= (nfsd4_dec)nfsd4_decode_secinfo,
	[OP_SETATTR]		= (nfsd4_dec)nfsd4_decode_setattr,
	[OP_SETCLIENTID]	= (nfsd4_dec)nfsd4_decode_setclientid,
	[OP_SETCLIENTID_CONFIRM] = (nfsd4_dec)nfsd4_decode_setclientid_confirm,
	[OP_VERIFY]		= (nfsd4_dec)nfsd4_decode_verify,
	[OP_WRITE]		= (nfsd4_dec)nfsd4_decode_write,
	[OP_RELEASE_LOCKOWNER]	= (nfsd4_dec)nfsd4_decode_release_lockowner,
};

/*
 * Per-opcode decoder dispatch table for NFSv4.1 compounds.  Differs
 * from the 4.0 table where 4.1 obsoletes an op (OPEN_CONFIRM,
 * SETCLIENTID, RENEW, RELEASE_LOCKOWNER, ...) and adds the session ops.
 */
static nfsd4_dec nfsd41_dec_ops[] = {
	[OP_ACCESS]		= (nfsd4_dec)nfsd4_decode_access,
	[OP_CLOSE]		= (nfsd4_dec)nfsd4_decode_close,
	[OP_COMMIT]		= (nfsd4_dec)nfsd4_decode_commit,
	[OP_CREATE]		= (nfsd4_dec)nfsd4_decode_create,
	[OP_DELEGPURGE]		= (nfsd4_dec)nfsd4_decode_notsupp,
	[OP_DELEGRETURN]	= (nfsd4_dec)nfsd4_decode_delegreturn,
	[OP_GETATTR]		= (nfsd4_dec)nfsd4_decode_getattr,
	[OP_GETFH]		= (nfsd4_dec)nfsd4_decode_noop,
	[OP_LINK]		= (nfsd4_dec)nfsd4_decode_link,
	[OP_LOCK]		= (nfsd4_dec)nfsd4_decode_lock,
	[OP_LOCKT]		= (nfsd4_dec)nfsd4_decode_lockt,
	[OP_LOCKU]		= (nfsd4_dec)nfsd4_decode_locku,
	[OP_LOOKUP]		= (nfsd4_dec)nfsd4_decode_lookup,
	[OP_LOOKUPP]		= (nfsd4_dec)nfsd4_decode_noop,
	[OP_NVERIFY]		= (nfsd4_dec)nfsd4_decode_verify,
	[OP_OPEN]		= (nfsd4_dec)nfsd4_decode_open,
	[OP_OPENATTR]		= (nfsd4_dec)nfsd4_decode_notsupp,
	[OP_OPEN_CONFIRM]	= (nfsd4_dec)nfsd4_decode_notsupp,
	[OP_OPEN_DOWNGRADE]	= (nfsd4_dec)nfsd4_decode_open_downgrade,
	[OP_PUTFH]		= (nfsd4_dec)nfsd4_decode_putfh,
	[OP_PUTPUBFH]		= (nfsd4_dec)nfsd4_decode_notsupp,
	[OP_PUTROOTFH]		= (nfsd4_dec)nfsd4_decode_noop,
	[OP_READ]		= (nfsd4_dec)nfsd4_decode_read,
	[OP_READDIR]		= (nfsd4_dec)nfsd4_decode_readdir,
	[OP_READLINK]		= (nfsd4_dec)nfsd4_decode_noop,
	[OP_REMOVE]		= (nfsd4_dec)nfsd4_decode_remove,
	[OP_RENAME]		= (nfsd4_dec)nfsd4_decode_rename,
	[OP_RENEW]		= (nfsd4_dec)nfsd4_decode_notsupp,
	[OP_RESTOREFH]		= (nfsd4_dec)nfsd4_decode_noop,
	[OP_SAVEFH]		= (nfsd4_dec)nfsd4_decode_noop,
	[OP_SECINFO]		= (nfsd4_dec)nfsd4_decode_secinfo,
	[OP_SETATTR]		= (nfsd4_dec)nfsd4_decode_setattr,
	[OP_SETCLIENTID]	= (nfsd4_dec)nfsd4_decode_notsupp,
	[OP_SETCLIENTID_CONFIRM]= (nfsd4_dec)nfsd4_decode_notsupp,
	[OP_VERIFY]		= (nfsd4_dec)nfsd4_decode_verify,
	[OP_WRITE]		= (nfsd4_dec)nfsd4_decode_write,
	[OP_RELEASE_LOCKOWNER]	= (nfsd4_dec)nfsd4_decode_notsupp,

	/* new operations for NFSv4.1 */
	[OP_BACKCHANNEL_CTL]	= (nfsd4_dec)nfsd4_decode_notsupp,
	[OP_BIND_CONN_TO_SESSION]= (nfsd4_dec)nfsd4_decode_bind_conn_to_session,
	[OP_EXCHANGE_ID]	= (nfsd4_dec)nfsd4_decode_exchange_id,
	[OP_CREATE_SESSION]	= (nfsd4_dec)nfsd4_decode_create_session,
	[OP_DESTROY_SESSION]	= (nfsd4_dec)nfsd4_decode_destroy_session,
	[OP_FREE_STATEID]	= (nfsd4_dec)nfsd4_decode_free_stateid,
	[OP_GET_DIR_DELEGATION]	= (nfsd4_dec)nfsd4_decode_notsupp,
	[OP_GETDEVICEINFO]	= (nfsd4_dec)nfsd4_decode_notsupp,
	[OP_GETDEVICELIST]	= (nfsd4_dec)nfsd4_decode_notsupp,
	[OP_LAYOUTCOMMIT]	= (nfsd4_dec)nfsd4_decode_notsupp,
	[OP_LAYOUTGET]		= (nfsd4_dec)nfsd4_decode_notsupp,
	[OP_LAYOUTRETURN]	= (nfsd4_dec)nfsd4_decode_notsupp,
	[OP_SECINFO_NO_NAME]	= (nfsd4_dec)nfsd4_decode_secinfo_no_name,
	[OP_SEQUENCE]		= (nfsd4_dec)nfsd4_decode_sequence,
	[OP_SET_SSV]		= (nfsd4_dec)nfsd4_decode_notsupp,
	[OP_TEST_STATEID]	= (nfsd4_dec)nfsd4_decode_test_stateid,
	[OP_WANT_DELEGATION]	= (nfsd4_dec)nfsd4_decode_notsupp,
	[OP_DESTROY_CLIENTID]	= (nfsd4_dec)nfsd4_decode_destroy_clientid,
	[OP_RECLAIM_COMPLETE]	= (nfsd4_dec)nfsd4_decode_reclaim_complete,
};

/* Decoder table + op count for each supported minorversion. */
struct nfsd4_minorversion_ops {
	nfsd4_dec *decoders;
	int nops;
};

static struct nfsd4_minorversion_ops nfsd4_minorversion[] = {
	[0] = { nfsd4_dec_ops, ARRAY_SIZE(nfsd4_dec_ops) },
	[1] = { nfsd41_dec_ops, ARRAY_SIZE(nfsd41_dec_ops) },
};

/*
 * Decode an entire COMPOUND request: tag, minorversion, op count, then
 * each operation via the minorversion's dispatch table.  An unknown
 * minorversion yields zero decoded ops (the proc layer reports the
 * error); an out-of-range opcode becomes OP_ILLEGAL; a truncated
 * opcode becomes the OP_WRITE + 1 sentinel.  Also decides whether the
 * reply should be cached in the DRC.
 */
static __be32
nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
{
	DECODE_HEAD;
	struct nfsd4_op *op;
	struct nfsd4_minorversion_ops *ops;
	bool cachethis = false;
	int i;

	/*
	 * XXX: According to spec, we should check the tag
	 * for UTF-8 compliance.  I'm postponing this for
	 * now because it seems that some clients do use
	 * binary tags.
	 */
	READ_BUF(4);
	READ32(argp->taglen);
	READ_BUF(argp->taglen + 8);
	SAVEMEM(argp->tag, argp->taglen);
	READ32(argp->minorversion);
	READ32(argp->opcnt);

	if (argp->taglen > NFSD4_MAX_TAGLEN)
		goto xdr_error;
	if (argp->opcnt > 100)
		goto xdr_error;

	if (argp->opcnt > ARRAY_SIZE(argp->iops)) {
		argp->ops = kmalloc(argp->opcnt * sizeof(*argp->ops), GFP_KERNEL);
		if (!argp->ops) {
			argp->ops = argp->iops;
			dprintk("nfsd: couldn't allocate room for COMPOUND\n");
			goto xdr_error;
		}
	}

	if (argp->minorversion >= ARRAY_SIZE(nfsd4_minorversion))
		argp->opcnt = 0;

	ops = &nfsd4_minorversion[argp->minorversion];
	for (i = 0; i < argp->opcnt; i++) {
		op = &argp->ops[i];
		op->replay = NULL;

		/*
		 * We can't use READ_BUF() here because we need to handle
		 * a missing opcode as an OP_WRITE + 1. So we need to check
		 * to see if we're truly at the end of our buffer or if there
		 * is another page we need to flip to.
		 */

		if (argp->p == argp->end) {
			if (argp->pagelen < 4) {
				/* There isn't an opcode still on the wire */
				op->opnum = OP_WRITE + 1;
				op->status = nfserr_bad_xdr;
				argp->opcnt = i+1;
				break;
			}

			/*
			 * False alarm. We just hit a page boundary, but there
			 * is still data available.  Move pointer across page
			 * boundary.  *snip from READ_BUF*
			 */
			argp->p = page_address(argp->pagelist[0]);
			argp->pagelist++;
			if (argp->pagelen < PAGE_SIZE) {
				argp->end = argp->p + (argp->pagelen>>2);
				argp->pagelen = 0;
			} else {
				argp->end = argp->p + (PAGE_SIZE>>2);
				argp->pagelen -= PAGE_SIZE;
			}
		}
		op->opnum = ntohl(*argp->p++);

		if (op->opnum >= FIRST_NFS4_OP && op->opnum <= LAST_NFS4_OP)
			op->status = ops->decoders[op->opnum](argp, &op->u);
		else {
			op->opnum = OP_ILLEGAL;
			op->status = nfserr_op_illegal;
		}

		if (op->status) {
			argp->opcnt = i+1;
			break;
		}
		/*
		 * We'll try to cache the result in the DRC if any one
		 * op in the compound wants to be cached:
		 */
		cachethis |= nfsd4_cache_this_op(op);
	}
	/* Sessions make the DRC unnecessary: */
	if (argp->minorversion)
		cachethis = false;
	argp->rqstp->rq_cachetype = cachethis ? RC_REPLBUFF : RC_NOCACHE;

	DECODE_TAIL;
}

/* Encode-side helpers: write through cursor 'p'. */
#define WRITE32(n)               *p++ = htonl(n)
#define WRITE64(n)               do {				\
	*p++ = htonl((u32)((n) >> 32));				\
	*p++ = htonl((u32)(n));					\
} while (0)
/* Zeroes the final word first so XDR padding bytes are clean. */
#define WRITEMEM(ptr,nbytes)     do { if (nbytes > 0) {		\
	*(p + XDR_QUADLEN(nbytes) -1) = 0;			\
	memcpy(p, ptr, nbytes);					\
	p += XDR_QUADLEN(nbytes);				\
}} while (0)

static void write32(__be32 **p, u32 n)
{
	*(*p)++ = n;
}

static void write64(__be32 **p, u64 n)
{
	write32(p, (u32)(n >> 32));
	write32(p, (u32)n);
}

/* Encode a change attribute: i_version when supported, else ctime. */
static void write_change(__be32 **p, struct kstat *stat, struct inode *inode)
{
	if (IS_I_VERSION(inode)) {
		write64(p, inode->i_version);
	} else {
		write32(p, stat->ctime.tv_sec);
		write32(p, stat->ctime.tv_nsec);
	}
}

/* Encode a change_info4: before/after change attrs or ctimes. */
static void write_cinfo(__be32 **p, struct nfsd4_change_info *c)
{
	write32(p, c->atomic);
	if (c->change_supported) {
		write64(p, c->before_change);
		write64(p, c->after_change);
	} else {
		write32(p, c->before_ctime_sec);
		write32(p, c->before_ctime_nsec);
		write32(p, c->after_ctime_sec);
		write32(p, c->after_ctime_nsec);
	}
}

#define RESERVE_SPACE(nbytes)	do {				\
	p = resp->p;						\
	BUG_ON(p + XDR_QUADLEN(nbytes) > resp->end);		\
} while (0)
#define ADJUST_ARGS()		resp->p = p

/*
 * Header routine to
setup seqid operation replay cache */ #define ENCODE_SEQID_OP_HEAD \ __be32 *save; \ \ save = resp->p; /* * Routine for encoding the result of a "seqid-mutating" NFSv4 operation. This * is where sequence id's are incremented, and the replay cache is filled. * Note that we increment sequence id's here, at the last moment, so we're sure * we know whether the error to be returned is a sequence id mutating error. */ static void encode_seqid_op_tail(struct nfsd4_compoundres *resp, __be32 *save, __be32 nfserr) { struct nfs4_stateowner *stateowner = resp->cstate.replay_owner; if (seqid_mutating_err(ntohl(nfserr)) && stateowner) { stateowner->so_seqid++; stateowner->so_replay.rp_status = nfserr; stateowner->so_replay.rp_buflen = (char *)resp->p - (char *)save; memcpy(stateowner->so_replay.rp_buf, save, stateowner->so_replay.rp_buflen); nfsd4_purge_closed_stateid(stateowner); } } /* Encode as an array of strings the string given with components * separated @sep. */ static __be32 nfsd4_encode_components(char sep, char *components, __be32 **pp, int *buflen) { __be32 *p = *pp; __be32 *countp = p; int strlen, count=0; char *str, *end; dprintk("nfsd4_encode_components(%s)\n", components); if ((*buflen -= 4) < 0) return nfserr_resource; WRITE32(0); /* We will fill this in with @count later */ end = str = components; while (*end) { for (; *end && (*end != sep); end++) ; /* Point to end of component */ strlen = end - str; if (strlen) { if ((*buflen -= ((XDR_QUADLEN(strlen) << 2) + 4)) < 0) return nfserr_resource; WRITE32(strlen); WRITEMEM(str, strlen); count++; } else end++; str = end; } *pp = p; p = countp; WRITE32(count); return 0; } /* * encode a location element of a fs_locations structure */ static __be32 nfsd4_encode_fs_location4(struct nfsd4_fs_location *location, __be32 **pp, int *buflen) { __be32 status; __be32 *p = *pp; status = nfsd4_encode_components(':', location->hosts, &p, buflen); if (status) return status; status = nfsd4_encode_components('/', location->path, &p, 
buflen); if (status) return status; *pp = p; return 0; } /* * Encode a path in RFC3530 'pathname4' format */ static __be32 nfsd4_encode_path(const struct path *root, const struct path *path, __be32 **pp, int *buflen) { struct path cur = { .mnt = path->mnt, .dentry = path->dentry, }; __be32 *p = *pp; struct dentry **components = NULL; unsigned int ncomponents = 0; __be32 err = nfserr_jukebox; dprintk("nfsd4_encode_components("); path_get(&cur); /* First walk the path up to the nfsd root, and store the * dentries/path components in an array. */ for (;;) { if (cur.dentry == root->dentry && cur.mnt == root->mnt) break; if (cur.dentry == cur.mnt->mnt_root) { if (follow_up(&cur)) continue; goto out_free; } if ((ncomponents & 15) == 0) { struct dentry **new; new = krealloc(components, sizeof(*new) * (ncomponents + 16), GFP_KERNEL); if (!new) goto out_free; components = new; } components[ncomponents++] = cur.dentry; cur.dentry = dget_parent(cur.dentry); } *buflen -= 4; if (*buflen < 0) goto out_free; WRITE32(ncomponents); while (ncomponents) { struct dentry *dentry = components[ncomponents - 1]; unsigned int len = dentry->d_name.len; *buflen -= 4 + (XDR_QUADLEN(len) << 2); if (*buflen < 0) goto out_free; WRITE32(len); WRITEMEM(dentry->d_name.name, len); dprintk("/%s", dentry->d_name.name); dput(dentry); ncomponents--; } *pp = p; err = 0; out_free: dprintk(")\n"); while (ncomponents) dput(components[--ncomponents]); kfree(components); path_put(&cur); return err; } static __be32 nfsd4_encode_fsloc_fsroot(struct svc_rqst *rqstp, const struct path *path, __be32 **pp, int *buflen) { struct svc_export *exp_ps; __be32 res; exp_ps = rqst_find_fsidzero_export(rqstp); if (IS_ERR(exp_ps)) return nfserrno(PTR_ERR(exp_ps)); res = nfsd4_encode_path(&exp_ps->ex_path, path, pp, buflen); exp_put(exp_ps); return res; } /* * encode a fs_locations structure */ static __be32 nfsd4_encode_fs_locations(struct svc_rqst *rqstp, struct svc_export *exp, __be32 **pp, int *buflen) { __be32 status; int 
i; __be32 *p = *pp; struct nfsd4_fs_locations *fslocs = &exp->ex_fslocs; status = nfsd4_encode_fsloc_fsroot(rqstp, &exp->ex_path, &p, buflen); if (status) return status; if ((*buflen -= 4) < 0) return nfserr_resource; WRITE32(fslocs->locations_count); for (i=0; i<fslocs->locations_count; i++) { status = nfsd4_encode_fs_location4(&fslocs->locations[i], &p, buflen); if (status) return status; } *pp = p; return 0; } static u32 nfs4_file_type(umode_t mode) { switch (mode & S_IFMT) { case S_IFIFO: return NF4FIFO; case S_IFCHR: return NF4CHR; case S_IFDIR: return NF4DIR; case S_IFBLK: return NF4BLK; case S_IFLNK: return NF4LNK; case S_IFREG: return NF4REG; case S_IFSOCK: return NF4SOCK; default: return NF4BAD; }; } static __be32 nfsd4_encode_name(struct svc_rqst *rqstp, int whotype, uid_t id, int group, __be32 **p, int *buflen) { int status; if (*buflen < (XDR_QUADLEN(IDMAP_NAMESZ) << 2) + 4) return nfserr_resource; if (whotype != NFS4_ACL_WHO_NAMED) status = nfs4_acl_write_who(whotype, (u8 *)(*p + 1)); else if (group) status = nfsd_map_gid_to_name(rqstp, id, (u8 *)(*p + 1)); else status = nfsd_map_uid_to_name(rqstp, id, (u8 *)(*p + 1)); if (status < 0) return nfserrno(status); *p = xdr_encode_opaque(*p, NULL, status); *buflen -= (XDR_QUADLEN(status) << 2) + 4; BUG_ON(*buflen < 0); return 0; } static inline __be32 nfsd4_encode_user(struct svc_rqst *rqstp, uid_t uid, __be32 **p, int *buflen) { return nfsd4_encode_name(rqstp, NFS4_ACL_WHO_NAMED, uid, 0, p, buflen); } static inline __be32 nfsd4_encode_group(struct svc_rqst *rqstp, uid_t gid, __be32 **p, int *buflen) { return nfsd4_encode_name(rqstp, NFS4_ACL_WHO_NAMED, gid, 1, p, buflen); } static inline __be32 nfsd4_encode_aclname(struct svc_rqst *rqstp, int whotype, uid_t id, int group, __be32 **p, int *buflen) { return nfsd4_encode_name(rqstp, whotype, id, group, p, buflen); } #define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \ FATTR4_WORD0_RDATTR_ERROR) #define WORD1_ABSENT_FS_ATTRS 
FATTR4_WORD1_MOUNTED_ON_FILEID static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err) { /* As per referral draft: */ if (*bmval0 & ~WORD0_ABSENT_FS_ATTRS || *bmval1 & ~WORD1_ABSENT_FS_ATTRS) { if (*bmval0 & FATTR4_WORD0_RDATTR_ERROR || *bmval0 & FATTR4_WORD0_FS_LOCATIONS) *rdattr_err = NFSERR_MOVED; else return nfserr_moved; } *bmval0 &= WORD0_ABSENT_FS_ATTRS; *bmval1 &= WORD1_ABSENT_FS_ATTRS; return 0; } /* * Note: @fhp can be NULL; in this case, we might have to compose the filehandle * ourselves. * * @countp is the buffer size in _words_; upon successful return this becomes * replaced with the number of words written. */ __be32 nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry, __be32 *buffer, int *countp, u32 *bmval, struct svc_rqst *rqstp, int ignore_crossmnt) { u32 bmval0 = bmval[0]; u32 bmval1 = bmval[1]; u32 bmval2 = bmval[2]; struct kstat stat; struct svc_fh tempfh; struct kstatfs statfs; int buflen = *countp << 2; __be32 *attrlenp; u32 dummy; u64 dummy64; u32 rdattr_err = 0; __be32 *p = buffer; __be32 status; int err; int aclsupport = 0; struct nfs4_acl *acl = NULL; struct nfsd4_compoundres *resp = rqstp->rq_resp; u32 minorversion = resp->cstate.minorversion; struct path path = { .mnt = exp->ex_path.mnt, .dentry = dentry, }; BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1); BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion)); BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion)); BUG_ON(bmval2 & ~nfsd_suppattrs2(minorversion)); if (exp->ex_fslocs.migrated) { BUG_ON(bmval[2]); status = fattr_handle_absent_fs(&bmval0, &bmval1, &rdattr_err); if (status) goto out; } err = vfs_getattr(exp->ex_path.mnt, dentry, &stat); if (err) goto out_nfserr; if ((bmval0 & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL | FATTR4_WORD0_MAXNAME)) || (bmval1 & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE | FATTR4_WORD1_SPACE_TOTAL))) { err = vfs_statfs(&path, &statfs); if (err) goto out_nfserr; } 
if ((bmval0 & (FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FSID)) && !fhp) { fh_init(&tempfh, NFS4_FHSIZE); status = fh_compose(&tempfh, exp, dentry, NULL); if (status) goto out; fhp = &tempfh; } if (bmval0 & (FATTR4_WORD0_ACL | FATTR4_WORD0_ACLSUPPORT | FATTR4_WORD0_SUPPORTED_ATTRS)) { err = nfsd4_get_nfs4_acl(rqstp, dentry, &acl); aclsupport = (err == 0); if (bmval0 & FATTR4_WORD0_ACL) { if (err == -EOPNOTSUPP) bmval0 &= ~FATTR4_WORD0_ACL; else if (err == -EINVAL) { status = nfserr_attrnotsupp; goto out; } else if (err != 0) goto out_nfserr; } } if (bmval2) { if ((buflen -= 16) < 0) goto out_resource; WRITE32(3); WRITE32(bmval0); WRITE32(bmval1); WRITE32(bmval2); } else if (bmval1) { if ((buflen -= 12) < 0) goto out_resource; WRITE32(2); WRITE32(bmval0); WRITE32(bmval1); } else { if ((buflen -= 8) < 0) goto out_resource; WRITE32(1); WRITE32(bmval0); } attrlenp = p++; /* to be backfilled later */ if (bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) { u32 word0 = nfsd_suppattrs0(minorversion); u32 word1 = nfsd_suppattrs1(minorversion); u32 word2 = nfsd_suppattrs2(minorversion); if (!aclsupport) word0 &= ~FATTR4_WORD0_ACL; if (!word2) { if ((buflen -= 12) < 0) goto out_resource; WRITE32(2); WRITE32(word0); WRITE32(word1); } else { if ((buflen -= 16) < 0) goto out_resource; WRITE32(3); WRITE32(word0); WRITE32(word1); WRITE32(word2); } } if (bmval0 & FATTR4_WORD0_TYPE) { if ((buflen -= 4) < 0) goto out_resource; dummy = nfs4_file_type(stat.mode); if (dummy == NF4BAD) goto out_serverfault; WRITE32(dummy); } if (bmval0 & FATTR4_WORD0_FH_EXPIRE_TYPE) { if ((buflen -= 4) < 0) goto out_resource; if (exp->ex_flags & NFSEXP_NOSUBTREECHECK) WRITE32(NFS4_FH_PERSISTENT); else WRITE32(NFS4_FH_PERSISTENT|NFS4_FH_VOL_RENAME); } if (bmval0 & FATTR4_WORD0_CHANGE) { if ((buflen -= 8) < 0) goto out_resource; write_change(&p, &stat, dentry->d_inode); } if (bmval0 & FATTR4_WORD0_SIZE) { if ((buflen -= 8) < 0) goto out_resource; WRITE64(stat.size); } if (bmval0 & FATTR4_WORD0_LINK_SUPPORT) { if 
((buflen -= 4) < 0) goto out_resource; WRITE32(1); } if (bmval0 & FATTR4_WORD0_SYMLINK_SUPPORT) { if ((buflen -= 4) < 0) goto out_resource; WRITE32(1); } if (bmval0 & FATTR4_WORD0_NAMED_ATTR) { if ((buflen -= 4) < 0) goto out_resource; WRITE32(0); } if (bmval0 & FATTR4_WORD0_FSID) { if ((buflen -= 16) < 0) goto out_resource; if (exp->ex_fslocs.migrated) { WRITE64(NFS4_REFERRAL_FSID_MAJOR); WRITE64(NFS4_REFERRAL_FSID_MINOR); } else switch(fsid_source(fhp)) { case FSIDSOURCE_FSID: WRITE64((u64)exp->ex_fsid); WRITE64((u64)0); break; case FSIDSOURCE_DEV: WRITE32(0); WRITE32(MAJOR(stat.dev)); WRITE32(0); WRITE32(MINOR(stat.dev)); break; case FSIDSOURCE_UUID: WRITEMEM(exp->ex_uuid, 16); break; } } if (bmval0 & FATTR4_WORD0_UNIQUE_HANDLES) { if ((buflen -= 4) < 0) goto out_resource; WRITE32(0); } if (bmval0 & FATTR4_WORD0_LEASE_TIME) { if ((buflen -= 4) < 0) goto out_resource; WRITE32(nfsd4_lease); } if (bmval0 & FATTR4_WORD0_RDATTR_ERROR) { if ((buflen -= 4) < 0) goto out_resource; WRITE32(rdattr_err); } if (bmval0 & FATTR4_WORD0_ACL) { struct nfs4_ace *ace; if (acl == NULL) { if ((buflen -= 4) < 0) goto out_resource; WRITE32(0); goto out_acl; } if ((buflen -= 4) < 0) goto out_resource; WRITE32(acl->naces); for (ace = acl->aces; ace < acl->aces + acl->naces; ace++) { if ((buflen -= 4*3) < 0) goto out_resource; WRITE32(ace->type); WRITE32(ace->flag); WRITE32(ace->access_mask & NFS4_ACE_MASK_ALL); status = nfsd4_encode_aclname(rqstp, ace->whotype, ace->who, ace->flag & NFS4_ACE_IDENTIFIER_GROUP, &p, &buflen); if (status == nfserr_resource) goto out_resource; if (status) goto out; } } out_acl: if (bmval0 & FATTR4_WORD0_ACLSUPPORT) { if ((buflen -= 4) < 0) goto out_resource; WRITE32(aclsupport ? 
ACL4_SUPPORT_ALLOW_ACL|ACL4_SUPPORT_DENY_ACL : 0); } if (bmval0 & FATTR4_WORD0_CANSETTIME) { if ((buflen -= 4) < 0) goto out_resource; WRITE32(1); } if (bmval0 & FATTR4_WORD0_CASE_INSENSITIVE) { if ((buflen -= 4) < 0) goto out_resource; WRITE32(0); } if (bmval0 & FATTR4_WORD0_CASE_PRESERVING) { if ((buflen -= 4) < 0) goto out_resource; WRITE32(1); } if (bmval0 & FATTR4_WORD0_CHOWN_RESTRICTED) { if ((buflen -= 4) < 0) goto out_resource; WRITE32(1); } if (bmval0 & FATTR4_WORD0_FILEHANDLE) { buflen -= (XDR_QUADLEN(fhp->fh_handle.fh_size) << 2) + 4; if (buflen < 0) goto out_resource; WRITE32(fhp->fh_handle.fh_size); WRITEMEM(&fhp->fh_handle.fh_base, fhp->fh_handle.fh_size); } if (bmval0 & FATTR4_WORD0_FILEID) { if ((buflen -= 8) < 0) goto out_resource; WRITE64(stat.ino); } if (bmval0 & FATTR4_WORD0_FILES_AVAIL) { if ((buflen -= 8) < 0) goto out_resource; WRITE64((u64) statfs.f_ffree); } if (bmval0 & FATTR4_WORD0_FILES_FREE) { if ((buflen -= 8) < 0) goto out_resource; WRITE64((u64) statfs.f_ffree); } if (bmval0 & FATTR4_WORD0_FILES_TOTAL) { if ((buflen -= 8) < 0) goto out_resource; WRITE64((u64) statfs.f_files); } if (bmval0 & FATTR4_WORD0_FS_LOCATIONS) { status = nfsd4_encode_fs_locations(rqstp, exp, &p, &buflen); if (status == nfserr_resource) goto out_resource; if (status) goto out; } if (bmval0 & FATTR4_WORD0_HOMOGENEOUS) { if ((buflen -= 4) < 0) goto out_resource; WRITE32(1); } if (bmval0 & FATTR4_WORD0_MAXFILESIZE) { if ((buflen -= 8) < 0) goto out_resource; WRITE64(~(u64)0); } if (bmval0 & FATTR4_WORD0_MAXLINK) { if ((buflen -= 4) < 0) goto out_resource; WRITE32(255); } if (bmval0 & FATTR4_WORD0_MAXNAME) { if ((buflen -= 4) < 0) goto out_resource; WRITE32(statfs.f_namelen); } if (bmval0 & FATTR4_WORD0_MAXREAD) { if ((buflen -= 8) < 0) goto out_resource; WRITE64((u64) svc_max_payload(rqstp)); } if (bmval0 & FATTR4_WORD0_MAXWRITE) { if ((buflen -= 8) < 0) goto out_resource; WRITE64((u64) svc_max_payload(rqstp)); } if (bmval1 & FATTR4_WORD1_MODE) { if ((buflen -= 4) 
< 0) goto out_resource; WRITE32(stat.mode & S_IALLUGO); } if (bmval1 & FATTR4_WORD1_NO_TRUNC) { if ((buflen -= 4) < 0) goto out_resource; WRITE32(1); } if (bmval1 & FATTR4_WORD1_NUMLINKS) { if ((buflen -= 4) < 0) goto out_resource; WRITE32(stat.nlink); } if (bmval1 & FATTR4_WORD1_OWNER) { status = nfsd4_encode_user(rqstp, stat.uid, &p, &buflen); if (status == nfserr_resource) goto out_resource; if (status) goto out; } if (bmval1 & FATTR4_WORD1_OWNER_GROUP) { status = nfsd4_encode_group(rqstp, stat.gid, &p, &buflen); if (status == nfserr_resource) goto out_resource; if (status) goto out; } if (bmval1 & FATTR4_WORD1_RAWDEV) { if ((buflen -= 8) < 0) goto out_resource; WRITE32((u32) MAJOR(stat.rdev)); WRITE32((u32) MINOR(stat.rdev)); } if (bmval1 & FATTR4_WORD1_SPACE_AVAIL) { if ((buflen -= 8) < 0) goto out_resource; dummy64 = (u64)statfs.f_bavail * (u64)statfs.f_bsize; WRITE64(dummy64); } if (bmval1 & FATTR4_WORD1_SPACE_FREE) { if ((buflen -= 8) < 0) goto out_resource; dummy64 = (u64)statfs.f_bfree * (u64)statfs.f_bsize; WRITE64(dummy64); } if (bmval1 & FATTR4_WORD1_SPACE_TOTAL) { if ((buflen -= 8) < 0) goto out_resource; dummy64 = (u64)statfs.f_blocks * (u64)statfs.f_bsize; WRITE64(dummy64); } if (bmval1 & FATTR4_WORD1_SPACE_USED) { if ((buflen -= 8) < 0) goto out_resource; dummy64 = (u64)stat.blocks << 9; WRITE64(dummy64); } if (bmval1 & FATTR4_WORD1_TIME_ACCESS) { if ((buflen -= 12) < 0) goto out_resource; WRITE64((s64)stat.atime.tv_sec); WRITE32(stat.atime.tv_nsec); } if (bmval1 & FATTR4_WORD1_TIME_DELTA) { if ((buflen -= 12) < 0) goto out_resource; WRITE32(0); WRITE32(1); WRITE32(0); } if (bmval1 & FATTR4_WORD1_TIME_METADATA) { if ((buflen -= 12) < 0) goto out_resource; WRITE64((s64)stat.ctime.tv_sec); WRITE32(stat.ctime.tv_nsec); } if (bmval1 & FATTR4_WORD1_TIME_MODIFY) { if ((buflen -= 12) < 0) goto out_resource; WRITE64((s64)stat.mtime.tv_sec); WRITE32(stat.mtime.tv_nsec); } if (bmval1 & FATTR4_WORD1_MOUNTED_ON_FILEID) { if ((buflen -= 8) < 0) goto 
out_resource; /* * Get parent's attributes if not ignoring crossmount * and this is the root of a cross-mounted filesystem. */ if (ignore_crossmnt == 0 && dentry == exp->ex_path.mnt->mnt_root) { struct path path = exp->ex_path; path_get(&path); while (follow_up(&path)) { if (path.dentry != path.mnt->mnt_root) break; } err = vfs_getattr(path.mnt, path.dentry, &stat); path_put(&path); if (err) goto out_nfserr; } WRITE64(stat.ino); } if (bmval2 & FATTR4_WORD2_SUPPATTR_EXCLCREAT) { if ((buflen -= 16) < 0) goto out_resource; WRITE32(3); WRITE32(NFSD_SUPPATTR_EXCLCREAT_WORD0); WRITE32(NFSD_SUPPATTR_EXCLCREAT_WORD1); WRITE32(NFSD_SUPPATTR_EXCLCREAT_WORD2); } *attrlenp = htonl((char *)p - (char *)attrlenp - 4); *countp = p - buffer; status = nfs_ok; out: kfree(acl); if (fhp == &tempfh) fh_put(&tempfh); return status; out_nfserr: status = nfserrno(err); goto out; out_resource: *countp = 0; status = nfserr_resource; goto out; out_serverfault: status = nfserr_serverfault; goto out; } static inline int attributes_need_mount(u32 *bmval) { if (bmval[0] & ~(FATTR4_WORD0_RDATTR_ERROR | FATTR4_WORD0_LEASE_TIME)) return 1; if (bmval[1] & ~FATTR4_WORD1_MOUNTED_ON_FILEID) return 1; return 0; } static __be32 nfsd4_encode_dirent_fattr(struct nfsd4_readdir *cd, const char *name, int namlen, __be32 *p, int *buflen) { struct svc_export *exp = cd->rd_fhp->fh_export; struct dentry *dentry; __be32 nfserr; int ignore_crossmnt = 0; dentry = lookup_one_len(name, cd->rd_fhp->fh_dentry, namlen); if (IS_ERR(dentry)) return nfserrno(PTR_ERR(dentry)); if (!dentry->d_inode) { /* * nfsd_buffered_readdir drops the i_mutex between * readdir and calling this callback, leaving a window * where this directory entry could have gone away. */ dput(dentry); return nfserr_noent; } exp_get(exp); /* * In the case of a mountpoint, the client may be asking for * attributes that are only properties of the underlying filesystem * as opposed to the cross-mounted file system. 
In such a case, * we will not follow the cross mount and will fill the attribtutes * directly from the mountpoint dentry. */ if (nfsd_mountpoint(dentry, exp)) { int err; if (!(exp->ex_flags & NFSEXP_V4ROOT) && !attributes_need_mount(cd->rd_bmval)) { ignore_crossmnt = 1; goto out_encode; } /* * Why the heck aren't we just using nfsd_lookup?? * Different "."/".." handling? Something else? * At least, add a comment here to explain.... */ err = nfsd_cross_mnt(cd->rd_rqstp, &dentry, &exp); if (err) { nfserr = nfserrno(err); goto out_put; } nfserr = check_nfsd_access(exp, cd->rd_rqstp); if (nfserr) goto out_put; } out_encode: nfserr = nfsd4_encode_fattr(NULL, exp, dentry, p, buflen, cd->rd_bmval, cd->rd_rqstp, ignore_crossmnt); out_put: dput(dentry); exp_put(exp); return nfserr; } static __be32 * nfsd4_encode_rdattr_error(__be32 *p, int buflen, __be32 nfserr) { __be32 *attrlenp; if (buflen < 6) return NULL; *p++ = htonl(2); *p++ = htonl(FATTR4_WORD0_RDATTR_ERROR); /* bmval0 */ *p++ = htonl(0); /* bmval1 */ attrlenp = p++; *p++ = nfserr; /* no htonl */ *attrlenp = htonl((char *)p - (char *)attrlenp - 4); return p; } static int nfsd4_encode_dirent(void *ccdv, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { struct readdir_cd *ccd = ccdv; struct nfsd4_readdir *cd = container_of(ccd, struct nfsd4_readdir, common); int buflen; __be32 *p = cd->buffer; __be32 *cookiep; __be32 nfserr = nfserr_toosmall; /* In nfsv4, "." and ".." never make it onto the wire.. 
*/ if (name && isdotent(name, namlen)) { cd->common.err = nfs_ok; return 0; } if (cd->offset) xdr_encode_hyper(cd->offset, (u64) offset); buflen = cd->buflen - 4 - XDR_QUADLEN(namlen); if (buflen < 0) goto fail; *p++ = xdr_one; /* mark entry present */ cookiep = p; p = xdr_encode_hyper(p, NFS_OFFSET_MAX); /* offset of next entry */ p = xdr_encode_array(p, name, namlen); /* name length & name */ nfserr = nfsd4_encode_dirent_fattr(cd, name, namlen, p, &buflen); switch (nfserr) { case nfs_ok: p += buflen; break; case nfserr_resource: nfserr = nfserr_toosmall; goto fail; case nfserr_noent: goto skip_entry; default: /* * If the client requested the RDATTR_ERROR attribute, * we stuff the error code into this attribute * and continue. If this attribute was not requested, * then in accordance with the spec, we fail the * entire READDIR operation(!) */ if (!(cd->rd_bmval[0] & FATTR4_WORD0_RDATTR_ERROR)) goto fail; p = nfsd4_encode_rdattr_error(p, buflen, nfserr); if (p == NULL) { nfserr = nfserr_toosmall; goto fail; } } cd->buflen -= (p - cd->buffer); cd->buffer = p; cd->offset = cookiep; skip_entry: cd->common.err = nfs_ok; return 0; fail: cd->common.err = nfserr; return -EINVAL; } static void nfsd4_encode_stateid(struct nfsd4_compoundres *resp, stateid_t *sid) { __be32 *p; RESERVE_SPACE(sizeof(stateid_t)); WRITE32(sid->si_generation); WRITEMEM(&sid->si_opaque, sizeof(stateid_opaque_t)); ADJUST_ARGS(); } static __be32 nfsd4_encode_access(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_access *access) { __be32 *p; if (!nfserr) { RESERVE_SPACE(8); WRITE32(access->ac_supported); WRITE32(access->ac_resp_access); ADJUST_ARGS(); } return nfserr; } static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_bind_conn_to_session *bcts) { __be32 *p; if (!nfserr) { RESERVE_SPACE(NFS4_MAX_SESSIONID_LEN + 8); WRITEMEM(bcts->sessionid.data, NFS4_MAX_SESSIONID_LEN); WRITE32(bcts->dir); /* XXX: ? 
*/ WRITE32(0); ADJUST_ARGS(); } return nfserr; } static __be32 nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_close *close) { ENCODE_SEQID_OP_HEAD; if (!nfserr) nfsd4_encode_stateid(resp, &close->cl_stateid); encode_seqid_op_tail(resp, save, nfserr); return nfserr; } static __be32 nfsd4_encode_commit(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_commit *commit) { __be32 *p; if (!nfserr) { RESERVE_SPACE(NFS4_VERIFIER_SIZE); WRITEMEM(commit->co_verf.data, NFS4_VERIFIER_SIZE); ADJUST_ARGS(); } return nfserr; } static __be32 nfsd4_encode_create(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_create *create) { __be32 *p; if (!nfserr) { RESERVE_SPACE(32); write_cinfo(&p, &create->cr_cinfo); WRITE32(2); WRITE32(create->cr_bmval[0]); WRITE32(create->cr_bmval[1]); ADJUST_ARGS(); } return nfserr; } static __be32 nfsd4_encode_getattr(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_getattr *getattr) { struct svc_fh *fhp = getattr->ga_fhp; int buflen; if (nfserr) return nfserr; buflen = resp->end - resp->p - (COMPOUND_ERR_SLACK_SPACE >> 2); nfserr = nfsd4_encode_fattr(fhp, fhp->fh_export, fhp->fh_dentry, resp->p, &buflen, getattr->ga_bmval, resp->rqstp, 0); if (!nfserr) resp->p += buflen; return nfserr; } static __be32 nfsd4_encode_getfh(struct nfsd4_compoundres *resp, __be32 nfserr, struct svc_fh **fhpp) { struct svc_fh *fhp = *fhpp; unsigned int len; __be32 *p; if (!nfserr) { len = fhp->fh_handle.fh_size; RESERVE_SPACE(len + 4); WRITE32(len); WRITEMEM(&fhp->fh_handle.fh_base, len); ADJUST_ARGS(); } return nfserr; } /* * Including all fields other than the name, a LOCK4denied structure requires * 8(clientid) + 4(namelen) + 8(offset) + 8(length) + 4(type) = 32 bytes. 
*/ static void nfsd4_encode_lock_denied(struct nfsd4_compoundres *resp, struct nfsd4_lock_denied *ld) { struct xdr_netobj *conf = &ld->ld_owner; __be32 *p; RESERVE_SPACE(32 + XDR_LEN(conf->len)); WRITE64(ld->ld_start); WRITE64(ld->ld_length); WRITE32(ld->ld_type); if (conf->len) { WRITEMEM(&ld->ld_clientid, 8); WRITE32(conf->len); WRITEMEM(conf->data, conf->len); kfree(conf->data); } else { /* non - nfsv4 lock in conflict, no clientid nor owner */ WRITE64((u64)0); /* clientid */ WRITE32(0); /* length of owner name */ } ADJUST_ARGS(); } static __be32 nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lock *lock) { ENCODE_SEQID_OP_HEAD; if (!nfserr) nfsd4_encode_stateid(resp, &lock->lk_resp_stateid); else if (nfserr == nfserr_denied) nfsd4_encode_lock_denied(resp, &lock->lk_denied); encode_seqid_op_tail(resp, save, nfserr); return nfserr; } static __be32 nfsd4_encode_lockt(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lockt *lockt) { if (nfserr == nfserr_denied) nfsd4_encode_lock_denied(resp, &lockt->lt_denied); return nfserr; } static __be32 nfsd4_encode_locku(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_locku *locku) { ENCODE_SEQID_OP_HEAD; if (!nfserr) nfsd4_encode_stateid(resp, &locku->lu_stateid); encode_seqid_op_tail(resp, save, nfserr); return nfserr; } static __be32 nfsd4_encode_link(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_link *link) { __be32 *p; if (!nfserr) { RESERVE_SPACE(20); write_cinfo(&p, &link->li_cinfo); ADJUST_ARGS(); } return nfserr; } static __be32 nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open *open) { __be32 *p; ENCODE_SEQID_OP_HEAD; if (nfserr) goto out; nfsd4_encode_stateid(resp, &open->op_stateid); RESERVE_SPACE(40); write_cinfo(&p, &open->op_cinfo); WRITE32(open->op_rflags); WRITE32(2); WRITE32(open->op_bmval[0]); WRITE32(open->op_bmval[1]); WRITE32(open->op_delegate_type); ADJUST_ARGS(); switch (open->op_delegate_type) { case 
NFS4_OPEN_DELEGATE_NONE: break; case NFS4_OPEN_DELEGATE_READ: nfsd4_encode_stateid(resp, &open->op_delegate_stateid); RESERVE_SPACE(20); WRITE32(open->op_recall); /* * TODO: ACE's in delegations */ WRITE32(NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE); WRITE32(0); WRITE32(0); WRITE32(0); /* XXX: is NULL principal ok? */ ADJUST_ARGS(); break; case NFS4_OPEN_DELEGATE_WRITE: nfsd4_encode_stateid(resp, &open->op_delegate_stateid); RESERVE_SPACE(32); WRITE32(0); /* * TODO: space_limit's in delegations */ WRITE32(NFS4_LIMIT_SIZE); WRITE32(~(u32)0); WRITE32(~(u32)0); /* * TODO: ACE's in delegations */ WRITE32(NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE); WRITE32(0); WRITE32(0); WRITE32(0); /* XXX: is NULL principal ok? */ ADJUST_ARGS(); break; case NFS4_OPEN_DELEGATE_NONE_EXT: /* 4.1 */ switch (open->op_why_no_deleg) { case WND4_CONTENTION: case WND4_RESOURCE: RESERVE_SPACE(8); WRITE32(open->op_why_no_deleg); WRITE32(0); /* deleg signaling not supported yet */ break; default: RESERVE_SPACE(4); WRITE32(open->op_why_no_deleg); } ADJUST_ARGS(); break; default: BUG(); } /* XXX save filehandle here */ out: encode_seqid_op_tail(resp, save, nfserr); return nfserr; } static __be32 nfsd4_encode_open_confirm(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open_confirm *oc) { ENCODE_SEQID_OP_HEAD; if (!nfserr) nfsd4_encode_stateid(resp, &oc->oc_resp_stateid); encode_seqid_op_tail(resp, save, nfserr); return nfserr; } static __be32 nfsd4_encode_open_downgrade(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open_downgrade *od) { ENCODE_SEQID_OP_HEAD; if (!nfserr) nfsd4_encode_stateid(resp, &od->od_stateid); encode_seqid_op_tail(resp, save, nfserr); return nfserr; } static __be32 nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_read *read) { u32 eof; int v, pn; unsigned long maxcount; long len; __be32 *p; if (nfserr) return nfserr; if (resp->xbuf->page_len) return nfserr_resource; RESERVE_SPACE(8); /* eof flag and byte count */ maxcount = 
svc_max_payload(resp->rqstp); if (maxcount > read->rd_length) maxcount = read->rd_length; len = maxcount; v = 0; while (len > 0) { pn = resp->rqstp->rq_resused; if (!resp->rqstp->rq_respages[pn]) { /* ran out of pages */ maxcount -= len; break; } resp->rqstp->rq_vec[v].iov_base = page_address(resp->rqstp->rq_respages[pn]); resp->rqstp->rq_vec[v].iov_len = len < PAGE_SIZE ? len : PAGE_SIZE; resp->rqstp->rq_resused++; v++; len -= PAGE_SIZE; } read->rd_vlen = v; nfserr = nfsd_read_file(read->rd_rqstp, read->rd_fhp, read->rd_filp, read->rd_offset, resp->rqstp->rq_vec, read->rd_vlen, &maxcount); if (nfserr) return nfserr; eof = (read->rd_offset + maxcount >= read->rd_fhp->fh_dentry->d_inode->i_size); WRITE32(eof); WRITE32(maxcount); ADJUST_ARGS(); resp->xbuf->head[0].iov_len = (char*)p - (char*)resp->xbuf->head[0].iov_base; resp->xbuf->page_len = maxcount; /* Use rest of head for padding and remaining ops: */ resp->xbuf->tail[0].iov_base = p; resp->xbuf->tail[0].iov_len = 0; if (maxcount&3) { RESERVE_SPACE(4); WRITE32(0); resp->xbuf->tail[0].iov_base += maxcount&3; resp->xbuf->tail[0].iov_len = 4 - (maxcount&3); ADJUST_ARGS(); } return 0; } static __be32 nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_readlink *readlink) { int maxcount; char *page; __be32 *p; if (nfserr) return nfserr; if (resp->xbuf->page_len) return nfserr_resource; if (!resp->rqstp->rq_respages[resp->rqstp->rq_resused]) return nfserr_resource; page = page_address(resp->rqstp->rq_respages[resp->rqstp->rq_resused++]); maxcount = PAGE_SIZE; RESERVE_SPACE(4); /* * XXX: By default, the ->readlink() VFS op will truncate symlinks * if they would overflow the buffer. Is this kosher in NFSv4? If * not, one easy fix is: if ->readlink() precisely fills the buffer, * assume that truncation occurred, and return NFS4ERR_RESOURCE. 
*/ nfserr = nfsd_readlink(readlink->rl_rqstp, readlink->rl_fhp, page, &maxcount); if (nfserr == nfserr_isdir) return nfserr_inval; if (nfserr) return nfserr; WRITE32(maxcount); ADJUST_ARGS(); resp->xbuf->head[0].iov_len = (char*)p - (char*)resp->xbuf->head[0].iov_base; resp->xbuf->page_len = maxcount; /* Use rest of head for padding and remaining ops: */ resp->xbuf->tail[0].iov_base = p; resp->xbuf->tail[0].iov_len = 0; if (maxcount&3) { RESERVE_SPACE(4); WRITE32(0); resp->xbuf->tail[0].iov_base += maxcount&3; resp->xbuf->tail[0].iov_len = 4 - (maxcount&3); ADJUST_ARGS(); } return 0; } static __be32 nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_readdir *readdir) { int maxcount; loff_t offset; __be32 *page, *savep, *tailbase; __be32 *p; if (nfserr) return nfserr; if (resp->xbuf->page_len) return nfserr_resource; if (!resp->rqstp->rq_respages[resp->rqstp->rq_resused]) return nfserr_resource; RESERVE_SPACE(NFS4_VERIFIER_SIZE); savep = p; /* XXX: Following NFSv3, we ignore the READDIR verifier for now. */ WRITE32(0); WRITE32(0); ADJUST_ARGS(); resp->xbuf->head[0].iov_len = ((char*)resp->p) - (char*)resp->xbuf->head[0].iov_base; tailbase = p; maxcount = PAGE_SIZE; if (maxcount > readdir->rd_maxcount) maxcount = readdir->rd_maxcount; /* * Convert from bytes to words, account for the two words already * written, make sure to leave two words at the end for the next * pointer and eof field. 
*/ maxcount = (maxcount >> 2) - 4; if (maxcount < 0) { nfserr = nfserr_toosmall; goto err_no_verf; } page = page_address(resp->rqstp->rq_respages[resp->rqstp->rq_resused++]); readdir->common.err = 0; readdir->buflen = maxcount; readdir->buffer = page; readdir->offset = NULL; offset = readdir->rd_cookie; nfserr = nfsd_readdir(readdir->rd_rqstp, readdir->rd_fhp, &offset, &readdir->common, nfsd4_encode_dirent); if (nfserr == nfs_ok && readdir->common.err == nfserr_toosmall && readdir->buffer == page) nfserr = nfserr_toosmall; if (nfserr) goto err_no_verf; if (readdir->offset) xdr_encode_hyper(readdir->offset, offset); p = readdir->buffer; *p++ = 0; /* no more entries */ *p++ = htonl(readdir->common.err == nfserr_eof); resp->xbuf->page_len = ((char*)p) - (char*)page_address( resp->rqstp->rq_respages[resp->rqstp->rq_resused-1]); /* Use rest of head for padding and remaining ops: */ resp->xbuf->tail[0].iov_base = tailbase; resp->xbuf->tail[0].iov_len = 0; resp->p = resp->xbuf->tail[0].iov_base; resp->end = resp->p + (PAGE_SIZE - resp->xbuf->head[0].iov_len)/4; return 0; err_no_verf: p = savep; ADJUST_ARGS(); return nfserr; } static __be32 nfsd4_encode_remove(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_remove *remove) { __be32 *p; if (!nfserr) { RESERVE_SPACE(20); write_cinfo(&p, &remove->rm_cinfo); ADJUST_ARGS(); } return nfserr; } static __be32 nfsd4_encode_rename(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_rename *rename) { __be32 *p; if (!nfserr) { RESERVE_SPACE(40); write_cinfo(&p, &rename->rn_sinfo); write_cinfo(&p, &rename->rn_tinfo); ADJUST_ARGS(); } return nfserr; } static __be32 nfsd4_do_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr,struct svc_export *exp) { int i = 0; u32 nflavs; struct exp_flavor_info *flavs; struct exp_flavor_info def_flavs[2]; __be32 *p; if (nfserr) goto out; if (exp->ex_nflavors) { flavs = exp->ex_flavors; nflavs = exp->ex_nflavors; } else { /* Handling of some defaults in absence of real 
secinfo: */ flavs = def_flavs; if (exp->ex_client->flavour->flavour == RPC_AUTH_UNIX) { nflavs = 2; flavs[0].pseudoflavor = RPC_AUTH_UNIX; flavs[1].pseudoflavor = RPC_AUTH_NULL; } else if (exp->ex_client->flavour->flavour == RPC_AUTH_GSS) { nflavs = 1; flavs[0].pseudoflavor = svcauth_gss_flavor(exp->ex_client); } else { nflavs = 1; flavs[0].pseudoflavor = exp->ex_client->flavour->flavour; } } RESERVE_SPACE(4); WRITE32(nflavs); ADJUST_ARGS(); for (i = 0; i < nflavs; i++) { u32 flav = flavs[i].pseudoflavor; struct gss_api_mech *gm = gss_mech_get_by_pseudoflavor(flav); if (gm) { RESERVE_SPACE(4); WRITE32(RPC_AUTH_GSS); ADJUST_ARGS(); RESERVE_SPACE(4 + gm->gm_oid.len); WRITE32(gm->gm_oid.len); WRITEMEM(gm->gm_oid.data, gm->gm_oid.len); ADJUST_ARGS(); RESERVE_SPACE(4); WRITE32(0); /* qop */ ADJUST_ARGS(); RESERVE_SPACE(4); WRITE32(gss_pseudoflavor_to_service(gm, flav)); ADJUST_ARGS(); gss_mech_put(gm); } else { RESERVE_SPACE(4); WRITE32(flav); ADJUST_ARGS(); } } out: if (exp) exp_put(exp); return nfserr; } static __be32 nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_secinfo *secinfo) { return nfsd4_do_encode_secinfo(resp, nfserr, secinfo->si_exp); } static __be32 nfsd4_encode_secinfo_no_name(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_secinfo_no_name *secinfo) { return nfsd4_do_encode_secinfo(resp, nfserr, secinfo->sin_exp); } /* * The SETATTR encode routine is special -- it always encodes a bitmap, * regardless of the error status. 
*/ static __be32 nfsd4_encode_setattr(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_setattr *setattr) { __be32 *p; RESERVE_SPACE(12); if (nfserr) { WRITE32(2); WRITE32(0); WRITE32(0); } else { WRITE32(2); WRITE32(setattr->sa_bmval[0]); WRITE32(setattr->sa_bmval[1]); } ADJUST_ARGS(); return nfserr; } static __be32 nfsd4_encode_setclientid(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_setclientid *scd) { __be32 *p; if (!nfserr) { RESERVE_SPACE(8 + NFS4_VERIFIER_SIZE); WRITEMEM(&scd->se_clientid, 8); WRITEMEM(&scd->se_confirm, NFS4_VERIFIER_SIZE); ADJUST_ARGS(); } else if (nfserr == nfserr_clid_inuse) { RESERVE_SPACE(8); WRITE32(0); WRITE32(0); ADJUST_ARGS(); } return nfserr; } static __be32 nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_write *write) { __be32 *p; if (!nfserr) { RESERVE_SPACE(16); WRITE32(write->wr_bytes_written); WRITE32(write->wr_how_written); WRITEMEM(write->wr_verifier.data, NFS4_VERIFIER_SIZE); ADJUST_ARGS(); } return nfserr; } static __be32 nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_exchange_id *exid) { __be32 *p; char *major_id; char *server_scope; int major_id_sz; int server_scope_sz; uint64_t minor_id = 0; if (nfserr) return nfserr; major_id = utsname()->nodename; major_id_sz = strlen(major_id); server_scope = utsname()->nodename; server_scope_sz = strlen(server_scope); RESERVE_SPACE( 8 /* eir_clientid */ + 4 /* eir_sequenceid */ + 4 /* eir_flags */ + 4 /* spr_how (SP4_NONE) */ + 8 /* so_minor_id */ + 4 /* so_major_id.len */ + (XDR_QUADLEN(major_id_sz) * 4) + 4 /* eir_server_scope.len */ + (XDR_QUADLEN(server_scope_sz) * 4) + 4 /* eir_server_impl_id.count (0) */); WRITEMEM(&exid->clientid, 8); WRITE32(exid->seqid); WRITE32(exid->flags); /* state_protect4_r. 
Currently only support SP4_NONE */ BUG_ON(exid->spa_how != SP4_NONE); WRITE32(exid->spa_how); /* The server_owner struct */ WRITE64(minor_id); /* Minor id */ /* major id */ WRITE32(major_id_sz); WRITEMEM(major_id, major_id_sz); /* Server scope */ WRITE32(server_scope_sz); WRITEMEM(server_scope, server_scope_sz); /* Implementation id */ WRITE32(0); /* zero length nfs_impl_id4 array */ ADJUST_ARGS(); return 0; } static __be32 nfsd4_encode_create_session(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_create_session *sess) { __be32 *p; if (nfserr) return nfserr; RESERVE_SPACE(24); WRITEMEM(sess->sessionid.data, NFS4_MAX_SESSIONID_LEN); WRITE32(sess->seqid); WRITE32(sess->flags); ADJUST_ARGS(); RESERVE_SPACE(28); WRITE32(0); /* headerpadsz */ WRITE32(sess->fore_channel.maxreq_sz); WRITE32(sess->fore_channel.maxresp_sz); WRITE32(sess->fore_channel.maxresp_cached); WRITE32(sess->fore_channel.maxops); WRITE32(sess->fore_channel.maxreqs); WRITE32(sess->fore_channel.nr_rdma_attrs); ADJUST_ARGS(); if (sess->fore_channel.nr_rdma_attrs) { RESERVE_SPACE(4); WRITE32(sess->fore_channel.rdma_attrs); ADJUST_ARGS(); } RESERVE_SPACE(28); WRITE32(0); /* headerpadsz */ WRITE32(sess->back_channel.maxreq_sz); WRITE32(sess->back_channel.maxresp_sz); WRITE32(sess->back_channel.maxresp_cached); WRITE32(sess->back_channel.maxops); WRITE32(sess->back_channel.maxreqs); WRITE32(sess->back_channel.nr_rdma_attrs); ADJUST_ARGS(); if (sess->back_channel.nr_rdma_attrs) { RESERVE_SPACE(4); WRITE32(sess->back_channel.rdma_attrs); ADJUST_ARGS(); } return 0; } static __be32 nfsd4_encode_destroy_session(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_destroy_session *destroy_session) { return nfserr; } static __be32 nfsd4_encode_free_stateid(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_free_stateid *free_stateid) { __be32 *p; if (nfserr) return nfserr; RESERVE_SPACE(4); WRITE32(nfserr); ADJUST_ARGS(); return nfserr; } static __be32 nfsd4_encode_sequence(struct 
nfsd4_compoundres *resp, int nfserr, struct nfsd4_sequence *seq) { __be32 *p; if (nfserr) return nfserr; RESERVE_SPACE(NFS4_MAX_SESSIONID_LEN + 20); WRITEMEM(seq->sessionid.data, NFS4_MAX_SESSIONID_LEN); WRITE32(seq->seqid); WRITE32(seq->slotid); /* Note slotid's are numbered from zero: */ WRITE32(seq->maxslots - 1); /* sr_highest_slotid */ WRITE32(seq->maxslots - 1); /* sr_target_highest_slotid */ WRITE32(seq->status_flags); ADJUST_ARGS(); resp->cstate.datap = p; /* DRC cache data pointer */ return 0; } __be32 nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_test_stateid *test_stateid) { struct nfsd4_test_stateid_id *stateid, *next; __be32 *p; if (nfserr) return nfserr; RESERVE_SPACE(4 + (4 * test_stateid->ts_num_ids)); *p++ = htonl(test_stateid->ts_num_ids); list_for_each_entry_safe(stateid, next, &test_stateid->ts_stateid_list, ts_id_list) { *p++ = stateid->ts_id_status; } ADJUST_ARGS(); return nfserr; } static __be32 nfsd4_encode_noop(struct nfsd4_compoundres *resp, __be32 nfserr, void *p) { return nfserr; } typedef __be32(* nfsd4_enc)(struct nfsd4_compoundres *, __be32, void *); /* * Note: nfsd4_enc_ops vector is shared for v4.0 and v4.1 * since we don't need to filter out obsolete ops as this is * done in the decoding phase. 
*/ static nfsd4_enc nfsd4_enc_ops[] = { [OP_ACCESS] = (nfsd4_enc)nfsd4_encode_access, [OP_CLOSE] = (nfsd4_enc)nfsd4_encode_close, [OP_COMMIT] = (nfsd4_enc)nfsd4_encode_commit, [OP_CREATE] = (nfsd4_enc)nfsd4_encode_create, [OP_DELEGPURGE] = (nfsd4_enc)nfsd4_encode_noop, [OP_DELEGRETURN] = (nfsd4_enc)nfsd4_encode_noop, [OP_GETATTR] = (nfsd4_enc)nfsd4_encode_getattr, [OP_GETFH] = (nfsd4_enc)nfsd4_encode_getfh, [OP_LINK] = (nfsd4_enc)nfsd4_encode_link, [OP_LOCK] = (nfsd4_enc)nfsd4_encode_lock, [OP_LOCKT] = (nfsd4_enc)nfsd4_encode_lockt, [OP_LOCKU] = (nfsd4_enc)nfsd4_encode_locku, [OP_LOOKUP] = (nfsd4_enc)nfsd4_encode_noop, [OP_LOOKUPP] = (nfsd4_enc)nfsd4_encode_noop, [OP_NVERIFY] = (nfsd4_enc)nfsd4_encode_noop, [OP_OPEN] = (nfsd4_enc)nfsd4_encode_open, [OP_OPENATTR] = (nfsd4_enc)nfsd4_encode_noop, [OP_OPEN_CONFIRM] = (nfsd4_enc)nfsd4_encode_open_confirm, [OP_OPEN_DOWNGRADE] = (nfsd4_enc)nfsd4_encode_open_downgrade, [OP_PUTFH] = (nfsd4_enc)nfsd4_encode_noop, [OP_PUTPUBFH] = (nfsd4_enc)nfsd4_encode_noop, [OP_PUTROOTFH] = (nfsd4_enc)nfsd4_encode_noop, [OP_READ] = (nfsd4_enc)nfsd4_encode_read, [OP_READDIR] = (nfsd4_enc)nfsd4_encode_readdir, [OP_READLINK] = (nfsd4_enc)nfsd4_encode_readlink, [OP_REMOVE] = (nfsd4_enc)nfsd4_encode_remove, [OP_RENAME] = (nfsd4_enc)nfsd4_encode_rename, [OP_RENEW] = (nfsd4_enc)nfsd4_encode_noop, [OP_RESTOREFH] = (nfsd4_enc)nfsd4_encode_noop, [OP_SAVEFH] = (nfsd4_enc)nfsd4_encode_noop, [OP_SECINFO] = (nfsd4_enc)nfsd4_encode_secinfo, [OP_SETATTR] = (nfsd4_enc)nfsd4_encode_setattr, [OP_SETCLIENTID] = (nfsd4_enc)nfsd4_encode_setclientid, [OP_SETCLIENTID_CONFIRM] = (nfsd4_enc)nfsd4_encode_noop, [OP_VERIFY] = (nfsd4_enc)nfsd4_encode_noop, [OP_WRITE] = (nfsd4_enc)nfsd4_encode_write, [OP_RELEASE_LOCKOWNER] = (nfsd4_enc)nfsd4_encode_noop, /* NFSv4.1 operations */ [OP_BACKCHANNEL_CTL] = (nfsd4_enc)nfsd4_encode_noop, [OP_BIND_CONN_TO_SESSION] = (nfsd4_enc)nfsd4_encode_bind_conn_to_session, [OP_EXCHANGE_ID] = (nfsd4_enc)nfsd4_encode_exchange_id, 
[OP_CREATE_SESSION] = (nfsd4_enc)nfsd4_encode_create_session, [OP_DESTROY_SESSION] = (nfsd4_enc)nfsd4_encode_destroy_session, [OP_FREE_STATEID] = (nfsd4_enc)nfsd4_encode_free_stateid, [OP_GET_DIR_DELEGATION] = (nfsd4_enc)nfsd4_encode_noop, [OP_GETDEVICEINFO] = (nfsd4_enc)nfsd4_encode_noop, [OP_GETDEVICELIST] = (nfsd4_enc)nfsd4_encode_noop, [OP_LAYOUTCOMMIT] = (nfsd4_enc)nfsd4_encode_noop, [OP_LAYOUTGET] = (nfsd4_enc)nfsd4_encode_noop, [OP_LAYOUTRETURN] = (nfsd4_enc)nfsd4_encode_noop, [OP_SECINFO_NO_NAME] = (nfsd4_enc)nfsd4_encode_secinfo_no_name, [OP_SEQUENCE] = (nfsd4_enc)nfsd4_encode_sequence, [OP_SET_SSV] = (nfsd4_enc)nfsd4_encode_noop, [OP_TEST_STATEID] = (nfsd4_enc)nfsd4_encode_test_stateid, [OP_WANT_DELEGATION] = (nfsd4_enc)nfsd4_encode_noop, [OP_DESTROY_CLIENTID] = (nfsd4_enc)nfsd4_encode_noop, [OP_RECLAIM_COMPLETE] = (nfsd4_enc)nfsd4_encode_noop, }; /* * Calculate the total amount of memory that the compound response has taken * after encoding the current operation with pad. * * pad: if operation is non-idempotent, pad was calculate by op_rsize_bop() * which was specified at nfsd4_operation, else pad is zero. * * Compare this length to the session se_fmaxresp_sz and se_fmaxresp_cached. * * Our se_fmaxresp_cached will always be a multiple of PAGE_SIZE, and so * will be at least a page and will therefore hold the xdr_buf head. 
*/ int nfsd4_check_resp_size(struct nfsd4_compoundres *resp, u32 pad) { struct xdr_buf *xb = &resp->rqstp->rq_res; struct nfsd4_session *session = NULL; struct nfsd4_slot *slot = resp->cstate.slot; u32 length, tlen = 0; if (!nfsd4_has_session(&resp->cstate)) return 0; session = resp->cstate.session; if (session == NULL) return 0; if (xb->page_len == 0) { length = (char *)resp->p - (char *)xb->head[0].iov_base + pad; } else { if (xb->tail[0].iov_base && xb->tail[0].iov_len > 0) tlen = (char *)resp->p - (char *)xb->tail[0].iov_base; length = xb->head[0].iov_len + xb->page_len + tlen + pad; } dprintk("%s length %u, xb->page_len %u tlen %u pad %u\n", __func__, length, xb->page_len, tlen, pad); if (length > session->se_fchannel.maxresp_sz) return nfserr_rep_too_big; if ((slot->sl_flags & NFSD4_SLOT_CACHETHIS) && length > session->se_fchannel.maxresp_cached) return nfserr_rep_too_big_to_cache; return 0; } void nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op) { __be32 *statp; __be32 *p; RESERVE_SPACE(8); WRITE32(op->opnum); statp = p++; /* to be backfilled at the end */ ADJUST_ARGS(); if (op->opnum == OP_ILLEGAL) goto status; BUG_ON(op->opnum < 0 || op->opnum >= ARRAY_SIZE(nfsd4_enc_ops) || !nfsd4_enc_ops[op->opnum]); op->status = nfsd4_enc_ops[op->opnum](resp, op->status, &op->u); /* nfsd4_check_drc_limit guarantees enough room for error status */ if (!op->status) op->status = nfsd4_check_resp_size(resp, 0); status: /* * Note: We write the status directly, instead of using WRITE32(), * since it is already in network byte order. */ *statp = op->status; } /* * Encode the reply stored in the stateowner reply cache * * XDR note: do not encode rp->rp_buflen: the buffer contains the * previously sent already encoded operation. 
* * called with nfs4_lock_state() held */ void nfsd4_encode_replay(struct nfsd4_compoundres *resp, struct nfsd4_op *op) { __be32 *p; struct nfs4_replay *rp = op->replay; BUG_ON(!rp); RESERVE_SPACE(8); WRITE32(op->opnum); *p++ = rp->rp_status; /* already xdr'ed */ ADJUST_ARGS(); RESERVE_SPACE(rp->rp_buflen); WRITEMEM(rp->rp_buf, rp->rp_buflen); ADJUST_ARGS(); } int nfs4svc_encode_voidres(struct svc_rqst *rqstp, __be32 *p, void *dummy) { return xdr_ressize_check(rqstp, p); } int nfsd4_release_compoundargs(void *rq, __be32 *p, void *resp) { struct svc_rqst *rqstp = rq; struct nfsd4_compoundargs *args = rqstp->rq_argp; if (args->ops != args->iops) { kfree(args->ops); args->ops = args->iops; } kfree(args->tmpp); args->tmpp = NULL; while (args->to_free) { struct tmpbuf *tb = args->to_free; args->to_free = tb->next; tb->release(tb->buf); kfree(tb); } return 1; } int nfs4svc_decode_compoundargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compoundargs *args) { args->p = p; args->end = rqstp->rq_arg.head[0].iov_base + rqstp->rq_arg.head[0].iov_len; args->pagelist = rqstp->rq_arg.pages; args->pagelen = rqstp->rq_arg.page_len; args->tmpp = NULL; args->to_free = NULL; args->ops = args->iops; args->rqstp = rqstp; return !nfsd4_decode_compound(args); } int nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compoundres *resp) { /* * All that remains is to write the tag and operation count... 
*/ struct nfsd4_compound_state *cs = &resp->cstate; struct kvec *iov; p = resp->tagp; *p++ = htonl(resp->taglen); memcpy(p, resp->tag, resp->taglen); p += XDR_QUADLEN(resp->taglen); *p++ = htonl(resp->opcnt); if (rqstp->rq_res.page_len) iov = &rqstp->rq_res.tail[0]; else iov = &rqstp->rq_res.head[0]; iov->iov_len = ((char*)resp->p) - (char*)iov->iov_base; BUG_ON(iov->iov_len > PAGE_SIZE); if (nfsd4_has_session(cs)) { if (cs->status != nfserr_replay_cache) { nfsd4_store_cache_entry(resp); cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE; } /* Renew the clientid on success and on replay */ release_session_client(cs->session); nfsd4_put_session(cs->session); } return 1; } /* * Local variables: * c-basic-offset: 8 * End: */
gpl-2.0
shianyow/kernel-android-galaxy-s2-t989
drivers/net/wireless/iwmc3200wifi/rx.c
757
49105
/* * Intel Wireless Multicomm 3200 WiFi driver * * Copyright (C) 2009 Intel Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * * Intel Corporation <ilw@linux.intel.com> * Samuel Ortiz <samuel.ortiz@intel.com> * Zhu Yi <yi.zhu@intel.com> * */ #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/sched.h> #include <linux/etherdevice.h> #include <linux/wireless.h> #include <linux/ieee80211.h> #include <linux/if_arp.h> #include <linux/list.h> #include <linux/slab.h> #include <net/iw_handler.h> #include "iwm.h" #include "debug.h" #include "hal.h" #include "umac.h" #include "lmac.h" #include "commands.h" #include "rx.h" #include "cfg80211.h" #include "eeprom.h" static int iwm_rx_check_udma_hdr(struct iwm_udma_in_hdr *hdr) { if ((le32_to_cpu(hdr->cmd) == UMAC_PAD_TERMINAL) || (le32_to_cpu(hdr->size) == UMAC_PAD_TERMINAL)) return -EINVAL; return 0; } static inline int iwm_rx_resp_size(struct iwm_udma_in_hdr *hdr) { return ALIGN(le32_to_cpu(hdr->size) + sizeof(struct iwm_udma_in_hdr), 16); } /* * Notification handlers: * * For every possible notification we can receive from the * target, we have a handler. * When we get a target notification, and there is no one * waiting for it, it's just processed through the rx code * path: * * iwm_rx_handle() * -> iwm_rx_handle_umac() * -> iwm_rx_handle_wifi() * -> iwm_rx_handle_resp() * -> iwm_ntf_*() * * OR * * -> iwm_rx_handle_non_wifi() * * If there are processes waiting for this notification, then * iwm_rx_handle_wifi() just wakes those processes up and they * grab the pending notification. */ static int iwm_ntf_error(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size, struct iwm_wifi_cmd *cmd) { struct iwm_umac_notif_error *error; struct iwm_fw_error_hdr *fw_err; error = (struct iwm_umac_notif_error *)buf; fw_err = &error->err; memcpy(iwm->last_fw_err, fw_err, sizeof(struct iwm_fw_error_hdr)); IWM_ERR(iwm, "%cMAC FW ERROR:\n", (le32_to_cpu(fw_err->category) == UMAC_SYS_ERR_CAT_LMAC) ? 
'L' : 'U'); IWM_ERR(iwm, "\tCategory: %d\n", le32_to_cpu(fw_err->category)); IWM_ERR(iwm, "\tStatus: 0x%x\n", le32_to_cpu(fw_err->status)); IWM_ERR(iwm, "\tPC: 0x%x\n", le32_to_cpu(fw_err->pc)); IWM_ERR(iwm, "\tblink1: %d\n", le32_to_cpu(fw_err->blink1)); IWM_ERR(iwm, "\tblink2: %d\n", le32_to_cpu(fw_err->blink2)); IWM_ERR(iwm, "\tilink1: %d\n", le32_to_cpu(fw_err->ilink1)); IWM_ERR(iwm, "\tilink2: %d\n", le32_to_cpu(fw_err->ilink2)); IWM_ERR(iwm, "\tData1: 0x%x\n", le32_to_cpu(fw_err->data1)); IWM_ERR(iwm, "\tData2: 0x%x\n", le32_to_cpu(fw_err->data2)); IWM_ERR(iwm, "\tLine number: %d\n", le32_to_cpu(fw_err->line_num)); IWM_ERR(iwm, "\tUMAC status: 0x%x\n", le32_to_cpu(fw_err->umac_status)); IWM_ERR(iwm, "\tLMAC status: 0x%x\n", le32_to_cpu(fw_err->lmac_status)); IWM_ERR(iwm, "\tSDIO status: 0x%x\n", le32_to_cpu(fw_err->sdio_status)); iwm_resetting(iwm); return 0; } static int iwm_ntf_umac_alive(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size, struct iwm_wifi_cmd *cmd) { struct iwm_umac_notif_alive *alive_resp = (struct iwm_umac_notif_alive *)(buf); u16 status = le16_to_cpu(alive_resp->status); if (status == UMAC_NTFY_ALIVE_STATUS_ERR) { IWM_ERR(iwm, "Receive error UMAC_ALIVE\n"); return -EIO; } iwm_tx_credit_init_pools(iwm, alive_resp); return 0; } static int iwm_ntf_init_complete(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size, struct iwm_wifi_cmd *cmd) { struct wiphy *wiphy = iwm_to_wiphy(iwm); struct iwm_umac_notif_init_complete *init_complete = (struct iwm_umac_notif_init_complete *)(buf); u16 status = le16_to_cpu(init_complete->status); bool blocked = (status == UMAC_NTFY_INIT_COMPLETE_STATUS_ERR); if (blocked) IWM_DBG_NTF(iwm, DBG, "Hardware rf kill is on (radio off)\n"); else IWM_DBG_NTF(iwm, DBG, "Hardware rf kill is off (radio on)\n"); wiphy_rfkill_set_hw_state(wiphy, blocked); return 0; } static int iwm_ntf_tx_credit_update(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size, struct iwm_wifi_cmd *cmd) { int pool_nr, total_freed_pages; 
    unsigned long pool_map;
    int i, id;
    struct iwm_umac_notif_page_dealloc *dealloc =
            (struct iwm_umac_notif_page_dealloc *)buf;

    pool_nr = GET_VAL32(dealloc->changes, UMAC_DEALLOC_NTFY_CHANGES_CNT);
    pool_map = GET_VAL32(dealloc->changes, UMAC_DEALLOC_NTFY_CHANGES_MSK);

    IWM_DBG_TX(iwm, DBG, "UMAC dealloc notification: pool nr %d, "
               "update map 0x%lx\n", pool_nr, pool_map);

    spin_lock(&iwm->tx_credit.lock);

    /* Credit each pool flagged in the bitmap with its freed page count. */
    for (i = 0; i < pool_nr; i++) {
        id = GET_VAL32(dealloc->grp_info[i], UMAC_DEALLOC_NTFY_GROUP_NUM);
        if (test_bit(id, &pool_map)) {
            total_freed_pages = GET_VAL32(dealloc->grp_info[i],
                                          UMAC_DEALLOC_NTFY_PAGE_CNT);
            iwm_tx_credit_inc(iwm, id, total_freed_pages);
        }
    }

    spin_unlock(&iwm->tx_credit.lock);

    return 0;
}

/* UMAC reset acknowledgement: nothing to do beyond logging. */
static int iwm_ntf_umac_reset(struct iwm_priv *iwm, u8 *buf,
                              unsigned long buf_size, struct iwm_wifi_cmd *cmd)
{
    IWM_DBG_NTF(iwm, DBG, "UMAC RESET done\n");

    return 0;
}

/*
 * LMAC alive reply: log the firmware version (major/minor bytes read
 * directly from the raw notification buffer at offsets 9 and 8).
 */
static int iwm_ntf_lmac_version(struct iwm_priv *iwm, u8 *buf,
                                unsigned long buf_size,
                                struct iwm_wifi_cmd *cmd)
{
    IWM_DBG_NTF(iwm, INFO, "LMAC Version: %x.%x\n", buf[9], buf[8]);

    return 0;
}

/* REPLY_TX notification: debug-log the LMAC TX response fields. */
static int iwm_ntf_tx(struct iwm_priv *iwm, u8 *buf,
                      unsigned long buf_size, struct iwm_wifi_cmd *cmd)
{
    struct iwm_lmac_tx_resp *tx_resp;
    struct iwm_umac_wifi_in_hdr *hdr;

    tx_resp = (struct iwm_lmac_tx_resp *)
            (buf + sizeof(struct iwm_umac_wifi_in_hdr));
    hdr = (struct iwm_umac_wifi_in_hdr *)buf;

    IWM_DBG_TX(iwm, DBG, "REPLY_TX, buf size: %lu\n", buf_size);
    IWM_DBG_TX(iwm, DBG, "Seqnum: %d\n",
               le16_to_cpu(hdr->sw_hdr.cmd.seq_num));
    IWM_DBG_TX(iwm, DBG, "\tFrame cnt: %d\n", tx_resp->frame_cnt);
    IWM_DBG_TX(iwm, DBG, "\tRetry cnt: %d\n",
               le16_to_cpu(tx_resp->retry_cnt));
    IWM_DBG_TX(iwm, DBG, "\tSeq ctl: %d\n", le16_to_cpu(tx_resp->seq_ctl));
    IWM_DBG_TX(iwm, DBG, "\tByte cnt: %d\n",
               le16_to_cpu(tx_resp->byte_cnt));
    IWM_DBG_TX(iwm, DBG, "\tStatus: 0x%x\n", le32_to_cpu(tx_resp->status));

    return 0;
}

/*
 * Calibration result: stash the payload in iwm->calib_res[opcode],
 * (re)allocating the buffer if absent or too small, and mark the
 * opcode done in calib_done_map.
 */
static int iwm_ntf_calib_res(struct iwm_priv *iwm, u8 *buf,
                             unsigned long buf_size, struct iwm_wifi_cmd *cmd)
{
    u8 opcode;
    u8 *calib_buf;
    struct iwm_lmac_calib_hdr *hdr = (struct iwm_lmac_calib_hdr *)
            (buf + sizeof(struct iwm_umac_wifi_in_hdr));

    opcode = hdr->opcode;

    /* Opcode must fall inside the valid calibration range. */
    BUG_ON(opcode >= CALIBRATION_CMD_NUM ||
           opcode < PHY_CALIBRATE_OPCODES_NUM);

    IWM_DBG_NTF(iwm, DBG, "Store calibration result for opcode: %d\n",
                opcode);

    buf_size -= sizeof(struct iwm_umac_wifi_in_hdr);
    calib_buf = iwm->calib_res[opcode].buf;

    if (!calib_buf || (iwm->calib_res[opcode].size < buf_size)) {
        /* Existing buffer missing or too small: grow it. */
        kfree(calib_buf);
        calib_buf = kzalloc(buf_size, GFP_KERNEL);
        if (!calib_buf) {
            IWM_ERR(iwm, "Memory allocation failed: calib_res\n");
            return -ENOMEM;
        }

        iwm->calib_res[opcode].buf = calib_buf;
        iwm->calib_res[opcode].size = buf_size;
    }

    memcpy(calib_buf, hdr, buf_size);
    set_bit(opcode - PHY_CALIBRATE_OPCODES_NUM, &iwm->calib_done_map);

    return 0;
}

/* Calibration-complete notification: log only. */
static int iwm_ntf_calib_complete(struct iwm_priv *iwm, u8 *buf,
                                  unsigned long buf_size,
                                  struct iwm_wifi_cmd *cmd)
{
    IWM_DBG_NTF(iwm, DBG, "Calibration completed\n");

    return 0;
}

/* Calibration CFG response: log the returned status. */
static int iwm_ntf_calib_cfg(struct iwm_priv *iwm, u8 *buf,
                             unsigned long buf_size, struct iwm_wifi_cmd *cmd)
{
    struct iwm_lmac_cal_cfg_resp *cal_resp;

    cal_resp = (struct iwm_lmac_cal_cfg_resp *)
            (buf + sizeof(struct iwm_umac_wifi_in_hdr));

    IWM_DBG_NTF(iwm, DBG, "Calibration CFG command status: %d\n",
                le32_to_cpu(cal_resp->status));

    return 0;
}

/* WiFi core status: OR the reported status bits into core_enabled. */
static int iwm_ntf_wifi_status(struct iwm_priv *iwm, u8 *buf,
                               unsigned long buf_size,
                               struct iwm_wifi_cmd *cmd)
{
    struct iwm_umac_notif_wifi_status *status =
            (struct iwm_umac_notif_wifi_status *)buf;

    iwm->core_enabled |= le16_to_cpu(status->status);

    return 0;
}

/*
 * Allocate a ticket-list node holding a private copy of @ticket.
 * Returns an ERR_PTR on allocation failure.
 */
static struct iwm_rx_ticket_node *
iwm_rx_ticket_node_alloc(struct iwm_priv *iwm, struct iwm_rx_ticket *ticket)
{
    struct iwm_rx_ticket_node *ticket_node;

    ticket_node = kzalloc(sizeof(struct iwm_rx_ticket_node), GFP_KERNEL);
    if (!ticket_node) {
        IWM_ERR(iwm, "Couldn't allocate ticket node\n");
        return ERR_PTR(-ENOMEM);
    }

    ticket_node->ticket = kzalloc(sizeof(struct iwm_rx_ticket), GFP_KERNEL);
    if (!ticket_node->ticket) {
        IWM_ERR(iwm, "Couldn't allocate RX ticket\n");
        kfree(ticket_node);
        return ERR_PTR(-ENOMEM);
    }

    memcpy(ticket_node->ticket, ticket, sizeof(struct iwm_rx_ticket));
    INIT_LIST_HEAD(&ticket_node->node);

    return ticket_node;
}

/* Free a ticket node and the ticket copy it owns. */
static void iwm_rx_ticket_node_free(struct iwm_rx_ticket_node *ticket_node)
{
    kfree(ticket_node->ticket);
    kfree(ticket_node);
}

/*
 * Look up (and unlink) the pending RX packet with sequence id @id from
 * its hash bucket.  Returns NULL when no such packet is queued.
 */
static struct iwm_rx_packet *iwm_rx_packet_get(struct iwm_priv *iwm, u16 id)
{
    u8 id_hash = IWM_RX_ID_GET_HASH(id);
    struct iwm_rx_packet *packet;

    spin_lock(&iwm->packet_lock[id_hash]);
    list_for_each_entry(packet, &iwm->rx_packets[id_hash], node)
        if (packet->id == id) {
            list_del(&packet->node);
            spin_unlock(&iwm->packet_lock[id_hash]);
            return packet;
        }

    spin_unlock(&iwm->packet_lock[id_hash]);
    return NULL;
}

/*
 * Allocate an RX packet node and an skb of @size bytes, copying the
 * payload from @buf.  Returns an ERR_PTR on allocation failure.
 */
static struct iwm_rx_packet *iwm_rx_packet_alloc(struct iwm_priv *iwm, u8 *buf,
                                                 u32 size, u16 id)
{
    struct iwm_rx_packet *packet;

    packet = kzalloc(sizeof(struct iwm_rx_packet), GFP_KERNEL);
    if (!packet) {
        IWM_ERR(iwm, "Couldn't allocate packet\n");
        return ERR_PTR(-ENOMEM);
    }

    packet->skb = dev_alloc_skb(size);
    if (!packet->skb) {
        IWM_ERR(iwm, "Couldn't allocate packet SKB\n");
        kfree(packet);
        return ERR_PTR(-ENOMEM);
    }

    packet->pkt_size = size;

    skb_put(packet->skb, size);
    memcpy(packet->skb->data, buf, size);
    INIT_LIST_HEAD(&packet->node);
    packet->id = id;

    return packet;
}

/* Drop every queued RX ticket and every pending RX packet/skb. */
void iwm_rx_free(struct iwm_priv *iwm)
{
    struct iwm_rx_ticket_node *ticket, *nt;
    struct iwm_rx_packet *packet, *np;
    int i;

    spin_lock(&iwm->ticket_lock);
    list_for_each_entry_safe(ticket, nt, &iwm->rx_tickets, node) {
        list_del(&ticket->node);
        iwm_rx_ticket_node_free(ticket);
    }
    spin_unlock(&iwm->ticket_lock);

    for (i = 0; i < IWM_RX_ID_HASH; i++) {
        spin_lock(&iwm->packet_lock[i]);
        list_for_each_entry_safe(packet, np, &iwm->rx_packets[i],
                                 node) {
            list_del(&packet->node);
            kfree_skb(packet->skb);
            kfree(packet);
        }
        spin_unlock(&iwm->packet_lock[i]);
    }
}

/*
 * RX ticket notification: queue RELEASE/DROP tickets and kick the RX
 * worker.  (Signature continues on the following line.)
 */
static int iwm_ntf_rx_ticket(struct iwm_priv *iwm, u8 *buf,
                             unsigned long buf_size, struct
iwm_wifi_cmd *cmd)
{
    struct iwm_umac_notif_rx_ticket *ntf_rx_ticket =
            (struct iwm_umac_notif_rx_ticket *)buf;
    struct iwm_rx_ticket *ticket =
            (struct iwm_rx_ticket *)ntf_rx_ticket->tickets;
    int i, schedule_rx = 0;

    for (i = 0; i < ntf_rx_ticket->num_tickets; i++) {
        struct iwm_rx_ticket_node *ticket_node;

        switch (le16_to_cpu(ticket->action)) {
        case IWM_RX_TICKET_RELEASE:
        case IWM_RX_TICKET_DROP:
            /* We can push the packet to the stack */
            ticket_node = iwm_rx_ticket_node_alloc(iwm, ticket);
            if (IS_ERR(ticket_node))
                return PTR_ERR(ticket_node);

            IWM_DBG_RX(iwm, DBG, "TICKET %s(%d)\n",
                       __le16_to_cpu(ticket->action) ==
                       IWM_RX_TICKET_RELEASE ?
                       "RELEASE" : "DROP",
                       ticket->id);
            spin_lock(&iwm->ticket_lock);
            list_add_tail(&ticket_node->node, &iwm->rx_tickets);
            spin_unlock(&iwm->ticket_lock);

            /*
             * We received an Rx ticket, most likely there's
             * a packet pending for it, it's not worth going
             * through the packet hash list to double check.
             * Let's just fire the rx worker..
             */
            schedule_rx = 1;
            break;

        default:
            IWM_ERR(iwm, "Invalid RX ticket action: 0x%x\n",
                    ticket->action);
        }

        ticket++;
    }

    if (schedule_rx)
        queue_work(iwm->rx_wq, &iwm->rx_worker);

    return 0;
}

/*
 * REPLY_RX_MPDU_CMD: copy the payload into a new packet node, hash it
 * by sequence number, and kick the RX worker to match it to a ticket.
 */
static int iwm_ntf_rx_packet(struct iwm_priv *iwm, u8 *buf,
                             unsigned long buf_size, struct iwm_wifi_cmd *cmd)
{
    struct iwm_umac_wifi_in_hdr *wifi_hdr;
    struct iwm_rx_packet *packet;
    u16 id, buf_offset;
    u32 packet_size;
    u8 id_hash;

    IWM_DBG_RX(iwm, DBG, "\n");

    wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf;
    id = le16_to_cpu(wifi_hdr->sw_hdr.cmd.seq_num);
    buf_offset = sizeof(struct iwm_umac_wifi_in_hdr);
    packet_size = buf_size - sizeof(struct iwm_umac_wifi_in_hdr);

    IWM_DBG_RX(iwm, DBG, "CMD:0x%x, seqnum: %d, packet size: %d\n",
               wifi_hdr->sw_hdr.cmd.cmd, id, packet_size);
    IWM_DBG_RX(iwm, DBG, "Packet id: %d\n", id);
    IWM_HEXDUMP(iwm, DBG, RX, "PACKET: ", buf + buf_offset, packet_size);

    packet = iwm_rx_packet_alloc(iwm, buf + buf_offset, packet_size, id);
    if (IS_ERR(packet))
        return PTR_ERR(packet);

    id_hash = IWM_RX_ID_GET_HASH(id);
    spin_lock(&iwm->packet_lock[id_hash]);
    list_add_tail(&packet->node, &iwm->rx_packets[id_hash]);
    spin_unlock(&iwm->packet_lock[id_hash]);

    /* We might (unlikely) have received the packet _after_ the ticket */
    queue_work(iwm->rx_wq, &iwm->rx_worker);

    return 0;
}

/* MLME handlers */

/* Association started: log it and wake anyone waiting on mlme_queue. */
static int iwm_mlme_assoc_start(struct iwm_priv *iwm, u8 *buf,
                                unsigned long buf_size,
                                struct iwm_wifi_cmd *cmd)
{
    struct iwm_umac_notif_assoc_start *start;

    start = (struct iwm_umac_notif_assoc_start *)buf;

    IWM_DBG_MLME(iwm, INFO, "Association with %pM Started, reason: %d\n",
                 start->bssid, le32_to_cpu(start->roam_reason));

    wake_up_interruptible(&iwm->mlme_queue);

    return 0;
}

/*
 * Returns 1 when the active profile is open-auth WEP (WEP40/WEP104
 * with identical ucast/mcast ciphers), 0 otherwise.
 */
static u8 iwm_is_open_wep_profile(struct iwm_priv *iwm)
{
    if ((iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_40 ||
         iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_104) &&
        (iwm->umac_profile->sec.ucast_cipher ==
         iwm->umac_profile->sec.mcast_cipher) &&
        (iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_OPEN))
        return 1;

    return 0;
}

/*
 * Association completed: on success record the BSSID/channel and tell
 * cfg80211 (connect result, roam, or IBSS join depending on state);
 * on failure tear the link down, optionally retrying shared WEP auth.
 */
static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
                                   unsigned long buf_size,
                                   struct iwm_wifi_cmd *cmd)
{
    struct wiphy *wiphy = iwm_to_wiphy(iwm);
    struct ieee80211_channel *chan;
    struct iwm_umac_notif_assoc_complete *complete =
            (struct iwm_umac_notif_assoc_complete *)buf;

    IWM_DBG_MLME(iwm, INFO, "Association with %pM completed, status: %d\n",
                 complete->bssid, complete->status);

    switch (le32_to_cpu(complete->status)) {
    case UMAC_ASSOC_COMPLETE_SUCCESS:
        chan = ieee80211_get_channel(wiphy,
                ieee80211_channel_to_frequency(complete->channel));
        if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) {
            /* Associated to a unallowed channel, disassociate. */
            __iwm_invalidate_mlme_profile(iwm);
            IWM_WARN(iwm, "Couldn't associate with %pM due to "
                     "channel %d is disabled. Check your local "
                     "regulatory setting.\n",
                     complete->bssid, complete->channel);
            goto failure;
        }

        set_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
        memcpy(iwm->bssid, complete->bssid, ETH_ALEN);
        iwm->channel = complete->channel;

        /* Internal roaming state, avoid notifying SME. */
        if (!test_and_clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status)
            && iwm->conf.mode == UMAC_MODE_BSS) {
            cancel_delayed_work(&iwm->disconnect);
            cfg80211_roamed(iwm_to_ndev(iwm),
                            complete->bssid,
                            iwm->req_ie, iwm->req_ie_len,
                            iwm->resp_ie, iwm->resp_ie_len,
                            GFP_KERNEL);
            break;
        }

        iwm_link_on(iwm);

        if (iwm->conf.mode == UMAC_MODE_IBSS)
            goto ibss;

        if (!test_bit(IWM_STATUS_RESETTING, &iwm->status))
            cfg80211_connect_result(iwm_to_ndev(iwm),
                                    complete->bssid,
                                    iwm->req_ie, iwm->req_ie_len,
                                    iwm->resp_ie, iwm->resp_ie_len,
                                    WLAN_STATUS_SUCCESS,
                                    GFP_KERNEL);
        else
            cfg80211_roamed(iwm_to_ndev(iwm),
                            complete->bssid,
                            iwm->req_ie, iwm->req_ie_len,
                            iwm->resp_ie, iwm->resp_ie_len,
                            GFP_KERNEL);
        break;
    case UMAC_ASSOC_COMPLETE_FAILURE:
 failure:
        clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
        memset(iwm->bssid, 0, ETH_ALEN);
        iwm->channel = 0;

        /* Internal roaming state, avoid notifying SME. */
        if (!test_and_clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status)
            && iwm->conf.mode == UMAC_MODE_BSS) {
            cancel_delayed_work(&iwm->disconnect);
            break;
        }

        iwm_link_off(iwm);

        if (iwm->conf.mode == UMAC_MODE_IBSS)
            goto ibss;

        if (!test_bit(IWM_STATUS_RESETTING, &iwm->status))
            if (!iwm_is_open_wep_profile(iwm)) {
                cfg80211_connect_result(iwm_to_ndev(iwm),
                                        complete->bssid,
                                        NULL, 0, NULL, 0,
                                        WLAN_STATUS_UNSPECIFIED_FAILURE,
                                        GFP_KERNEL);
            } else {
                /* Let's try shared WEP auth */
                IWM_ERR(iwm, "Trying WEP shared auth\n");
                schedule_work(&iwm->auth_retry_worker);
            }
        else
            cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0,
                                  GFP_KERNEL);
        break;
    default:
        break;
    }

    clear_bit(IWM_STATUS_RESETTING, &iwm->status);
    return 0;

 ibss:
    cfg80211_ibss_joined(iwm_to_ndev(iwm), iwm->bssid, GFP_KERNEL);
    clear_bit(IWM_STATUS_RESETTING, &iwm->status);
    return 0;
}

/*
 * Profile invalidated by the UMAC: report a connect failure to
 * cfg80211 if an SME connect was in flight, then clear association
 * state and take the link down.
 */
static int iwm_mlme_profile_invalidate(struct iwm_priv *iwm, u8 *buf,
                                       unsigned long buf_size,
                                       struct iwm_wifi_cmd *cmd)
{
    struct iwm_umac_notif_profile_invalidate *invalid;
    u32 reason;

    invalid = (struct iwm_umac_notif_profile_invalidate *)buf;
    reason = le32_to_cpu(invalid->reason);

    IWM_DBG_MLME(iwm, INFO, "Profile Invalidated. Reason: %d\n", reason);

    if (reason != UMAC_PROFILE_INVALID_REQUEST &&
        test_bit(IWM_STATUS_SME_CONNECTING, &iwm->status))
        cfg80211_connect_result(iwm_to_ndev(iwm), NULL, NULL, 0, NULL,
                                0, WLAN_STATUS_UNSPECIFIED_FAILURE,
                                GFP_KERNEL);

    clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status);
    clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);

    iwm->umac_profile_active = 0;
    memset(iwm->bssid, 0, ETH_ALEN);
    iwm->channel = 0;

    iwm_link_off(iwm);

    wake_up_interruptible(&iwm->mlme_queue);

    return 0;
}

#define IWM_DISCONNECT_INTERVAL (5 * HZ)

/* Connection terminated: schedule the delayed disconnect work. */
static int iwm_mlme_connection_terminated(struct iwm_priv *iwm, u8 *buf,
                                          unsigned long buf_size,
                                          struct iwm_wifi_cmd *cmd)
{
    IWM_DBG_MLME(iwm, DBG, "Connection terminated\n");

    schedule_delayed_work(&iwm->disconnect, IWM_DISCONNECT_INTERVAL);

    return 0;
}

/*
 * Scan complete: push collected BSS entries to cfg80211 and finish the
 * pending scan request.  (Statement continues on the following line.)
 */
static int iwm_mlme_scan_complete(struct iwm_priv *iwm, u8 *buf,
                                  unsigned long buf_size,
                                  struct iwm_wifi_cmd *cmd)
{
    int ret;
    struct iwm_umac_notif_scan_complete *scan_complete =
            (struct iwm_umac_notif_scan_complete *)buf;
    u32 result = le32_to_cpu(scan_complete->result);

    IWM_DBG_MLME(iwm, INFO, "type:0x%x result:0x%x seq:%d\n",
                 le32_to_cpu(scan_complete->type),
                 le32_to_cpu(scan_complete->result),
                 scan_complete->seq_num);

    if (!test_and_clear_bit(IWM_STATUS_SCANNING, &iwm->status)) {
        IWM_ERR(iwm, "Scan complete while device not scanning\n");
        return -EIO;
    }
    if (!iwm->scan_request)
        return 0;

    ret = iwm_cfg80211_inform_bss(iwm);

    cfg80211_scan_done(iwm->scan_request,
                       (result & UMAC_SCAN_RESULT_ABORTED) ?
1 : !!ret);
    iwm->scan_request = NULL;

    return ret;
}

/*
 * STA table change: add/modify, remove, or clear entries in the local
 * shadow of the UMAC station table.
 */
static int iwm_mlme_update_sta_table(struct iwm_priv *iwm, u8 *buf,
                                     unsigned long buf_size,
                                     struct iwm_wifi_cmd *cmd)
{
    struct iwm_umac_notif_sta_info *umac_sta =
            (struct iwm_umac_notif_sta_info *)buf;
    struct iwm_sta_info *sta;
    int i;

    switch (le32_to_cpu(umac_sta->opcode)) {
    case UMAC_OPCODE_ADD_MODIFY:
        sta = &iwm->sta_table[GET_VAL8(umac_sta->sta_id, LMAC_STA_ID)];

        IWM_DBG_MLME(iwm, INFO, "%s STA: ID = %d, Color = %d, "
                     "addr = %pM, qos = %d\n",
                     sta->valid ? "Modify" : "Add",
                     GET_VAL8(umac_sta->sta_id, LMAC_STA_ID),
                     GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR),
                     umac_sta->mac_addr,
                     umac_sta->flags & UMAC_STA_FLAG_QOS);

        sta->valid = 1;
        sta->qos = umac_sta->flags & UMAC_STA_FLAG_QOS;
        sta->color = GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR);
        memcpy(sta->addr, umac_sta->mac_addr, ETH_ALEN);
        break;
    case UMAC_OPCODE_REMOVE:
        IWM_DBG_MLME(iwm, INFO, "Remove STA: ID = %d, Color = %d, "
                     "addr = %pM\n",
                     GET_VAL8(umac_sta->sta_id, LMAC_STA_ID),
                     GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR),
                     umac_sta->mac_addr);

        sta = &iwm->sta_table[GET_VAL8(umac_sta->sta_id, LMAC_STA_ID)];

        /* Only invalidate when the stored MAC matches the request. */
        if (!memcmp(sta->addr, umac_sta->mac_addr, ETH_ALEN))
            sta->valid = 0;
        break;
    case UMAC_OPCODE_CLEAR_ALL:
        for (i = 0; i < IWM_STA_TABLE_NUM; i++)
            iwm->sta_table[i].valid = 0;
        break;
    default:
        break;
    }

    return 0;
}

/* Radio preemption by WiMax coexistence: report RF kill to cfg80211. */
static int iwm_mlme_medium_lost(struct iwm_priv *iwm, u8 *buf,
                                unsigned long buf_size,
                                struct iwm_wifi_cmd *cmd)
{
    struct wiphy *wiphy = iwm_to_wiphy(iwm);

    IWM_DBG_NTF(iwm, DBG, "WiFi/WiMax coexistence radio is OFF\n");

    wiphy_rfkill_set_hw_state(wiphy, true);

    return 0;
}

/*
 * BSS table change: replace (or create) the local entry matching the
 * UMAC table index and feed the embedded management frame to cfg80211.
 */
static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
                                     unsigned long buf_size,
                                     struct iwm_wifi_cmd *cmd)
{
    struct wiphy *wiphy = iwm_to_wiphy(iwm);
    struct ieee80211_mgmt *mgmt;
    struct iwm_umac_notif_bss_info *umac_bss =
            (struct iwm_umac_notif_bss_info *)buf;
    struct ieee80211_channel *channel;
    struct ieee80211_supported_band *band;
    struct iwm_bss_info *bss;
    s32 signal;
    int freq;
    u16 frame_len = le16_to_cpu(umac_bss->frame_len);
    size_t bss_len = sizeof(struct iwm_umac_notif_bss_info) + frame_len;

    mgmt = (struct ieee80211_mgmt *)(umac_bss->frame_buf);

    IWM_DBG_MLME(iwm, DBG, "New BSS info entry: %pM\n", mgmt->bssid);
    IWM_DBG_MLME(iwm, DBG, "\tType: 0x%x\n", le32_to_cpu(umac_bss->type));
    IWM_DBG_MLME(iwm, DBG, "\tTimestamp: %d\n",
                 le32_to_cpu(umac_bss->timestamp));
    IWM_DBG_MLME(iwm, DBG, "\tTable Index: %d\n",
                 le16_to_cpu(umac_bss->table_idx));
    IWM_DBG_MLME(iwm, DBG, "\tBand: %d\n", umac_bss->band);
    IWM_DBG_MLME(iwm, DBG, "\tChannel: %d\n", umac_bss->channel);
    IWM_DBG_MLME(iwm, DBG, "\tRSSI: %d\n", umac_bss->rssi);
    IWM_DBG_MLME(iwm, DBG, "\tFrame Length: %d\n", frame_len);

    list_for_each_entry(bss, &iwm->bss_list, node)
        if (bss->bss->table_idx == umac_bss->table_idx)
            break;

    if (&bss->node != &iwm->bss_list) {
        /* Remove the old BSS entry, we will add it back later. */
        list_del(&bss->node);
        kfree(bss->bss);
    } else {
        /* New BSS entry */
        bss = kzalloc(sizeof(struct iwm_bss_info), GFP_KERNEL);
        if (!bss) {
            IWM_ERR(iwm, "Couldn't allocate bss_info\n");
            return -ENOMEM;
        }
    }

    bss->bss = kzalloc(bss_len, GFP_KERNEL);
    if (!bss->bss) {
        kfree(bss);
        IWM_ERR(iwm, "Couldn't allocate bss\n");
        return -ENOMEM;
    }

    INIT_LIST_HEAD(&bss->node);
    memcpy(bss->bss, umac_bss, bss_len);

    if (umac_bss->band == UMAC_BAND_2GHZ)
        band = wiphy->bands[IEEE80211_BAND_2GHZ];
    else if (umac_bss->band == UMAC_BAND_5GHZ)
        band = wiphy->bands[IEEE80211_BAND_5GHZ];
    else {
        IWM_ERR(iwm, "Invalid band: %d\n", umac_bss->band);
        goto err;
    }

    freq = ieee80211_channel_to_frequency(umac_bss->channel);
    channel = ieee80211_get_channel(wiphy, freq);
    signal = umac_bss->rssi * 100;

    bss->cfg_bss = cfg80211_inform_bss_frame(wiphy, channel,
                                             mgmt, frame_len,
                                             signal, GFP_KERNEL);
    if (!bss->cfg_bss)
        goto err;

    list_add_tail(&bss->node, &iwm->bss_list);

    return 0;
 err:
    kfree(bss->bss);
    kfree(bss);

    return -EINVAL;
}

/*
 * BSS entries removed: free every local BSS entry whose table index
 * appears in the removal list.
 */
static int iwm_mlme_remove_bss(struct iwm_priv *iwm, u8 *buf,
                               unsigned long buf_size,
                               struct iwm_wifi_cmd *cmd)
{
    struct iwm_umac_notif_bss_removed *bss_rm =
            (struct iwm_umac_notif_bss_removed *)buf;
    struct iwm_bss_info *bss, *next;
    u16 table_idx;
    int i;

    for (i = 0; i < le32_to_cpu(bss_rm->count); i++) {
        table_idx = le16_to_cpu(bss_rm->entries[i]) &
                    IWM_BSS_REMOVE_INDEX_MSK;
        list_for_each_entry_safe(bss, next, &iwm->bss_list, node)
            if (bss->bss->table_idx == cpu_to_le16(table_idx)) {
                struct ieee80211_mgmt *mgmt;

                mgmt = (struct ieee80211_mgmt *)
                        (bss->bss->frame_buf);
                IWM_DBG_MLME(iwm, ERR, "BSS removed: %pM\n",
                             mgmt->bssid);
                list_del(&bss->node);
                kfree(bss->bss);
                kfree(bss);
            }
    }

    return 0;
}

/*
 * Management frame notification: capture (re)assoc request/response
 * information elements into iwm->req_ie / iwm->resp_ie for later
 * reporting to cfg80211.
 */
static int iwm_mlme_mgt_frame(struct iwm_priv *iwm, u8 *buf,
                              unsigned long buf_size,
                              struct iwm_wifi_cmd *cmd)
{
    struct iwm_umac_notif_mgt_frame *mgt_frame =
            (struct iwm_umac_notif_mgt_frame *)buf;
    struct ieee80211_mgmt *mgt = (struct ieee80211_mgmt *)mgt_frame->frame;

    IWM_HEXDUMP(iwm, DBG, MLME, "MGT: ", mgt_frame->frame,
                le16_to_cpu(mgt_frame->len));

    if (ieee80211_is_assoc_req(mgt->frame_control)) {
        iwm->req_ie_len = le16_to_cpu(mgt_frame->len)
                          - offsetof(struct ieee80211_mgmt,
                                     u.assoc_req.variable);
        kfree(iwm->req_ie);
        iwm->req_ie = kmemdup(mgt->u.assoc_req.variable,
                              iwm->req_ie_len, GFP_KERNEL);
    } else if (ieee80211_is_reassoc_req(mgt->frame_control)) {
        iwm->req_ie_len = le16_to_cpu(mgt_frame->len)
                          - offsetof(struct ieee80211_mgmt,
                                     u.reassoc_req.variable);
        kfree(iwm->req_ie);
        iwm->req_ie = kmemdup(mgt->u.reassoc_req.variable,
                              iwm->req_ie_len, GFP_KERNEL);
    } else if (ieee80211_is_assoc_resp(mgt->frame_control)) {
        iwm->resp_ie_len = le16_to_cpu(mgt_frame->len)
                           - offsetof(struct ieee80211_mgmt,
                                      u.assoc_resp.variable);
        kfree(iwm->resp_ie);
        iwm->resp_ie = kmemdup(mgt->u.assoc_resp.variable,
                               iwm->resp_ie_len, GFP_KERNEL);
    } else if (ieee80211_is_reassoc_resp(mgt->frame_control)) {
        iwm->resp_ie_len = le16_to_cpu(mgt_frame->len)
                           - offsetof(struct ieee80211_mgmt,
                                      u.reassoc_resp.variable);
        kfree(iwm->resp_ie);
        iwm->resp_ie = kmemdup(mgt->u.reassoc_resp.variable,
                               iwm->resp_ie_len, GFP_KERNEL);
    } else {
        IWM_ERR(iwm, "Unsupported management frame: 0x%x",
                le16_to_cpu(mgt->frame_control));
        return 0;
    }

    return 0;
}

/*
 * WIFI_IF_WRAPPER dispatcher: route each MLME notification status to
 * its dedicated handler.  (Continues on the following lines.)
 */
static int iwm_ntf_mlme(struct iwm_priv *iwm, u8 *buf,
                        unsigned long buf_size, struct iwm_wifi_cmd *cmd)
{
    struct iwm_umac_notif_wifi_if *notif =
            (struct iwm_umac_notif_wifi_if *)buf;

    switch (notif->status) {
    case WIFI_IF_NTFY_ASSOC_START:
        return iwm_mlme_assoc_start(iwm, buf, buf_size, cmd);
    case WIFI_IF_NTFY_ASSOC_COMPLETE:
        return iwm_mlme_assoc_complete(iwm, buf, buf_size, cmd);
    case WIFI_IF_NTFY_PROFILE_INVALIDATE_COMPLETE:
        return iwm_mlme_profile_invalidate(iwm, buf, buf_size, cmd);
    case WIFI_IF_NTFY_CONNECTION_TERMINATED:
        return iwm_mlme_connection_terminated(iwm, buf, buf_size, cmd);
    case WIFI_IF_NTFY_SCAN_COMPLETE:
        return iwm_mlme_scan_complete(iwm, buf, buf_size, cmd);
    case WIFI_IF_NTFY_STA_TABLE_CHANGE:
        return iwm_mlme_update_sta_table(iwm, buf, buf_size, cmd);
    case WIFI_IF_NTFY_EXTENDED_IE_REQUIRED:
        IWM_DBG_MLME(iwm, DBG, "Extended IE required\n");
        break;
    case WIFI_IF_NTFY_RADIO_PREEMPTION:
        return iwm_mlme_medium_lost(iwm, buf, buf_size, cmd);
    case WIFI_IF_NTFY_BSS_TRK_TABLE_CHANGED:
        return iwm_mlme_update_bss_table(iwm, buf, buf_size, cmd);
    case WIFI_IF_NTFY_BSS_TRK_ENTRIES_REMOVED:
        return iwm_mlme_remove_bss(iwm, buf, buf_size, cmd);
        break;
    case WIFI_IF_NTFY_MGMT_FRAME:
        return iwm_mlme_mgt_frame(iwm, buf, buf_size, cmd);
    case WIFI_DBG_IF_NTFY_SCAN_SUPER_JOB_START:
    case WIFI_DBG_IF_NTFY_SCAN_SUPER_JOB_COMPLETE:
    case WIFI_DBG_IF_NTFY_SCAN_CHANNEL_START:
    case WIFI_DBG_IF_NTFY_SCAN_CHANNEL_RESULT:
    case WIFI_DBG_IF_NTFY_SCAN_MINI_JOB_START:
    case WIFI_DBG_IF_NTFY_SCAN_MINI_JOB_COMPLETE:
    case WIFI_DBG_IF_NTFY_CNCT_ATC_START:
    case WIFI_DBG_IF_NTFY_COEX_NOTIFICATION:
    case WIFI_DBG_IF_NTFY_COEX_HANDLE_ENVELOP:
    case WIFI_DBG_IF_NTFY_COEX_HANDLE_RELEASE_ENVELOP:
        IWM_DBG_MLME(iwm, DBG, "MLME debug notification: 0x%x\n",
                     notif->status);
        break;
    default:
        IWM_ERR(iwm, "Unhandled notification: 0x%x\n",
                notif->status);
break;
    }

    return 0;
}

#define IWM_STATS_UPDATE_INTERVAL (2 * HZ)

/*
 * Statistics notification: derive the current rate and TX power,
 * refresh wireless-extensions statistics, then rearm the periodic
 * statistics request and the watchdog timer.
 */
static int iwm_ntf_statistics(struct iwm_priv *iwm, u8 *buf,
                              unsigned long buf_size,
                              struct iwm_wifi_cmd *cmd)
{
    struct iwm_umac_notif_stats *stats = (struct iwm_umac_notif_stats *)buf;
    struct iw_statistics *wstats = &iwm->wstats;
    u16 max_rate = 0;
    int i;

    IWM_DBG_MLME(iwm, DBG, "Statistics notification received\n");

    if (test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
        for (i = 0; i < UMAC_NTF_RATE_SAMPLE_NR; i++) {
            max_rate = max_t(u16, max_rate,
                             max(le16_to_cpu(stats->tx_rate[i]),
                                 le16_to_cpu(stats->rx_rate[i])));
        }
        /* UMAC passes rate info multiplies by 2 */
        iwm->rate = max_rate >> 1;
    }
    iwm->txpower = le32_to_cpu(stats->tx_power);

    wstats->status = 0;

    wstats->discard.nwid = le32_to_cpu(stats->rx_drop_other_bssid);
    wstats->discard.code = le32_to_cpu(stats->rx_drop_decode);
    wstats->discard.fragment = le32_to_cpu(stats->rx_drop_reassembly);
    wstats->discard.retries = le32_to_cpu(stats->tx_drop_max_retry);

    wstats->miss.beacon = le32_to_cpu(stats->missed_beacons);

    /* according to cfg80211 */
    if (stats->rssi_dbm < -110)
        wstats->qual.qual = 0;
    else if (stats->rssi_dbm > -40)
        wstats->qual.qual = 70;
    else
        wstats->qual.qual = stats->rssi_dbm + 110;

    wstats->qual.level = stats->rssi_dbm;
    wstats->qual.noise = stats->noise_dbm;
    wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;

    schedule_delayed_work(&iwm->stats_request, IWM_STATS_UPDATE_INTERVAL);

    mod_timer(&iwm->watchdog, round_jiffies(jiffies + IWM_WATCHDOG_PERIOD));

    return 0;
}

/*
 * EEPROM proxy response: copy read data into the cached EEPROM image
 * after a bounds check; write requests are not supported.
 */
static int iwm_ntf_eeprom_proxy(struct iwm_priv *iwm, u8 *buf,
                                unsigned long buf_size,
                                struct iwm_wifi_cmd *cmd)
{
    struct iwm_umac_cmd_eeprom_proxy *eeprom_proxy =
            (struct iwm_umac_cmd_eeprom_proxy *)
            (buf + sizeof(struct iwm_umac_wifi_in_hdr));
    struct iwm_umac_cmd_eeprom_proxy_hdr *hdr = &eeprom_proxy->hdr;
    u32 hdr_offset = le32_to_cpu(hdr->offset);
    u32 hdr_len = le32_to_cpu(hdr->len);
    u32 hdr_type = le32_to_cpu(hdr->type);

    IWM_DBG_NTF(iwm, DBG, "type: 0x%x, len: %d, offset: 0x%x\n",
                hdr_type, hdr_len, hdr_offset);

    if ((hdr_offset + hdr_len) > IWM_EEPROM_LEN)
        return -EINVAL;

    switch (hdr_type) {
    case IWM_UMAC_CMD_EEPROM_TYPE_READ:
        memcpy(iwm->eeprom + hdr_offset, eeprom_proxy->buf, hdr_len);
        break;
    case IWM_UMAC_CMD_EEPROM_TYPE_WRITE:
    default:
        return -ENOTSUPP;
    }

    return 0;
}

/*
 * Channel list response: mark 2.4GHz and 5GHz channels disabled or
 * no-IBSS according to the per-band bitmasks from the UMAC.
 */
static int iwm_ntf_channel_info_list(struct iwm_priv *iwm, u8 *buf,
                                     unsigned long buf_size,
                                     struct iwm_wifi_cmd *cmd)
{
    struct iwm_umac_cmd_get_channel_list *ch_list =
            (struct iwm_umac_cmd_get_channel_list *)
            (buf + sizeof(struct iwm_umac_wifi_in_hdr));
    struct wiphy *wiphy = iwm_to_wiphy(iwm);
    struct ieee80211_supported_band *band;
    int i;

    band = wiphy->bands[IEEE80211_BAND_2GHZ];

    for (i = 0; i < band->n_channels; i++) {
        unsigned long ch_mask_0 =
                le32_to_cpu(ch_list->ch[0].channels_mask);
        unsigned long ch_mask_2 =
                le32_to_cpu(ch_list->ch[2].channels_mask);

        if (!test_bit(i, &ch_mask_0))
            band->channels[i].flags |= IEEE80211_CHAN_DISABLED;

        if (!test_bit(i, &ch_mask_2))
            band->channels[i].flags |= IEEE80211_CHAN_NO_IBSS;
    }

    band = wiphy->bands[IEEE80211_BAND_5GHZ];

    /* 5GHz masks are 32 bits wide, so cap the iteration at 32. */
    for (i = 0; i < min(band->n_channels, 32); i++) {
        unsigned long ch_mask_1 =
                le32_to_cpu(ch_list->ch[1].channels_mask);
        unsigned long ch_mask_3 =
                le32_to_cpu(ch_list->ch[3].channels_mask);

        if (!test_bit(i, &ch_mask_1))
            band->channels[i].flags |= IEEE80211_CHAN_DISABLED;

        if (!test_bit(i, &ch_mask_3))
            band->channels[i].flags |= IEEE80211_CHAN_NO_IBSS;
    }

    return 0;
}

/*
 * Stop/resume TX notification: update the per-TID stopped state for
 * the given station; on resume, requeue stopped SKBs and kick the TX
 * worker; on stop, acknowledge the UMAC.
 */
static int iwm_ntf_stop_resume_tx(struct iwm_priv *iwm, u8 *buf,
                                  unsigned long buf_size,
                                  struct iwm_wifi_cmd *cmd)
{
    struct iwm_umac_notif_stop_resume_tx *stp_res_tx =
            (struct iwm_umac_notif_stop_resume_tx *)buf;
    struct iwm_sta_info *sta_info;
    struct iwm_tid_info *tid_info;
    u8 sta_id = STA_ID_N_COLOR_ID(stp_res_tx->sta_id);
    u16 tid_msk = le16_to_cpu(stp_res_tx->stop_resume_tid_msk);
    int bit, ret = 0;
    bool stop = false;

    IWM_DBG_NTF(iwm, DBG, "stop/resume notification:\n"
                "\tflags: 0x%x\n"
                "\tSTA id: %d\n"
                "\tTID bitmask: 0x%x\n",
                stp_res_tx->flags, stp_res_tx->sta_id,
                stp_res_tx->stop_resume_tid_msk);

    if (stp_res_tx->flags & UMAC_STOP_TX_FLAG)
        stop = true;

    sta_info = &iwm->sta_table[sta_id];
    if (!sta_info->valid) {
        IWM_ERR(iwm, "Stoping an invalid STA: %d %d\n",
                sta_id, stp_res_tx->sta_id);
        return -EINVAL;
    }

    for_each_set_bit(bit, (unsigned long *)&tid_msk, IWM_UMAC_TID_NR) {
        tid_info = &sta_info->tid_info[bit];

        mutex_lock(&tid_info->mutex);
        tid_info->stopped = stop;
        mutex_unlock(&tid_info->mutex);

        if (!stop) {
            struct iwm_tx_queue *txq;
            int queue = iwm_tid_to_queue(bit);

            if (queue < 0)
                continue;

            txq = &iwm->txq[queue];
            /*
             * If we resume, we have to move our SKBs
             * back to the tx queue and queue some work.
             */
            spin_lock_bh(&txq->lock);
            skb_queue_splice_init(&txq->queue, &txq->stopped_queue);
            spin_unlock_bh(&txq->lock);

            queue_work(txq->wq, &txq->worker);
        }
    }

    /* We send an ACK only for the stop case */
    if (stop)
        ret = iwm_send_umac_stop_resume_tx(iwm, stp_res_tx);

    return ret;
}

/*
 * WIFI_IF_WRAPPER command delivery confirmation: record the OID in the
 * notification bitmap and wake any waiter; a SET_PROFILE confirmation
 * also marks the profile active.
 */
static int iwm_ntf_wifi_if_wrapper(struct iwm_priv *iwm, u8 *buf,
                                   unsigned long buf_size,
                                   struct iwm_wifi_cmd *cmd)
{
    struct iwm_umac_wifi_if *hdr;

    if (cmd == NULL) {
        IWM_ERR(iwm, "Couldn't find expected wifi command\n");
        return -EINVAL;
    }

    hdr = (struct iwm_umac_wifi_if *)cmd->buf.payload;

    IWM_DBG_NTF(iwm, DBG, "WIFI_IF_WRAPPER cmd is delivered to UMAC: "
                "oid is 0x%x\n", hdr->oid);

    if (hdr->oid <= WIFI_IF_NTFY_MAX) {
        set_bit(hdr->oid, &iwm->wifi_ntfy[0]);
        wake_up_interruptible(&iwm->wifi_ntfy_queue);
    } else
        return -EINVAL;

    switch (hdr->oid) {
    case UMAC_WIFI_IF_CMD_SET_PROFILE:
        iwm->umac_profile_active = 1;
        break;
    default:
        break;
    }

    return 0;
}

#define CT_KILL_DELAY (30 * HZ)

/*
 * Card state notification: handle HW RF kill and CT (thermal) kill
 * flags.  (Statement continues on the following line.)
 */
static int iwm_ntf_card_state(struct iwm_priv *iwm, u8 *buf,
                              unsigned long buf_size,
                              struct iwm_wifi_cmd *cmd)
{
    struct wiphy *wiphy = iwm_to_wiphy(iwm);
    struct iwm_lmac_card_state *state = (struct iwm_lmac_card_state *)
            (buf + sizeof(struct iwm_umac_wifi_in_hdr));
    u32 flags = le32_to_cpu(state->flags);

    IWM_INFO(iwm, "HW RF Kill %s, CT Kill %s\n", flags &
IWM_CARD_STATE_HW_DISABLED ? "ON" : "OFF",
             flags & IWM_CARD_STATE_CTKILL_DISABLED ? "ON" : "OFF");

    if (flags & IWM_CARD_STATE_CTKILL_DISABLED) {
        /*
         * We got a CTKILL event: We bring the interface down in
         * order to cool the device down, and try to bring it up
         * 30 seconds later. If it's still too hot, we'll go through
         * this code path again.
         */
        cancel_delayed_work_sync(&iwm->ct_kill_delay);
        schedule_delayed_work(&iwm->ct_kill_delay, CT_KILL_DELAY);
    }

    wiphy_rfkill_set_hw_state(wiphy, flags &
                              (IWM_CARD_STATE_HW_DISABLED |
                               IWM_CARD_STATE_CTKILL_DISABLED));

    return 0;
}

/*
 * Handle one UMAC wifi frame: trace it, validate source and byte
 * count, then either wake a synchronous waiter via iwm_notif_send()
 * or fall through to iwm_rx_handle_resp().
 */
static int iwm_rx_handle_wifi(struct iwm_priv *iwm, u8 *buf,
                              unsigned long buf_size)
{
    struct iwm_umac_wifi_in_hdr *wifi_hdr;
    struct iwm_wifi_cmd *cmd;
    u8 source, cmd_id;
    u16 seq_num;
    u32 count;

    wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf;
    cmd_id = wifi_hdr->sw_hdr.cmd.cmd;
    source = GET_VAL32(wifi_hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
    if (source >= IWM_SRC_NUM) {
        IWM_CRIT(iwm, "invalid source %d\n", source);
        return -EINVAL;
    }

    if (cmd_id == REPLY_RX_MPDU_CMD)
        trace_iwm_rx_packet(iwm, buf, buf_size);
    else if ((cmd_id == UMAC_NOTIFY_OPCODE_RX_TICKET) &&
             (source == UMAC_HDI_IN_SOURCE_FW))
        trace_iwm_rx_ticket(iwm, buf, buf_size);
    else
        trace_iwm_rx_wifi_cmd(iwm, wifi_hdr);

    count = GET_VAL32(wifi_hdr->sw_hdr.meta_data, UMAC_FW_CMD_BYTE_COUNT);
    count += sizeof(struct iwm_umac_wifi_in_hdr) -
             sizeof(struct iwm_dev_cmd_hdr);
    if (count > buf_size) {
        IWM_CRIT(iwm, "count %d, buf size:%ld\n", count, buf_size);
        return -EINVAL;
    }

    seq_num = le16_to_cpu(wifi_hdr->sw_hdr.cmd.seq_num);

    IWM_DBG_RX(iwm, DBG, "CMD:0x%x, source: 0x%x, seqnum: %d\n",
               cmd_id, source, seq_num);

    /*
     * If this is a response to a previously sent command, there must
     * be a pending command for this sequence number.
     */
    cmd = iwm_get_pending_wifi_cmd(iwm, seq_num);

    /* Notify the caller only for sync commands. */
    switch (source) {
    case UMAC_HDI_IN_SOURCE_FHRX:
        if (iwm->lmac_handlers[cmd_id] &&
            test_bit(cmd_id, &iwm->lmac_handler_map[0]))
            return iwm_notif_send(iwm, cmd, cmd_id, source,
                                  buf, count);
        break;
    case UMAC_HDI_IN_SOURCE_FW:
        if (iwm->umac_handlers[cmd_id] &&
            test_bit(cmd_id, &iwm->umac_handler_map[0]))
            return iwm_notif_send(iwm, cmd, cmd_id, source,
                                  buf, count);
        break;
    case UMAC_HDI_IN_SOURCE_UDMA:
        break;
    }

    return iwm_rx_handle_resp(iwm, buf, count, cmd);
}

/*
 * Dispatch a wifi response/notification to the registered LMAC or
 * UMAC handler for its command id, then free the pending command.
 */
int iwm_rx_handle_resp(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size,
                       struct iwm_wifi_cmd *cmd)
{
    u8 source, cmd_id;
    struct iwm_umac_wifi_in_hdr *wifi_hdr;
    int ret = 0;

    wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf;
    cmd_id = wifi_hdr->sw_hdr.cmd.cmd;

    source = GET_VAL32(wifi_hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);

    IWM_DBG_RX(iwm, DBG, "CMD:0x%x, source: 0x%x\n", cmd_id, source);

    switch (source) {
    case UMAC_HDI_IN_SOURCE_FHRX:
        if (iwm->lmac_handlers[cmd_id])
            ret = iwm->lmac_handlers[cmd_id]
                    (iwm, buf, buf_size, cmd);
        break;
    case UMAC_HDI_IN_SOURCE_FW:
        if (iwm->umac_handlers[cmd_id])
            ret = iwm->umac_handlers[cmd_id]
                    (iwm, buf, buf_size, cmd);
        break;
    case UMAC_HDI_IN_SOURCE_UDMA:
        ret = -EINVAL;
        break;
    }

    kfree(cmd);

    return ret;
}

/*
 * Handle a non-wifi answer by copying it into the matching pending
 * command (by HW sequence number) and waking its waiter.
 */
static int iwm_rx_handle_nonwifi(struct iwm_priv *iwm, u8 *buf,
                                 unsigned long buf_size)
{
    u8 seq_num;
    struct iwm_udma_in_hdr *hdr = (struct iwm_udma_in_hdr *)buf;
    struct iwm_nonwifi_cmd *cmd;

    trace_iwm_rx_nonwifi_cmd(iwm, buf, buf_size);
    seq_num = GET_VAL32(hdr->cmd, UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM);

    /*
     * We received a non wifi answer.
     * Let's check if there's a pending command for it, and if so
     * replace the command payload with the buffer, and then wake the
     * callers up.
     * That means we only support synchronised non wifi command response
     * schemes.
     */
    list_for_each_entry(cmd, &iwm->nonwifi_pending_cmd, pending)
        if (cmd->seq_num == seq_num) {
            cmd->resp_received = 1;
            cmd->buf.len = buf_size;
            memcpy(cmd->buf.hdr, buf, buf_size);
            wake_up_interruptible(&iwm->nonwifi_queue);
        }

    return 0;
}

/*
 * Split one bus transfer into its constituent UDMA frames and route
 * each to the wifi or non-wifi handler by opcode.
 */
static int iwm_rx_handle_umac(struct iwm_priv *iwm, u8 *buf,
                              unsigned long buf_size)
{
    int ret = 0;
    u8 op_code;
    unsigned long buf_offset = 0;
    struct iwm_udma_in_hdr *hdr;

    /*
     * To allow for a more efficient bus usage, UMAC
     * messages are encapsulated into UDMA ones. This
     * way we can have several UMAC messages in one bus
     * transfer.
     * A UDMA frame size is always aligned on 16 bytes,
     * and a UDMA frame must not start with a UMAC_PAD_TERMINAL
     * word. This is how we parse a bus frame into several
     * UDMA ones.
     */
    while (buf_offset < buf_size) {

        hdr = (struct iwm_udma_in_hdr *)(buf + buf_offset);

        if (iwm_rx_check_udma_hdr(hdr) < 0) {
            IWM_DBG_RX(iwm, DBG, "End of frame\n");
            break;
        }

        op_code = GET_VAL32(hdr->cmd, UMAC_HDI_IN_CMD_OPCODE);

        IWM_DBG_RX(iwm, DBG, "Op code: 0x%x\n", op_code);

        if (op_code == UMAC_HDI_IN_OPCODE_WIFI) {
            ret |= iwm_rx_handle_wifi(iwm, buf + buf_offset,
                                      buf_size - buf_offset);
        } else if (op_code < UMAC_HDI_IN_OPCODE_NONWIFI_MAX) {
            if (GET_VAL32(hdr->cmd,
                          UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG) !=
                UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG) {
                IWM_ERR(iwm, "Incorrect hw signature\n");
                return -EINVAL;
            }
            ret |= iwm_rx_handle_nonwifi(iwm, buf + buf_offset,
                                         buf_size - buf_offset);
        } else {
            IWM_ERR(iwm, "Invalid RX opcode: 0x%x\n", op_code);
            ret |= -EINVAL;
        }

        buf_offset += iwm_rx_resp_size(hdr);
    }

    return ret;
}

/*
 * Top-level RX entry point: intercept reboot/ack barkers, otherwise
 * hand the buffer to the UMAC frame parser.
 */
int iwm_rx_handle(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size)
{
    struct iwm_udma_in_hdr *hdr;

    hdr = (struct iwm_udma_in_hdr *)buf;

    switch (le32_to_cpu(hdr->cmd)) {
    case UMAC_REBOOT_BARKER:
        if (test_bit(IWM_STATUS_READY, &iwm->status)) {
            IWM_ERR(iwm, "Unexpected BARKER\n");

            schedule_work(&iwm->reset_worker);

            return 0;
        }

        return iwm_notif_send(iwm, NULL, IWM_BARKER_REBOOT_NOTIFICATION,
                              IWM_SRC_UDMA, buf, buf_size);
    case UMAC_ACK_BARKER:
        return iwm_notif_send(iwm, NULL, IWM_ACK_BARKER_NOTIFICATION,
                              IWM_SRC_UDMA, NULL, 0);
    default:
        IWM_DBG_RX(iwm, DBG, "Received cmd: 0x%x\n", hdr->cmd);
        return iwm_rx_handle_umac(iwm, buf, buf_size);
    }

    return 0;
}

/* UMAC notification dispatch table, indexed by command/notify opcode. */
static const iwm_handler iwm_umac_handlers[] =
{
    [UMAC_NOTIFY_OPCODE_ERROR]            = iwm_ntf_error,
    [UMAC_NOTIFY_OPCODE_ALIVE]            = iwm_ntf_umac_alive,
    [UMAC_NOTIFY_OPCODE_INIT_COMPLETE]    = iwm_ntf_init_complete,
    [UMAC_NOTIFY_OPCODE_WIFI_CORE_STATUS] = iwm_ntf_wifi_status,
    [UMAC_NOTIFY_OPCODE_WIFI_IF_WRAPPER]  = iwm_ntf_mlme,
    [UMAC_NOTIFY_OPCODE_PAGE_DEALLOC]     = iwm_ntf_tx_credit_update,
    [UMAC_NOTIFY_OPCODE_RX_TICKET]        = iwm_ntf_rx_ticket,
    [UMAC_CMD_OPCODE_RESET]               = iwm_ntf_umac_reset,
    [UMAC_NOTIFY_OPCODE_STATS]            = iwm_ntf_statistics,
    [UMAC_CMD_OPCODE_EEPROM_PROXY]        = iwm_ntf_eeprom_proxy,
    [UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST]  = iwm_ntf_channel_info_list,
    [UMAC_CMD_OPCODE_STOP_RESUME_STA_TX]  = iwm_ntf_stop_resume_tx,
    [REPLY_RX_MPDU_CMD]                   = iwm_ntf_rx_packet,
    [UMAC_CMD_OPCODE_WIFI_IF_WRAPPER]     = iwm_ntf_wifi_if_wrapper,
};

/* LMAC notification dispatch table, indexed by reply opcode. */
static const iwm_handler iwm_lmac_handlers[] =
{
    [REPLY_TX]                            = iwm_ntf_tx,
    [REPLY_ALIVE]                         = iwm_ntf_lmac_version,
    [CALIBRATION_RES_NOTIFICATION]        = iwm_ntf_calib_res,
    [CALIBRATION_COMPLETE_NOTIFICATION]   = iwm_ntf_calib_complete,
    [CALIBRATION_CFG_CMD]                 = iwm_ntf_calib_cfg,
    [REPLY_RX_MPDU_CMD]                   = iwm_ntf_rx_packet,
    [CARD_STATE_NOTIFICATION]             = iwm_ntf_card_state,
};

/* Install the UMAC and LMAC handler tables into the device context. */
void iwm_rx_setup_handlers(struct iwm_priv *iwm)
{
    iwm->umac_handlers = (iwm_handler *) iwm_umac_handlers;
    iwm->lmac_handlers = (iwm_handler *) iwm_lmac_handlers;
}

/*
 * For protected frames, strip the IV that sits between the 802.11
 * header and the payload by shifting the header forward over it.
 */
static void iwm_remove_iv(struct sk_buff *skb, u32 hdr_total_len)
{
    struct ieee80211_hdr *hdr;
    unsigned int hdr_len;

    hdr = (struct ieee80211_hdr *)skb->data;

    if (!ieee80211_has_protected(hdr->frame_control))
        return;

    hdr_len = ieee80211_hdrlen(hdr->frame_control);
    if (hdr_total_len <= hdr_len)
        return;

    memmove(skb->data + (hdr_total_len - hdr_len), skb->data, hdr_len);
    skb_pull(skb, (hdr_total_len - hdr_len));
}

/*
 * Trim a received packet according to its RX ticket: drop the MPDU
 * header, account for padding, remove the IV, and handle QoS padding.
 * (Definition continues past this chunk.)
 */
static void iwm_rx_adjust_packet(struct iwm_priv *iwm,
                                 struct iwm_rx_packet *packet,
                                 struct iwm_rx_ticket_node *ticket_node)
{
    u32 payload_offset = 0, payload_len;
    struct iwm_rx_ticket *ticket = ticket_node->ticket;
    struct iwm_rx_mpdu_hdr *mpdu_hdr;
    struct ieee80211_hdr *hdr;

    mpdu_hdr = (struct iwm_rx_mpdu_hdr *)packet->skb->data;
    payload_offset += sizeof(struct iwm_rx_mpdu_hdr);
    /* Padding is 0 or 2 bytes */
    payload_len = le16_to_cpu(mpdu_hdr->len) +
                  (le16_to_cpu(ticket->flags) & IWM_RX_TICKET_PAD_SIZE_MSK);
    payload_len -= ticket->tail_len;

    IWM_DBG_RX(iwm, DBG, "Packet adjusted, len:%d, offset:%d, "
               "ticket offset:%d ticket tail len:%d\n",
               payload_len, payload_offset,
               ticket->payload_offset, ticket->tail_len);

    IWM_HEXDUMP(iwm, DBG, RX, "RAW: ", packet->skb->data, packet->skb->len);

    skb_pull(packet->skb, payload_offset);
    skb_trim(packet->skb, payload_len);

    iwm_remove_iv(packet->skb, ticket->payload_offset);

    hdr = (struct ieee80211_hdr *) packet->skb->data;
    if (ieee80211_is_data_qos(hdr->frame_control)) {
        /* UMAC handed QOS_DATA frame with 2 padding bytes appended
         * to the qos_ctl field in IEEE 802.11 headers.
*/ memmove(packet->skb->data + IEEE80211_QOS_CTL_LEN + 2, packet->skb->data, ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN); hdr = (struct ieee80211_hdr *) skb_pull(packet->skb, IEEE80211_QOS_CTL_LEN + 2); hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA); } IWM_HEXDUMP(iwm, DBG, RX, "ADJUSTED: ", packet->skb->data, packet->skb->len); } static void classify8023(struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; if (ieee80211_is_data_qos(hdr->frame_control)) { u8 *qc = ieee80211_get_qos_ctl(hdr); /* frame has qos control */ skb->priority = *qc & IEEE80211_QOS_CTL_TID_MASK; } else { skb->priority = 0; } } static void iwm_rx_process_amsdu(struct iwm_priv *iwm, struct sk_buff *skb) { struct wireless_dev *wdev = iwm_to_wdev(iwm); struct net_device *ndev = iwm_to_ndev(iwm); struct sk_buff_head list; struct sk_buff *frame; IWM_HEXDUMP(iwm, DBG, RX, "A-MSDU: ", skb->data, skb->len); __skb_queue_head_init(&list); ieee80211_amsdu_to_8023s(skb, &list, ndev->dev_addr, wdev->iftype, 0); while ((frame = __skb_dequeue(&list))) { ndev->stats.rx_packets++; ndev->stats.rx_bytes += frame->len; frame->protocol = eth_type_trans(frame, ndev); frame->ip_summed = CHECKSUM_NONE; memset(frame->cb, 0, sizeof(frame->cb)); if (netif_rx_ni(frame) == NET_RX_DROP) { IWM_ERR(iwm, "Packet dropped\n"); ndev->stats.rx_dropped++; } } } static void iwm_rx_process_packet(struct iwm_priv *iwm, struct iwm_rx_packet *packet, struct iwm_rx_ticket_node *ticket_node) { int ret; struct sk_buff *skb = packet->skb; struct wireless_dev *wdev = iwm_to_wdev(iwm); struct net_device *ndev = iwm_to_ndev(iwm); IWM_DBG_RX(iwm, DBG, "Processing packet ID %d\n", packet->id); switch (le16_to_cpu(ticket_node->ticket->action)) { case IWM_RX_TICKET_RELEASE: IWM_DBG_RX(iwm, DBG, "RELEASE packet\n"); iwm_rx_adjust_packet(iwm, packet, ticket_node); skb->dev = iwm_to_ndev(iwm); classify8023(skb); if (le16_to_cpu(ticket_node->ticket->flags) & IWM_RX_TICKET_AMSDU_MSK) { 
iwm_rx_process_amsdu(iwm, skb); break; } ret = ieee80211_data_to_8023(skb, ndev->dev_addr, wdev->iftype); if (ret < 0) { IWM_DBG_RX(iwm, DBG, "Couldn't convert 802.11 header - " "%d\n", ret); kfree_skb(packet->skb); break; } IWM_HEXDUMP(iwm, DBG, RX, "802.3: ", skb->data, skb->len); ndev->stats.rx_packets++; ndev->stats.rx_bytes += skb->len; skb->protocol = eth_type_trans(skb, ndev); skb->ip_summed = CHECKSUM_NONE; memset(skb->cb, 0, sizeof(skb->cb)); if (netif_rx_ni(skb) == NET_RX_DROP) { IWM_ERR(iwm, "Packet dropped\n"); ndev->stats.rx_dropped++; } break; case IWM_RX_TICKET_DROP: IWM_DBG_RX(iwm, DBG, "DROP packet: 0x%x\n", le16_to_cpu(ticket_node->ticket->flags)); kfree_skb(packet->skb); break; default: IWM_ERR(iwm, "Unknown ticket action: %d\n", le16_to_cpu(ticket_node->ticket->action)); kfree_skb(packet->skb); } kfree(packet); iwm_rx_ticket_node_free(ticket_node); } /* * Rx data processing: * * We're receiving Rx packet from the LMAC, and Rx ticket from * the UMAC. * To forward a target data packet upstream (i.e. to the * kernel network stack), we must have received an Rx ticket * that tells us we're allowed to release this packet (ticket * action is IWM_RX_TICKET_RELEASE). The Rx ticket also indicates, * among other things, where valid data actually starts in the Rx * packet. */ void iwm_rx_worker(struct work_struct *work) { struct iwm_priv *iwm; struct iwm_rx_ticket_node *ticket, *next; iwm = container_of(work, struct iwm_priv, rx_worker); /* * We go through the tickets list and if there is a pending * packet for it, we push it upstream. * We stop whenever a ticket is missing its packet, as we're * supposed to send the packets in order. 
*/ spin_lock(&iwm->ticket_lock); list_for_each_entry_safe(ticket, next, &iwm->rx_tickets, node) { struct iwm_rx_packet *packet = iwm_rx_packet_get(iwm, le16_to_cpu(ticket->ticket->id)); if (!packet) { IWM_DBG_RX(iwm, DBG, "Skip rx_work: Wait for ticket %d " "to be handled first\n", le16_to_cpu(ticket->ticket->id)); break; } list_del(&ticket->node); iwm_rx_process_packet(iwm, packet, ticket); } spin_unlock(&iwm->ticket_lock); }
gpl-2.0
adeepv/android-kernel-zte-v9a
drivers/md/raid0.c
757
19541
/* raid0.c : Multiple Devices driver for Linux Copyright (C) 1994-96 Marc ZYNGIER <zyngier@ufr-info-p7.ibp.fr> or <maz@gloups.fdn.fr> Copyright (C) 1999, 2000 Ingo Molnar, Red Hat RAID-0 management functions. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. You should have received a copy of the GNU General Public License (for example /usr/src/linux/COPYING); if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/blkdev.h> #include <linux/seq_file.h> #include <linux/slab.h> #include "md.h" #include "raid0.h" #include "raid5.h" static void raid0_unplug(struct request_queue *q) { mddev_t *mddev = q->queuedata; raid0_conf_t *conf = mddev->private; mdk_rdev_t **devlist = conf->devlist; int raid_disks = conf->strip_zone[0].nb_dev; int i; for (i=0; i < raid_disks; i++) { struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev); blk_unplug(r_queue); } } static int raid0_congested(void *data, int bits) { mddev_t *mddev = data; raid0_conf_t *conf = mddev->private; mdk_rdev_t **devlist = conf->devlist; int raid_disks = conf->strip_zone[0].nb_dev; int i, ret = 0; if (mddev_congested(mddev, bits)) return 1; for (i = 0; i < raid_disks && !ret ; i++) { struct request_queue *q = bdev_get_queue(devlist[i]->bdev); ret |= bdi_congested(&q->backing_dev_info, bits); } return ret; } /* * inform the user of the raid configuration */ static void dump_zones(mddev_t *mddev) { int j, k, h; sector_t zone_size = 0; sector_t zone_start = 0; char b[BDEVNAME_SIZE]; raid0_conf_t *conf = mddev->private; int raid_disks = conf->strip_zone[0].nb_dev; printk(KERN_INFO "******* %s configuration *********\n", mdname(mddev)); h = 0; for (j = 0; j < conf->nr_strip_zones; j++) { printk(KERN_INFO "zone%d=[", j); for (k = 0; k < conf->strip_zone[j].nb_dev; k++) 
printk(KERN_CONT "%s/", bdevname(conf->devlist[j*raid_disks + k]->bdev, b)); printk(KERN_CONT "]\n"); zone_size = conf->strip_zone[j].zone_end - zone_start; printk(KERN_INFO " zone offset=%llukb " "device offset=%llukb size=%llukb\n", (unsigned long long)zone_start>>1, (unsigned long long)conf->strip_zone[j].dev_start>>1, (unsigned long long)zone_size>>1); zone_start = conf->strip_zone[j].zone_end; } printk(KERN_INFO "**********************************\n\n"); } static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf) { int i, c, err; sector_t curr_zone_end, sectors; mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev, **dev; struct strip_zone *zone; int cnt; char b[BDEVNAME_SIZE]; raid0_conf_t *conf = kzalloc(sizeof(*conf), GFP_KERNEL); if (!conf) return -ENOMEM; list_for_each_entry(rdev1, &mddev->disks, same_set) { printk(KERN_INFO "md/raid0:%s: looking at %s\n", mdname(mddev), bdevname(rdev1->bdev, b)); c = 0; /* round size to chunk_size */ sectors = rdev1->sectors; sector_div(sectors, mddev->chunk_sectors); rdev1->sectors = sectors * mddev->chunk_sectors; list_for_each_entry(rdev2, &mddev->disks, same_set) { printk(KERN_INFO "md/raid0:%s: comparing %s(%llu)", mdname(mddev), bdevname(rdev1->bdev,b), (unsigned long long)rdev1->sectors); printk(KERN_CONT " with %s(%llu)\n", bdevname(rdev2->bdev,b), (unsigned long long)rdev2->sectors); if (rdev2 == rdev1) { printk(KERN_INFO "md/raid0:%s: END\n", mdname(mddev)); break; } if (rdev2->sectors == rdev1->sectors) { /* * Not unique, don't count it as a new * group */ printk(KERN_INFO "md/raid0:%s: EQUAL\n", mdname(mddev)); c = 1; break; } printk(KERN_INFO "md/raid0:%s: NOT EQUAL\n", mdname(mddev)); } if (!c) { printk(KERN_INFO "md/raid0:%s: ==> UNIQUE\n", mdname(mddev)); conf->nr_strip_zones++; printk(KERN_INFO "md/raid0:%s: %d zones\n", mdname(mddev), conf->nr_strip_zones); } } printk(KERN_INFO "md/raid0:%s: FINAL %d zones\n", mdname(mddev), conf->nr_strip_zones); err = -ENOMEM; conf->strip_zone = 
kzalloc(sizeof(struct strip_zone)* conf->nr_strip_zones, GFP_KERNEL); if (!conf->strip_zone) goto abort; conf->devlist = kzalloc(sizeof(mdk_rdev_t*)* conf->nr_strip_zones*mddev->raid_disks, GFP_KERNEL); if (!conf->devlist) goto abort; /* The first zone must contain all devices, so here we check that * there is a proper alignment of slots to devices and find them all */ zone = &conf->strip_zone[0]; cnt = 0; smallest = NULL; dev = conf->devlist; err = -EINVAL; list_for_each_entry(rdev1, &mddev->disks, same_set) { int j = rdev1->raid_disk; if (mddev->level == 10) { /* taking over a raid10-n2 array */ j /= 2; rdev1->new_raid_disk = j; } if (j < 0 || j >= mddev->raid_disks) { printk(KERN_ERR "md/raid0:%s: bad disk number %d - " "aborting!\n", mdname(mddev), j); goto abort; } if (dev[j]) { printk(KERN_ERR "md/raid0:%s: multiple devices for %d - " "aborting!\n", mdname(mddev), j); goto abort; } dev[j] = rdev1; disk_stack_limits(mddev->gendisk, rdev1->bdev, rdev1->data_offset << 9); /* as we don't honour merge_bvec_fn, we must never risk * violating it, so limit ->max_segments to 1, lying within * a single page. 
*/ if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) { blk_queue_max_segments(mddev->queue, 1); blk_queue_segment_boundary(mddev->queue, PAGE_CACHE_SIZE - 1); } if (!smallest || (rdev1->sectors < smallest->sectors)) smallest = rdev1; cnt++; } if (cnt != mddev->raid_disks) { printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - " "aborting!\n", mdname(mddev), cnt, mddev->raid_disks); goto abort; } zone->nb_dev = cnt; zone->zone_end = smallest->sectors * cnt; curr_zone_end = zone->zone_end; /* now do the other zones */ for (i = 1; i < conf->nr_strip_zones; i++) { int j; zone = conf->strip_zone + i; dev = conf->devlist + i * mddev->raid_disks; printk(KERN_INFO "md/raid0:%s: zone %d\n", mdname(mddev), i); zone->dev_start = smallest->sectors; smallest = NULL; c = 0; for (j=0; j<cnt; j++) { rdev = conf->devlist[j]; printk(KERN_INFO "md/raid0:%s: checking %s ...", mdname(mddev), bdevname(rdev->bdev, b)); if (rdev->sectors <= zone->dev_start) { printk(KERN_CONT " nope.\n"); continue; } printk(KERN_CONT " contained as device %d\n", c); dev[c] = rdev; c++; if (!smallest || rdev->sectors < smallest->sectors) { smallest = rdev; printk(KERN_INFO "md/raid0:%s: (%llu) is smallest!.\n", mdname(mddev), (unsigned long long)rdev->sectors); } } zone->nb_dev = c; sectors = (smallest->sectors - zone->dev_start) * c; printk(KERN_INFO "md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n", mdname(mddev), zone->nb_dev, (unsigned long long)sectors); curr_zone_end += sectors; zone->zone_end = curr_zone_end; printk(KERN_INFO "md/raid0:%s: current zone start: %llu\n", mdname(mddev), (unsigned long long)smallest->sectors); } mddev->queue->unplug_fn = raid0_unplug; mddev->queue->backing_dev_info.congested_fn = raid0_congested; mddev->queue->backing_dev_info.congested_data = mddev; /* * now since we have the hard sector sizes, we can make sure * chunk size is a multiple of that sector size */ if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) { printk(KERN_ERR "md/raid0:%s: 
chunk_size of %d not valid\n", mdname(mddev), mddev->chunk_sectors << 9); goto abort; } blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9); blk_queue_io_opt(mddev->queue, (mddev->chunk_sectors << 9) * mddev->raid_disks); printk(KERN_INFO "md/raid0:%s: done.\n", mdname(mddev)); *private_conf = conf; return 0; abort: kfree(conf->strip_zone); kfree(conf->devlist); kfree(conf); *private_conf = NULL; return err; } /** * raid0_mergeable_bvec -- tell bio layer if a two requests can be merged * @q: request queue * @bvm: properties of new bio * @biovec: the request that could be merged to it. * * Return amount of bytes we can accept at this offset */ static int raid0_mergeable_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *biovec) { mddev_t *mddev = q->queuedata; sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); int max; unsigned int chunk_sectors = mddev->chunk_sectors; unsigned int bio_sectors = bvm->bi_size >> 9; if (is_power_of_2(chunk_sectors)) max = (chunk_sectors - ((sector & (chunk_sectors-1)) + bio_sectors)) << 9; else max = (chunk_sectors - (sector_div(sector, chunk_sectors) + bio_sectors)) << 9; if (max < 0) max = 0; /* bio_add cannot handle a negative return */ if (max <= biovec->bv_len && bio_sectors == 0) return biovec->bv_len; else return max; } static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks) { sector_t array_sectors = 0; mdk_rdev_t *rdev; WARN_ONCE(sectors || raid_disks, "%s does not support generic reshape\n", __func__); list_for_each_entry(rdev, &mddev->disks, same_set) array_sectors += rdev->sectors; return array_sectors; } static int raid0_run(mddev_t *mddev) { raid0_conf_t *conf; int ret; if (mddev->chunk_sectors == 0) { printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n", mdname(mddev)); return -EINVAL; } if (md_check_no_bitmap(mddev)) return -EINVAL; blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); mddev->queue->queue_lock = &mddev->queue->__queue_lock; /* 
if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
	       mdname(mddev),
	       (unsigned long long)mddev->array_sectors);
	/* calculate the max read-ahead size.
	 * For read-ahead of large files to be effective, we need to
	 * readahead at least twice a whole stripe. i.e. number of devices
	 * multiplied by chunk size times 2.
	 * If an individual device has an ra_pages greater than the
	 * chunk size, then we will not drive that device as hard as it
	 * wants. We consider this a configuration error: a larger
	 * chunksize should be used in that case.
	 */
	{
		int stripe = mddev->raid_disks *
			(mddev->chunk_sectors << 9) / PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
	}

	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
	dump_zones(mddev);
	md_integrity_register(mddev);
	return 0;
}

/* Tear down a raid0 array: quiesce the queue and release the per-array
 * zone/device configuration allocated by create_strip_zones(). */
static int raid0_stop(mddev_t *mddev)
{
	raid0_conf_t *conf = mddev->private;

	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}

/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 * BUG()s if the sector lies beyond the last zone (callers must pass a
 * sector inside the array).
 */
static struct strip_zone *find_zone(struct raid0_private_data *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}

/*
 * remaps the bio to the target device. we separate two flows.
 * power 2 flow and a general flow for the sake of performance
 */
/* Map a zone-relative position to (member device, device sector).
 * @mddev:	   the raid0 array
 * @zone:	   zone containing the sector (from find_zone())
 * @sector:	   sector used to pick the stripe column
 * @sector_offset: in: offset within the zone; out: offset on the device
 *		   (zone->dev_start still has to be added by the caller)
 * Returns the rdev the bio must be re-targeted to.
 */
static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
				sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	raid0_conf_t *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;
	if (is_power_of_2(chunk_sects)) {
		/* power-of-2 chunk size: use mask/shift instead of division */
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device*/
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else{
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 *  position the  bio over the real device
	 *  real sector = chunk in device + starting of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	/* devlist is laid out as nr_strip_zones rows of raid_disks entries;
	 * sector_div() leaves the column index (sector % nb_dev) in place. */
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}

/*
 * Is io distribute over 1 or more chunks ?
 */
static inline int is_io_in_chunk_boundary(mddev_t *mddev,
			unsigned int chunk_sects, struct bio *bio)
{
	/* True when the whole bio fits inside one chunk and therefore
	 * never has to be split across member devices. */
	if (likely(is_power_of_2(chunk_sects))) {
		return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
					+ (bio->bi_size >> 9));
	} else{
		sector_t sector = bio->bi_sector;
		return chunk_sects >= (sector_div(sector, chunk_sects)
						+ (bio->bi_size >> 9));
	}
}

/* Remap @bio onto the member device holding its data.
 * Returns 1 when the caller should submit the (re-targeted) bio, 0 when
 * it was fully handled here (barrier request, chunk-crossing split, or
 * error).
 */
static int raid0_make_request(mddev_t *mddev, struct bio *bio)
{
	unsigned int chunk_sects;
	sector_t sector_offset;
	struct strip_zone *zone;
	mdk_rdev_t *tmp_dev;

	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
		md_barrier_request(mddev, bio);
		return 0;
	}

	chunk_sects = mddev->chunk_sectors;
	if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
		sector_t sector = bio->bi_sector;
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
		if (bio->bi_vcnt != 1 || bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
		 * refuse to split for us, so we need to split it.
		 */
		if (likely(is_power_of_2(chunk_sects)))
			bp = bio_split(bio, chunk_sects - (sector &
							   (chunk_sects-1)));
		else
			bp = bio_split(bio, chunk_sects -
				       sector_div(sector, chunk_sects));
		/* Each half is handled recursively; recursion depth is
		 * bounded because a one-page bio splits at most once. */
		if (raid0_make_request(mddev, &bp->bio1))
			generic_make_request(&bp->bio1);
		if (raid0_make_request(mddev, &bp->bio2))
			generic_make_request(&bp->bio2);

		bio_pair_release(bp);
		return 0;
	}

	sector_offset = bio->bi_sector;
	zone = find_zone(mddev->private, &sector_offset);
	tmp_dev = map_sector(mddev, zone, bio->bi_sector,
			     &sector_offset);
	bio->bi_bdev = tmp_dev->bdev;
	bio->bi_sector = sector_offset + zone->dev_start +
		tmp_dev->data_offset;
	/*
	 * Let the main block layer submit the IO and resolve recursion:
	 */
	return 1;

bad_map:
	printk("md/raid0:%s: make_request bug: can't convert block across chunks"
	       " or bigger than %dk %llu %d\n",
	       mdname(mddev), chunk_sects / 2,
	       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);

	bio_io_error(bio);
	return 0;
}

/* Report zone layout in /proc/mdstat (verbose dump only under MD_DEBUG). */
static void raid0_status(struct seq_file *seq, mddev_t *mddev)
{
#undef
MD_DEBUG #ifdef MD_DEBUG int j, k, h; char b[BDEVNAME_SIZE]; raid0_conf_t *conf = mddev->private; int raid_disks = conf->strip_zone[0].nb_dev; sector_t zone_size; sector_t zone_start = 0; h = 0; for (j = 0; j < conf->nr_strip_zones; j++) { seq_printf(seq, " z%d", j); seq_printf(seq, "=["); for (k = 0; k < conf->strip_zone[j].nb_dev; k++) seq_printf(seq, "%s/", bdevname( conf->devlist[j*raid_disks + k] ->bdev, b)); zone_size = conf->strip_zone[j].zone_end - zone_start; seq_printf(seq, "] ze=%lld ds=%lld s=%lld\n", (unsigned long long)zone_start>>1, (unsigned long long)conf->strip_zone[j].dev_start>>1, (unsigned long long)zone_size>>1); zone_start = conf->strip_zone[j].zone_end; } #endif seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2); return; } static void *raid0_takeover_raid45(mddev_t *mddev) { mdk_rdev_t *rdev; raid0_conf_t *priv_conf; if (mddev->degraded != 1) { printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n", mdname(mddev), mddev->degraded); return ERR_PTR(-EINVAL); } list_for_each_entry(rdev, &mddev->disks, same_set) { /* check slot number for a disk */ if (rdev->raid_disk == mddev->raid_disks-1) { printk(KERN_ERR "md/raid0:%s: raid5 must have missing parity disk!\n", mdname(mddev)); return ERR_PTR(-EINVAL); } } /* Set new parameters */ mddev->new_level = 0; mddev->new_layout = 0; mddev->new_chunk_sectors = mddev->chunk_sectors; mddev->raid_disks--; mddev->delta_disks = -1; /* make sure it will be not marked as dirty */ mddev->recovery_cp = MaxSector; create_strip_zones(mddev, &priv_conf); return priv_conf; } static void *raid0_takeover_raid10(mddev_t *mddev) { raid0_conf_t *priv_conf; /* Check layout: * - far_copies must be 1 * - near_copies must be 2 * - disks number must be even * - all mirrors must be already degraded */ if (mddev->layout != ((1 << 8) + 2)) { printk(KERN_ERR "md/raid0:%s:: Raid0 cannot takover layout: 0x%x\n", mdname(mddev), mddev->layout); return ERR_PTR(-EINVAL); } if (mddev->raid_disks & 1) { 
printk(KERN_ERR "md/raid0:%s: Raid0 cannot takover Raid10 with odd disk number.\n", mdname(mddev)); return ERR_PTR(-EINVAL); } if (mddev->degraded != (mddev->raid_disks>>1)) { printk(KERN_ERR "md/raid0:%s: All mirrors must be already degraded!\n", mdname(mddev)); return ERR_PTR(-EINVAL); } /* Set new parameters */ mddev->new_level = 0; mddev->new_layout = 0; mddev->new_chunk_sectors = mddev->chunk_sectors; mddev->delta_disks = - mddev->raid_disks / 2; mddev->raid_disks += mddev->delta_disks; mddev->degraded = 0; /* make sure it will be not marked as dirty */ mddev->recovery_cp = MaxSector; create_strip_zones(mddev, &priv_conf); return priv_conf; } static void *raid0_takeover(mddev_t *mddev) { /* raid0 can take over: * raid4 - if all data disks are active. * raid5 - providing it is Raid4 layout and one disk is faulty * raid10 - assuming we have all necessary active disks */ if (mddev->level == 4) return raid0_takeover_raid45(mddev); if (mddev->level == 5) { if (mddev->layout == ALGORITHM_PARITY_N) return raid0_takeover_raid45(mddev); printk(KERN_ERR "md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n", mdname(mddev), ALGORITHM_PARITY_N); } if (mddev->level == 10) return raid0_takeover_raid10(mddev); return ERR_PTR(-EINVAL); } static void raid0_quiesce(mddev_t *mddev, int state) { } static struct mdk_personality raid0_personality= { .name = "raid0", .level = 0, .owner = THIS_MODULE, .make_request = raid0_make_request, .run = raid0_run, .stop = raid0_stop, .status = raid0_status, .size = raid0_size, .takeover = raid0_takeover, .quiesce = raid0_quiesce, }; static int __init raid0_init (void) { return register_md_personality (&raid0_personality); } static void raid0_exit (void) { unregister_md_personality (&raid0_personality); } module_init(raid0_init); module_exit(raid0_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("RAID0 (striping) personality for MD"); MODULE_ALIAS("md-personality-2"); /* RAID0 */ MODULE_ALIAS("md-raid0"); MODULE_ALIAS("md-level-0");
gpl-2.0
GabrielL/linux
drivers/hwmon/i5k_amb.c
1269
16096
/* * A hwmon driver for the Intel 5000 series chipset FB-DIMM AMB * temperature sensors * Copyright (C) 2007 IBM * * Author: Darrick J. Wong <darrick.wong@oracle.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/log2.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/slab.h> #define DRVNAME "i5k_amb" #define I5K_REG_AMB_BASE_ADDR 0x48 #define I5K_REG_AMB_LEN_ADDR 0x50 #define I5K_REG_CHAN0_PRESENCE_ADDR 0x64 #define I5K_REG_CHAN1_PRESENCE_ADDR 0x66 #define AMB_REG_TEMP_MIN_ADDR 0x80 #define AMB_REG_TEMP_MID_ADDR 0x81 #define AMB_REG_TEMP_MAX_ADDR 0x82 #define AMB_REG_TEMP_STATUS_ADDR 0x84 #define AMB_REG_TEMP_ADDR 0x85 #define AMB_CONFIG_SIZE 2048 #define AMB_FUNC_3_OFFSET 768 static unsigned long amb_reg_temp_status(unsigned int amb) { return AMB_FUNC_3_OFFSET + AMB_REG_TEMP_STATUS_ADDR + AMB_CONFIG_SIZE * amb; } static unsigned long amb_reg_temp_min(unsigned int amb) { return AMB_FUNC_3_OFFSET + AMB_REG_TEMP_MIN_ADDR + AMB_CONFIG_SIZE * amb; } static unsigned long amb_reg_temp_mid(unsigned int amb) { return AMB_FUNC_3_OFFSET + AMB_REG_TEMP_MID_ADDR + AMB_CONFIG_SIZE * amb; } static unsigned long amb_reg_temp_max(unsigned int amb) { return 
AMB_FUNC_3_OFFSET + AMB_REG_TEMP_MAX_ADDR + AMB_CONFIG_SIZE * amb; } static unsigned long amb_reg_temp(unsigned int amb) { return AMB_FUNC_3_OFFSET + AMB_REG_TEMP_ADDR + AMB_CONFIG_SIZE * amb; } #define MAX_MEM_CHANNELS 4 #define MAX_AMBS_PER_CHANNEL 16 #define MAX_AMBS (MAX_MEM_CHANNELS * \ MAX_AMBS_PER_CHANNEL) #define CHANNEL_SHIFT 4 #define DIMM_MASK 0xF /* * Ugly hack: For some reason the highest bit is set if there * are _any_ DIMMs in the channel. Attempting to read from * this "high-order" AMB results in a memory bus error, so * for now we'll just ignore that top bit, even though that * might prevent us from seeing the 16th DIMM in the channel. */ #define REAL_MAX_AMBS_PER_CHANNEL 15 #define KNOBS_PER_AMB 6 static unsigned long amb_num_from_reg(unsigned int byte_num, unsigned int bit) { return byte_num * MAX_AMBS_PER_CHANNEL + bit; } #define AMB_SYSFS_NAME_LEN 16 struct i5k_device_attribute { struct sensor_device_attribute s_attr; char name[AMB_SYSFS_NAME_LEN]; }; struct i5k_amb_data { struct device *hwmon_dev; unsigned long amb_base; unsigned long amb_len; u16 amb_present[MAX_MEM_CHANNELS]; void __iomem *amb_mmio; struct i5k_device_attribute *attrs; unsigned int num_attrs; }; static ssize_t show_name(struct device *dev, struct device_attribute *devattr, char *buf) { return sprintf(buf, "%s\n", DRVNAME); } static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); static struct platform_device *amb_pdev; static u8 amb_read_byte(struct i5k_amb_data *data, unsigned long offset) { return ioread8(data->amb_mmio + offset); } static void amb_write_byte(struct i5k_amb_data *data, unsigned long offset, u8 val) { iowrite8(val, data->amb_mmio + offset); } static ssize_t show_amb_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct i5k_amb_data *data = dev_get_drvdata(dev); if (!(amb_read_byte(data, amb_reg_temp_status(attr->index)) & 0x20) && (amb_read_byte(data, 
amb_reg_temp_status(attr->index)) & 0x8))
		return sprintf(buf, "1\n");
	else
		return sprintf(buf, "0\n");
}

/* Write temp%d_min: sysfs value is converted with raw = value / 500
 * (mirroring the *500 scaling in the show functions) and clamped to the
 * 8-bit register range before being written to the AMB. */
static ssize_t store_amb_min(struct device *dev,
			     struct device_attribute *devattr,
			     const char *buf,
			     size_t count)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct i5k_amb_data *data = dev_get_drvdata(dev);
	unsigned long temp;
	int ret = kstrtoul(buf, 10, &temp);
	if (ret < 0)
		return ret;

	temp = temp / 500;
	if (temp > 255)
		temp = 255;

	amb_write_byte(data, amb_reg_temp_min(attr->index), temp);
	return count;
}

/* Write temp%d_mid: same conversion and clamping as store_amb_min. */
static ssize_t store_amb_mid(struct device *dev,
			     struct device_attribute *devattr,
			     const char *buf,
			     size_t count)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct i5k_amb_data *data = dev_get_drvdata(dev);
	unsigned long temp;
	int ret = kstrtoul(buf, 10, &temp);
	if (ret < 0)
		return ret;

	temp = temp / 500;
	if (temp > 255)
		temp = 255;

	amb_write_byte(data, amb_reg_temp_mid(attr->index), temp);
	return count;
}

/* Write temp%d_max: same conversion and clamping as store_amb_min. */
static ssize_t store_amb_max(struct device *dev,
			     struct device_attribute *devattr,
			     const char *buf,
			     size_t count)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct i5k_amb_data *data = dev_get_drvdata(dev);
	unsigned long temp;
	int ret = kstrtoul(buf, 10, &temp);
	if (ret < 0)
		return ret;

	temp = temp / 500;
	if (temp > 255)
		temp = 255;

	amb_write_byte(data, amb_reg_temp_max(attr->index), temp);
	return count;
}

/* Read temp%d_min: raw register value scaled by 500. */
static ssize_t show_amb_min(struct device *dev,
			    struct device_attribute *devattr,
			    char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct i5k_amb_data *data = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", 500 *
		amb_read_byte(data, amb_reg_temp_min(attr->index)));
}

/* Read temp%d_mid: raw register value scaled by 500. */
static ssize_t show_amb_mid(struct device *dev,
			    struct device_attribute *devattr,
			    char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct i5k_amb_data *data = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", 500 *
		amb_read_byte(data,
amb_reg_temp_mid(attr->index))); } static ssize_t show_amb_max(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct i5k_amb_data *data = dev_get_drvdata(dev); return sprintf(buf, "%d\n", 500 * amb_read_byte(data, amb_reg_temp_max(attr->index))); } static ssize_t show_amb_temp(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct i5k_amb_data *data = dev_get_drvdata(dev); return sprintf(buf, "%d\n", 500 * amb_read_byte(data, amb_reg_temp(attr->index))); } static ssize_t show_label(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); return sprintf(buf, "Ch. %d DIMM %d\n", attr->index >> CHANNEL_SHIFT, attr->index & DIMM_MASK); } static int i5k_amb_hwmon_init(struct platform_device *pdev) { int i, j, k, d = 0; u16 c; int res = 0; int num_ambs = 0; struct i5k_amb_data *data = platform_get_drvdata(pdev); /* Count the number of AMBs found */ /* ignore the high-order bit, see "Ugly hack" comment above */ for (i = 0; i < MAX_MEM_CHANNELS; i++) num_ambs += hweight16(data->amb_present[i] & 0x7fff); /* Set up sysfs stuff */ data->attrs = kzalloc(sizeof(*data->attrs) * num_ambs * KNOBS_PER_AMB, GFP_KERNEL); if (!data->attrs) return -ENOMEM; data->num_attrs = 0; for (i = 0; i < MAX_MEM_CHANNELS; i++) { c = data->amb_present[i]; for (j = 0; j < REAL_MAX_AMBS_PER_CHANNEL; j++, c >>= 1) { struct i5k_device_attribute *iattr; k = amb_num_from_reg(i, j); if (!(c & 0x1)) continue; d++; /* sysfs label */ iattr = data->attrs + data->num_attrs; snprintf(iattr->name, AMB_SYSFS_NAME_LEN, "temp%d_label", d); iattr->s_attr.dev_attr.attr.name = iattr->name; iattr->s_attr.dev_attr.attr.mode = S_IRUGO; iattr->s_attr.dev_attr.show = show_label; iattr->s_attr.index = k; sysfs_attr_init(&iattr->s_attr.dev_attr.attr); res = 
device_create_file(&pdev->dev, &iattr->s_attr.dev_attr); if (res) goto exit_remove; data->num_attrs++; /* Temperature sysfs knob */ iattr = data->attrs + data->num_attrs; snprintf(iattr->name, AMB_SYSFS_NAME_LEN, "temp%d_input", d); iattr->s_attr.dev_attr.attr.name = iattr->name; iattr->s_attr.dev_attr.attr.mode = S_IRUGO; iattr->s_attr.dev_attr.show = show_amb_temp; iattr->s_attr.index = k; sysfs_attr_init(&iattr->s_attr.dev_attr.attr); res = device_create_file(&pdev->dev, &iattr->s_attr.dev_attr); if (res) goto exit_remove; data->num_attrs++; /* Temperature min sysfs knob */ iattr = data->attrs + data->num_attrs; snprintf(iattr->name, AMB_SYSFS_NAME_LEN, "temp%d_min", d); iattr->s_attr.dev_attr.attr.name = iattr->name; iattr->s_attr.dev_attr.attr.mode = S_IWUSR | S_IRUGO; iattr->s_attr.dev_attr.show = show_amb_min; iattr->s_attr.dev_attr.store = store_amb_min; iattr->s_attr.index = k; sysfs_attr_init(&iattr->s_attr.dev_attr.attr); res = device_create_file(&pdev->dev, &iattr->s_attr.dev_attr); if (res) goto exit_remove; data->num_attrs++; /* Temperature mid sysfs knob */ iattr = data->attrs + data->num_attrs; snprintf(iattr->name, AMB_SYSFS_NAME_LEN, "temp%d_mid", d); iattr->s_attr.dev_attr.attr.name = iattr->name; iattr->s_attr.dev_attr.attr.mode = S_IWUSR | S_IRUGO; iattr->s_attr.dev_attr.show = show_amb_mid; iattr->s_attr.dev_attr.store = store_amb_mid; iattr->s_attr.index = k; sysfs_attr_init(&iattr->s_attr.dev_attr.attr); res = device_create_file(&pdev->dev, &iattr->s_attr.dev_attr); if (res) goto exit_remove; data->num_attrs++; /* Temperature max sysfs knob */ iattr = data->attrs + data->num_attrs; snprintf(iattr->name, AMB_SYSFS_NAME_LEN, "temp%d_max", d); iattr->s_attr.dev_attr.attr.name = iattr->name; iattr->s_attr.dev_attr.attr.mode = S_IWUSR | S_IRUGO; iattr->s_attr.dev_attr.show = show_amb_max; iattr->s_attr.dev_attr.store = store_amb_max; iattr->s_attr.index = k; sysfs_attr_init(&iattr->s_attr.dev_attr.attr); res = device_create_file(&pdev->dev, 
&iattr->s_attr.dev_attr); if (res) goto exit_remove; data->num_attrs++; /* Temperature alarm sysfs knob */ iattr = data->attrs + data->num_attrs; snprintf(iattr->name, AMB_SYSFS_NAME_LEN, "temp%d_alarm", d); iattr->s_attr.dev_attr.attr.name = iattr->name; iattr->s_attr.dev_attr.attr.mode = S_IRUGO; iattr->s_attr.dev_attr.show = show_amb_alarm; iattr->s_attr.index = k; sysfs_attr_init(&iattr->s_attr.dev_attr.attr); res = device_create_file(&pdev->dev, &iattr->s_attr.dev_attr); if (res) goto exit_remove; data->num_attrs++; } } res = device_create_file(&pdev->dev, &dev_attr_name); if (res) goto exit_remove; data->hwmon_dev = hwmon_device_register(&pdev->dev); if (IS_ERR(data->hwmon_dev)) { res = PTR_ERR(data->hwmon_dev); goto exit_remove; } return res; exit_remove: device_remove_file(&pdev->dev, &dev_attr_name); for (i = 0; i < data->num_attrs; i++) device_remove_file(&pdev->dev, &data->attrs[i].s_attr.dev_attr); kfree(data->attrs); return res; } static int i5k_amb_add(void) { int res = -ENODEV; /* only ever going to be one of these */ amb_pdev = platform_device_alloc(DRVNAME, 0); if (!amb_pdev) return -ENOMEM; res = platform_device_add(amb_pdev); if (res) goto err; return 0; err: platform_device_put(amb_pdev); return res; } static int i5k_find_amb_registers(struct i5k_amb_data *data, unsigned long devid) { struct pci_dev *pcidev; u32 val32; int res = -ENODEV; /* Find AMB register memory space */ pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, NULL); if (!pcidev) return -ENODEV; if (pci_read_config_dword(pcidev, I5K_REG_AMB_BASE_ADDR, &val32)) goto out; data->amb_base = val32; if (pci_read_config_dword(pcidev, I5K_REG_AMB_LEN_ADDR, &val32)) goto out; data->amb_len = val32; /* Is it big enough? 
*/ if (data->amb_len < AMB_CONFIG_SIZE * MAX_AMBS) { dev_err(&pcidev->dev, "AMB region too small!\n"); goto out; } res = 0; out: pci_dev_put(pcidev); return res; } static int i5k_channel_probe(u16 *amb_present, unsigned long dev_id) { struct pci_dev *pcidev; u16 val16; int res = -ENODEV; /* Copy the DIMM presence map for these two channels */ pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL); if (!pcidev) return -ENODEV; if (pci_read_config_word(pcidev, I5K_REG_CHAN0_PRESENCE_ADDR, &val16)) goto out; amb_present[0] = val16; if (pci_read_config_word(pcidev, I5K_REG_CHAN1_PRESENCE_ADDR, &val16)) goto out; amb_present[1] = val16; res = 0; out: pci_dev_put(pcidev); return res; } static struct { unsigned long err; unsigned long fbd0; } chipset_ids[] = { { PCI_DEVICE_ID_INTEL_5000_ERR, PCI_DEVICE_ID_INTEL_5000_FBD0 }, { PCI_DEVICE_ID_INTEL_5400_ERR, PCI_DEVICE_ID_INTEL_5400_FBD0 }, { 0, 0 } }; #ifdef MODULE static struct pci_device_id i5k_amb_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR) }, { 0, } }; MODULE_DEVICE_TABLE(pci, i5k_amb_ids); #endif static int i5k_amb_probe(struct platform_device *pdev) { struct i5k_amb_data *data; struct resource *reso; int i, res; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; /* Figure out where the AMB registers live */ i = 0; do { res = i5k_find_amb_registers(data, chipset_ids[i].err); if (res == 0) break; i++; } while (chipset_ids[i].err); if (res) goto err; /* Copy the DIMM presence map for the first two channels */ res = i5k_channel_probe(&data->amb_present[0], chipset_ids[i].fbd0); if (res) goto err; /* Copy the DIMM presence map for the optional second two channels */ i5k_channel_probe(&data->amb_present[2], chipset_ids[i].fbd0 + 1); /* Set up resource regions */ reso = request_mem_region(data->amb_base, data->amb_len, DRVNAME); if (!reso) { res = -EBUSY; goto err; } data->amb_mmio = 
ioremap_nocache(data->amb_base, data->amb_len); if (!data->amb_mmio) { res = -EBUSY; goto err_map_failed; } platform_set_drvdata(pdev, data); res = i5k_amb_hwmon_init(pdev); if (res) goto err_init_failed; return res; err_init_failed: iounmap(data->amb_mmio); err_map_failed: release_mem_region(data->amb_base, data->amb_len); err: kfree(data); return res; } static int i5k_amb_remove(struct platform_device *pdev) { int i; struct i5k_amb_data *data = platform_get_drvdata(pdev); hwmon_device_unregister(data->hwmon_dev); device_remove_file(&pdev->dev, &dev_attr_name); for (i = 0; i < data->num_attrs; i++) device_remove_file(&pdev->dev, &data->attrs[i].s_attr.dev_attr); kfree(data->attrs); iounmap(data->amb_mmio); release_mem_region(data->amb_base, data->amb_len); kfree(data); return 0; } static struct platform_driver i5k_amb_driver = { .driver = { .name = DRVNAME, }, .probe = i5k_amb_probe, .remove = i5k_amb_remove, }; static int __init i5k_amb_init(void) { int res; res = platform_driver_register(&i5k_amb_driver); if (res) return res; res = i5k_amb_add(); if (res) platform_driver_unregister(&i5k_amb_driver); return res; } static void __exit i5k_amb_exit(void) { platform_device_unregister(amb_pdev); platform_driver_unregister(&i5k_amb_driver); } MODULE_AUTHOR("Darrick J. Wong <darrick.wong@oracle.com>"); MODULE_DESCRIPTION("Intel 5000 chipset FB-DIMM AMB temperature sensor"); MODULE_LICENSE("GPL"); module_init(i5k_amb_init); module_exit(i5k_amb_exit);
gpl-2.0
xtrymind/android_kernel_msm
drivers/media/video/msm/vfe/msm_vfe7x.c
1269
16860
/* Copyright (c) 2009-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/msm_adsp.h> #include <linux/uaccess.h> #include <linux/fs.h> #include <linux/android_pmem.h> #include <linux/slab.h> #include <mach/msm_adsp.h> #include <mach/clk.h> #include <linux/delay.h> #include <linux/wait.h> #include "msm_vfe7x.h" #include <linux/pm_qos.h> #define QDSP_CMDQUEUE 25 #define VFE_RESET_CMD 0 #define VFE_START_CMD 1 #define VFE_STOP_CMD 2 #define VFE_FRAME_ACK 20 #define STATS_AF_ACK 21 #define STATS_WE_ACK 22 #define MSG_STOP_ACK 1 #define MSG_SNAPSHOT 2 #define MSG_OUTPUT1 6 #define MSG_OUTPUT2 7 #define MSG_STATS_AF 8 #define MSG_STATS_WE 9 #define MSG_OUTPUT_S 10 #define MSG_OUTPUT_T 11 #define VFE_ADSP_EVENT 0xFFFF #define SNAPSHOT_MASK_MODE 0x00000002 #define MSM_AXI_QOS_PREVIEW 192000 #define MSM_AXI_QOS_SNAPSHOT 192000 static struct msm_adsp_module *qcam_mod; static struct msm_adsp_module *vfe_mod; static struct msm_vfe_callback *resp; static void *extdata; static uint32_t extlen; struct mutex vfe_lock; static void *vfe_syncdata; static uint8_t vfestopped; static uint32_t vfetask_state; static int cnt; static struct stop_event stopevent; unsigned long paddr_s_y; unsigned long paddr_s_cbcr; unsigned long paddr_t_y; unsigned long paddr_t_cbcr; static void vfe_7x_convert(struct msm_vfe_phy_info *pinfo, enum vfe_resp_msg type, void *data, void **ext, int32_t *elen) { switch (type) { case VFE_MSG_OUTPUT_P: { pinfo->p0_phy = ((struct vfe_endframe *)data)->y_address; pinfo->p1_phy = ((struct vfe_endframe 
*)data)->cbcr_address; pinfo->p2_phy = pinfo->p0_phy; pinfo->output_id = OUTPUT_TYPE_P; CDBG("vfe_7x_convert, y_phy = 0x%x, cbcr_phy = 0x%x\n", pinfo->p0_phy, pinfo->p1_phy); ((struct vfe_frame_extra *)extdata)->bl_evencol = ((struct vfe_endframe *)data)->blacklevelevencolumn; ((struct vfe_frame_extra *)extdata)->bl_oddcol = ((struct vfe_endframe *)data)->blackleveloddcolumn; ((struct vfe_frame_extra *)extdata)->g_def_p_cnt = ((struct vfe_endframe *)data)->greendefectpixelcount; ((struct vfe_frame_extra *)extdata)->r_b_def_p_cnt = ((struct vfe_endframe *)data)->redbluedefectpixelcount; *ext = extdata; *elen = extlen; } break; case VFE_MSG_OUTPUT_S: { pinfo->p0_phy = paddr_s_y; pinfo->p1_phy = paddr_s_cbcr; pinfo->p2_phy = pinfo->p0_phy; pinfo->output_id = OUTPUT_TYPE_S; CDBG("vfe_7x_convert: y_phy = 0x%x cbcr_phy = 0x%x\n", pinfo->p0_phy, pinfo->p1_phy); } break; case VFE_MSG_OUTPUT_T: { pinfo->p0_phy = paddr_t_y; pinfo->p1_phy = paddr_t_cbcr; pinfo->p2_phy = pinfo->p0_phy; pinfo->output_id = OUTPUT_TYPE_T; CDBG("vfe_7x_convert: y_phy = 0x%x cbcr_phy = 0x%x\n", pinfo->p0_phy, pinfo->p1_phy); } break; case VFE_MSG_STATS_AF: case VFE_MSG_STATS_WE: pinfo->sbuf_phy = *(uint32_t *)data; break; default: break; } /* switch */ } static void vfe_7x_ops(void *driver_data, unsigned id, size_t len, void (*getevent)(void *ptr, size_t len)) { uint32_t evt_buf[3]; struct msm_vfe_resp *rp; void *data; CDBG("%s:id=%d\n", __func__, id); len = (id == VFE_ADSP_EVENT) ? 
0 : len; data = resp->vfe_alloc(sizeof(struct msm_vfe_resp) + len, vfe_syncdata, GFP_ATOMIC); if (!data) { pr_err("%s: rp: cannot allocate buffer\n", __func__); return; } rp = (struct msm_vfe_resp *)data; rp->evt_msg.len = len; if (id == VFE_ADSP_EVENT) { /* event */ rp->type = VFE_EVENT; rp->evt_msg.type = MSM_CAMERA_EVT; getevent(evt_buf, sizeof(evt_buf)); rp->evt_msg.msg_id = evt_buf[0]; CDBG("%s:event:msg_id=%d\n", __func__, rp->evt_msg.msg_id); resp->vfe_resp(rp, MSM_CAM_Q_VFE_EVT, vfe_syncdata, GFP_ATOMIC); } else { /* messages */ rp->evt_msg.type = MSM_CAMERA_MSG; rp->evt_msg.msg_id = id; rp->evt_msg.data = rp + 1; getevent(rp->evt_msg.data, len); CDBG("%s:messages:msg_id=%d\n", __func__, rp->evt_msg.msg_id); switch (rp->evt_msg.msg_id) { case MSG_SNAPSHOT: update_axi_qos(MSM_AXI_QOS_PREVIEW); vfe_7x_ops(driver_data, MSG_OUTPUT_S, len, getevent); vfe_7x_ops(driver_data, MSG_OUTPUT_T, len, getevent); rp->type = VFE_MSG_SNAPSHOT; break; case MSG_OUTPUT_S: rp->type = VFE_MSG_OUTPUT_S; vfe_7x_convert(&(rp->phy), VFE_MSG_OUTPUT_S, rp->evt_msg.data, &(rp->extdata), &(rp->extlen)); break; case MSG_OUTPUT_T: rp->type = VFE_MSG_OUTPUT_T; vfe_7x_convert(&(rp->phy), VFE_MSG_OUTPUT_T, rp->evt_msg.data, &(rp->extdata), &(rp->extlen)); break; case MSG_OUTPUT1: case MSG_OUTPUT2: rp->type = VFE_MSG_OUTPUT_P; vfe_7x_convert(&(rp->phy), VFE_MSG_OUTPUT_P, rp->evt_msg.data, &(rp->extdata), &(rp->extlen)); break; case MSG_STATS_AF: rp->type = VFE_MSG_STATS_AF; vfe_7x_convert(&(rp->phy), VFE_MSG_STATS_AF, rp->evt_msg.data, NULL, NULL); break; case MSG_STATS_WE: rp->type = VFE_MSG_STATS_WE; vfe_7x_convert(&(rp->phy), VFE_MSG_STATS_WE, rp->evt_msg.data, NULL, NULL); CDBG("MSG_STATS_WE: phy = 0x%x\n", rp->phy.sbuf_phy); break; case MSG_STOP_ACK: rp->type = VFE_MSG_GENERAL; stopevent.state = 1; wake_up(&stopevent.wait); break; default: rp->type = VFE_MSG_GENERAL; break; } resp->vfe_resp(rp, MSM_CAM_Q_VFE_MSG, vfe_syncdata, GFP_ATOMIC); } } static struct msm_adsp_ops vfe_7x_sync = { 
.event = vfe_7x_ops, }; static int vfe_7x_enable(struct camera_enable_cmd *enable) { int rc = -EFAULT; if (!strcmp(enable->name, "QCAMTASK")) rc = msm_adsp_enable(qcam_mod); else if (!strcmp(enable->name, "VFETASK")) { rc = msm_adsp_enable(vfe_mod); vfetask_state = 1; } if (!cnt) { add_axi_qos(); cnt++; } return rc; } static int vfe_7x_disable(struct camera_enable_cmd *enable, struct platform_device *dev __attribute__((unused))) { int rc = -EFAULT; if (!strcmp(enable->name, "QCAMTASK")) rc = msm_adsp_disable(qcam_mod); else if (!strcmp(enable->name, "VFETASK")) { rc = msm_adsp_disable(vfe_mod); vfetask_state = 0; } return rc; } static int vfe_7x_stop(void) { int rc = 0; uint32_t stopcmd = VFE_STOP_CMD; rc = msm_adsp_write(vfe_mod, QDSP_CMDQUEUE, &stopcmd, sizeof(uint32_t)); if (rc < 0) { CDBG("%s:%d: failed rc = %d \n", __func__, __LINE__, rc); return rc; } stopevent.state = 0; rc = wait_event_timeout(stopevent.wait, stopevent.state != 0, msecs_to_jiffies(stopevent.timeout)); return rc; } static void vfe_7x_release(struct platform_device *pdev) { mutex_lock(&vfe_lock); vfe_syncdata = NULL; mutex_unlock(&vfe_lock); if (!vfestopped) { CDBG("%s:%d:Calling vfe_7x_stop()\n", __func__, __LINE__); vfe_7x_stop(); } else vfestopped = 0; msm_adsp_disable(qcam_mod); msm_adsp_disable(vfe_mod); vfetask_state = 0; msm_adsp_put(qcam_mod); msm_adsp_put(vfe_mod); msm_camio_disable(pdev); kfree(extdata); extlen = 0; /* Release AXI */ release_axi_qos(); cnt = 0; } static int vfe_7x_init(struct msm_vfe_callback *presp, struct platform_device *dev) { int rc = 0; init_waitqueue_head(&stopevent.wait); stopevent.timeout = 200; stopevent.state = 0; if (presp && presp->vfe_resp) resp = presp; else return -EFAULT; /* Bring up all the required GPIOs and Clocks */ rc = msm_camio_enable(dev); if (rc < 0) return rc; msm_camio_camif_pad_reg_reset(); extlen = sizeof(struct vfe_frame_extra); extdata = kmalloc(extlen, GFP_ATOMIC); if (!extdata) { rc = -ENOMEM; goto init_fail; } rc = 
msm_adsp_get("QCAMTASK", &qcam_mod, &vfe_7x_sync, NULL); if (rc) { rc = -EBUSY; goto get_qcam_fail; } rc = msm_adsp_get("VFETASK", &vfe_mod, &vfe_7x_sync, NULL); if (rc) { rc = -EBUSY; goto get_vfe_fail; } return 0; get_vfe_fail: msm_adsp_put(qcam_mod); get_qcam_fail: kfree(extdata); init_fail: extlen = 0; return rc; } static int vfe_7x_config_axi(int mode, struct axidata *ad, struct axiout *ao) { struct msm_pmem_region *regptr; unsigned long *bptr; int cnt; int rc = 0; if (mode == OUTPUT_1 || mode == OUTPUT_1_AND_2) { regptr = ad->region; CDBG("bufnum1 = %d\n", ad->bufnum1); if (mode == OUTPUT_1_AND_2) { paddr_t_y = regptr->paddr + regptr->info.planar0_off; paddr_t_cbcr = regptr->paddr + regptr->info.planar1_off; } CDBG("config_axi1: O1, phy = 0x%lx, y_off = %d, cbcr_off =%d\n", regptr->paddr, regptr->info.planar0_off, regptr->info.planar1_off); bptr = &ao->output1buffer1_y_phy; for (cnt = 0; cnt < ad->bufnum1; cnt++) { *bptr = regptr->paddr + regptr->info.planar0_off; bptr++; *bptr = regptr->paddr + regptr->info.planar1_off; bptr++; regptr++; } regptr--; for (cnt = 0; cnt < (8 - ad->bufnum1); cnt++) { *bptr = regptr->paddr + regptr->info.planar0_off; bptr++; *bptr = regptr->paddr + regptr->info.planar1_off; bptr++; } } /* if OUTPUT1 or Both */ if (mode == OUTPUT_2 || mode == OUTPUT_1_AND_2) { regptr = &(ad->region[ad->bufnum1]); CDBG("bufnum2 = %d\n", ad->bufnum2); paddr_s_y = regptr->paddr + regptr->info.planar0_off; paddr_s_cbcr = regptr->paddr + regptr->info.planar1_off; CDBG("config_axi2: O2, phy = 0x%lx, y_off = %d, cbcr_off =%d\n", regptr->paddr, regptr->info.planar0_off, regptr->info.planar1_off); bptr = &ao->output2buffer1_y_phy; for (cnt = 0; cnt < ad->bufnum2; cnt++) { *bptr = regptr->paddr + regptr->info.planar0_off; bptr++; *bptr = regptr->paddr + regptr->info.planar1_off; bptr++; regptr++; } regptr--; for (cnt = 0; cnt < (8 - ad->bufnum2); cnt++) { *bptr = regptr->paddr + regptr->info.planar0_off; bptr++; *bptr = regptr->paddr + 
regptr->info.planar1_off; bptr++; } } return rc; } static int vfe_7x_config(struct msm_vfe_cfg_cmd *cmd, void *data) { struct msm_pmem_region *regptr; unsigned char buf[256]; struct vfe_stats_ack sack; struct axidata *axid; uint32_t i, op_mode; uint32_t *_mode; struct vfe_stats_we_cfg *scfg = NULL; struct vfe_stats_af_cfg *sfcfg = NULL; struct axiout *axio = NULL; void *cmd_data = NULL; void *cmd_data_alloc = NULL; long rc = 0; struct msm_vfe_command_7k *vfecmd; vfecmd = kmalloc(sizeof(struct msm_vfe_command_7k), GFP_ATOMIC); if (!vfecmd) { pr_err("vfecmd alloc failed!\n"); return -ENOMEM; } if (cmd->cmd_type != CMD_FRAME_BUF_RELEASE && cmd->cmd_type != CMD_STATS_BUF_RELEASE && cmd->cmd_type != CMD_STATS_AF_BUF_RELEASE) { if (copy_from_user(vfecmd, (void __user *)(cmd->value), sizeof(struct msm_vfe_command_7k))) { rc = -EFAULT; goto config_failure; } } switch (cmd->cmd_type) { case CMD_STATS_AEC_AWB_ENABLE: case CMD_STATS_AXI_CFG: { axid = data; if (!axid) { rc = -EFAULT; goto config_failure; } scfg = kmalloc(sizeof(struct vfe_stats_we_cfg), GFP_ATOMIC); if (!scfg) { rc = -ENOMEM; goto config_failure; } if (copy_from_user(scfg, (void __user *)(vfecmd->value), vfecmd->length)) { rc = -EFAULT; goto config_done; } CDBG("STATS_ENABLE: bufnum = %d, enabling = %d\n", axid->bufnum1, scfg->wb_expstatsenable); if (axid->bufnum1 > 0) { regptr = axid->region; for (i = 0; i < axid->bufnum1; i++) { CDBG("STATS_ENABLE, phy = 0x%lx\n", regptr->paddr); scfg->wb_expstatoutputbuffer[i] = (void *)regptr->paddr; regptr++; } cmd_data = scfg; } else { rc = -EINVAL; goto config_done; } } break; case CMD_STATS_AF_ENABLE: case CMD_STATS_AF_AXI_CFG: { axid = data; if (!axid) { rc = -EFAULT; goto config_failure; } sfcfg = kmalloc(sizeof(struct vfe_stats_af_cfg), GFP_ATOMIC); if (!sfcfg) { rc = -ENOMEM; goto config_failure; } if (copy_from_user(sfcfg, (void __user *)(vfecmd->value), vfecmd->length)) { rc = -EFAULT; goto config_done; } CDBG("AF_ENABLE: bufnum = %d, enabling = %d\n", 
axid->bufnum1, sfcfg->af_enable); if (axid->bufnum1 > 0) { regptr = &axid->region[0]; for (i = 0; i < axid->bufnum1; i++) { CDBG("STATS_ENABLE, phy = 0x%lx\n", regptr->paddr); sfcfg->af_outbuf[i] = (void *)regptr->paddr; regptr++; } cmd_data = sfcfg; } else { rc = -EINVAL; goto config_done; } } break; case CMD_FRAME_BUF_RELEASE: { struct msm_frame *b; unsigned long p; struct vfe_outputack fack; if (!data) { rc = -EFAULT; goto config_failure; } b = (struct msm_frame *)(cmd->value); p = *(unsigned long *)data; fack.header = VFE_FRAME_ACK; fack.output2newybufferaddress = (void *)(p + b->planar0_off); fack.output2newcbcrbufferaddress = (void *)(p + b->planar1_off); vfecmd->queue = QDSP_CMDQUEUE; vfecmd->length = sizeof(struct vfe_outputack); cmd_data = &fack; } break; case CMD_SNAP_BUF_RELEASE: break; case CMD_STATS_BUF_RELEASE: { CDBG("vfe_7x_config: CMD_STATS_BUF_RELEASE\n"); if (!data) { rc = -EFAULT; goto config_failure; } sack.header = STATS_WE_ACK; sack.bufaddr = (void *)*(uint32_t *)data; vfecmd->queue = QDSP_CMDQUEUE; vfecmd->length = sizeof(struct vfe_stats_ack); cmd_data = &sack; } break; case CMD_STATS_AF_BUF_RELEASE: { CDBG("vfe_7x_config: CMD_STATS_AF_BUF_RELEASE\n"); if (!data) { rc = -EFAULT; goto config_failure; } sack.header = STATS_AF_ACK; sack.bufaddr = (void *)*(uint32_t *)data; vfecmd->queue = QDSP_CMDQUEUE; vfecmd->length = sizeof(struct vfe_stats_ack); cmd_data = &sack; } break; case CMD_GENERAL: case CMD_STATS_DISABLE: { if (vfecmd->length > 256) { cmd_data_alloc = cmd_data = kmalloc(vfecmd->length, GFP_ATOMIC); if (!cmd_data) { rc = -ENOMEM; goto config_failure; } } else cmd_data = buf; if (copy_from_user(cmd_data, (void __user *)(vfecmd->value), vfecmd->length)) { rc = -EFAULT; goto config_done; } if (vfecmd->queue == QDSP_CMDQUEUE) { switch (*(uint32_t *)cmd_data) { case VFE_RESET_CMD: msm_camio_vfe_blk_reset(); vfestopped = 0; break; case VFE_START_CMD: _mode = (uint32_t *)cmd_data; op_mode = *(++_mode); if (op_mode & SNAPSHOT_MASK_MODE) { 
/* request AXI bus for snapshot */ if (update_axi_qos(MSM_AXI_QOS_SNAPSHOT) < 0) { rc = -EFAULT; goto config_failure; } } else { /* request AXI bus for snapshot */ if (update_axi_qos(MSM_AXI_QOS_PREVIEW) < 0) { rc = -EFAULT; goto config_failure; } } msm_camio_camif_pad_reg_reset_2(); vfestopped = 0; break; case VFE_STOP_CMD: vfestopped = 1; goto config_send; default: break; } } /* QDSP_CMDQUEUE */ } break; case CMD_AXI_CFG_PREVIEW: case CMD_RAW_PICT_AXI_CFG: { axid = data; if (!axid) { rc = -EFAULT; goto config_failure; } axio = kmalloc(sizeof(struct axiout), GFP_ATOMIC); if (!axio) { rc = -ENOMEM; goto config_failure; } if (copy_from_user(axio, (void __user *)(vfecmd->value), sizeof(struct axiout))) { rc = -EFAULT; goto config_done; } vfe_7x_config_axi(OUTPUT_2, axid, axio); cmd_data = axio; } break; case CMD_AXI_CFG_SNAP: { axid = data; if (!axid) { rc = -EFAULT; goto config_failure; } axio = kmalloc(sizeof(struct axiout), GFP_ATOMIC); if (!axio) { rc = -ENOMEM; goto config_failure; } if (copy_from_user(axio, (void __user *)(vfecmd->value), sizeof(struct axiout))) { rc = -EFAULT; goto config_done; } vfe_7x_config_axi(OUTPUT_1_AND_2, axid, axio); cmd_data = axio; } break; default: break; } /* switch */ if (vfestopped) goto config_done; config_send: CDBG("send adsp command = %d\n", *(uint32_t *)cmd_data); if (vfetask_state) rc = msm_adsp_write(vfe_mod, vfecmd->queue, cmd_data, vfecmd->length); config_done: if (cmd_data_alloc != NULL) kfree(cmd_data_alloc); config_failure: kfree(scfg); kfree(axio); kfree(vfecmd); return rc; } void msm_camvfe_fn_init(struct msm_camvfe_fn *fptr, void *data) { mutex_init(&vfe_lock); fptr->vfe_init = vfe_7x_init; fptr->vfe_enable = vfe_7x_enable; fptr->vfe_config = vfe_7x_config; fptr->vfe_disable = vfe_7x_disable; fptr->vfe_release = vfe_7x_release; vfe_syncdata = data; } void msm_camvpe_fn_init(struct msm_camvpe_fn *fptr, void *data) { fptr->vpe_reg = NULL; fptr->send_frame_to_vpe = NULL; fptr->vpe_config = NULL; fptr->vpe_cfg_update 
= NULL; fptr->dis = NULL; }
gpl-2.0
lawnn/kernel10c
drivers/dma/ioat/dma_v3.c
1525
38044
/* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * The full GNU General Public License is included in this distribution in * the file called "COPYING". * * BSD LICENSE * * Copyright(c) 2004-2009 Intel Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Support routines for v3+ hardware */ #include <linux/pci.h> #include <linux/gfp.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/prefetch.h> #include "../dmaengine.h" #include "registers.h" #include "hw.h" #include "dma.h" #include "dma_v2.h" /* ioat hardware assumes at least two sources for raid operations */ #define src_cnt_to_sw(x) ((x) + 2) #define src_cnt_to_hw(x) ((x) - 2) /* provide a lookup table for setting the source address in the base or * extended descriptor of an xor or pq descriptor */ static const u8 xor_idx_to_desc = 0xe0; static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 }; static const u8 pq_idx_to_desc = 0xf8; static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 }; static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx) { struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1]; return raw->field[xor_idx_to_field[idx]]; } static void xor_set_src(struct ioat_raw_descriptor *descs[2], dma_addr_t addr, u32 offset, int idx) { struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1]; raw->field[xor_idx_to_field[idx]] = addr + offset; } static dma_addr_t pq_get_src(struct 
ioat_raw_descriptor *descs[2], int idx) { struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1]; return raw->field[pq_idx_to_field[idx]]; } static void pq_set_src(struct ioat_raw_descriptor *descs[2], dma_addr_t addr, u32 offset, u8 coef, int idx) { struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0]; struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1]; raw->field[pq_idx_to_field[idx]] = addr + offset; pq->coef[idx] = coef; } static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, int idx) { struct ioat_chan_common *chan = &ioat->base; struct pci_dev *pdev = chan->device->pdev; size_t len = desc->len; size_t offset = len - desc->hw->size; struct dma_async_tx_descriptor *tx = &desc->txd; enum dma_ctrl_flags flags = tx->flags; switch (desc->hw->ctl_f.op) { case IOAT_OP_COPY: if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */ ioat_dma_unmap(chan, flags, len, desc->hw); break; case IOAT_OP_FILL: { struct ioat_fill_descriptor *hw = desc->fill; if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) ioat_unmap(pdev, hw->dst_addr - offset, len, PCI_DMA_FROMDEVICE, flags, 1); break; } case IOAT_OP_XOR_VAL: case IOAT_OP_XOR: { struct ioat_xor_descriptor *xor = desc->xor; struct ioat_ring_ent *ext; struct ioat_xor_ext_descriptor *xor_ex = NULL; int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt); struct ioat_raw_descriptor *descs[2]; int i; if (src_cnt > 5) { ext = ioat2_get_ring_ent(ioat, idx + 1); xor_ex = ext->xor_ex; } if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { descs[0] = (struct ioat_raw_descriptor *) xor; descs[1] = (struct ioat_raw_descriptor *) xor_ex; for (i = 0; i < src_cnt; i++) { dma_addr_t src = xor_get_src(descs, i); ioat_unmap(pdev, src - offset, len, PCI_DMA_TODEVICE, flags, 0); } /* dest is a source in xor validate operations */ if (xor->ctl_f.op == IOAT_OP_XOR_VAL) { ioat_unmap(pdev, xor->dst_addr - offset, len, PCI_DMA_TODEVICE, flags, 1); break; } } if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) 
ioat_unmap(pdev, xor->dst_addr - offset, len, PCI_DMA_FROMDEVICE, flags, 1); break; } case IOAT_OP_PQ_VAL: case IOAT_OP_PQ: { struct ioat_pq_descriptor *pq = desc->pq; struct ioat_ring_ent *ext; struct ioat_pq_ext_descriptor *pq_ex = NULL; int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt); struct ioat_raw_descriptor *descs[2]; int i; if (src_cnt > 3) { ext = ioat2_get_ring_ent(ioat, idx + 1); pq_ex = ext->pq_ex; } /* in the 'continue' case don't unmap the dests as sources */ if (dmaf_p_disabled_continue(flags)) src_cnt--; else if (dmaf_continue(flags)) src_cnt -= 3; if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { descs[0] = (struct ioat_raw_descriptor *) pq; descs[1] = (struct ioat_raw_descriptor *) pq_ex; for (i = 0; i < src_cnt; i++) { dma_addr_t src = pq_get_src(descs, i); ioat_unmap(pdev, src - offset, len, PCI_DMA_TODEVICE, flags, 0); } /* the dests are sources in pq validate operations */ if (pq->ctl_f.op == IOAT_OP_XOR_VAL) { if (!(flags & DMA_PREP_PQ_DISABLE_P)) ioat_unmap(pdev, pq->p_addr - offset, len, PCI_DMA_TODEVICE, flags, 0); if (!(flags & DMA_PREP_PQ_DISABLE_Q)) ioat_unmap(pdev, pq->q_addr - offset, len, PCI_DMA_TODEVICE, flags, 0); break; } } if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { if (!(flags & DMA_PREP_PQ_DISABLE_P)) ioat_unmap(pdev, pq->p_addr - offset, len, PCI_DMA_BIDIRECTIONAL, flags, 1); if (!(flags & DMA_PREP_PQ_DISABLE_Q)) ioat_unmap(pdev, pq->q_addr - offset, len, PCI_DMA_BIDIRECTIONAL, flags, 1); } break; } default: dev_err(&pdev->dev, "%s: unknown op type: %#x\n", __func__, desc->hw->ctl_f.op); } } static bool desc_has_ext(struct ioat_ring_ent *desc) { struct ioat_dma_descriptor *hw = desc->hw; if (hw->ctl_f.op == IOAT_OP_XOR || hw->ctl_f.op == IOAT_OP_XOR_VAL) { struct ioat_xor_descriptor *xor = desc->xor; if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5) return true; } else if (hw->ctl_f.op == IOAT_OP_PQ || hw->ctl_f.op == IOAT_OP_PQ_VAL) { struct ioat_pq_descriptor *pq = desc->pq; if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3) return true; } return 
false; } /** * __cleanup - reclaim used descriptors * @ioat: channel (ring) to clean * * The difference from the dma_v2.c __cleanup() is that this routine * handles extended descriptors and dma-unmapping raid operations. */ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) { struct ioat_chan_common *chan = &ioat->base; struct ioat_ring_ent *desc; bool seen_current = false; int idx = ioat->tail, i; u16 active; dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", __func__, ioat->head, ioat->tail, ioat->issued); active = ioat2_ring_active(ioat); for (i = 0; i < active && !seen_current; i++) { struct dma_async_tx_descriptor *tx; smp_read_barrier_depends(); prefetch(ioat2_get_ring_ent(ioat, idx + i + 1)); desc = ioat2_get_ring_ent(ioat, idx + i); dump_desc_dbg(ioat, desc); tx = &desc->txd; if (tx->cookie) { dma_cookie_complete(tx); ioat3_dma_unmap(ioat, desc, idx + i); if (tx->callback) { tx->callback(tx->callback_param); tx->callback = NULL; } } if (tx->phys == phys_complete) seen_current = true; /* skip extended descriptors */ if (desc_has_ext(desc)) { BUG_ON(i + 1 >= active); i++; } } smp_mb(); /* finish all descriptor reads before incrementing tail */ ioat->tail = idx + i; BUG_ON(active && !seen_current); /* no active descs have written a completion? 
*/ chan->last_completion = phys_complete; if (active - i == 0) { dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", __func__); clear_bit(IOAT_COMPLETION_PENDING, &chan->state); mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); } /* 5 microsecond delay per pending descriptor */ writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK), chan->device->reg_base + IOAT_INTRDELAY_OFFSET); } static void ioat3_cleanup(struct ioat2_dma_chan *ioat) { struct ioat_chan_common *chan = &ioat->base; dma_addr_t phys_complete; spin_lock_bh(&chan->cleanup_lock); if (ioat_cleanup_preamble(chan, &phys_complete)) __cleanup(ioat, phys_complete); spin_unlock_bh(&chan->cleanup_lock); } static void ioat3_cleanup_event(unsigned long data) { struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); ioat3_cleanup(ioat); writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); } static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) { struct ioat_chan_common *chan = &ioat->base; dma_addr_t phys_complete; ioat2_quiesce(chan, 0); if (ioat_cleanup_preamble(chan, &phys_complete)) __cleanup(ioat, phys_complete); __ioat2_restart_chan(ioat); } static void ioat3_timer_event(unsigned long data) { struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); struct ioat_chan_common *chan = &ioat->base; if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { dma_addr_t phys_complete; u64 status; status = ioat_chansts(chan); /* when halted due to errors check for channel * programming errors before advancing the completion state */ if (is_ioat_halted(status)) { u32 chanerr; chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); dev_err(to_dev(chan), "%s: Channel halted (%x)\n", __func__, chanerr); if (test_bit(IOAT_RUN, &chan->state)) BUG_ON(is_ioat_bug(chanerr)); else /* we never got off the ground */ return; } /* if we haven't made progress and we have already * acknowledged a pending completion once, then be more * forceful with a restart */ spin_lock_bh(&chan->cleanup_lock); if 
(ioat_cleanup_preamble(chan, &phys_complete)) __cleanup(ioat, phys_complete); else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) { spin_lock_bh(&ioat->prep_lock); ioat3_restart_channel(ioat); spin_unlock_bh(&ioat->prep_lock); } else { set_bit(IOAT_COMPLETION_ACK, &chan->state); mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); } spin_unlock_bh(&chan->cleanup_lock); } else { u16 active; /* if the ring is idle, empty, and oversized try to step * down the size */ spin_lock_bh(&chan->cleanup_lock); spin_lock_bh(&ioat->prep_lock); active = ioat2_ring_active(ioat); if (active == 0 && ioat->alloc_order > ioat_get_alloc_order()) reshape_ring(ioat, ioat->alloc_order-1); spin_unlock_bh(&ioat->prep_lock); spin_unlock_bh(&chan->cleanup_lock); /* keep shrinking until we get back to our minimum * default size */ if (ioat->alloc_order > ioat_get_alloc_order()) mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); } } static enum dma_status ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct ioat2_dma_chan *ioat = to_ioat2_chan(c); enum dma_status ret; ret = dma_cookie_status(c, cookie, txstate); if (ret == DMA_SUCCESS) return ret; ioat3_cleanup(ioat); return dma_cookie_status(c, cookie, txstate); } static struct dma_async_tx_descriptor * ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value, size_t len, unsigned long flags) { struct ioat2_dma_chan *ioat = to_ioat2_chan(c); struct ioat_ring_ent *desc; size_t total_len = len; struct ioat_fill_descriptor *fill; u64 src_data = (0x0101010101010101ULL) * (value & 0xff); int num_descs, idx, i; num_descs = ioat2_xferlen_to_descs(ioat, len); if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0) idx = ioat->head; else return NULL; i = 0; do { size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log); desc = ioat2_get_ring_ent(ioat, idx + i); fill = desc->fill; fill->size = xfer_size; fill->src_data = src_data; fill->dst_addr = dest; fill->ctl = 0; 
fill->ctl_f.op = IOAT_OP_FILL; len -= xfer_size; dest += xfer_size; dump_desc_dbg(ioat, desc); } while (++i < num_descs); desc->txd.flags = flags; desc->len = total_len; fill->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); fill->ctl_f.fence = !!(flags & DMA_PREP_FENCE); fill->ctl_f.compl_write = 1; dump_desc_dbg(ioat, desc); /* we leave the channel locked to ensure in order submission */ return &desc->txd; } static struct dma_async_tx_descriptor * __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result, dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, size_t len, unsigned long flags) { struct ioat2_dma_chan *ioat = to_ioat2_chan(c); struct ioat_ring_ent *compl_desc; struct ioat_ring_ent *desc; struct ioat_ring_ent *ext; size_t total_len = len; struct ioat_xor_descriptor *xor; struct ioat_xor_ext_descriptor *xor_ex = NULL; struct ioat_dma_descriptor *hw; int num_descs, with_ext, idx, i; u32 offset = 0; u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR; BUG_ON(src_cnt < 2); num_descs = ioat2_xferlen_to_descs(ioat, len); /* we need 2x the number of descriptors to cover greater than 5 * sources */ if (src_cnt > 5) { with_ext = 1; num_descs *= 2; } else with_ext = 0; /* completion writes from the raid engine may pass completion * writes from the legacy engine, so we need one extra null * (legacy) descriptor to ensure all completion writes arrive in * order. 
*/ if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0) idx = ioat->head; else return NULL; i = 0; do { struct ioat_raw_descriptor *descs[2]; size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log); int s; desc = ioat2_get_ring_ent(ioat, idx + i); xor = desc->xor; /* save a branch by unconditionally retrieving the * extended descriptor xor_set_src() knows to not write * to it in the single descriptor case */ ext = ioat2_get_ring_ent(ioat, idx + i + 1); xor_ex = ext->xor_ex; descs[0] = (struct ioat_raw_descriptor *) xor; descs[1] = (struct ioat_raw_descriptor *) xor_ex; for (s = 0; s < src_cnt; s++) xor_set_src(descs, src[s], offset, s); xor->size = xfer_size; xor->dst_addr = dest + offset; xor->ctl = 0; xor->ctl_f.op = op; xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt); len -= xfer_size; offset += xfer_size; dump_desc_dbg(ioat, desc); } while ((i += 1 + with_ext) < num_descs); /* last xor descriptor carries the unmap parameters and fence bit */ desc->txd.flags = flags; desc->len = total_len; if (result) desc->result = result; xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE); /* completion descriptor carries interrupt bit */ compl_desc = ioat2_get_ring_ent(ioat, idx + i); compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; hw = compl_desc->hw; hw->ctl = 0; hw->ctl_f.null = 1; hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); hw->ctl_f.compl_write = 1; hw->size = NULL_DESC_BUFFER_SIZE; dump_desc_dbg(ioat, compl_desc); /* we leave the channel locked to ensure in order submission */ return &compl_desc->txd; } static struct dma_async_tx_descriptor * ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, size_t len, unsigned long flags) { return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags); } struct dma_async_tx_descriptor * ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, size_t len, enum sum_check_flags *result, unsigned long flags) { /* the cleanup routine 
only sets bits on validate failure, it * does not clear bits on validate success... so clear it here */ *result = 0; return __ioat3_prep_xor_lock(chan, result, src[0], &src[1], src_cnt - 1, len, flags); } static void dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext) { struct device *dev = to_dev(&ioat->base); struct ioat_pq_descriptor *pq = desc->pq; struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL; struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex }; int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt); int i; dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" " sz: %#x ctl: %#x (op: %d int: %d compl: %d pq: '%s%s' src_cnt: %d)\n", desc_id(desc), (unsigned long long) desc->txd.phys, (unsigned long long) (pq_ex ? pq_ex->next : pq->next), desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en, pq->ctl_f.compl_write, pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q", pq->ctl_f.src_cnt); for (i = 0; i < src_cnt; i++) dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i, (unsigned long long) pq_get_src(descs, i), pq->coef[i]); dev_dbg(dev, "\tP: %#llx\n", pq->p_addr); dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr); } static struct dma_async_tx_descriptor * __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, const dma_addr_t *dst, const dma_addr_t *src, unsigned int src_cnt, const unsigned char *scf, size_t len, unsigned long flags) { struct ioat2_dma_chan *ioat = to_ioat2_chan(c); struct ioat_chan_common *chan = &ioat->base; struct ioat_ring_ent *compl_desc; struct ioat_ring_ent *desc; struct ioat_ring_ent *ext; size_t total_len = len; struct ioat_pq_descriptor *pq; struct ioat_pq_ext_descriptor *pq_ex = NULL; struct ioat_dma_descriptor *hw; u32 offset = 0; u8 op = result ? 
IOAT_OP_PQ_VAL : IOAT_OP_PQ; int i, s, idx, with_ext, num_descs; dev_dbg(to_dev(chan), "%s\n", __func__); /* the engine requires at least two sources (we provide * at least 1 implied source in the DMA_PREP_CONTINUE case) */ BUG_ON(src_cnt + dmaf_continue(flags) < 2); num_descs = ioat2_xferlen_to_descs(ioat, len); /* we need 2x the number of descriptors to cover greater than 3 * sources (we need 1 extra source in the q-only continuation * case and 3 extra sources in the p+q continuation case. */ if (src_cnt + dmaf_p_disabled_continue(flags) > 3 || (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) { with_ext = 1; num_descs *= 2; } else with_ext = 0; /* completion writes from the raid engine may pass completion * writes from the legacy engine, so we need one extra null * (legacy) descriptor to ensure all completion writes arrive in * order. */ if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0) idx = ioat->head; else return NULL; i = 0; do { struct ioat_raw_descriptor *descs[2]; size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log); desc = ioat2_get_ring_ent(ioat, idx + i); pq = desc->pq; /* save a branch by unconditionally retrieving the * extended descriptor pq_set_src() knows to not write * to it in the single descriptor case */ ext = ioat2_get_ring_ent(ioat, idx + i + with_ext); pq_ex = ext->pq_ex; descs[0] = (struct ioat_raw_descriptor *) pq; descs[1] = (struct ioat_raw_descriptor *) pq_ex; for (s = 0; s < src_cnt; s++) pq_set_src(descs, src[s], offset, scf[s], s); /* see the comment for dma_maxpq in include/linux/dmaengine.h */ if (dmaf_p_disabled_continue(flags)) pq_set_src(descs, dst[1], offset, 1, s++); else if (dmaf_continue(flags)) { pq_set_src(descs, dst[0], offset, 0, s++); pq_set_src(descs, dst[1], offset, 1, s++); pq_set_src(descs, dst[1], offset, 0, s++); } pq->size = xfer_size; pq->p_addr = dst[0] + offset; pq->q_addr = dst[1] + offset; pq->ctl = 0; pq->ctl_f.op = op; pq->ctl_f.src_cnt = src_cnt_to_hw(s); 
pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); len -= xfer_size; offset += xfer_size; } while ((i += 1 + with_ext) < num_descs); /* last pq descriptor carries the unmap parameters and fence bit */ desc->txd.flags = flags; desc->len = total_len; if (result) desc->result = result; pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); dump_pq_desc_dbg(ioat, desc, ext); /* completion descriptor carries interrupt bit */ compl_desc = ioat2_get_ring_ent(ioat, idx + i); compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; hw = compl_desc->hw; hw->ctl = 0; hw->ctl_f.null = 1; hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); hw->ctl_f.compl_write = 1; hw->size = NULL_DESC_BUFFER_SIZE; dump_desc_dbg(ioat, compl_desc); /* we leave the channel locked to ensure in order submission */ return &compl_desc->txd; } static struct dma_async_tx_descriptor * ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, unsigned int src_cnt, const unsigned char *scf, size_t len, unsigned long flags) { /* specify valid address for disabled result */ if (flags & DMA_PREP_PQ_DISABLE_P) dst[0] = dst[1]; if (flags & DMA_PREP_PQ_DISABLE_Q) dst[1] = dst[0]; /* handle the single source multiply case from the raid6 * recovery path */ if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) { dma_addr_t single_source[2]; unsigned char single_source_coef[2]; BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q); single_source[0] = src[0]; single_source[1] = src[0]; single_source_coef[0] = scf[0]; single_source_coef[1] = 0; return __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2, single_source_coef, len, flags); } else return __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, scf, len, flags); } struct dma_async_tx_descriptor * ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, unsigned int src_cnt, const unsigned char *scf, size_t len, enum sum_check_flags *pqres, unsigned long flags) { /* specify valid address for 
disabled result */ if (flags & DMA_PREP_PQ_DISABLE_P) pq[0] = pq[1]; if (flags & DMA_PREP_PQ_DISABLE_Q) pq[1] = pq[0]; /* the cleanup routine only sets bits on validate failure, it * does not clear bits on validate success... so clear it here */ *pqres = 0; return __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, flags); } static struct dma_async_tx_descriptor * ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, unsigned int src_cnt, size_t len, unsigned long flags) { unsigned char scf[src_cnt]; dma_addr_t pq[2]; memset(scf, 0, src_cnt); pq[0] = dst; flags |= DMA_PREP_PQ_DISABLE_Q; pq[1] = dst; /* specify valid address for disabled result */ return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, flags); } struct dma_async_tx_descriptor * ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, size_t len, enum sum_check_flags *result, unsigned long flags) { unsigned char scf[src_cnt]; dma_addr_t pq[2]; /* the cleanup routine only sets bits on validate failure, it * does not clear bits on validate success... 
so clear it here */ *result = 0; memset(scf, 0, src_cnt); pq[0] = src[0]; flags |= DMA_PREP_PQ_DISABLE_Q; pq[1] = pq[0]; /* specify valid address for disabled result */ return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf, len, flags); } static struct dma_async_tx_descriptor * ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags) { struct ioat2_dma_chan *ioat = to_ioat2_chan(c); struct ioat_ring_ent *desc; struct ioat_dma_descriptor *hw; if (ioat2_check_space_lock(ioat, 1) == 0) desc = ioat2_get_ring_ent(ioat, ioat->head); else return NULL; hw = desc->hw; hw->ctl = 0; hw->ctl_f.null = 1; hw->ctl_f.int_en = 1; hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE); hw->ctl_f.compl_write = 1; hw->size = NULL_DESC_BUFFER_SIZE; hw->src_addr = 0; hw->dst_addr = 0; desc->txd.flags = flags; desc->len = 1; dump_desc_dbg(ioat, desc); /* we leave the channel locked to ensure in order submission */ return &desc->txd; } static void __devinit ioat3_dma_test_callback(void *dma_async_param) { struct completion *cmp = dma_async_param; complete(cmp); } #define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) { int i, src_idx; struct page *dest; struct page *xor_srcs[IOAT_NUM_SRC_TEST]; struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1]; dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1]; dma_addr_t dma_addr, dest_dma; struct dma_async_tx_descriptor *tx; struct dma_chan *dma_chan; dma_cookie_t cookie; u8 cmp_byte = 0; u32 cmp_word; u32 xor_val_result; int err = 0; struct completion cmp; unsigned long tmo; struct device *dev = &device->pdev->dev; struct dma_device *dma = &device->common; dev_dbg(dev, "%s\n", __func__); if (!dma_has_cap(DMA_XOR, dma->cap_mask)) return 0; for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) { xor_srcs[src_idx] = alloc_page(GFP_KERNEL); if (!xor_srcs[src_idx]) { while (src_idx--) __free_page(xor_srcs[src_idx]); return -ENOMEM; } } dest = alloc_page(GFP_KERNEL); if (!dest) 
{ while (src_idx--) __free_page(xor_srcs[src_idx]); return -ENOMEM; } /* Fill in src buffers */ for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) { u8 *ptr = page_address(xor_srcs[src_idx]); for (i = 0; i < PAGE_SIZE; i++) ptr[i] = (1 << src_idx); } for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) cmp_byte ^= (u8) (1 << src_idx); cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | (cmp_byte << 8) | cmp_byte; memset(page_address(dest), 0, PAGE_SIZE); dma_chan = container_of(dma->channels.next, struct dma_chan, device_node); if (dma->device_alloc_chan_resources(dma_chan) < 1) { err = -ENODEV; goto out; } /* test xor */ dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE); for (i = 0; i < IOAT_NUM_SRC_TEST; i++) dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE, DMA_TO_DEVICE); tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, IOAT_NUM_SRC_TEST, PAGE_SIZE, DMA_PREP_INTERRUPT); if (!tx) { dev_err(dev, "Self-test xor prep failed\n"); err = -ENODEV; goto free_resources; } async_tx_ack(tx); init_completion(&cmp); tx->callback = ioat3_dma_test_callback; tx->callback_param = &cmp; cookie = tx->tx_submit(tx); if (cookie < 0) { dev_err(dev, "Self-test xor setup failed\n"); err = -ENODEV; goto free_resources; } dma->device_issue_pending(dma_chan); tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_err(dev, "Self-test xor timed out\n"); err = -ENODEV; goto free_resources; } dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { u32 *ptr = page_address(dest); if (ptr[i] != cmp_word) { dev_err(dev, "Self-test xor failed compare\n"); err = -ENODEV; goto free_resources; } } dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); /* skip validate if the capability is not present */ if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask)) goto free_resources; /* validate the 
sources with the destintation page */ for (i = 0; i < IOAT_NUM_SRC_TEST; i++) xor_val_srcs[i] = xor_srcs[i]; xor_val_srcs[i] = dest; xor_val_result = 1; for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, DMA_TO_DEVICE); tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, &xor_val_result, DMA_PREP_INTERRUPT); if (!tx) { dev_err(dev, "Self-test zero prep failed\n"); err = -ENODEV; goto free_resources; } async_tx_ack(tx); init_completion(&cmp); tx->callback = ioat3_dma_test_callback; tx->callback_param = &cmp; cookie = tx->tx_submit(tx); if (cookie < 0) { dev_err(dev, "Self-test zero setup failed\n"); err = -ENODEV; goto free_resources; } dma->device_issue_pending(dma_chan); tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_err(dev, "Self-test validate timed out\n"); err = -ENODEV; goto free_resources; } if (xor_val_result != 0) { dev_err(dev, "Self-test validate failed compare\n"); err = -ENODEV; goto free_resources; } /* skip memset if the capability is not present */ if (!dma_has_cap(DMA_MEMSET, dma_chan->device->cap_mask)) goto free_resources; /* test memset */ dma_addr = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE); tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, DMA_PREP_INTERRUPT); if (!tx) { dev_err(dev, "Self-test memset prep failed\n"); err = -ENODEV; goto free_resources; } async_tx_ack(tx); init_completion(&cmp); tx->callback = ioat3_dma_test_callback; tx->callback_param = &cmp; cookie = tx->tx_submit(tx); if (cookie < 0) { dev_err(dev, "Self-test memset setup failed\n"); err = -ENODEV; goto free_resources; } dma->device_issue_pending(dma_chan); tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_err(dev, "Self-test memset timed out\n"); err = -ENODEV; goto 
free_resources; } for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) { u32 *ptr = page_address(dest); if (ptr[i]) { dev_err(dev, "Self-test memset failed compare\n"); err = -ENODEV; goto free_resources; } } /* test for non-zero parity sum */ xor_val_result = 0; for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, DMA_TO_DEVICE); tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, &xor_val_result, DMA_PREP_INTERRUPT); if (!tx) { dev_err(dev, "Self-test 2nd zero prep failed\n"); err = -ENODEV; goto free_resources; } async_tx_ack(tx); init_completion(&cmp); tx->callback = ioat3_dma_test_callback; tx->callback_param = &cmp; cookie = tx->tx_submit(tx); if (cookie < 0) { dev_err(dev, "Self-test 2nd zero setup failed\n"); err = -ENODEV; goto free_resources; } dma->device_issue_pending(dma_chan); tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_err(dev, "Self-test 2nd validate timed out\n"); err = -ENODEV; goto free_resources; } if (xor_val_result != SUM_CHECK_P_RESULT) { dev_err(dev, "Self-test validate failed compare\n"); err = -ENODEV; goto free_resources; } free_resources: dma->device_free_chan_resources(dma_chan); out: src_idx = IOAT_NUM_SRC_TEST; while (src_idx--) __free_page(xor_srcs[src_idx]); __free_page(dest); return err; } static int __devinit ioat3_dma_self_test(struct ioatdma_device *device) { int rc = ioat_dma_self_test(device); if (rc) return rc; rc = ioat_xor_val_self_test(device); if (rc) return rc; return 0; } static int ioat3_reset_hw(struct ioat_chan_common *chan) { /* throw away whatever the channel was doing and get it * initialized, with ioat3 specific workarounds */ struct ioatdma_device *device = chan->device; struct pci_dev *pdev = device->pdev; u32 chanerr; u16 dev_id; int err; ioat2_quiesce(chan, msecs_to_jiffies(100)); chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); 
writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); /* -= IOAT ver.3 workarounds =- */ /* Write CHANERRMSK_INT with 3E07h to mask out the errors * that can cause stability issues for IOAT ver.3, and clear any * pending errors */ pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07); err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); if (err) { dev_err(&pdev->dev, "channel error register unreachable\n"); return err; } pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr); /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit * (workaround for spurious config parity error after restart) */ pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10); return ioat2_reset_sync(chan, msecs_to_jiffies(200)); } static bool is_jf_ioat(struct pci_dev *pdev) { switch (pdev->device) { case PCI_DEVICE_ID_INTEL_IOAT_JSF0: case PCI_DEVICE_ID_INTEL_IOAT_JSF1: case PCI_DEVICE_ID_INTEL_IOAT_JSF2: case PCI_DEVICE_ID_INTEL_IOAT_JSF3: case PCI_DEVICE_ID_INTEL_IOAT_JSF4: case PCI_DEVICE_ID_INTEL_IOAT_JSF5: case PCI_DEVICE_ID_INTEL_IOAT_JSF6: case PCI_DEVICE_ID_INTEL_IOAT_JSF7: case PCI_DEVICE_ID_INTEL_IOAT_JSF8: case PCI_DEVICE_ID_INTEL_IOAT_JSF9: return true; default: return false; } } static bool is_snb_ioat(struct pci_dev *pdev) { switch (pdev->device) { case PCI_DEVICE_ID_INTEL_IOAT_SNB0: case PCI_DEVICE_ID_INTEL_IOAT_SNB1: case PCI_DEVICE_ID_INTEL_IOAT_SNB2: case PCI_DEVICE_ID_INTEL_IOAT_SNB3: case PCI_DEVICE_ID_INTEL_IOAT_SNB4: case PCI_DEVICE_ID_INTEL_IOAT_SNB5: case PCI_DEVICE_ID_INTEL_IOAT_SNB6: case PCI_DEVICE_ID_INTEL_IOAT_SNB7: case PCI_DEVICE_ID_INTEL_IOAT_SNB8: case PCI_DEVICE_ID_INTEL_IOAT_SNB9: return true; default: return false; } } int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) { struct pci_dev *pdev = device->pdev; int dca_en = system_has_dca_enabled(pdev); struct dma_device *dma; 
struct dma_chan *c; struct ioat_chan_common *chan; bool is_raid_device = false; int err; u32 cap; device->enumerate_channels = ioat2_enumerate_channels; device->reset_hw = ioat3_reset_hw; device->self_test = ioat3_dma_self_test; dma = &device->common; dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; dma->device_issue_pending = ioat2_issue_pending; dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; dma->device_free_chan_resources = ioat2_free_chan_resources; if (is_jf_ioat(pdev) || is_snb_ioat(pdev)) dma->copy_align = 6; dma_cap_set(DMA_INTERRUPT, dma->cap_mask); dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock; cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET); /* dca is incompatible with raid operations */ if (dca_en && (cap & (IOAT_CAP_XOR|IOAT_CAP_PQ))) cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ); if (cap & IOAT_CAP_XOR) { is_raid_device = true; dma->max_xor = 8; dma->xor_align = 6; dma_cap_set(DMA_XOR, dma->cap_mask); dma->device_prep_dma_xor = ioat3_prep_xor; dma_cap_set(DMA_XOR_VAL, dma->cap_mask); dma->device_prep_dma_xor_val = ioat3_prep_xor_val; } if (cap & IOAT_CAP_PQ) { is_raid_device = true; dma_set_maxpq(dma, 8, 0); dma->pq_align = 6; dma_cap_set(DMA_PQ, dma->cap_mask); dma->device_prep_dma_pq = ioat3_prep_pq; dma_cap_set(DMA_PQ_VAL, dma->cap_mask); dma->device_prep_dma_pq_val = ioat3_prep_pq_val; if (!(cap & IOAT_CAP_XOR)) { dma->max_xor = 8; dma->xor_align = 6; dma_cap_set(DMA_XOR, dma->cap_mask); dma->device_prep_dma_xor = ioat3_prep_pqxor; dma_cap_set(DMA_XOR_VAL, dma->cap_mask); dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val; } } if (is_raid_device && (cap & IOAT_CAP_FILL_BLOCK)) { dma_cap_set(DMA_MEMSET, dma->cap_mask); dma->device_prep_dma_memset = ioat3_prep_memset_lock; } if (is_raid_device) { dma->device_tx_status = ioat3_tx_status; device->cleanup_fn = ioat3_cleanup_event; device->timer_fn = ioat3_timer_event; } else { dma->device_tx_status = ioat_dma_tx_status; device->cleanup_fn = ioat2_cleanup_event; 
device->timer_fn = ioat2_timer_event; } #ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA dma_cap_clear(DMA_PQ_VAL, dma->cap_mask); dma->device_prep_dma_pq_val = NULL; #endif #ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA dma_cap_clear(DMA_XOR_VAL, dma->cap_mask); dma->device_prep_dma_xor_val = NULL; #endif err = ioat_probe(device); if (err) return err; ioat_set_tcp_copy_break(262144); list_for_each_entry(c, &dma->channels, device_node) { chan = to_chan_common(c); writel(IOAT_DMA_DCA_ANY_CPU, chan->reg_base + IOAT_DCACTRL_OFFSET); } err = ioat_register(device); if (err) return err; ioat_kobject_add(device, &ioat2_ktype); if (dca) device->dca = ioat3_dca_init(pdev, device->reg_base); return 0; }
gpl-2.0
SlimRoms/kernel_samsung_aries
drivers/mfd/max8997-irq.c
2037
10848
/* * max8997-irq.c - Interrupt controller support for MAX8997 * * Copyright (C) 2011 Samsung Electronics Co.Ltd * MyungJoo Ham <myungjoo.ham@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * This driver is based on max8998-irq.c */ #include <linux/err.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/mfd/max8997.h> #include <linux/mfd/max8997-private.h> static const u8 max8997_mask_reg[] = { [PMIC_INT1] = MAX8997_REG_INT1MSK, [PMIC_INT2] = MAX8997_REG_INT2MSK, [PMIC_INT3] = MAX8997_REG_INT3MSK, [PMIC_INT4] = MAX8997_REG_INT4MSK, [FUEL_GAUGE] = MAX8997_REG_INVALID, [MUIC_INT1] = MAX8997_MUIC_REG_INTMASK1, [MUIC_INT2] = MAX8997_MUIC_REG_INTMASK2, [MUIC_INT3] = MAX8997_MUIC_REG_INTMASK3, [GPIO_LOW] = MAX8997_REG_INVALID, [GPIO_HI] = MAX8997_REG_INVALID, [FLASH_STATUS] = MAX8997_REG_INVALID, }; static struct i2c_client *get_i2c(struct max8997_dev *max8997, enum max8997_irq_source src) { switch (src) { case PMIC_INT1 ... PMIC_INT4: return max8997->i2c; case FUEL_GAUGE: return NULL; case MUIC_INT1 ... MUIC_INT3: return max8997->muic; case GPIO_LOW ... 
GPIO_HI: return max8997->i2c; case FLASH_STATUS: return max8997->i2c; default: return ERR_PTR(-EINVAL); } return ERR_PTR(-EINVAL); } struct max8997_irq_data { int mask; enum max8997_irq_source group; }; #define DECLARE_IRQ(idx, _group, _mask) \ [(idx)] = { .group = (_group), .mask = (_mask) } static const struct max8997_irq_data max8997_irqs[] = { DECLARE_IRQ(MAX8997_PMICIRQ_PWRONR, PMIC_INT1, 1 << 0), DECLARE_IRQ(MAX8997_PMICIRQ_PWRONF, PMIC_INT1, 1 << 1), DECLARE_IRQ(MAX8997_PMICIRQ_PWRON1SEC, PMIC_INT1, 1 << 3), DECLARE_IRQ(MAX8997_PMICIRQ_JIGONR, PMIC_INT1, 1 << 4), DECLARE_IRQ(MAX8997_PMICIRQ_JIGONF, PMIC_INT1, 1 << 5), DECLARE_IRQ(MAX8997_PMICIRQ_LOWBAT2, PMIC_INT1, 1 << 6), DECLARE_IRQ(MAX8997_PMICIRQ_LOWBAT1, PMIC_INT1, 1 << 7), DECLARE_IRQ(MAX8997_PMICIRQ_JIGR, PMIC_INT2, 1 << 0), DECLARE_IRQ(MAX8997_PMICIRQ_JIGF, PMIC_INT2, 1 << 1), DECLARE_IRQ(MAX8997_PMICIRQ_MR, PMIC_INT2, 1 << 2), DECLARE_IRQ(MAX8997_PMICIRQ_DVS1OK, PMIC_INT2, 1 << 3), DECLARE_IRQ(MAX8997_PMICIRQ_DVS2OK, PMIC_INT2, 1 << 4), DECLARE_IRQ(MAX8997_PMICIRQ_DVS3OK, PMIC_INT2, 1 << 5), DECLARE_IRQ(MAX8997_PMICIRQ_DVS4OK, PMIC_INT2, 1 << 6), DECLARE_IRQ(MAX8997_PMICIRQ_CHGINS, PMIC_INT3, 1 << 0), DECLARE_IRQ(MAX8997_PMICIRQ_CHGRM, PMIC_INT3, 1 << 1), DECLARE_IRQ(MAX8997_PMICIRQ_DCINOVP, PMIC_INT3, 1 << 2), DECLARE_IRQ(MAX8997_PMICIRQ_TOPOFFR, PMIC_INT3, 1 << 3), DECLARE_IRQ(MAX8997_PMICIRQ_CHGRSTF, PMIC_INT3, 1 << 5), DECLARE_IRQ(MAX8997_PMICIRQ_MBCHGTMEXPD, PMIC_INT3, 1 << 7), DECLARE_IRQ(MAX8997_PMICIRQ_RTC60S, PMIC_INT4, 1 << 0), DECLARE_IRQ(MAX8997_PMICIRQ_RTCA1, PMIC_INT4, 1 << 1), DECLARE_IRQ(MAX8997_PMICIRQ_RTCA2, PMIC_INT4, 1 << 2), DECLARE_IRQ(MAX8997_PMICIRQ_SMPL_INT, PMIC_INT4, 1 << 3), DECLARE_IRQ(MAX8997_PMICIRQ_RTC1S, PMIC_INT4, 1 << 4), DECLARE_IRQ(MAX8997_PMICIRQ_WTSR, PMIC_INT4, 1 << 5), DECLARE_IRQ(MAX8997_MUICIRQ_ADCError, MUIC_INT1, 1 << 2), DECLARE_IRQ(MAX8997_MUICIRQ_ADCLow, MUIC_INT1, 1 << 1), DECLARE_IRQ(MAX8997_MUICIRQ_ADC, MUIC_INT1, 1 << 0), 
DECLARE_IRQ(MAX8997_MUICIRQ_VBVolt, MUIC_INT2, 1 << 4), DECLARE_IRQ(MAX8997_MUICIRQ_DBChg, MUIC_INT2, 1 << 3), DECLARE_IRQ(MAX8997_MUICIRQ_DCDTmr, MUIC_INT2, 1 << 2), DECLARE_IRQ(MAX8997_MUICIRQ_ChgDetRun, MUIC_INT2, 1 << 1), DECLARE_IRQ(MAX8997_MUICIRQ_ChgTyp, MUIC_INT2, 1 << 0), DECLARE_IRQ(MAX8997_MUICIRQ_OVP, MUIC_INT3, 1 << 2), }; static void max8997_irq_lock(struct irq_data *data) { struct max8997_dev *max8997 = irq_get_chip_data(data->irq); mutex_lock(&max8997->irqlock); } static void max8997_irq_sync_unlock(struct irq_data *data) { struct max8997_dev *max8997 = irq_get_chip_data(data->irq); int i; for (i = 0; i < MAX8997_IRQ_GROUP_NR; i++) { u8 mask_reg = max8997_mask_reg[i]; struct i2c_client *i2c = get_i2c(max8997, i); if (mask_reg == MAX8997_REG_INVALID || IS_ERR_OR_NULL(i2c)) continue; max8997->irq_masks_cache[i] = max8997->irq_masks_cur[i]; max8997_write_reg(i2c, max8997_mask_reg[i], max8997->irq_masks_cur[i]); } mutex_unlock(&max8997->irqlock); } static const inline struct max8997_irq_data * irq_to_max8997_irq(struct max8997_dev *max8997, int irq) { return &max8997_irqs[irq - max8997->irq_base]; } static void max8997_irq_mask(struct irq_data *data) { struct max8997_dev *max8997 = irq_get_chip_data(data->irq); const struct max8997_irq_data *irq_data = irq_to_max8997_irq(max8997, data->irq); max8997->irq_masks_cur[irq_data->group] |= irq_data->mask; } static void max8997_irq_unmask(struct irq_data *data) { struct max8997_dev *max8997 = irq_get_chip_data(data->irq); const struct max8997_irq_data *irq_data = irq_to_max8997_irq(max8997, data->irq); max8997->irq_masks_cur[irq_data->group] &= ~irq_data->mask; } static struct irq_chip max8997_irq_chip = { .name = "max8997", .irq_bus_lock = max8997_irq_lock, .irq_bus_sync_unlock = max8997_irq_sync_unlock, .irq_mask = max8997_irq_mask, .irq_unmask = max8997_irq_unmask, }; #define MAX8997_IRQSRC_PMIC (1 << 1) #define MAX8997_IRQSRC_FUELGAUGE (1 << 2) #define MAX8997_IRQSRC_MUIC (1 << 3) #define 
MAX8997_IRQSRC_GPIO (1 << 4) #define MAX8997_IRQSRC_FLASH (1 << 5) static irqreturn_t max8997_irq_thread(int irq, void *data) { struct max8997_dev *max8997 = data; u8 irq_reg[MAX8997_IRQ_GROUP_NR] = {}; u8 irq_src; int ret; int i; ret = max8997_read_reg(max8997->i2c, MAX8997_REG_INTSRC, &irq_src); if (ret < 0) { dev_err(max8997->dev, "Failed to read interrupt source: %d\n", ret); return IRQ_NONE; } if (irq_src & MAX8997_IRQSRC_PMIC) { /* PMIC INT1 ~ INT4 */ max8997_bulk_read(max8997->i2c, MAX8997_REG_INT1, 4, &irq_reg[PMIC_INT1]); } if (irq_src & MAX8997_IRQSRC_FUELGAUGE) { /* * TODO: FUEL GAUGE * * This is to be supported by Max17042 driver. When * an interrupt incurs here, it should be relayed to a * Max17042 device that is connected (probably by * platform-data). However, we do not have interrupt * handling in Max17042 driver currently. The Max17042 IRQ * driver should be ready to be used as a stand-alone device and * a Max8997-dependent device. Because it is not ready in * Max17042-side and it is not too critical in operating * Max8997, we do not implement this in initial releases. 
*/ irq_reg[FUEL_GAUGE] = 0; } if (irq_src & MAX8997_IRQSRC_MUIC) { /* MUIC INT1 ~ INT3 */ max8997_bulk_read(max8997->muic, MAX8997_MUIC_REG_INT1, 3, &irq_reg[MUIC_INT1]); } if (irq_src & MAX8997_IRQSRC_GPIO) { /* GPIO Interrupt */ u8 gpio_info[MAX8997_NUM_GPIO]; irq_reg[GPIO_LOW] = 0; irq_reg[GPIO_HI] = 0; max8997_bulk_read(max8997->i2c, MAX8997_REG_GPIOCNTL1, MAX8997_NUM_GPIO, gpio_info); for (i = 0; i < MAX8997_NUM_GPIO; i++) { bool interrupt = false; switch (gpio_info[i] & MAX8997_GPIO_INT_MASK) { case MAX8997_GPIO_INT_BOTH: if (max8997->gpio_status[i] != gpio_info[i]) interrupt = true; break; case MAX8997_GPIO_INT_RISE: if ((max8997->gpio_status[i] != gpio_info[i]) && (gpio_info[i] & MAX8997_GPIO_DATA_MASK)) interrupt = true; break; case MAX8997_GPIO_INT_FALL: if ((max8997->gpio_status[i] != gpio_info[i]) && !(gpio_info[i] & MAX8997_GPIO_DATA_MASK)) interrupt = true; break; default: break; } if (interrupt) { if (i < 8) irq_reg[GPIO_LOW] |= (1 << i); else irq_reg[GPIO_HI] |= (1 << (i - 8)); } } } if (irq_src & MAX8997_IRQSRC_FLASH) { /* Flash Status Interrupt */ ret = max8997_read_reg(max8997->i2c, MAX8997_REG_FLASHSTATUS, &irq_reg[FLASH_STATUS]); } /* Apply masking */ for (i = 0; i < MAX8997_IRQ_GROUP_NR; i++) irq_reg[i] &= ~max8997->irq_masks_cur[i]; /* Report */ for (i = 0; i < MAX8997_IRQ_NR; i++) { if (irq_reg[max8997_irqs[i].group] & max8997_irqs[i].mask) handle_nested_irq(max8997->irq_base + i); } return IRQ_HANDLED; } int max8997_irq_resume(struct max8997_dev *max8997) { if (max8997->irq && max8997->irq_base) max8997_irq_thread(max8997->irq_base, max8997); return 0; } int max8997_irq_init(struct max8997_dev *max8997) { int i; int cur_irq; int ret; u8 val; if (!max8997->irq) { dev_warn(max8997->dev, "No interrupt specified.\n"); max8997->irq_base = 0; return 0; } if (!max8997->irq_base) { dev_err(max8997->dev, "No interrupt base specified.\n"); return 0; } mutex_init(&max8997->irqlock); /* Mask individual interrupt sources */ for (i = 0; i < 
MAX8997_IRQ_GROUP_NR; i++) { struct i2c_client *i2c; max8997->irq_masks_cur[i] = 0xff; max8997->irq_masks_cache[i] = 0xff; i2c = get_i2c(max8997, i); if (IS_ERR_OR_NULL(i2c)) continue; if (max8997_mask_reg[i] == MAX8997_REG_INVALID) continue; max8997_write_reg(i2c, max8997_mask_reg[i], 0xff); } for (i = 0; i < MAX8997_NUM_GPIO; i++) { max8997->gpio_status[i] = (max8997_read_reg(max8997->i2c, MAX8997_REG_GPIOCNTL1 + i, &val) & MAX8997_GPIO_DATA_MASK) ? true : false; } /* Register with genirq */ for (i = 0; i < MAX8997_IRQ_NR; i++) { cur_irq = i + max8997->irq_base; irq_set_chip_data(cur_irq, max8997); irq_set_chip_and_handler(cur_irq, &max8997_irq_chip, handle_edge_irq); irq_set_nested_thread(cur_irq, 1); #ifdef CONFIG_ARM set_irq_flags(cur_irq, IRQF_VALID); #else irq_set_noprobe(cur_irq); #endif } ret = request_threaded_irq(max8997->irq, NULL, max8997_irq_thread, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "max8997-irq", max8997); if (ret) { dev_err(max8997->dev, "Failed to request IRQ %d: %d\n", max8997->irq, ret); return ret; } if (!max8997->ono) return 0; ret = request_threaded_irq(max8997->ono, NULL, max8997_irq_thread, IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT, "max8997-ono", max8997); if (ret) dev_err(max8997->dev, "Failed to request ono-IRQ %d: %d\n", max8997->ono, ret); return 0; } void max8997_irq_exit(struct max8997_dev *max8997) { if (max8997->ono) free_irq(max8997->ono, max8997); if (max8997->irq) free_irq(max8997->irq, max8997); }
gpl-2.0
GHlmh/linux-toradexboard
arch/arm/mach-at91/board-eco920.c
2293
3619
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mtd/physmap.h> #include <linux/gpio.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <mach/board.h> #include <mach/at91rm9200_mc.h> #include <mach/cpu.h> #include "generic.h" static void __init eco920_init_early(void) { /* Set cpu type: PQFP */ at91rm9200_set_type(ARCH_REVISON_9200_PQFP); at91rm9200_initialize(18432000); /* Setup the LEDs */ at91_init_leds(AT91_PIN_PB0, AT91_PIN_PB1); /* DBGU on ttyS0. 
(Rx & Tx only */ at91_register_uart(0, 0, 0); /* set serial console to ttyS0 (ie, DBGU) */ at91_set_serial_console(0); } static void __init eco920_init_irq(void) { at91rm9200_init_interrupts(NULL); } static struct at91_eth_data __initdata eco920_eth_data = { .phy_irq_pin = AT91_PIN_PC2, .is_rmii = 1, }; static struct at91_usbh_data __initdata eco920_usbh_data = { .ports = 1, }; static struct at91_udc_data __initdata eco920_udc_data = { .vbus_pin = AT91_PIN_PB12, .pullup_pin = AT91_PIN_PB13, }; static struct at91_mmc_data __initdata eco920_mmc_data = { .slot_b = 0, .wire4 = 0, }; static struct physmap_flash_data eco920_flash_data = { .width = 2, }; static struct resource eco920_flash_resource = { .start = 0x11000000, .end = 0x11ffffff, .flags = IORESOURCE_MEM, }; static struct platform_device eco920_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &eco920_flash_data, }, .resource = &eco920_flash_resource, .num_resources = 1, }; static struct spi_board_info eco920_spi_devices[] = { { /* CAN controller */ .modalias = "tlv5638", .chip_select = 3, .max_speed_hz = 20 * 1000 * 1000, .mode = SPI_CPHA, }, }; static void __init eco920_board_init(void) { at91_add_device_serial(); at91_add_device_eth(&eco920_eth_data); at91_add_device_usbh(&eco920_usbh_data); at91_add_device_udc(&eco920_udc_data); at91_add_device_mmc(0, &eco920_mmc_data); platform_device_register(&eco920_flash); at91_sys_write(AT91_SMC_CSR(7), AT91_SMC_RWHOLD_(1) | AT91_SMC_RWSETUP_(1) | AT91_SMC_DBW_8 | AT91_SMC_WSEN | AT91_SMC_NWS_(15)); at91_set_A_periph(AT91_PIN_PC6, 1); at91_set_gpio_input(AT91_PIN_PA23, 0); at91_set_deglitch(AT91_PIN_PA23, 1); /* Initialization of the Static Memory Controller for Chip Select 3 */ at91_sys_write(AT91_SMC_CSR(3), AT91_SMC_DBW_16 | /* 16 bit */ AT91_SMC_WSEN | AT91_SMC_NWS_(5) | /* wait states */ AT91_SMC_TDF_(1) /* float time */ ); at91_add_device_spi(eco920_spi_devices, ARRAY_SIZE(eco920_spi_devices)); } MACHINE_START(ECO920, "eco920") /* Maintainer: 
Sascha Hauer */ .timer = &at91rm9200_timer, .map_io = at91rm9200_map_io, .init_early = eco920_init_early, .init_irq = eco920_init_irq, .init_machine = eco920_board_init, MACHINE_END
gpl-2.0
surdupetru/p6-kernel
drivers/spi/spi_fsl_lib.c
2549
5933
/* * Freescale SPI/eSPI controller driver library. * * Maintainer: Kumar Gala * * Copyright (C) 2006 Polycom, Inc. * * CPM SPI and QE buffer descriptors mode support: * Copyright (c) 2009 MontaVista Software, Inc. * Author: Anton Vorontsov <avorontsov@ru.mvista.com> * * Copyright 2010 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/fsl_devices.h> #include <linux/dma-mapping.h> #include <linux/mm.h> #include <linux/of_platform.h> #include <linux/of_spi.h> #include <sysdev/fsl_soc.h> #include "spi_fsl_lib.h" #define MPC8XXX_SPI_RX_BUF(type) \ void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \ { \ type *rx = mpc8xxx_spi->rx; \ *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \ mpc8xxx_spi->rx = rx; \ } #define MPC8XXX_SPI_TX_BUF(type) \ u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \ { \ u32 data; \ const type *tx = mpc8xxx_spi->tx; \ if (!tx) \ return 0; \ data = *tx++ << mpc8xxx_spi->tx_shift; \ mpc8xxx_spi->tx = tx; \ return data; \ } MPC8XXX_SPI_RX_BUF(u8) MPC8XXX_SPI_RX_BUF(u16) MPC8XXX_SPI_RX_BUF(u32) MPC8XXX_SPI_TX_BUF(u8) MPC8XXX_SPI_TX_BUF(u16) MPC8XXX_SPI_TX_BUF(u32) struct mpc8xxx_spi_probe_info *to_of_pinfo(struct fsl_spi_platform_data *pdata) { return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata); } void mpc8xxx_spi_work(struct work_struct *work) { struct mpc8xxx_spi *mpc8xxx_spi = container_of(work, struct mpc8xxx_spi, work); spin_lock_irq(&mpc8xxx_spi->lock); while (!list_empty(&mpc8xxx_spi->queue)) { struct spi_message *m = container_of(mpc8xxx_spi->queue.next, struct spi_message, queue); list_del_init(&m->queue); spin_unlock_irq(&mpc8xxx_spi->lock); if (mpc8xxx_spi->spi_do_one_msg) 
mpc8xxx_spi->spi_do_one_msg(m); spin_lock_irq(&mpc8xxx_spi->lock); } spin_unlock_irq(&mpc8xxx_spi->lock); } int mpc8xxx_spi_transfer(struct spi_device *spi, struct spi_message *m) { struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); unsigned long flags; m->actual_length = 0; m->status = -EINPROGRESS; spin_lock_irqsave(&mpc8xxx_spi->lock, flags); list_add_tail(&m->queue, &mpc8xxx_spi->queue); queue_work(mpc8xxx_spi->workqueue, &mpc8xxx_spi->work); spin_unlock_irqrestore(&mpc8xxx_spi->lock, flags); return 0; } void mpc8xxx_spi_cleanup(struct spi_device *spi) { kfree(spi->controller_state); } const char *mpc8xxx_spi_strmode(unsigned int flags) { if (flags & SPI_QE_CPU_MODE) { return "QE CPU"; } else if (flags & SPI_CPM_MODE) { if (flags & SPI_QE) return "QE"; else if (flags & SPI_CPM2) return "CPM2"; else return "CPM1"; } return "CPU"; } int mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq) { struct fsl_spi_platform_data *pdata = dev->platform_data; struct spi_master *master; struct mpc8xxx_spi *mpc8xxx_spi; int ret = 0; master = dev_get_drvdata(dev); /* the spi->mode bits understood by this driver: */ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST | SPI_LOOP; master->transfer = mpc8xxx_spi_transfer; master->cleanup = mpc8xxx_spi_cleanup; master->dev.of_node = dev->of_node; mpc8xxx_spi = spi_master_get_devdata(master); mpc8xxx_spi->dev = dev; mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8; mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8; mpc8xxx_spi->flags = pdata->flags; mpc8xxx_spi->spibrg = pdata->sysclk; mpc8xxx_spi->irq = irq; mpc8xxx_spi->rx_shift = 0; mpc8xxx_spi->tx_shift = 0; init_completion(&mpc8xxx_spi->done); master->bus_num = pdata->bus_num; master->num_chipselect = pdata->max_chipselect; spin_lock_init(&mpc8xxx_spi->lock); init_completion(&mpc8xxx_spi->done); INIT_WORK(&mpc8xxx_spi->work, mpc8xxx_spi_work); INIT_LIST_HEAD(&mpc8xxx_spi->queue); mpc8xxx_spi->workqueue = 
create_singlethread_workqueue( dev_name(master->dev.parent)); if (mpc8xxx_spi->workqueue == NULL) { ret = -EBUSY; goto err; } return 0; err: return ret; } int __devexit mpc8xxx_spi_remove(struct device *dev) { struct mpc8xxx_spi *mpc8xxx_spi; struct spi_master *master; master = dev_get_drvdata(dev); mpc8xxx_spi = spi_master_get_devdata(master); flush_workqueue(mpc8xxx_spi->workqueue); destroy_workqueue(mpc8xxx_spi->workqueue); spi_unregister_master(master); free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); if (mpc8xxx_spi->spi_remove) mpc8xxx_spi->spi_remove(mpc8xxx_spi); return 0; } int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev) { struct device *dev = &ofdev->dev; struct device_node *np = ofdev->dev.of_node; struct mpc8xxx_spi_probe_info *pinfo; struct fsl_spi_platform_data *pdata; const void *prop; int ret = -ENOMEM; pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL); if (!pinfo) return -ENOMEM; pdata = &pinfo->pdata; dev->platform_data = pdata; /* Allocate bus num dynamically. */ pdata->bus_num = -1; /* SPI controller is either clocked from QE or SoC clock. */ pdata->sysclk = get_brgfreq(); if (pdata->sysclk == -1) { pdata->sysclk = fsl_get_sys_freq(); if (pdata->sysclk == -1) { ret = -ENODEV; goto err; } } prop = of_get_property(np, "mode", NULL); if (prop && !strcmp(prop, "cpu-qe")) pdata->flags = SPI_QE_CPU_MODE; else if (prop && !strcmp(prop, "qe")) pdata->flags = SPI_CPM_MODE | SPI_QE; else if (of_device_is_compatible(np, "fsl,cpm2-spi")) pdata->flags = SPI_CPM_MODE | SPI_CPM2; else if (of_device_is_compatible(np, "fsl,cpm1-spi")) pdata->flags = SPI_CPM_MODE | SPI_CPM1; return 0; err: kfree(pinfo); return ret; }
gpl-2.0
cphelps76/DEMENTED_kernel_n8013
arch/arm/mach-omap1/board-palmtt.c
2549
7313
/* * linux/arch/arm/mach-omap1/board-palmtt.c * * Modified from board-palmtt2.c * * Modified and amended for Palm Tungsten|T * by Marek Vasut <marek.vasut@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/delay.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/notifier.h> #include <linux/clk.h> #include <linux/input.h> #include <linux/interrupt.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/leds.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <plat/led.h> #include <mach/gpio.h> #include <plat/flash.h> #include <plat/mux.h> #include <plat/usb.h> #include <plat/dma.h> #include <plat/tc.h> #include <plat/board.h> #include <plat/irda.h> #include <plat/keypad.h> #include <plat/common.h> #include <linux/spi/spi.h> #include <linux/spi/ads7846.h> #define PALMTT_USBDETECT_GPIO 0 #define PALMTT_CABLE_GPIO 1 #define PALMTT_LED_GPIO 3 #define PALMTT_PENIRQ_GPIO 6 #define PALMTT_MMC_WP_GPIO 8 #define PALMTT_HDQ_GPIO 11 static const unsigned int palmtt_keymap[] = { KEY(0, 0, KEY_ESC), KEY(1, 0, KEY_SPACE), KEY(2, 0, KEY_LEFTCTRL), KEY(3, 0, KEY_TAB), KEY(4, 0, KEY_ENTER), KEY(0, 1, KEY_LEFT), KEY(1, 1, KEY_DOWN), KEY(2, 1, KEY_UP), KEY(3, 1, KEY_RIGHT), KEY(0, 2, KEY_SLEEP), KEY(4, 2, KEY_Y), }; static struct mtd_partition palmtt_partitions[] = { { .name = "write8k", .offset = 0, .size = SZ_8K, .mask_flags = 0, }, { .name = "PalmOS-BootLoader(ro)", .offset = SZ_8K, .size = 7 * SZ_8K, .mask_flags = MTD_WRITEABLE, }, { .name = "u-boot", .offset = MTDPART_OFS_APPEND, .size = 8 * SZ_8K, .mask_flags = 0, }, { .name = "PalmOS-FS(ro)", .offset = MTDPART_OFS_APPEND, .size = 7 * SZ_1M + 4 * SZ_64K - 16 * SZ_8K, .mask_flags = 
MTD_WRITEABLE, }, { .name = "u-boot(rez)", .offset = MTDPART_OFS_APPEND, .size = SZ_128K, .mask_flags = 0 }, { .name = "empty", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, .mask_flags = 0 } }; static struct physmap_flash_data palmtt_flash_data = { .width = 2, .set_vpp = omap1_set_vpp, .parts = palmtt_partitions, .nr_parts = ARRAY_SIZE(palmtt_partitions), }; static struct resource palmtt_flash_resource = { .start = OMAP_CS0_PHYS, .end = OMAP_CS0_PHYS + SZ_8M - 1, .flags = IORESOURCE_MEM, }; static struct platform_device palmtt_flash_device = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &palmtt_flash_data, }, .num_resources = 1, .resource = &palmtt_flash_resource, }; static struct resource palmtt_kp_resources[] = { [0] = { .start = INT_KEYBOARD, .end = INT_KEYBOARD, .flags = IORESOURCE_IRQ, }, }; static const struct matrix_keymap_data palmtt_keymap_data = { .keymap = palmtt_keymap, .keymap_size = ARRAY_SIZE(palmtt_keymap), }; static struct omap_kp_platform_data palmtt_kp_data = { .rows = 6, .cols = 3, .keymap_data = &palmtt_keymap_data, }; static struct platform_device palmtt_kp_device = { .name = "omap-keypad", .id = -1, .dev = { .platform_data = &palmtt_kp_data, }, .num_resources = ARRAY_SIZE(palmtt_kp_resources), .resource = palmtt_kp_resources, }; static struct platform_device palmtt_lcd_device = { .name = "lcd_palmtt", .id = -1, }; static struct omap_irda_config palmtt_irda_config = { .transceiver_cap = IR_SIRMODE, .rx_channel = OMAP_DMA_UART3_RX, .tx_channel = OMAP_DMA_UART3_TX, .dest_start = UART3_THR, .src_start = UART3_RHR, .tx_trigger = 0, .rx_trigger = 0, }; static struct resource palmtt_irda_resources[] = { [0] = { .start = INT_UART3, .end = INT_UART3, .flags = IORESOURCE_IRQ, }, }; static struct platform_device palmtt_irda_device = { .name = "omapirda", .id = -1, .dev = { .platform_data = &palmtt_irda_config, }, .num_resources = ARRAY_SIZE(palmtt_irda_resources), .resource = palmtt_irda_resources, }; static struct 
platform_device palmtt_spi_device = { .name = "spi_palmtt", .id = -1, }; static struct omap_backlight_config palmtt_backlight_config = { .default_intensity = 0xa0, }; static struct platform_device palmtt_backlight_device = { .name = "omap-bl", .id = -1, .dev = { .platform_data= &palmtt_backlight_config, }, }; static struct omap_led_config palmtt_led_config[] = { { .cdev = { .name = "palmtt:led0", }, .gpio = PALMTT_LED_GPIO, }, }; static struct omap_led_platform_data palmtt_led_data = { .nr_leds = ARRAY_SIZE(palmtt_led_config), .leds = palmtt_led_config, }; static struct platform_device palmtt_led_device = { .name = "omap-led", .id = -1, .dev = { .platform_data = &palmtt_led_data, }, }; static struct platform_device *palmtt_devices[] __initdata = { &palmtt_flash_device, &palmtt_kp_device, &palmtt_lcd_device, &palmtt_irda_device, &palmtt_spi_device, &palmtt_backlight_device, &palmtt_led_device, }; static int palmtt_get_pendown_state(void) { return !gpio_get_value(6); } static const struct ads7846_platform_data palmtt_ts_info = { .model = 7846, .vref_delay_usecs = 100, /* internal, no capacitor */ .x_plate_ohms = 419, .y_plate_ohms = 486, .get_pendown_state = palmtt_get_pendown_state, }; static struct spi_board_info __initdata palmtt_boardinfo[] = { { /* MicroWire (bus 2) CS0 has an ads7846e */ .modalias = "ads7846", .platform_data = &palmtt_ts_info, .irq = OMAP_GPIO_IRQ(6), .max_speed_hz = 120000 /* max sample rate at 3V */ * 26 /* command + data + overhead */, .bus_num = 2, .chip_select = 0, } }; static void __init omap_palmtt_init_irq(void) { omap1_init_common_hw(); omap_init_irq(); } static struct omap_usb_config palmtt_usb_config __initdata = { .register_dev = 1, .hmc_mode = 0, .pins[0] = 2, }; static struct omap_lcd_config palmtt_lcd_config __initdata = { .ctrl_name = "internal", }; static struct omap_board_config_kernel palmtt_config[] __initdata = { { OMAP_TAG_LCD, &palmtt_lcd_config }, }; static void __init omap_mpu_wdt_mode(int mode) { if (mode) 
omap_writew(0x8000, OMAP_WDT_TIMER_MODE); else { omap_writew(0x00f5, OMAP_WDT_TIMER_MODE); omap_writew(0x00a0, OMAP_WDT_TIMER_MODE); } } static void __init omap_palmtt_init(void) { /* mux pins for uarts */ omap_cfg_reg(UART1_TX); omap_cfg_reg(UART1_RTS); omap_cfg_reg(UART2_TX); omap_cfg_reg(UART2_RTS); omap_cfg_reg(UART3_TX); omap_cfg_reg(UART3_RX); omap_mpu_wdt_mode(0); omap_board_config = palmtt_config; omap_board_config_size = ARRAY_SIZE(palmtt_config); platform_add_devices(palmtt_devices, ARRAY_SIZE(palmtt_devices)); spi_register_board_info(palmtt_boardinfo,ARRAY_SIZE(palmtt_boardinfo)); omap_serial_init(); omap1_usb_init(&palmtt_usb_config); omap_register_i2c_bus(1, 100, NULL, 0); } static void __init omap_palmtt_map_io(void) { omap1_map_common_io(); } MACHINE_START(OMAP_PALMTT, "OMAP1510 based Palm Tungsten|T") .boot_params = 0x10000100, .map_io = omap_palmtt_map_io, .reserve = omap_reserve, .init_irq = omap_palmtt_init_irq, .init_machine = omap_palmtt_init, .timer = &omap_timer, MACHINE_END
gpl-2.0
KylinUI/android_kernel_samsung_smdk4412
drivers/mtd/maps/bcm963xx-flash.c
2805
7261
/* * Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org> * Mike Albon <malbon@openwrt.org> * Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/mtd/map.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/vmalloc.h> #include <linux/platform_device.h> #include <linux/io.h> #include <asm/mach-bcm63xx/bcm963xx_tag.h> #define BCM63XX_BUSWIDTH 2 /* Buswidth */ #define BCM63XX_EXTENDED_SIZE 0xBFC00000 /* Extended flash address */ #define PFX KBUILD_MODNAME ": " static struct mtd_partition *parsed_parts; static struct mtd_info *bcm963xx_mtd_info; static struct map_info bcm963xx_map = { .name = "bcm963xx", .bankwidth = BCM63XX_BUSWIDTH, }; static int parse_cfe_partitions(struct mtd_info *master, struct mtd_partition **pparts) { /* CFE, NVRAM and global Linux are always present */ int nrparts = 3, curpart = 0; struct bcm_tag *buf; struct mtd_partition *parts; int ret; size_t retlen; unsigned int rootfsaddr, kerneladdr, spareaddr; unsigned int rootfslen, kernellen, sparelen, totallen; int namelen = 0; int i; char *boardid; char *tagversion; /* Allocate memory for buffer */ buf = vmalloc(sizeof(struct bcm_tag)); if (!buf) return -ENOMEM; /* 
Get the tag */ ret = master->read(master, master->erasesize, sizeof(struct bcm_tag), &retlen, (void *)buf); if (retlen != sizeof(struct bcm_tag)) { vfree(buf); return -EIO; } sscanf(buf->kernel_address, "%u", &kerneladdr); sscanf(buf->kernel_length, "%u", &kernellen); sscanf(buf->total_length, "%u", &totallen); tagversion = &(buf->tag_version[0]); boardid = &(buf->board_id[0]); printk(KERN_INFO PFX "CFE boot tag found with version %s " "and board type %s\n", tagversion, boardid); kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE; rootfsaddr = kerneladdr + kernellen; spareaddr = roundup(totallen, master->erasesize) + master->erasesize; sparelen = master->size - spareaddr - master->erasesize; rootfslen = spareaddr - rootfsaddr; /* Determine number of partitions */ namelen = 8; if (rootfslen > 0) { nrparts++; namelen += 6; }; if (kernellen > 0) { nrparts++; namelen += 6; }; /* Ask kernel for more memory */ parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL); if (!parts) { vfree(buf); return -ENOMEM; }; /* Start building partition list */ parts[curpart].name = "CFE"; parts[curpart].offset = 0; parts[curpart].size = master->erasesize; curpart++; if (kernellen > 0) { parts[curpart].name = "kernel"; parts[curpart].offset = kerneladdr; parts[curpart].size = kernellen; curpart++; }; if (rootfslen > 0) { parts[curpart].name = "rootfs"; parts[curpart].offset = rootfsaddr; parts[curpart].size = rootfslen; if (sparelen > 0) parts[curpart].size += sparelen; curpart++; }; parts[curpart].name = "nvram"; parts[curpart].offset = master->size - master->erasesize; parts[curpart].size = master->erasesize; /* Global partition "linux" to make easy firmware upgrade */ curpart++; parts[curpart].name = "linux"; parts[curpart].offset = parts[0].size; parts[curpart].size = master->size - parts[0].size - parts[3].size; for (i = 0; i < nrparts; i++) printk(KERN_INFO PFX "Partition %d is %s offset %lx and " "length %lx\n", i, parts[i].name, (long unsigned int)(parts[i].offset), 
(long unsigned int)(parts[i].size)); printk(KERN_INFO PFX "Spare partition is %x offset and length %x\n", spareaddr, sparelen); *pparts = parts; vfree(buf); return nrparts; }; static int bcm963xx_detect_cfe(struct mtd_info *master) { int idoffset = 0x4e0; static char idstring[8] = "CFE1CFE1"; char buf[9]; int ret; size_t retlen; ret = master->read(master, idoffset, 8, &retlen, (void *)buf); buf[retlen] = 0; printk(KERN_INFO PFX "Read Signature value of %s\n", buf); return strncmp(idstring, buf, 8); } static int bcm963xx_probe(struct platform_device *pdev) { int err = 0; int parsed_nr_parts = 0; char *part_type; struct resource *r; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) { dev_err(&pdev->dev, "no resource supplied\n"); return -ENODEV; } bcm963xx_map.phys = r->start; bcm963xx_map.size = resource_size(r); bcm963xx_map.virt = ioremap(r->start, resource_size(r)); if (!bcm963xx_map.virt) { dev_err(&pdev->dev, "failed to ioremap\n"); return -EIO; } dev_info(&pdev->dev, "0x%08lx at 0x%08x\n", bcm963xx_map.size, bcm963xx_map.phys); simple_map_init(&bcm963xx_map); bcm963xx_mtd_info = do_map_probe("cfi_probe", &bcm963xx_map); if (!bcm963xx_mtd_info) { dev_err(&pdev->dev, "failed to probe using CFI\n"); bcm963xx_mtd_info = do_map_probe("jedec_probe", &bcm963xx_map); if (bcm963xx_mtd_info) goto probe_ok; dev_err(&pdev->dev, "failed to probe using JEDEC\n"); err = -EIO; goto err_probe; } probe_ok: bcm963xx_mtd_info->owner = THIS_MODULE; /* This is mutually exclusive */ if (bcm963xx_detect_cfe(bcm963xx_mtd_info) == 0) { dev_info(&pdev->dev, "CFE bootloader detected\n"); if (parsed_nr_parts == 0) { int ret = parse_cfe_partitions(bcm963xx_mtd_info, &parsed_parts); if (ret > 0) { part_type = "CFE"; parsed_nr_parts = ret; } } } else { dev_info(&pdev->dev, "unsupported bootloader\n"); err = -ENODEV; goto err_probe; } return mtd_device_register(bcm963xx_mtd_info, parsed_parts, parsed_nr_parts); err_probe: iounmap(bcm963xx_map.virt); return err; } static int 
bcm963xx_remove(struct platform_device *pdev) { if (bcm963xx_mtd_info) { mtd_device_unregister(bcm963xx_mtd_info); map_destroy(bcm963xx_mtd_info); } if (bcm963xx_map.virt) { iounmap(bcm963xx_map.virt); bcm963xx_map.virt = 0; } return 0; } static struct platform_driver bcm63xx_mtd_dev = { .probe = bcm963xx_probe, .remove = bcm963xx_remove, .driver = { .name = "bcm963xx-flash", .owner = THIS_MODULE, }, }; static int __init bcm963xx_mtd_init(void) { return platform_driver_register(&bcm63xx_mtd_dev); } static void __exit bcm963xx_mtd_exit(void) { platform_driver_unregister(&bcm63xx_mtd_dev); } module_init(bcm963xx_mtd_init); module_exit(bcm963xx_mtd_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Broadcom BCM63xx MTD driver for CFE and RedBoot"); MODULE_AUTHOR("Daniel Dickinson <openwrt@cshore.neomailbox.net>"); MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>"); MODULE_AUTHOR("Mike Albon <malbon@openwrt.org>");
gpl-2.0
lolhi/at1-S0834211
fs/ocfs2/dlm/dlmrecovery.c
2805
87471
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * dlmrecovery.c * * recovery stuff * * Copyright (C) 2004 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. * */ #include <linux/module.h> #include <linux/fs.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> #include <linux/init.h> #include <linux/sysctl.h> #include <linux/random.h> #include <linux/blkdev.h> #include <linux/socket.h> #include <linux/inet.h> #include <linux/timer.h> #include <linux/kthread.h> #include <linux/delay.h> #include "cluster/heartbeat.h" #include "cluster/nodemanager.h" #include "cluster/tcp.h" #include "dlmapi.h" #include "dlmcommon.h" #include "dlmdomain.h" #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY) #include "cluster/masklog.h" static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node); static int dlm_recovery_thread(void *data); void dlm_complete_recovery_thread(struct dlm_ctxt *dlm); int dlm_launch_recovery_thread(struct dlm_ctxt *dlm); void dlm_kick_recovery_thread(struct dlm_ctxt *dlm); static int dlm_do_recovery(struct dlm_ctxt *dlm); static int dlm_pick_recovery_master(struct dlm_ctxt *dlm); static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node); static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node); 
/* forward declarations for the recovery/migration machinery below */
static int dlm_request_all_locks(struct dlm_ctxt *dlm,
				 u8 request_from, u8 dead_node);
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master);
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks);
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres);
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
				 u8 dead_node, u8 send_to);
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list, u8 dead_node);
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master);

static void dlm_reco_ast(void *astdata);
static void dlm_reco_bast(void *astdata, int blocked_type);
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
static void dlm_request_all_locks_worker(struct dlm_work_item *item,
					 void *data);
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 *real_master);

static u64 dlm_get_next_mig_cookie(void);

static DEFINE_SPINLOCK(dlm_reco_state_lock);
static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
/* monotonically increasing cookie used to tie together the packets of
 * one multi-packet lockres migration; never 0, wraps back to 1 */
static u64 dlm_mig_cookie = 1;

/* Hand out the next migration cookie under dlm_mig_cookie_lock. */
static u64 dlm_get_next_mig_cookie(void)
{
	u64 c;
	spin_lock(&dlm_mig_cookie_lock);
	c = dlm_mig_cookie;
	if (dlm_mig_cookie == (~0ULL))
		dlm_mig_cookie = 1;
	else
		dlm_mig_cookie++;
	spin_unlock(&dlm_mig_cookie_lock);
	return c;
}

/* Record which node is currently being recovered.
 * Caller must hold dlm->spinlock. */
static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
					  u8 dead_node)
{
	assert_spin_locked(&dlm->spinlock);
	if (dlm->reco.dead_node != dead_node)
		mlog(0, "%s: changing dead_node from %u to %u\n",
		     dlm->name, dlm->reco.dead_node, dead_node);
	dlm->reco.dead_node = dead_node;
}

/* Record which node is mastering this recovery session.
 * Caller must hold dlm->spinlock. */
static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
				       u8 master)
{
	assert_spin_locked(&dlm->spinlock);
	mlog(0, "%s: changing new_master from %u to %u\n",
	     dlm->name, dlm->reco.new_master, master);
	dlm->reco.new_master = master;
}

/* Clear the dead node from the recovery map and invalidate both the
 * dead-node and new-master fields.  Caller must hold dlm->spinlock. */
static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	assert_spin_locked(&dlm->spinlock);
	clear_bit(dlm->reco.dead_node, dlm->recovery_map);
	dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
}

/* Locking wrapper around __dlm_reset_recovery(). */
static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	__dlm_reset_recovery(dlm);
	spin_unlock(&dlm->spinlock);
}

/* Worker function used during recovery.
 * Drains dlm->work_list onto a private list and runs each queued
 * work item; each item carries its own reference on the dlm. */
void dlm_dispatch_work(struct work_struct *work)
{
	struct dlm_ctxt *dlm =
		container_of(work, struct dlm_ctxt, dispatched_work);
	LIST_HEAD(tmp_list);
	struct dlm_work_item *item, *next;
	dlm_workfunc_t *workfunc;
	int tot=0;

	/* grab the whole pending list in one shot so the lock is
	 * not held while the work functions run */
	spin_lock(&dlm->work_lock);
	list_splice_init(&dlm->work_list, &tmp_list);
	spin_unlock(&dlm->work_lock);

	list_for_each_entry(item, &tmp_list, list) {
		tot++;
	}
	mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);

	list_for_each_entry_safe(item, next, &tmp_list, list) {
		workfunc = item->func;
		list_del_init(&item->list);

		/* already have ref on dlm to avoid having
		 * it disappear.  just double-check. */
		BUG_ON(item->dlm != dlm);

		/* this is allowed to sleep and
		 * call network stuff */
		workfunc(item, item->data);

		/* drop the ref taken when the item was queued */
		dlm_put(dlm);
		kfree(item);
	}
}

/*
 * RECOVERY THREAD
 */

void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
{
	/* wake the recovery thread
	 * this will wake the reco thread in one of three places
	 * 1) sleeping with no recovery happening
	 * 2) sleeping with recovery mastered elsewhere
	 * 3) recovery mastered here, waiting on reco data */
	wake_up(&dlm->dlm_reco_thread_wq);
}

/* Launch the recovery thread.
 * Returns 0 on success, -EINVAL if the kthread could not be started. */
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "starting dlm recovery thread...\n");

	dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
						"dlm_reco_thread");
	if (IS_ERR(dlm->dlm_reco_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
		dlm->dlm_reco_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}

/* Stop the recovery thread (blocks until it exits), if running. */
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_reco_thread_task) {
		mlog(0, "waiting for dlm recovery thread to exit\n");
		kthread_stop(dlm->dlm_reco_thread_task);
		dlm->dlm_reco_thread_task = NULL;
	}
}

/*
 * this is lame, but here's how recovery works...
 * 1) all recovery threads cluster wide will work on recovering
 *    ONE node at a time
 * 2) negotiate who will take over all the locks for the dead node.
 *    thats right... ALL the locks.
 * 3) once a new master is chosen, everyone scans all locks
 *    and moves aside those mastered by the dead guy
 * 4) each of these locks should be locked until recovery is done
 * 5) the new master collects up all of secondary lock queue info
 *    one lock at a time, forcing each node to communicate back
 *    before continuing
 * 6) each secondary lock queue responds with the full known lock info
 * 7) once the new master has run all its locks, it sends a ALLDONE!
 *    message to everyone
 * 8) upon receiving this message, the secondary queue node unlocks
 *    and responds to the ALLDONE
 * 9) once the new master gets responses from everyone, he unlocks
 *    everything and recovery for this dead node is done
 *10) go back to 2) while there are still dead nodes
 *
 */

/* Dump the current recovery state (active/inactive, dead node, reco
 * master, per-node progress, and lockreses still on the recovering
 * list) to the log for debugging. */
static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
{
	struct dlm_reco_node_data *ndata;
	struct dlm_lock_resource *res;

	mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
	     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
	     dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
	     dlm->reco.dead_node, dlm->reco.new_master);

	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		char *st = "unknown";
		switch (ndata->state) {
		case DLM_RECO_NODE_DATA_INIT:
			st = "init";
			break;
		case DLM_RECO_NODE_DATA_REQUESTING:
			st = "requesting";
			break;
		case DLM_RECO_NODE_DATA_DEAD:
			st = "dead";
			break;
		case DLM_RECO_NODE_DATA_RECEIVING:
			st = "receiving";
			break;
		case DLM_RECO_NODE_DATA_REQUESTED:
			st = "requested";
			break;
		case DLM_RECO_NODE_DATA_DONE:
			st = "done";
			break;
		case DLM_RECO_NODE_DATA_FINALIZE_SENT:
			st = "finalize-sent";
			break;
		default:
			st = "bad";
			break;
		}
		mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
		     dlm->name, ndata->node_num, st);
	}
	list_for_each_entry(res, &dlm->reco.resources, recovering) {
		mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
		     dlm->name, res->lockname.len, res->lockname.name);
	}
}

#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)

/* Recovery thread main loop: once the domain is fully joined, run
 * dlm_do_recovery(); -EAGAIN means "more dead nodes, go again now",
 * otherwise sleep up to DLM_RECO_THREAD_TIMEOUT_MS or until kicked. */
static int dlm_recovery_thread(void *data)
{
	int status;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		if (dlm_domain_fully_joined(dlm)) {
			status = dlm_do_recovery(dlm);
			if (status == -EAGAIN) {
				/* do not sleep, recheck immediately. */
				continue;
			}
			if (status < 0)
				mlog_errno(status);
		}

		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM recovery thread\n");
	return 0;
}

/* returns true when the recovery master has contacted us */
static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
{
	int ready;
	spin_lock(&dlm->spinlock);
	ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
	spin_unlock(&dlm->spinlock);
	return ready;
}

/* returns true if node is no longer in the domain
 * could be dead or just not joined */
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
{
	int dead;
	spin_lock(&dlm->spinlock);
	dead = !test_bit(node, dlm->domain_map);
	spin_unlock(&dlm->spinlock);
	return dead;
}

/* returns true if node is not currently in the recovery map
 * could be dead or just not joined */
static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
{
	int recovered;
	spin_lock(&dlm->spinlock);
	recovered = !test_bit(node, dlm->recovery_map);
	spin_unlock(&dlm->spinlock);
	return recovered;
}

/* Block until the given node drops out of the domain map; timeout of
 * 0 waits forever.  Always returns 0. */
int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (timeout) {
		mlog(ML_NOTICE, "%s: waiting %dms for notification of "
		     "death of node %u\n", dlm->name, timeout, node);
		wait_event_timeout(dlm->dlm_reco_thread_wq,
			   dlm_is_node_dead(dlm, node),
			   msecs_to_jiffies(timeout));
	} else {
		mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
		     "of death of node %u\n", dlm->name, node);
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_dead(dlm, node));
	}
	/* for now, return 0 */
	return 0;
}

/* Block until the given node has been recovered (cleared from the
 * recovery map); timeout of 0 waits forever.  Always returns 0. */
int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (timeout) {
		mlog(0, "%s: waiting %dms for notification of "
		     "recovery of node %u\n", dlm->name, timeout, node);
		wait_event_timeout(dlm->dlm_reco_thread_wq,
			   dlm_is_node_recovered(dlm, node),
			   msecs_to_jiffies(timeout));
	} else {
		mlog(0, "%s: waiting indefinitely for notification "
		     "of recovery of node %u\n", dlm->name, node);
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_recovered(dlm, node));
	}
	/* for now, return 0 */
	return 0;
}

/* callers of the top-level api calls (dlmlock/dlmunlock) should
 * block on the dlm->reco.event when recovery is in progress.
 * the dlm recovery thread will set this state when it begins
 * recovering a dead node (as the new master or not) and clear
 * the state and wake as soon as all affected lock resources have
 * been marked with the RECOVERY flag */
static int dlm_in_recovery(struct dlm_ctxt *dlm)
{
	int in_recovery;
	spin_lock(&dlm->spinlock);
	in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	spin_unlock(&dlm->spinlock);
	return in_recovery;
}

/* Block until no recovery session is active. */
void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
	if (dlm_in_recovery(dlm)) {
		mlog(0, "%s: reco thread %d in recovery: "
		     "state=%d, master=%u, dead=%u\n",
		     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
		     dlm->reco.state, dlm->reco.new_master,
		     dlm->reco.dead_node);
	}
	wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}

/* Mark recovery active; BUGs if a session is already active. */
static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
}

/* Mark recovery inactive and wake anyone blocked in
 * dlm_wait_for_recovery(); BUGs if no session was active. */
static void dlm_end_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
	dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
	wake_up(&dlm->reco.event);
}

/* One pass of the recovery state machine: pick a dead node, determine
 * (or become) the recovery master, and either remaster its locks here
 * or let the remote master drive.  Returns 0 to sleep, -EAGAIN to be
 * called again immediately. */
static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
	int status = 0;
	int ret;

	spin_lock(&dlm->spinlock);

	/* check to see if the new master has died */
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
	    test_bit(dlm->reco.new_master, dlm->recovery_map)) {
		mlog(0, "new master %u died while recovering %u!\n",
		     dlm->reco.new_master, dlm->reco.dead_node);
		/* unset the new_master, leave dead_node */
		dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
	}

	/* select a target to recover */
	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		int bit;

		bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES, 0);
		if (bit >= O2NM_MAX_NODES || bit < 0)
			dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
		else
			dlm_set_reco_dead_node(dlm, bit);
	} else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
		/* BUG? */
		mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
		     dlm->reco.dead_node);
		dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	}

	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		// mlog(0, "nothing to recover! sleeping now!\n");
		spin_unlock(&dlm->spinlock);
		/* return to main thread loop and sleep. */
		return 0;
	}
	mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
	     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
	     dlm->reco.dead_node);
	spin_unlock(&dlm->spinlock);

	/* take write barrier */
	/* (stops the list reshuffling thread, proxy ast handling) */
	dlm_begin_recovery(dlm);

	if (dlm->reco.new_master == dlm->node_num)
		goto master_here;

	if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
		/* choose a new master, returns 0 if this node
		 * is the master, -EEXIST if it's another node.
		 * this does not return until a new master is chosen
		 * or recovery completes entirely. */
		ret = dlm_pick_recovery_master(dlm);
		if (!ret) {
			/* already notified everyone.  go. */
			goto master_here;
		}
		mlog(0, "another node will master this recovery session.\n");
	}
	mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n",
	     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
	     dlm->reco.new_master, dlm->node_num, dlm->reco.dead_node);

	/* it is safe to start everything back up here
	 * because all of the dead node's lock resources
	 * have been marked as in-recovery */
	dlm_end_recovery(dlm);

	/* sleep out in main dlm_recovery_thread loop. */
	return 0;

master_here:
	mlog(ML_NOTICE, "(%d) Node %u is the Recovery Master for the Dead Node "
	     "%u for Domain %s\n", task_pid_nr(dlm->dlm_reco_thread_task),
	     dlm->node_num, dlm->reco.dead_node, dlm->name);

	status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
	if (status < 0) {
		/* we should never hit this anymore */
		mlog(ML_ERROR, "error %d remastering locks for node %u, "
		     "retrying.\n", status, dlm->reco.dead_node);
		/* yield a bit to allow any final network messages
		 * to get handled on remaining nodes */
		msleep(100);
	} else {
		/* success! see if any other nodes need recovery */
		mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
		     dlm->name, dlm->reco.dead_node, dlm->node_num);
		dlm_reset_recovery(dlm);
	}
	dlm_end_recovery(dlm);

	/* continue and look for another dead node */
	return -EAGAIN;
}

/* Run here only as the elected recovery master: collect lock state for
 * dead_node from every live node (retrying forever on transient
 * failure), wait for all nodes to reach DONE, then send the finalize
 * message and take local ownership of the recovered lockreses. */
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
{
	int status = 0;
	struct dlm_reco_node_data *ndata;
	int all_nodes_done;
	int destroy = 0;
	int pass = 0;

	do {
		/* we have become recovery master.  there is no escaping
		 * this, so just keep trying until we get it. */
		status = dlm_init_recovery_area(dlm, dead_node);
		if (status < 0) {
			mlog(ML_ERROR, "%s: failed to alloc recovery area, "
			     "retrying\n", dlm->name);
			msleep(1000);
		}
	} while (status != 0);

	/* safe to access the node data list without a lock, since this
	 * process is the only one to change the list */
	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
		ndata->state = DLM_RECO_NODE_DATA_REQUESTING;

		mlog(0, "requesting lock info from node %u\n",
		     ndata->node_num);

		if (ndata->node_num == dlm->node_num) {
			/* local locks need no network round-trip */
			ndata->state = DLM_RECO_NODE_DATA_DONE;
			continue;
		}

		do {
			status = dlm_request_all_locks(dlm, ndata->node_num,
						       dead_node);
			if (status < 0) {
				mlog_errno(status);
				if (dlm_is_host_down(status)) {
					/* node died, ignore it for recovery */
					status = 0;
					ndata->state = DLM_RECO_NODE_DATA_DEAD;
					/* wait for the domain map to catch up
					 * with the network state. */
					wait_event_timeout(dlm->dlm_reco_thread_wq,
							   dlm_is_node_dead(dlm,
								ndata->node_num),
							   msecs_to_jiffies(1000));
					mlog(0, "waited 1 sec for %u, "
					     "dead? %s\n", ndata->node_num,
					     dlm_is_node_dead(dlm, ndata->node_num) ?
					     "yes" : "no");
				} else {
					/* -ENOMEM on the other node */
					mlog(0, "%s: node %u returned "
					     "%d during recovery, retrying "
					     "after a short wait\n",
					     dlm->name, ndata->node_num,
					     status);
					msleep(100);
				}
			}
		} while (status != 0);

		spin_lock(&dlm_reco_state_lock);
		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
			case DLM_RECO_NODE_DATA_REQUESTED:
				BUG();
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				mlog(0, "node %u died after requesting "
				     "recovery info for node %u\n",
				     ndata->node_num, dead_node);
				/* fine.  don't need this node's info.
				 * continue without it. */
				break;
			case DLM_RECO_NODE_DATA_REQUESTING:
				ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
				mlog(0, "now receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
				mlog(0, "already receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_DONE:
				mlog(0, "already DONE receiving recovery data "
				     "from node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
		}
		spin_unlock(&dlm_reco_state_lock);
	}

	mlog(0, "done requesting all lock info\n");

	/* nodes should be sending reco data now
	 * just need to wait */

	while (1) {
		/* check all the nodes now to see if we are
		 * done, or if anyone died */
		all_nodes_done = 1;
		spin_lock(&dlm_reco_state_lock);
		list_for_each_entry(ndata, &dlm->reco.node_data, list) {
			mlog(0, "checking recovery state of node %u\n",
			     ndata->node_num);
			switch (ndata->state) {
				case DLM_RECO_NODE_DATA_INIT:
				case DLM_RECO_NODE_DATA_REQUESTING:
					mlog(ML_ERROR, "bad ndata state for "
					     "node %u: state=%d\n",
					     ndata->node_num, ndata->state);
					BUG();
					break;
				case DLM_RECO_NODE_DATA_DEAD:
					mlog(0, "node %u died after "
					     "requesting recovery info for "
					     "node %u\n", ndata->node_num,
					     dead_node);
					break;
				case DLM_RECO_NODE_DATA_RECEIVING:
				case DLM_RECO_NODE_DATA_REQUESTED:
					mlog(0, "%s: node %u still in state %s\n",
					     dlm->name, ndata->node_num,
					     ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
					     "receiving" : "requested");
					all_nodes_done = 0;
					break;
				case DLM_RECO_NODE_DATA_DONE:
					mlog(0, "%s: node %u state is done\n",
					     dlm->name, ndata->node_num);
					break;
				case DLM_RECO_NODE_DATA_FINALIZE_SENT:
					mlog(0, "%s: node %u state is finalize\n",
					     dlm->name, ndata->node_num);
					break;
			}
		}
		spin_unlock(&dlm_reco_state_lock);

		mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
		     all_nodes_done?"yes":"no");
		if (all_nodes_done) {
			int ret;

			/* all nodes are now in DLM_RECO_NODE_DATA_DONE state
			 * just send a finalize message to everyone and
			 * clean up */
			mlog(0, "all nodes are done! send finalize\n");
			ret = dlm_send_finalize_reco_message(dlm);
			if (ret < 0)
				mlog_errno(ret);

			spin_lock(&dlm->spinlock);
			dlm_finish_local_lockres_recovery(dlm, dead_node,
							  dlm->node_num);
			spin_unlock(&dlm->spinlock);
			mlog(0, "should be done with recovery!\n");

			mlog(0, "finishing recovery of %s at %lu, "
			     "dead=%u, this=%u, new=%u\n", dlm->name,
			     jiffies, dlm->reco.dead_node,
			     dlm->node_num, dlm->reco.new_master);
			destroy = 1;
			status = 0;
			/* rescan everything marked dirty along the way */
			dlm_kick_thread(dlm, NULL);
			break;
		}
		/* wait to be signalled, with periodic timeout
		 * to check for node death */
		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
					 kthread_should_stop(),
					 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));

	}

	if (destroy)
		dlm_destroy_recovery_area(dlm, dead_node);

	return status;
}

/* Build dlm->reco.node_data: one INIT-state entry per live node in
 * the domain map.  Returns 0 or -ENOMEM (after cleaning up). */
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	int num=0;
	struct dlm_reco_node_data *ndata;

	spin_lock(&dlm->spinlock);
	memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
	/* nodes can only be removed (by dying) after dropping
	 * this lock, and death will be trapped later, so this should do */
	spin_unlock(&dlm->spinlock);

	while (1) {
		num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num);
		if (num >= O2NM_MAX_NODES) {
			break;
		}
		BUG_ON(num == dead_node);

		ndata = kzalloc(sizeof(*ndata), GFP_NOFS);
		if (!ndata) {
			dlm_destroy_recovery_area(dlm, dead_node);
			return -ENOMEM;
		}
		ndata->node_num = num;
		ndata->state = DLM_RECO_NODE_DATA_INIT;

		spin_lock(&dlm_reco_state_lock);
		list_add_tail(&ndata->list, &dlm->reco.node_data);
		spin_unlock(&dlm_reco_state_lock);

		num++;
	}

	return 0;
}

/* Free every entry on dlm->reco.node_data. */
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_reco_node_data *ndata, *next;
	LIST_HEAD(tmplist);

	spin_lock(&dlm_reco_state_lock);
	list_splice_init(&dlm->reco.node_data, &tmplist);
	spin_unlock(&dlm_reco_state_lock);

	list_for_each_entry_safe(ndata, next, &tmplist, list) {
		list_del_init(&ndata->list);
		kfree(ndata);
	}
}

/* Ask request_from for all lock state it holds for dead_node.  The
 * data itself arrives later via the migration-lockres handler; this
 * only sends the request message. */
static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
				 u8 dead_node)
{
	struct dlm_lock_request lr;
	enum dlm_status ret;

	mlog(0, "\n");

	mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
	     "to %u\n", dead_node, request_from);

	memset(&lr, 0, sizeof(lr));
	lr.node_idx = dlm->node_num;
	lr.dead_node = dead_node;

	// send message
	ret = DLM_NOLOCKMGR;
	ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
				 &lr, sizeof(lr), request_from, NULL);

	/* negative status is handled by caller */
	if (ret < 0)
		mlog(ML_ERROR, "Error %d when sending message %u (key "
		     "0x%x) to node %u\n", ret, DLM_LOCK_REQUEST_MSG,
		     dlm->key, request_from);

	// return from here, then
	// sleep until all received or error
	return ret;

}

/* Network handler for DLM_LOCK_REQUEST_MSG: queues a work item that
 * will push this node's lock state for the dead node back to the
 * recovery master. */
int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
				  void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	if (lr->dead_node != dlm->reco.dead_node) {
		mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
		     "dead_node is %u\n", dlm->name, lr->node_idx,
		     lr->dead_node, dlm->reco.dead_node);
		dlm_print_reco_node_status(dlm);
		/* this is a hack */
		dlm_put(dlm);
		return -ENOMEM;
	}
	BUG_ON(lr->dead_node != dlm->reco.dead_node);

	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!item) {
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* this will get freed by dlm_request_all_locks_worker */
	buf = (char *) __get_free_page(GFP_NOFS);
	if (!buf) {
		kfree(item);
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* queue up work for dlm_request_all_locks_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
	item->u.ral.reco_master = lr->node_idx;
	item->u.ral.dead_node = lr->dead_node;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);

	dlm_put(dlm);
	return 0;
}

/* Work-queue side of the lock request: send every locally known
 * lockres owned by (or with unknown owner after) the dead node to the
 * recovery master, then an all-done message.  data is the page used
 * as the migration message buffer; freed here. */
static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_migratable_lockres *mres;
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm;
	LIST_HEAD(resources);
	int ret;
	u8 dead_node, reco_master;
	int skip_all_done = 0;

	dlm = item->dlm;
	dead_node = item->u.ral.dead_node;
	reco_master = item->u.ral.reco_master;
	mres = (struct dlm_migratable_lockres *)data;

	mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
	     dlm->name, dead_node, reco_master);

	if (dead_node != dlm->reco.dead_node ||
	    reco_master != dlm->reco.new_master) {
		/* worker could have been created before the recovery master
		 * died.  if so, do not continue, but do not error. */
		if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
			mlog(ML_NOTICE, "%s: will not send recovery state, "
			     "recovery master %u died, thread=(dead=%u,mas=%u)"
			     " current=(dead=%u,mas=%u)\n", dlm->name,
			     reco_master, dead_node, reco_master,
			     dlm->reco.dead_node, dlm->reco.new_master);
		} else {
			mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
			     "master=%u), request(dead=%u, master=%u)\n",
			     dlm->name, dlm->reco.dead_node,
			     dlm->reco.new_master, dead_node, reco_master);
		}
		goto leave;
	}

	/* lock resources should have already been moved to the
	 * dlm->reco.resources list.  now move items from that list
	 * to a temp list if the dead owner matches.  note that the
	 * whole cluster recovers only one node at a time, so we
	 * can safely move UNKNOWN lock resources for each recovery
	 * session. */
	dlm_move_reco_locks_to_list(dlm, &resources, dead_node);

	/* now we can begin blasting lockreses without the dlm lock */

	/* any errors returned will be due to the new_master dying,
	 * the dlm_reco_thread should detect this */
	list_for_each_entry(res, &resources, recovering) {
		ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
					   DLM_MRES_RECOVERY);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery state for dead node %u, ret=%d\n", dlm->name,
			     reco_master, dead_node, ret);
			skip_all_done = 1;
			break;
		}
	}

	/* move the resources back to the list */
	spin_lock(&dlm->spinlock);
	list_splice_init(&resources, &dlm->reco.resources);
	spin_unlock(&dlm->spinlock);

	if (!skip_all_done) {
		ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery all-done for dead node %u, ret=%d\n",
			     dlm->name, reco_master, dead_node, ret);
		}
	}
leave:
	free_page((unsigned long)data);
}

/* Tell the recovery master we have finished sending our lock state
 * for dead_node.  BUGs on any send error other than the target
 * itself going down. */
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
{
	int ret, tmpret;
	struct dlm_reco_data_done done_msg;

	memset(&done_msg, 0, sizeof(done_msg));
	done_msg.node_idx = dlm->node_num;
	done_msg.dead_node = dead_node;

	mlog(0, "sending DATA DONE message to %u, "
	     "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
	     done_msg.dead_node);

	ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
				 sizeof(done_msg), send_to, &tmpret);
	if (ret < 0) {
		mlog(ML_ERROR, "Error %d when sending message %u (key "
		     "0x%x) to node %u\n", ret, DLM_RECO_DATA_DONE_MSG,
		     dlm->key, send_to);
		if (!dlm_is_host_down(ret)) {
			BUG();
		}
	} else
		ret = tmpret;
	return ret;
}

/* Network handler for DLM_RECO_DATA_DONE_MSG: advance the sending
 * node's reco state to DONE and kick the recovery thread.  Returns
 * -EINVAL if the sender is unknown or in a state that cannot legally
 * report DONE. */
int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
	struct dlm_reco_node_data *ndata = NULL;
	int ret = -EINVAL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
	     "node_idx=%u, this node=%u\n", done->dead_node,
	     dlm->reco.dead_node, done->node_idx, dlm->node_num);

	mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
			"Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
			"node_idx=%u, this node=%u\n", done->dead_node,
			dlm->reco.dead_node, done->node_idx, dlm->node_num);

	spin_lock(&dlm_reco_state_lock);
	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		if (ndata->node_num != done->node_idx)
			continue;

		switch (ndata->state) {
			/* should have moved beyond INIT but not to FINALIZE yet */
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_DEAD:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				mlog(ML_ERROR, "bad ndata state for node %u:"
				     " state=%d\n", ndata->node_num,
				     ndata->state);
				BUG();
				break;
			/* these states are possible at this point, anywhere along
			 * the line of recovery */
			case DLM_RECO_NODE_DATA_DONE:
			case DLM_RECO_NODE_DATA_RECEIVING:
			case DLM_RECO_NODE_DATA_REQUESTED:
			case DLM_RECO_NODE_DATA_REQUESTING:
				mlog(0, "node %u is DONE sending "
				     "recovery data!\n",
				     ndata->node_num);

				ndata->state = DLM_RECO_NODE_DATA_DONE;
				ret = 0;
				break;
		}
	}
	spin_unlock(&dlm_reco_state_lock);

	/* wake the recovery thread, some node is done */
	if (!ret)
		dlm_kick_recovery_thread(dlm);

	if (ret < 0)
		mlog(ML_ERROR, "failed to find recovery node data for node "
		     "%u\n", done->node_idx);
	dlm_put(dlm);

	mlog(0, "leaving reco data done handler, ret=%d\n", ret);
	return ret;
}

/* Move lockreses relevant to dead_node from dlm->reco.resources onto
 * the caller's list, pruning any $RECOVERY lock entries the dead node
 * held so later recovery cannot hang on them. */
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list,
					u8 dead_node)
{
	struct dlm_lock_resource *res, *next;
	struct dlm_lock *lock;

	spin_lock(&dlm->spinlock);
	list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
		/* always prune any $RECOVERY entries for dead nodes,
		 * otherwise hangs can occur during later recovery */
		if (dlm_is_recovery_lock(res->lockname.name,
					 res->lockname.len)) {
			spin_lock(&res->spinlock);
			list_for_each_entry(lock, &res->granted, list) {
				if (lock->ml.node == dead_node) {
					mlog(0, "AHA! there was "
					     "a $RECOVERY lock for dead "
					     "node %u (%s)!\n",
					     dead_node, dlm->name);
					list_del_init(&lock->list);
					dlm_lock_put(lock);
					break;
				}
			}
			spin_unlock(&res->spinlock);
			continue;
		}

		if (res->owner == dead_node) {
			mlog(0, "found lockres owned by dead node while "
			     "doing recovery for node %u. sending it.\n",
			     dead_node);
			list_move_tail(&res->recovering, list);
		} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "found UNKNOWN owner while doing recovery "
			     "for node %u. sending it.\n", dead_node);
			list_move_tail(&res->recovering, list);
		}
	}
	spin_unlock(&dlm->spinlock);
}

/* Count locks on all three queues (granted, converting, blocked).
 * NOTE(review): relies on the three list_heads being laid out
 * consecutively in struct dlm_lock_resource (queue++). */
static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
{
	int total_locks = 0;
	struct list_head *iter, *queue = &res->granted;
	int i;

	for (i=0; i<3; i++) {
		list_for_each(iter, queue)
			total_locks++;
		queue++;
	}
	return total_locks;
}

/* Transmit the currently accumulated batch of locks in mres to
 * send_to, tagging DLM_MRES_ALL_DONE on the final batch, then reset
 * mres for the next batch.  Returns 0 or a negative error/status. */
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks)
{
	u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
	int mres_total_locks = be32_to_cpu(mres->total_locks);
	int sz, ret = 0, status = 0;
	u8 orig_flags = mres->flags,
	   orig_master = mres->master;

	BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
	if (!mres->num_locks)
		return 0;

	sz = sizeof(struct dlm_migratable_lockres) +
		(mres->num_locks * sizeof(struct dlm_migratable_lock));

	/* add an all-done flag if we reached the last lock */
	orig_flags = mres->flags;
	BUG_ON(total_locks > mres_total_locks);
	if (total_locks == mres_total_locks)
		mres->flags |= DLM_MRES_ALL_DONE;

	mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery",
	     send_to);

	/* send it */
	ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
				 sz, send_to, &status);
	if (ret < 0) {
		/* XXX: negative status is not handled.
		 * this will end up killing this node. */
		mlog(ML_ERROR, "Error %d when sending message %u (key "
		     "0x%x) to node %u\n", ret, DLM_MIG_LOCKRES_MSG,
		     dlm->key, send_to);
	} else {
		/* might get an -ENOMEM back here */
		ret = status;
		if (ret < 0) {
			mlog_errno(ret);

			if (ret == -EFAULT) {
				mlog(ML_ERROR, "node %u told me to kill "
				     "myself!\n", send_to);
				BUG();
			}
		}
	}

	/* zero and reinit the message buffer */
	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, mres_total_locks,
				    mig_cookie, orig_flags, orig_master);
	return ret;
}

/* Reset the one-page migration message buffer and fill in its header
 * fields (name, totals, cookie, flags, master). */
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master)
{
	/* mres here is one full page */
	clear_page(mres);
	mres->lockname_len = namelen;
	memcpy(mres->lockname, lockname, namelen);
	mres->num_locks = 0;
	mres->total_locks = cpu_to_be32(total_locks);
	mres->mig_cookie = cpu_to_be64(cookie);
	mres->flags = flags;
	mres->master = master;
}

/* Copy this lock's lvb into the migration buffer when it may carry a
 * valid one (granted EX/PR, not blocked); BUG if two locks disagree
 * about the lvb contents. */
static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
					  struct dlm_migratable_lockres *mres,
					  int queue)
{
	if (!lock->lksb)
	       return;

	/* Ignore lvb in all locks in the blocked list */
	if (queue == DLM_BLOCKED_LIST)
		return;

	/* Only consider lvbs in locks with granted EX or PR lock levels */
	if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
		return;

	if (dlm_lvb_is_empty(mres->lvb)) {
		memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
		return;
	}

	/* Ensure the lvb copied for migration matches in other valid locks */
	if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
		return;

	mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
	     "node=%u\n",
	     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
	     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
	     lock->lockres->lockname.len, lock->lockres->lockname.name,
	     lock->ml.node);
	dlm_print_one_lock_resource(lock->lockres);
	BUG();
}

/* returns 1 if this lock fills the network structure,
 * 0 otherwise */
static int dlm_add_lock_to_array(struct dlm_lock *lock,
				 struct dlm_migratable_lockres *mres, int queue)
{
	struct dlm_migratable_lock *ml;
	int lock_num = mres->num_locks;

	ml = &(mres->ml[lock_num]);
	ml->cookie = lock->ml.cookie;
	ml->type = lock->ml.type;
	ml->convert_type = lock->ml.convert_type;
	ml->highest_blocked = lock->ml.highest_blocked;
	ml->list = queue;
	if (lock->lksb) {
		ml->flags = lock->lksb->flags;
		dlm_prepare_lvb_for_migration(lock, mres, queue);
	}
	ml->node = lock->ml.node;
	mres->num_locks++;
	/* we reached the max, send this network message */
	if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
		return 1;
	return 0;
}

/* Append a sentinel "dummy" lock (cookie 0, all-IVMODE, blocked list)
 * that conveys only a mastery reference for a lockres with no locks. */
static void dlm_add_dummy_lock(struct dlm_ctxt *dlm,
			       struct dlm_migratable_lockres *mres)
{
	struct dlm_lock dummy;
	memset(&dummy, 0, sizeof(dummy));
	dummy.ml.cookie = 0;
	dummy.ml.type = LKM_IVMODE;
	dummy.ml.convert_type = LKM_IVMODE;
	dummy.ml.highest_blocked = LKM_IVMODE;
	dummy.lksb = NULL;
	dummy.ml.node = dlm->node_num;
	dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST);
}

/* Detect the sentinel written by dlm_add_dummy_lock(); on match,
 * report the sending node through *nodenum and return 1. */
static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lock *ml,
				    u8 *nodenum)
{
	if (unlikely(ml->cookie == 0 &&
	    ml->type == LKM_IVMODE &&
	    ml->convert_type == LKM_IVMODE &&
	    ml->highest_blocked == LKM_IVMODE &&
	    ml->list == DLM_BLOCKED_LIST)) {
		*nodenum = ml->node;
		return 1;
	}
	return 0;
}

/* Stream every lock on res (all three queues) to send_to in one or
 * more DLM_MIG_LOCKRES_MSG packets; a dummy lock is sent when there
 * are no locks at all.  BUGs unless the failure was the target node
 * going down. */
int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			 struct dlm_migratable_lockres *mres,
			 u8 send_to, u8 flags)
{
	struct list_head *queue;
	int total_locks, i;
	u64 mig_cookie = 0;
	struct dlm_lock *lock;
	int ret = 0;

	BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	mlog(0, "sending to %u\n", send_to);

	total_locks = dlm_num_locks_in_lockres(res);
	if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
		/* rare, but possible */
		mlog(0, "argh. lockres has %d locks. this will "
		     "require more than one network packet to "
		     "migrate\n", total_locks);
		mig_cookie = dlm_get_next_mig_cookie();
	}

	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, total_locks,
				    mig_cookie, flags, res->owner);

	total_locks = 0;
	for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_entry(lock, queue, list) {
			/* add another lock. */
			total_locks++;
			if (!dlm_add_lock_to_array(lock, mres, i))
				continue;

			/* this filled the lock message,
			 * we must send it immediately. */
			ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
						       res, total_locks);
			if (ret < 0)
				goto error;
		}
	}
	if (total_locks == 0) {
		/* send a dummy lock to indicate a mastery reference only */
		mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     send_to, flags & DLM_MRES_RECOVERY ? "recovery" :
		     "migration");
		dlm_add_dummy_lock(dlm, mres);
	}
	/* flush any remaining locks */
	ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
	if (ret < 0)
		goto error;
	return ret;

error:
	mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
	     dlm->name, ret);
	if (!dlm_is_host_down(ret))
		BUG();
	mlog(0, "%s: node %u went down while sending %s "
	     "lockres %.*s\n", dlm->name, send_to,
	     flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
	     res->lockname.len, res->lockname.name);
	return ret;
}

/*
 * this message will contain no more than one page worth of
 * recovery data, and it will work on only one lockres.
 * there may be many locks in this page, and we may need to wait
 * for additional packets to complete all the locks (rare, but
 * possible).
 */
/*
 * NOTE: the allocation error cases here are scary
 * we really cannot afford to fail an alloc in recovery
 * do we spin?
 * returning an error only delays the problem really
 */

/*
 * Network handler for one page of migrated/recovered lock state.  Finds
 * or creates the target lockres, marks it RECOVERING/MIGRATING, and
 * queues the real lock-attach work to dlm_mig_lockres_worker (we must
 * not block in a o2net handler).
 */
int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_migratable_lockres *mres =
		(struct dlm_migratable_lockres *)msg->buf;
	int ret = 0;
	u8 real_master;
	u8 extra_refs = 0;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;
	struct dlm_lock_resource *res = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	real_master = mres->master;
	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* cannot migrate a lockres with no master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
	}

	mlog(0, "%s message received from node %u\n",
		  (mres->flags & DLM_MRES_RECOVERY) ?
		  "recovery" : "migration", mres->master);
	if (mres->flags & DLM_MRES_ALL_DONE)
		mlog(0, "all done flag. all lockres data received!\n");

	ret = -ENOMEM;
	buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!buf || !item)
		goto leave;

	/* lookup the lock to see if we have a secondary queue for this
	 * already... just add the locks in and this will have its owner
	 * and RECOVERY flag changed when it completes. */
	res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
	if (res) {
	 	/* this will get a ref on res */
		/* mark it as recovering/migrating and hash it */
		spin_lock(&res->spinlock);
		if (mres->flags & DLM_MRES_RECOVERY) {
			res->state |= DLM_LOCK_RES_RECOVERING;
		} else {
			if (res->state & DLM_LOCK_RES_MIGRATING) {
				/* this is at least the second
				 * lockres message */
				mlog(0, "lock %.*s is already migrating\n",
					  mres->lockname_len,
					  mres->lockname);
			} else if (res->state & DLM_LOCK_RES_RECOVERING) {
				/* caller should BUG */
				mlog(ML_ERROR, "node is attempting to migrate "
				     "lock %.*s, but marked as recovering!\n",
				     mres->lockname_len, mres->lockname);
				ret = -EFAULT;
				spin_unlock(&res->spinlock);
				goto leave;
			}
			res->state |= DLM_LOCK_RES_MIGRATING;
		}
		spin_unlock(&res->spinlock);
	} else {
		/* need to allocate, just like if it was
		 * mastered here normally  */
		res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
		if (!res)
			goto leave;

		/* to match the ref that we would have gotten if
		 * dlm_lookup_lockres had succeeded */
		dlm_lockres_get(res);

		/* mark it as recovering/migrating and hash it */
		if (mres->flags & DLM_MRES_RECOVERY)
			res->state |= DLM_LOCK_RES_RECOVERING;
		else
			res->state |= DLM_LOCK_RES_MIGRATING;

		spin_lock(&dlm->spinlock);
		__dlm_insert_lockres(dlm, res);
		spin_unlock(&dlm->spinlock);

		/* Add an extra ref for this lock-less lockres lest the
		 * dlm_thread purges it before we get the chance to add
		 * locks to it */
		dlm_lockres_get(res);

		/* There are three refs that need to be put.
		 * 1. Taken above.
		 * 2. kref_init in dlm_new_lockres()->dlm_init_lockres().
		 * 3. dlm_lookup_lockres()
		 * The first one is handled at the end of this function. The
		 * other two are handled in the worker thread after locks have
		 * been attached. Yes, we don't wait for purge time to match
		 * kref_init. The lockres will still have atleast one ref
		 * added because it is in the hash __dlm_insert_lockres() */
		extra_refs++;

		/* now that the new lockres is inserted,
		 * make it usable by other processes */
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
	}

	/* at this point we have allocated everything we need,
	 * and we have a hashed lockres with an extra ref and
	 * the proper res->state flags. */
	ret = 0;
	spin_lock(&res->spinlock);
	/* drop this either when master requery finds a different master
	 * or when a lock is added by the recovery worker */
	dlm_lockres_grab_inflight_ref(dlm, res);
	if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* migration cannot have an unknown master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
		mlog(0, "recovery has passed me a lockres with an "
			  "unknown owner.. will need to requery: "
			  "%.*s\n", mres->lockname_len, mres->lockname);
	} else {
		/* take a reference now to pin the lockres, drop it
		 * when locks are added in the worker */
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
	}
	spin_unlock(&res->spinlock);

	/* queue up work for dlm_mig_lockres_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));  /* copy the whole message */
	dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
	item->u.ml.lockres = res; /* already have a ref */
	item->u.ml.real_master = real_master;
	item->u.ml.extra_ref = extra_refs;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);

leave:
	/* One extra ref taken needs to be put here */
	if (extra_refs)
		dlm_lockres_put(res);
	dlm_put(dlm);
	if (ret < 0) {
		if (buf)
			kfree(buf);
		if (item)
			kfree(item);
		mlog_errno(ret);
	}

	return ret;
}

/*
 * Deferred worker for dlm_mig_lockres_handler: resolves an unknown
 * master via requery if needed, attaches the received lock state to the
 * lockres, and finishes migration when the ALL_DONE message arrives.
 * Drops the lockres refs taken by the handler (see comment there).
 */
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm;
	struct dlm_migratable_lockres *mres;
	int ret = 0;
	struct dlm_lock_resource *res;
	u8 real_master;
	u8 extra_ref;

	dlm = item->dlm;
	mres = (struct dlm_migratable_lockres *)data;

	res = item->u.ml.lockres;
	real_master = item->u.ml.real_master;
	extra_ref = item->u.ml.extra_ref;

	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* this case is super-rare. only occurs if
		 * node death happens during migration. */
again:
		ret = dlm_lockres_master_requery(dlm, res, &real_master);
		if (ret < 0) {
			mlog(0, "dlm_lockres_master_requery ret=%d\n",
				  ret);
			goto again;
		}
		if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lockres %.*s not claimed. "
				   "this node will take it.\n",
				   res->lockname.len, res->lockname.name);
		} else {
			spin_lock(&res->spinlock);
			dlm_lockres_drop_inflight_ref(dlm, res);
			spin_unlock(&res->spinlock);
			mlog(0, "master needs to respond to sender "
				  "that node %u still owns %.*s\n",
				  real_master, res->lockname.len,
				  res->lockname.name);
			/* cannot touch this lockres */
			goto leave;
		}
	}

	ret = dlm_process_recovery_data(dlm, res, mres);
	if (ret < 0)
		mlog(0, "dlm_process_recovery_data returned %d\n", ret);
	else
		mlog(0, "dlm_process_recovery_data succeeded\n");

	if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
	                   (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
		ret = dlm_finish_migration(dlm, res, mres->master);
		if (ret < 0)
			mlog_errno(ret);
	}

leave:
	/* See comment in dlm_mig_lockres_handler() */
	if (res) {
		if (extra_ref)
			dlm_lockres_put(res);
		dlm_lockres_put(res);
	}
	kfree(data);
}

/*
 * Ask every other node in the domain who masters this lockres.  Used
 * only when one of the two nodes involved in a migration died mid-way.
 */
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 *real_master)
{
	struct dlm_node_iter iter;
	int nodenum;
	int ret = 0;

	*real_master = DLM_LOCK_RES_OWNER_UNKNOWN;

	/* we only reach here if one of the two nodes in a
	 * migration died while the migration was in progress.
	 * at this point we need to requery the master. we
	 * know that the new_master got as far as creating
	 * an mle on at least one node, but we do not know
	 * if any nodes had actually cleared the mle and set
	 * the master to the new_master. the old master
	 * is supposed to set the owner to UNKNOWN in the
	 * event of a new_master death, so the only possible
	 * responses that we can get from nodes here are
	 * that the master is new_master, or that the master
	 * is UNKNOWN.
	 * if all nodes come back with UNKNOWN then we know
	 * the lock needs remastering here.
	 * if any node comes back with a valid master, check
	 * to see if that master is the one that we are
	 * recovering. if so, then the new_master died and
	 * we need to remaster this lock. if not, then the
	 * new_master survived and that node will respond to
	 * other nodes about the owner.
	 * if there is an owner, this node needs to dump this
	 * lockres and alert the sender that this lockres
	 * was rejected. */
	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		/* do not send to self */
		if (nodenum == dlm->node_num)
			continue;
		ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
		if (ret < 0) {
			mlog_errno(ret);
			if (!dlm_is_host_down(ret))
				BUG();
			/* host is down, so answer for that node would be
			 * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */
		}
		if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lock master is %u\n", *real_master);
			break;
		}
	}
	return ret;
}

/*
 * Send a single master-requery message to nodenum; on success stores
 * the responder's view of the owner (possibly UNKNOWN) in *real_master.
 */
int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			  u8 nodenum, u8 *real_master)
{
	int ret = -EINVAL;
	struct dlm_master_requery req;
	int status = DLM_LOCK_RES_OWNER_UNKNOWN;

	memset(&req, 0, sizeof(req));
	req.node_idx = dlm->node_num;
	req.namelen = res->lockname.len;
	memcpy(req.name, res->lockname.name, res->lockname.len);

	ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
				 &req, sizeof(req), nodenum, &status);
	/* XXX: negative status not handled properly here. */
	if (ret < 0)
		mlog(ML_ERROR, "Error %d when sending message %u (key "
		     "0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG,
		     dlm->key, nodenum);
	else {
		BUG_ON(status < 0);
		BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
		*real_master = (u8) (status & 0xff);
		mlog(0, "node %u responded to master requery with %u\n",
			  nodenum, *real_master);
		ret = 0;
	}
	return ret;
}

/* this function cannot error, so unless the sending
 * or receiving of the message failed, the owner can
 * be trusted */
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	unsigned int hash;
	int master = DLM_LOCK_RES_OWNER_UNKNOWN;
	u32 flags = DLM_ASSERT_MASTER_REQUERY;

	if (!dlm_grab(dlm)) {
		/* since the domain has gone away on this
		 * node, the proper response is UNKNOWN */
		return master;
	}

	hash = dlm_lockid_hash(req->name, req->namelen);

	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
	if (res) {
		spin_lock(&res->spinlock);
		master = res->owner;
		if (master == dlm->node_num) {
			int ret = dlm_dispatch_assert_master(dlm, res,
							     0, 0, flags);
			if (ret < 0) {
				/* NOTE(review): this logs -ENOMEM
				 * unconditionally, masking the actual
				 * errno in ret — verify intent */
				mlog_errno(-ENOMEM);
				/* retry!? */
				BUG();
			}
		} else /* put.. incase we are not the master */
			/* NOTE(review): dropping the ref while still
			 * holding res->spinlock — confirm the final put
			 * can never happen here */
			dlm_lockres_put(res);
		spin_unlock(&res->spinlock);
	}
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
	return master;
}

/*
 * Map a queue index (0..2) to the corresponding list head, relying on
 * granted/converting/blocked being laid out consecutively in the
 * lockres structure.
 */
static inline struct list_head *
dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
{
	struct list_head *ret;
	BUG_ON(list_num < 0);
	BUG_ON(list_num > 2);
	ret = &(res->granted);
	ret += list_num;
	return ret;
}

/* TODO: do ast flush business
 * TODO: do MIGRATING and RECOVERING spinning
 */

/*
* NOTE about in-flight requests during migration:
*
* Before attempting the migrate, the master has marked the lockres as
* MIGRATING and then flushed all of its pending ASTS.
 So any in-flight
* requests either got queued before the MIGRATING flag got set, in which
* case the lock data will reflect the change and a return message is on
* the way, or the request failed to get in before MIGRATING got set. In
* this case, the caller will be told to spin and wait for the MIGRATING
* flag to be dropped, then recheck the master.
* This holds true for the convert, cancel and unlock cases, and since lvb
* updates are tied to these same messages, it applies to lvb updates as
* well. For the lock case, there is no way a lock can be on the master
* queue and not be on the secondary queue since the lock is always added
* locally first. This means that the new target node will never be sent
* a lock that he doesn't already have on the list.
* In total, this means that the local lock is correct and should not be
* updated to match the one sent by the master. Any messages sent back
* from the master before the MIGRATING flag will bring the lock properly
* up-to-date, and the change will be ordered properly for the waiter.
* We will *not* attempt to modify the lock underneath the waiter.
*/

/*
 * Attach one page worth of received lock state to a lockres.  Local
 * locks (migration only) are merely moved to the correct queue; remote
 * locks are allocated fresh, get their lvb restored, and are appended
 * to the proper queue.  A dummy lock only sets the sender's refmap bit.
 */
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres)
{
	struct dlm_migratable_lock *ml;
	struct list_head *queue;
	struct list_head *tmpq = NULL;
	struct dlm_lock *newlock = NULL;
	struct dlm_lockstatus *lksb = NULL;
	int ret = 0;
	int i, j, bad;
	struct dlm_lock *lock = NULL;
	u8 from = O2NM_MAX_NODES;
	unsigned int added = 0;
	__be64 c;

	mlog(0, "running %d locks for this lockres\n", mres->num_locks);
	for (i=0; i<mres->num_locks; i++) {
		ml = &(mres->ml[i]);

		if (dlm_is_dummy_lock(dlm, ml, &from)) {
			/* placeholder, just need to set the refmap bit */
			BUG_ON(mres->num_locks != 1);
			mlog(0, "%s:%.*s: dummy lock for %u\n",
			     dlm->name, mres->lockname_len, mres->lockname,
			     from);
			spin_lock(&res->spinlock);
			dlm_lockres_set_refmap_bit(from, res);
			spin_unlock(&res->spinlock);
			added++;
			break;
		}
		BUG_ON(ml->highest_blocked != LKM_IVMODE);
		newlock = NULL;
		lksb = NULL;

		queue = dlm_list_num_to_pointer(res, ml->list);
		tmpq = NULL;

		/* if the lock is for the local node it needs to
		 * be moved to the proper location within the queue.
		 * do not allocate a new lock structure. */
		if (ml->node == dlm->node_num) {
			/* MIGRATION ONLY! */
			BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));

			spin_lock(&res->spinlock);
			/* search all three queues for the matching cookie */
			for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
				tmpq = dlm_list_idx_to_ptr(res, j);
				list_for_each_entry(lock, tmpq, list) {
					if (lock->ml.cookie != ml->cookie)
						lock = NULL;
					else
						break;
				}
				if (lock)
					break;
			}

			/* lock is always created locally first, and
			 * destroyed locally last. it must be on the list */
			if (!lock) {
				c = ml->cookie;
				mlog(ML_ERROR, "Could not find local lock "
					       "with cookie %u:%llu, node %u, "
					       "list %u, flags 0x%x, type %d, "
					       "conv %d, highest blocked %d\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     ml->node, ml->list, ml->flags, ml->type,
				     ml->convert_type, ml->highest_blocked);
				__dlm_print_one_lock_resource(res);
				BUG();
			}

			if (lock->ml.node != ml->node) {
				c = lock->ml.cookie;
				mlog(ML_ERROR, "Mismatched node# in lock "
				     "cookie %u:%llu, name %.*s, node %u\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     res->lockname.len, res->lockname.name,
				     lock->ml.node);
				c = ml->cookie;
				mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
				     "node %u, list %u, flags 0x%x, type %d, "
				     "conv %d, highest blocked %d\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     ml->node, ml->list, ml->flags, ml->type,
				     ml->convert_type, ml->highest_blocked);
				__dlm_print_one_lock_resource(res);
				BUG();
			}

			if (tmpq != queue) {
				c = ml->cookie;
				mlog(0, "Lock cookie %u:%llu was on list %u "
				     "instead of list %u for %.*s\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     j, ml->list, res->lockname.len,
				     res->lockname.name);
				__dlm_print_one_lock_resource(res);
				spin_unlock(&res->spinlock);
				continue;
			}

			/* see NOTE above about why we do not update
			 * to match the master here */

			/* move the lock to its proper place */
			/* do not alter lock refcount. switching lists. */
			list_move_tail(&lock->list, queue);
			spin_unlock(&res->spinlock);
			added++;

			mlog(0, "just reordered a local lock!\n");
			continue;
		}

		/* lock is for another node. */
		newlock = dlm_new_lock(ml->type, ml->node,
				       be64_to_cpu(ml->cookie), NULL);
		if (!newlock) {
			ret = -ENOMEM;
			goto leave;
		}
		lksb = newlock->lksb;
		dlm_lock_attach_lockres(newlock, res);

		if (ml->convert_type != LKM_IVMODE) {
			BUG_ON(queue != &res->converting);
			newlock->ml.convert_type = ml->convert_type;
		}
		lksb->flags |= (ml->flags &
				(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));

		if (ml->type == LKM_NLMODE)
			goto skip_lvb;

		if (!dlm_lvb_is_empty(mres->lvb)) {
			if (lksb->flags & DLM_LKSB_PUT_LVB) {
				/* other node was trying to update
				 * lvb when node died. recreate the
				 * lksb with the updated lvb. */
				memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
				/* the lock resource lvb update must happen
				 * NOW, before the spinlock is dropped.
				 * we no longer wait for the AST to update
				 * the lvb. */
				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
			} else {
				/* otherwise, the node is sending its
				 * most recent valid lvb info */
				BUG_ON(ml->type != LKM_EXMODE &&
				       ml->type != LKM_PRMODE);
				if (!dlm_lvb_is_empty(res->lvb) &&
				    (ml->type == LKM_EXMODE ||
				     memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
					int i;
					mlog(ML_ERROR, "%s:%.*s: received bad "
					     "lvb! type=%d\n", dlm->name,
					     res->lockname.len,
					     res->lockname.name, ml->type);
					printk("lockres lvb=[");
					for (i=0; i<DLM_LVB_LEN; i++)
						printk("%02x", res->lvb[i]);
					printk("]\nmigrated lvb=[");
					for (i=0; i<DLM_LVB_LEN; i++)
						printk("%02x", mres->lvb[i]);
					printk("]\n");
					dlm_print_one_lock_resource(res);
					BUG();
				}
				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
			}
		}
skip_lvb:

		/* NOTE:
		 * wrt lock queue ordering and recovery:
		 *    1. order of locks on granted queue is
		 *       meaningless.
		 *    2. order of locks on converting queue is
		 *       LOST with the node death. sorry charlie.
		 *    3. order of locks on the blocked queue is
		 *       also LOST.
		 * order of locks does not affect integrity, it
		 * just means that a lock request may get pushed
		 * back in line as a result of the node death.
		 * also note that for a given node the lock order
		 * for its secondary queue locks is preserved
		 * relative to each other, but clearly *not*
		 * preserved relative to locks from other nodes. */
		bad = 0;
		spin_lock(&res->spinlock);
		list_for_each_entry(lock, queue, list) {
			if (lock->ml.cookie == ml->cookie) {
				c = lock->ml.cookie;
				mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
				     "exists on this lockres!\n", dlm->name,
				     res->lockname.len, res->lockname.name,
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)));

				mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
				     "node=%u, cookie=%u:%llu, queue=%d\n",
				     ml->type, ml->convert_type, ml->node,
				     dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)),
				     ml->list);

				__dlm_print_one_lock_resource(res);
				bad = 1;
				break;
			}
		}
		if (!bad) {
			dlm_lock_get(newlock);
			list_add_tail(&newlock->list, queue);
			mlog(0, "%s:%.*s: added lock for node %u, "
			     "setting refmap bit\n", dlm->name,
			     res->lockname.len, res->lockname.name, ml->node);
			dlm_lockres_set_refmap_bit(ml->node, res);
			added++;
		}
		/* NOTE(review): when bad==1 the freshly allocated newlock
		 * is never attached nor put here — looks like a leak of
		 * the initial kref; confirm against later upstream fixes */
		spin_unlock(&res->spinlock);
	}
	mlog(0, "done running all the locks\n");

leave:
	/* balance the ref taken when the work was queued */
	spin_lock(&res->spinlock);
	dlm_lockres_drop_inflight_ref(dlm, res);
	spin_unlock(&res->spinlock);

	if (ret < 0) {
		mlog_errno(ret);
		if (newlock)
			dlm_lock_put(newlock);
	}

	return ret;
}

/*
 * Place a lockres whose master died onto the recovery list (taking a
 * ref while it is there), and resolve any lock/convert/unlock/cancel
 * operations that were pending against the dead master so the state
 * sent to the new master is consistent.
 */
void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res)
{
	int i;
	struct list_head *queue;
	struct dlm_lock *lock, *next;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);
	res->state |= DLM_LOCK_RES_RECOVERING;
	if (!list_empty(&res->recovering)) {
		mlog(0,
		     "Recovering res %s:%.*s, is already on recovery list!\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		list_del_init(&res->recovering);
		dlm_lockres_put(res);
	}
	/* We need to hold a reference while on the recovery list */
	dlm_lockres_get(res);
	list_add_tail(&res->recovering, &dlm->reco.resources);

	/* find any pending locks and put them back on proper list */
	for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_entry_safe(lock, next, queue, list) {
			dlm_lock_get(lock);
			if (lock->convert_pending) {
				/* move converting lock back to granted */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with convert pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_revert_pending_convert(res, lock);
				lock->convert_pending = 0;
			} else if (lock->lock_pending) {
				/* remove pending lock requests completely */
				BUG_ON(i != DLM_BLOCKED_LIST);
				mlog(0, "node died with lock pending "
				     "on %.*s. remove from blocked list and skip.\n",
				     res->lockname.len, res->lockname.name);
				/* lock will be floating until ref in
				 * dlmlock_remote is freed after the network
				 * call returns. ok for it to not be on any
				 * list since no ast can be called
				 * (the master is dead). */
				dlm_revert_pending_lock(res, lock);
				lock->lock_pending = 0;
			} else if (lock->unlock_pending) {
				/* if an unlock was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master. note that the dlm_unlock
				 * call is still responsible for calling
				 * the unlockast. that will happen after
				 * the network call times out. for now,
				 * just move lists to prepare the new
				 * recovery master. */
				BUG_ON(i != DLM_GRANTED_LIST);
				mlog(0, "node died with unlock pending "
				     "on %.*s. remove from blocked list and skip.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_unlock(res, lock);
				lock->unlock_pending = 0;
			} else if (lock->cancel_pending) {
				/* if a cancel was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with cancel pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_cancel(res, lock);
				lock->cancel_pending = 0;
			}
			dlm_lock_put(lock);
		}
	}
}

/* removes all recovered locks from the recovery list.
 * sets the res->owner to the new master.
 * unsets the RECOVERY flag and wakes waiters. */
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master)
{
	int i;
	struct hlist_node *hash_iter;
	struct hlist_head *bucket;
	struct dlm_lock_resource *res, *next;

	assert_spin_locked(&dlm->spinlock);

	list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
		if (res->owner == dead_node) {
			list_del_init(&res->recovering);
			spin_lock(&res->spinlock);
			/* new_master has our reference from
			 * the lock state sent during recovery */
			dlm_change_lockres_owner(dlm, res, new_master);
			res->state &= ~DLM_LOCK_RES_RECOVERING;
			if (__dlm_lockres_has_locks(res))
				__dlm_dirty_lockres(dlm, res);
			spin_unlock(&res->spinlock);
			wake_up(&res->wq);
			dlm_lockres_put(res);
		}
	}

	/* this will become unnecessary eventually, but
	 * for now we need to run the whole hash, clear
	 * the RECOVERING state and set the owner
	 * if necessary */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_lockres_hash(dlm, i);
		hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
			if (res->state & DLM_LOCK_RES_RECOVERING) {
				if (res->owner == dead_node) {
					mlog(0, "(this=%u) res %.*s owner=%u "
					     "was not on recovering list, but "
					     "clearing state anyway\n",
					     dlm->node_num, res->lockname.len,
					     res->lockname.name, new_master);
				} else if (res->owner == dlm->node_num) {
					mlog(0, "(this=%u) res %.*s owner=%u "
					     "was not on recovering list, "
					     "owner is THIS node, clearing\n",
					     dlm->node_num, res->lockname.len,
					     res->lockname.name, new_master);
				} else
					continue;

				if (!list_empty(&res->recovering)) {
					mlog(0, "%s:%.*s: lockres was "
					     "marked RECOVERING, owner=%u\n",
					     dlm->name, res->lockname.len,
					     res->lockname.name, res->owner);
					list_del_init(&res->recovering);
					dlm_lockres_put(res);
				}
				spin_lock(&res->spinlock);
				/* new_master has our reference from
				 * the lock state sent during recovery */
				dlm_change_lockres_owner(dlm, res, new_master);
				res->state &= ~DLM_LOCK_RES_RECOVERING;
				if (__dlm_lockres_has_locks(res))
					__dlm_dirty_lockres(dlm, res);
				spin_unlock(&res->spinlock);
				wake_up(&res->wq);
			}
		}
	}
}

/*
 * Decide whether the lvb seen via this lock is stale after a node
 * death.  On a non-master ("local") node any granted mode below EX/PR
 * means the lvb can no longer be trusted; on the master only an EX
 * held by the dead node invalidates it.
 */
static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
{
	if (local) {
		if (lock->ml.type != LKM_EXMODE &&
		    lock->ml.type != LKM_PRMODE)
			return 1;
	} else if (lock->ml.type == LKM_EXMODE)
		return 1;
	return 0;
}

/*
 * Blank the lockres lvb (and the affected lksb lvbs) if the dead node's
 * departure makes the current lvb untrustworthy.  Caller holds both the
 * dlm and lockres spinlocks.
 */
static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res, u8 dead_node)
{
	struct list_head *queue;
	struct dlm_lock *lock;
	int blank_lvb = 0, local = 0;
	int i;
	u8 search_node;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (res->owner == dlm->node_num)
		/* if this node owned the lockres, and if the dead node
		 * had an EX when he died, blank out the lvb */
		search_node = dead_node;
	else {
		/* if this is a secondary lockres, and we had no EX or PR
		 * locks granted, we can no longer trust the lvb */
		search_node = dlm->node_num;
		local = 1;  /* check local state for valid lvb */
	}

	for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_entry(lock, queue, list) {
			if (lock->ml.node == search_node) {
				if (dlm_lvb_needs_invalidation(lock, local)) {
					/* zero the lksb lvb and lockres lvb */
					blank_lvb = 1;
					memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
				}
			}
		}
	}

	if (blank_lvb) {
		mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
		     res->lockname.len, res->lockname.name, dead_node);
		memset(res->lvb, 0, DLM_LVB_LEN);
	}
}

/*
 * Master-side cleanup for a dead node: unlink its locks from all three
 * queues, drop their refs, and clear its refmap bit.  Caller holds both
 * the dlm and lockres spinlocks.
 */
static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res, u8 dead_node)
{
	struct dlm_lock *lock, *next;
	unsigned int freed = 0;

	/* this node is the lockres master:
	 * 1) remove any stale locks for the dead node
	 * 2) if the dead node had an EX when he died, blank out the lvb */
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	/* We do two dlm_lock_put(). One for removing from list and the other is
	 * to force the DLM_UNLOCK_FREE_LOCK action so as to free the locks */

	/* TODO: check pending_asts, pending_basts here */
	list_for_each_entry_safe(lock, next, &res->granted, list) {
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
			/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
			dlm_lock_put(lock);
			freed++;
		}
	}
	list_for_each_entry_safe(lock, next, &res->converting, list) {
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
			/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
			dlm_lock_put(lock);
			freed++;
		}
	}
	list_for_each_entry_safe(lock, next, &res->blocked, list) {
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
			/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
			dlm_lock_put(lock);
			freed++;
		}
	}

	if (freed) {
		mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
		     "dropping ref from lockres\n", dlm->name,
		     res->lockname.len, res->lockname.name, freed, dead_node);
		if(!test_bit(dead_node, res->refmap)) {
			mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, "
			     "but ref was not set\n", dlm->name,
			     res->lockname.len, res->lockname.name, freed, dead_node);
			__dlm_print_one_lock_resource(res);
		}
		dlm_lockres_clear_refmap_bit(dead_node, res);
	} else if (test_bit(dead_node, res->refmap)) {
		mlog(0, "%s:%.*s: dead node %u had a ref, but had "
		     "no locks and had not purged before dying\n", dlm->name,
		     res->lockname.len, res->lockname.name, dead_node);
		dlm_lockres_clear_refmap_bit(dead_node, res);
	}

	/* do not kick thread yet */
	__dlm_dirty_lockres(dlm, res);
}

/* if this node is the recovery master, and there are no
 * locks for a given lockres owned by this node that are in
 * either PR or EX mode, zero out the lvb before requesting.
 *
 */
/*
 * Walk the entire lockres hash after a node death: prune stale
 * $RECOVERY locks, invalidate lvbs, move dead-master lockreses onto
 * the recovery list, and (where this node is master) free the dead
 * node's locks.  Runs before the heartbeat down events are delivered.
 */
static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct hlist_node *iter;
	struct dlm_lock_resource *res;
	int i;
	struct hlist_head *bucket;
	struct dlm_lock *lock;

	/* purge any stale mles */
	dlm_clean_master_list(dlm, dead_node);

	/*
	 * now clean up all lock resources. there are two rules:
	 *
	 * 1) if the dead node was the master, move the lockres
	 *    to the recovering list. set the RECOVERING flag.
	 *    this lockres needs to be cleaned up before it can
	 *    be used further.
	 *
	 * 2) if this node was the master, remove all locks from
	 *    each of the lockres queues that were owned by the
	 *    dead node. once recovery finishes, the dlm thread
	 *    can be kicked again to see if any ASTs or BASTs
	 *    need to be fired as a result.
	 */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_lockres_hash(dlm, i);
		hlist_for_each_entry(res, iter, bucket, hash_node) {
			/* always prune any $RECOVERY entries for dead nodes,
			 * otherwise hangs can occur during later recovery */
			if (dlm_is_recovery_lock(res->lockname.name,
						 res->lockname.len)) {
				spin_lock(&res->spinlock);
				list_for_each_entry(lock, &res->granted, list) {
					if (lock->ml.node == dead_node) {
						mlog(0, "AHA! there was "
						     "a $RECOVERY lock for dead "
						     "node %u (%s)!\n",
						     dead_node, dlm->name);
						list_del_init(&lock->list);
						dlm_lock_put(lock);
						break;
					}
				}
				spin_unlock(&res->spinlock);
				continue;
			}
			spin_lock(&res->spinlock);
			/* zero the lvb if necessary */
			dlm_revalidate_lvb(dlm, res, dead_node);
			if (res->owner == dead_node) {
				if (res->state & DLM_LOCK_RES_DROPPING_REF) {
					mlog(ML_NOTICE, "Ignore %.*s for "
					     "recovery as it is being freed\n",
					     res->lockname.len,
					     res->lockname.name);
				} else
					dlm_move_lockres_to_recovery_list(dlm,
									  res);

			} else if (res->owner == dlm->node_num) {
				dlm_free_dead_locks(dlm, res, dead_node);
				__dlm_lockres_calc_usage(dlm, res);
			}
			spin_unlock(&res->spinlock);
		}
	}
}

/*
 * Core node-down bookkeeping; caller holds dlm->spinlock.  Clears join
 * and recovery-master state tied to the dead node, then removes it from
 * the live/domain maps and marks it for recovery.
 */
static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
{
	assert_spin_locked(&dlm->spinlock);

	if (dlm->reco.new_master == idx) {
		mlog(0, "%s: recovery master %d just died\n",
		     dlm->name, idx);
		if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
			/* finalize1 was reached, so it is safe to clear
			 * the new_master and dead_node. that recovery
			 * is complete. */
			mlog(0, "%s: dead master %d had reached "
			     "finalize1 state, clearing\n", dlm->name, idx);
			dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
			__dlm_reset_recovery(dlm);
		}
	}

	/* Clean up join state on node death. */
	if (dlm->joining_node == idx) {
		mlog(0, "Clearing join state for node %u\n", idx);
		__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
	}

	/* check to see if the node is already considered dead */
	if (!test_bit(idx, dlm->live_nodes_map)) {
		mlog(0, "for domain %s, node %d is already dead. "
		     "another node likely did recovery already.\n",
		     dlm->name, idx);
		return;
	}

	/* check to see if we do not care about this node */
	if (!test_bit(idx, dlm->domain_map)) {
		/* This also catches the case that we get a node down
		 * but haven't joined the domain yet.
		 */
		mlog(0, "node %u already removed from domain!\n", idx);
		return;
	}

	clear_bit(idx, dlm->live_nodes_map);

	/* make sure local cleanup occurs before the heartbeat events */
	if (!test_bit(idx, dlm->recovery_map))
		dlm_do_local_recovery_cleanup(dlm, idx);

	/* notify anything attached to the heartbeat events */
	dlm_hb_event_notify_attached(dlm, idx, 0);

	mlog(0, "node %u being removed from domain map!\n", idx);
	clear_bit(idx, dlm->domain_map);
	clear_bit(idx, dlm->exit_domain_map);
	/* wake up migration waiters if a node goes down.
	 * perhaps later we can genericize this for other waiters. */
	wake_up(&dlm->migration_wq);

	if (test_bit(idx, dlm->recovery_map))
		mlog(0, "domain %s, node %u already added "
		     "to recovery map!\n", dlm->name, idx);
	else
		set_bit(idx, dlm->recovery_map);
}

/*
 * Heartbeat callback: a node went down.  Fires domain eviction
 * callbacks for nodes that were in our domain, then does the
 * spinlock-protected down processing.
 */
void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	/*
	 * This will notify any dlm users that a node in our domain
	 * went away without notifying us first.
	 */
	if (test_bit(idx, dlm->domain_map))
		dlm_fire_domain_eviction_callbacks(dlm, idx);

	spin_lock(&dlm->spinlock);
	__dlm_hb_node_down(dlm, idx);
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}

/*
 * Heartbeat callback: a node came up.  Just records it as live; it
 * becomes interesting for mastery only once it joins the domain.
 */
void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	spin_lock(&dlm->spinlock);
	set_bit(idx, dlm->live_nodes_map);
	/* do NOT notify mle attached to the heartbeat events.
	 * new nodes are not interesting in mastery until joined. */
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}

/* debug-only AST for the $RECOVERY lock */
static void dlm_reco_ast(void *astdata)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
/* debug-only BAST for the $RECOVERY lock */
static void dlm_reco_bast(void *astdata, int blocked_type)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
/* debug-only unlock AST for the $RECOVERY lock */
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
{
	mlog(0, "unlockast for recovery lock fired!\n");
}

/*
 * dlm_pick_recovery_master will continually attempt to use
 * dlmlock() on the special "$RECOVERY" lockres with the
 * LKM_NOQUEUE flag to get an EX. every thread that enters
 * this function on each node racing to become the recovery
 * master will not stop attempting this until either:
 * a) this node gets the EX (and becomes the recovery master),
 * or b) dlm->reco.new_master gets set to some nodenum
 * != O2NM_INVALID_NODE_NUM (another node will do the reco).
 * so each time a recovery master is needed, the entire cluster
 * will sync at this point. if the new master dies, that will
 * be detected in dlm_do_recovery */
static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
{
	enum dlm_status ret;
	struct dlm_lockstatus lksb;
	int status = -EINVAL;

	mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
	     dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
again:
	memset(&lksb, 0, sizeof(lksb));

	ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
		      DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN,
		      dlm_reco_ast, dlm, dlm_reco_bast);

	mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
	     dlm->name, ret, lksb.status);

	if (ret == DLM_NORMAL) {
		mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
		     dlm->name, dlm->node_num);

		/* got the EX lock. check to see if another node
		 * just became the reco master */
		if (dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: got reco EX lock, but %u will "
			     "do the recovery\n", dlm->name,
			     dlm->reco.new_master);
			status = -EEXIST;
		} else {
			status = 0;

			/* see if recovery was already finished elsewhere */
			spin_lock(&dlm->spinlock);
			if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
				status = -EINVAL;
				mlog(0, "%s: got reco EX lock, but "
				     "node got recovered already\n", dlm->name);
				if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
					mlog(ML_ERROR, "%s: new master is %u "
					     "but no dead node!\n",
					     dlm->name, dlm->reco.new_master);
					BUG();
				}
			}
			spin_unlock(&dlm->spinlock);
		}

		/* if this node has actually become the recovery master,
		 * set the master and send the messages to begin recovery */
		if (!status) {
			mlog(0, "%s: dead=%u, this=%u, sending "
			     "begin_reco now\n", dlm->name,
			     dlm->reco.dead_node, dlm->node_num);
			status = dlm_send_begin_reco_message(dlm,
				      dlm->reco.dead_node);
			/* this always succeeds */
			BUG_ON(status);

			/* set the new_master to this node */
			spin_lock(&dlm->spinlock);
			dlm_set_reco_master(dlm, dlm->node_num);
			spin_unlock(&dlm->spinlock);
		}

		/* recovery lock is a special case. ast will not get fired,
		 * so just go ahead and unlock it. */
		ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
		if (ret == DLM_DENIED) {
			mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
			ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm);
		}
		if (ret != DLM_NORMAL) {
			/* this would really suck. this could only happen
			 * if there was a network error during the unlock
			 * because of node death. this means the unlock
			 * is actually "done" and the lock structure is
			 * even freed. we can continue, but only
			 * because this specific lock name is special. */
			mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
		}
	} else if (ret == DLM_NOTQUEUED) {
		mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
		     dlm->name, dlm->node_num);
		/* another node is master.
wait on * reco.new_master != O2NM_INVALID_NODE_NUM * for at most one second */ wait_event_timeout(dlm->dlm_reco_thread_wq, dlm_reco_master_ready(dlm), msecs_to_jiffies(1000)); if (!dlm_reco_master_ready(dlm)) { mlog(0, "%s: reco master taking awhile\n", dlm->name); goto again; } /* another node has informed this one that it is reco master */ mlog(0, "%s: reco master %u is ready to recover %u\n", dlm->name, dlm->reco.new_master, dlm->reco.dead_node); status = -EEXIST; } else if (ret == DLM_RECOVERING) { mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n", dlm->name, dlm->node_num); goto again; } else { struct dlm_lock_resource *res; /* dlmlock returned something other than NOTQUEUED or NORMAL */ mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), " "lksb.status=%s\n", dlm->name, dlm_errname(ret), dlm_errname(lksb.status)); res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN); if (res) { dlm_print_one_lock_resource(res); dlm_lockres_put(res); } else { mlog(ML_ERROR, "recovery lock not found\n"); } BUG(); } return status; } static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node) { struct dlm_begin_reco br; int ret = 0; struct dlm_node_iter iter; int nodenum; int status; mlog(0, "%s: dead node is %u\n", dlm->name, dead_node); spin_lock(&dlm->spinlock); dlm_node_iter_init(dlm->domain_map, &iter); spin_unlock(&dlm->spinlock); clear_bit(dead_node, iter.node_map); memset(&br, 0, sizeof(br)); br.node_idx = dlm->node_num; br.dead_node = dead_node; while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { ret = 0; if (nodenum == dead_node) { mlog(0, "not sending begin reco to dead node " "%u\n", dead_node); continue; } if (nodenum == dlm->node_num) { mlog(0, "not sending begin reco to self\n"); continue; } retry: ret = -EINVAL; mlog(0, "attempting to send begin reco msg to %d\n", nodenum); ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key, &br, sizeof(br), nodenum, &status); /* negative status is handled ok by caller here 
*/ if (ret >= 0) ret = status; if (dlm_is_host_down(ret)) { /* node is down. not involved in recovery * so just keep going */ mlog(ML_NOTICE, "%s: node %u was down when sending " "begin reco msg (%d)\n", dlm->name, nodenum, ret); ret = 0; } /* * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8, * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN. * We are handling both for compatibility reasons. */ if (ret == -EAGAIN || ret == EAGAIN) { mlog(0, "%s: trying to start recovery of node " "%u, but node %u is waiting for last recovery " "to complete, backoff for a bit\n", dlm->name, dead_node, nodenum); msleep(100); goto retry; } if (ret < 0) { struct dlm_lock_resource *res; /* this is now a serious problem, possibly ENOMEM * in the network stack. must retry */ mlog_errno(ret); mlog(ML_ERROR, "begin reco of dlm %s to node %u " "returned %d\n", dlm->name, nodenum, ret); res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN); if (res) { dlm_print_one_lock_resource(res); dlm_lockres_put(res); } else { mlog(ML_ERROR, "recovery lock not found\n"); } /* sleep for a bit in hopes that we can avoid * another ENOMEM */ msleep(100); goto retry; } } return ret; } int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data) { struct dlm_ctxt *dlm = data; struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf; /* ok to return 0, domain has gone away */ if (!dlm_grab(dlm)) return 0; spin_lock(&dlm->spinlock); if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { mlog(0, "%s: node %u wants to recover node %u (%u:%u) " "but this node is in finalize state, waiting on finalize2\n", dlm->name, br->node_idx, br->dead_node, dlm->reco.dead_node, dlm->reco.new_master); spin_unlock(&dlm->spinlock); return -EAGAIN; } spin_unlock(&dlm->spinlock); mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n", dlm->name, br->node_idx, br->dead_node, dlm->reco.dead_node, dlm->reco.new_master); dlm_fire_domain_eviction_callbacks(dlm, 
br->dead_node); spin_lock(&dlm->spinlock); if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { if (test_bit(dlm->reco.new_master, dlm->recovery_map)) { mlog(0, "%s: new_master %u died, changing " "to %u\n", dlm->name, dlm->reco.new_master, br->node_idx); } else { mlog(0, "%s: new_master %u NOT DEAD, changing " "to %u\n", dlm->name, dlm->reco.new_master, br->node_idx); /* may not have seen the new master as dead yet */ } } if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) { mlog(ML_NOTICE, "%s: dead_node previously set to %u, " "node %u changing it to %u\n", dlm->name, dlm->reco.dead_node, br->node_idx, br->dead_node); } dlm_set_reco_master(dlm, br->node_idx); dlm_set_reco_dead_node(dlm, br->dead_node); if (!test_bit(br->dead_node, dlm->recovery_map)) { mlog(0, "recovery master %u sees %u as dead, but this " "node has not yet. marking %u as dead\n", br->node_idx, br->dead_node, br->dead_node); if (!test_bit(br->dead_node, dlm->domain_map) || !test_bit(br->dead_node, dlm->live_nodes_map)) mlog(0, "%u not in domain/live_nodes map " "so setting it in reco map manually\n", br->dead_node); /* force the recovery cleanup in __dlm_hb_node_down * both of these will be cleared in a moment */ set_bit(br->dead_node, dlm->domain_map); set_bit(br->dead_node, dlm->live_nodes_map); __dlm_hb_node_down(dlm, br->dead_node); } spin_unlock(&dlm->spinlock); dlm_kick_recovery_thread(dlm); mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n", dlm->name, br->node_idx, br->dead_node, dlm->reco.dead_node, dlm->reco.new_master); dlm_put(dlm); return 0; } #define DLM_FINALIZE_STAGE2 0x01 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm) { int ret = 0; struct dlm_finalize_reco fr; struct dlm_node_iter iter; int nodenum; int status; int stage = 1; mlog(0, "finishing recovery for node %s:%u, " "stage %d\n", dlm->name, dlm->reco.dead_node, stage); spin_lock(&dlm->spinlock); dlm_node_iter_init(dlm->domain_map, &iter); spin_unlock(&dlm->spinlock); stage2: memset(&fr, 0, 
sizeof(fr)); fr.node_idx = dlm->node_num; fr.dead_node = dlm->reco.dead_node; if (stage == 2) fr.flags |= DLM_FINALIZE_STAGE2; while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { if (nodenum == dlm->node_num) continue; ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key, &fr, sizeof(fr), nodenum, &status); if (ret >= 0) ret = status; if (ret < 0) { mlog(ML_ERROR, "Error %d when sending message %u (key " "0x%x) to node %u\n", ret, DLM_FINALIZE_RECO_MSG, dlm->key, nodenum); if (dlm_is_host_down(ret)) { /* this has no effect on this recovery * session, so set the status to zero to * finish out the last recovery */ mlog(ML_ERROR, "node %u went down after this " "node finished recovery.\n", nodenum); ret = 0; continue; } break; } } if (stage == 1) { /* reset the node_iter back to the top and send finalize2 */ iter.curnode = -1; stage = 2; goto stage2; } return ret; } int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data) { struct dlm_ctxt *dlm = data; struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf; int stage = 1; /* ok to return 0, domain has gone away */ if (!dlm_grab(dlm)) return 0; if (fr->flags & DLM_FINALIZE_STAGE2) stage = 2; mlog(0, "%s: node %u finalizing recovery stage%d of " "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage, fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master); spin_lock(&dlm->spinlock); if (dlm->reco.new_master != fr->node_idx) { mlog(ML_ERROR, "node %u sent recovery finalize msg, but node " "%u is supposed to be the new master, dead=%u\n", fr->node_idx, dlm->reco.new_master, fr->dead_node); BUG(); } if (dlm->reco.dead_node != fr->dead_node) { mlog(ML_ERROR, "node %u sent recovery finalize msg for dead " "node %u, but node %u is supposed to be dead\n", fr->node_idx, fr->dead_node, dlm->reco.dead_node); BUG(); } switch (stage) { case 1: dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx); if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { mlog(ML_ERROR, "%s: 
received finalize1 from " "new master %u for dead node %u, but " "this node has already received it!\n", dlm->name, fr->node_idx, fr->dead_node); dlm_print_reco_node_status(dlm); BUG(); } dlm->reco.state |= DLM_RECO_STATE_FINALIZE; spin_unlock(&dlm->spinlock); break; case 2: if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) { mlog(ML_ERROR, "%s: received finalize2 from " "new master %u for dead node %u, but " "this node did not have finalize1!\n", dlm->name, fr->node_idx, fr->dead_node); dlm_print_reco_node_status(dlm); BUG(); } dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE; spin_unlock(&dlm->spinlock); dlm_reset_recovery(dlm); dlm_kick_recovery_thread(dlm); break; default: BUG(); } mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n", dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master); dlm_put(dlm); return 0; }
gpl-2.0
djvoleur/kernel_samsung_exynos7420
drivers/gpu/drm/i915/dvo_tfp410.c
2805
8160
/* * Copyright © 2007 Dave Mueller * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
* * Authors: * Dave Mueller <dave.mueller@gmx.ch> * */ #include "dvo.h" /* register definitions according to the TFP410 data sheet */ #define TFP410_VID 0x014C #define TFP410_DID 0x0410 #define TFP410_VID_LO 0x00 #define TFP410_VID_HI 0x01 #define TFP410_DID_LO 0x02 #define TFP410_DID_HI 0x03 #define TFP410_REV 0x04 #define TFP410_CTL_1 0x08 #define TFP410_CTL_1_TDIS (1<<6) #define TFP410_CTL_1_VEN (1<<5) #define TFP410_CTL_1_HEN (1<<4) #define TFP410_CTL_1_DSEL (1<<3) #define TFP410_CTL_1_BSEL (1<<2) #define TFP410_CTL_1_EDGE (1<<1) #define TFP410_CTL_1_PD (1<<0) #define TFP410_CTL_2 0x09 #define TFP410_CTL_2_VLOW (1<<7) #define TFP410_CTL_2_MSEL_MASK (0x7<<4) #define TFP410_CTL_2_MSEL (1<<4) #define TFP410_CTL_2_TSEL (1<<3) #define TFP410_CTL_2_RSEN (1<<2) #define TFP410_CTL_2_HTPLG (1<<1) #define TFP410_CTL_2_MDI (1<<0) #define TFP410_CTL_3 0x0A #define TFP410_CTL_3_DK_MASK (0x7<<5) #define TFP410_CTL_3_DK (1<<5) #define TFP410_CTL_3_DKEN (1<<4) #define TFP410_CTL_3_CTL_MASK (0x7<<1) #define TFP410_CTL_3_CTL (1<<1) #define TFP410_USERCFG 0x0B #define TFP410_DE_DLY 0x32 #define TFP410_DE_CTL 0x33 #define TFP410_DE_CTL_DEGEN (1<<6) #define TFP410_DE_CTL_VSPOL (1<<5) #define TFP410_DE_CTL_HSPOL (1<<4) #define TFP410_DE_CTL_DEDLY8 (1<<0) #define TFP410_DE_TOP 0x34 #define TFP410_DE_CNT_LO 0x36 #define TFP410_DE_CNT_HI 0x37 #define TFP410_DE_LIN_LO 0x38 #define TFP410_DE_LIN_HI 0x39 #define TFP410_H_RES_LO 0x3A #define TFP410_H_RES_HI 0x3B #define TFP410_V_RES_LO 0x3C #define TFP410_V_RES_HI 0x3D struct tfp410_priv { bool quiet; }; static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) { struct tfp410_priv *tfp = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; u8 out_buf[2]; u8 in_buf[2]; struct i2c_msg msgs[] = { { .addr = dvo->slave_addr, .flags = 0, .len = 1, .buf = out_buf, }, { .addr = dvo->slave_addr, .flags = I2C_M_RD, .len = 1, .buf = in_buf, } }; out_buf[0] = addr; out_buf[1] = 0; if (i2c_transfer(adapter, msgs, 2) == 2) { 
*ch = in_buf[0]; return true; }; if (!tfp->quiet) { DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", addr, adapter->name, dvo->slave_addr); } return false; } static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) { struct tfp410_priv *tfp = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; uint8_t out_buf[2]; struct i2c_msg msg = { .addr = dvo->slave_addr, .flags = 0, .len = 2, .buf = out_buf, }; out_buf[0] = addr; out_buf[1] = ch; if (i2c_transfer(adapter, &msg, 1) == 1) return true; if (!tfp->quiet) { DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", addr, adapter->name, dvo->slave_addr); } return false; } static int tfp410_getid(struct intel_dvo_device *dvo, int addr) { uint8_t ch1, ch2; if (tfp410_readb(dvo, addr+0, &ch1) && tfp410_readb(dvo, addr+1, &ch2)) return ((ch2 << 8) & 0xFF00) | (ch1 & 0x00FF); return -1; } /* Ti TFP410 driver for chip on i2c bus */ static bool tfp410_init(struct intel_dvo_device *dvo, struct i2c_adapter *adapter) { /* this will detect the tfp410 chip on the specified i2c bus */ struct tfp410_priv *tfp; int id; tfp = kzalloc(sizeof(struct tfp410_priv), GFP_KERNEL); if (tfp == NULL) return false; dvo->i2c_bus = adapter; dvo->dev_priv = tfp; tfp->quiet = true; if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) { DRM_DEBUG_KMS("tfp410 not detected got VID %X: from %s " "Slave %d.\n", id, adapter->name, dvo->slave_addr); goto out; } if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) { DRM_DEBUG_KMS("tfp410 not detected got DID %X: from %s " "Slave %d.\n", id, adapter->name, dvo->slave_addr); goto out; } tfp->quiet = false; return true; out: kfree(tfp); return false; } static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo) { enum drm_connector_status ret = connector_status_disconnected; uint8_t ctl2; if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) { if (ctl2 & TFP410_CTL_2_RSEN) ret = connector_status_connected; else ret = 
connector_status_disconnected; } return ret; } static enum drm_mode_status tfp410_mode_valid(struct intel_dvo_device *dvo, struct drm_display_mode *mode) { return MODE_OK; } static void tfp410_mode_set(struct intel_dvo_device *dvo, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { /* As long as the basics are set up, since we don't have clock dependencies * in the mode setup, we can just leave the registers alone and everything * will work fine. */ /* don't do much */ return; } /* set the tfp410 power state */ static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable) { uint8_t ctl1; if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1)) return; if (enable) ctl1 |= TFP410_CTL_1_PD; else ctl1 &= ~TFP410_CTL_1_PD; tfp410_writeb(dvo, TFP410_CTL_1, ctl1); } static bool tfp410_get_hw_state(struct intel_dvo_device *dvo) { uint8_t ctl1; if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1)) return false; if (ctl1 & TFP410_CTL_1_PD) return true; else return false; } static void tfp410_dump_regs(struct intel_dvo_device *dvo) { uint8_t val, val2; tfp410_readb(dvo, TFP410_REV, &val); DRM_LOG_KMS("TFP410_REV: 0x%02X\n", val); tfp410_readb(dvo, TFP410_CTL_1, &val); DRM_LOG_KMS("TFP410_CTL1: 0x%02X\n", val); tfp410_readb(dvo, TFP410_CTL_2, &val); DRM_LOG_KMS("TFP410_CTL2: 0x%02X\n", val); tfp410_readb(dvo, TFP410_CTL_3, &val); DRM_LOG_KMS("TFP410_CTL3: 0x%02X\n", val); tfp410_readb(dvo, TFP410_USERCFG, &val); DRM_LOG_KMS("TFP410_USERCFG: 0x%02X\n", val); tfp410_readb(dvo, TFP410_DE_DLY, &val); DRM_LOG_KMS("TFP410_DE_DLY: 0x%02X\n", val); tfp410_readb(dvo, TFP410_DE_CTL, &val); DRM_LOG_KMS("TFP410_DE_CTL: 0x%02X\n", val); tfp410_readb(dvo, TFP410_DE_TOP, &val); DRM_LOG_KMS("TFP410_DE_TOP: 0x%02X\n", val); tfp410_readb(dvo, TFP410_DE_CNT_LO, &val); tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2); DRM_LOG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val); tfp410_readb(dvo, TFP410_DE_LIN_LO, &val); tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2); DRM_LOG_KMS("TFP410_DE_LIN: 
0x%02X%02X\n", val2, val); tfp410_readb(dvo, TFP410_H_RES_LO, &val); tfp410_readb(dvo, TFP410_H_RES_HI, &val2); DRM_LOG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val); tfp410_readb(dvo, TFP410_V_RES_LO, &val); tfp410_readb(dvo, TFP410_V_RES_HI, &val2); DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val); } static void tfp410_destroy(struct intel_dvo_device *dvo) { struct tfp410_priv *tfp = dvo->dev_priv; if (tfp) { kfree(tfp); dvo->dev_priv = NULL; } } struct intel_dvo_dev_ops tfp410_ops = { .init = tfp410_init, .detect = tfp410_detect, .mode_valid = tfp410_mode_valid, .mode_set = tfp410_mode_set, .dpms = tfp410_dpms, .get_hw_state = tfp410_get_hw_state, .dump_regs = tfp410_dump_regs, .destroy = tfp410_destroy, };
gpl-2.0
javilonas/Lonas_KL-HTC-Desire-2.6.38.8
sound/pci/echoaudio/indigoiox.c
3573
3015
/*
 *  ALSA driver for Echoaudio soundcards.
 *  Copyright (C) 2009 Giuliano Pochini <pochini@shiny.it>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * Card-personality header for the Indigo IOx.  This file only selects
 * features and tables; the shared implementation is pulled in via the
 * #include "*.c" lines at the bottom (the echoaudio driver's build
 * pattern: one tiny personality file per card model).
 */

/* feature selection consumed by the shared echoaudio code */
#define INDIGO_FAMILY
#define ECHOCARD_INDIGO_IOX
#define ECHOCARD_NAME "Indigo IOx"
#define ECHOCARD_HAS_MONITOR
#define ECHOCARD_HAS_SUPER_INTERLEAVE
#define ECHOCARD_HAS_VMIXER
#define ECHOCARD_HAS_STEREO_BIG_ENDIAN32

/* Pipe indexes */
/* trailing comment on each line is the count of pipes of that kind */
#define PX_ANALOG_OUT	0	/* 8 */
#define PX_DIGITAL_OUT	8	/* 0 */
#define PX_ANALOG_IN	8	/* 2 */
#define PX_DIGITAL_IN	10	/* 0 */
#define PX_NUM		10

/* Bus indexes */
/* trailing comment on each line is the count of busses of that kind */
#define BX_ANALOG_OUT	0	/* 2 */
#define BX_DIGITAL_OUT	2	/* 0 */
#define BX_ANALOG_IN	2	/* 2 */
#define BX_DIGITAL_IN	4	/* 0 */
#define BX_NUM		4

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/control.h>
#include <sound/tlv.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/asoundef.h>
#include <sound/initval.h>
#include <asm/atomic.h>
#include "echoaudio.h"

MODULE_FIRMWARE("ea/loader_dsp.fw");
MODULE_FIRMWARE("ea/indigo_iox_dsp.fw");

/* indexes into card_fw[] below */
#define FW_361_LOADER		0
#define FW_INDIGO_IOX_DSP	1

/* firmware table; the size field (first member) is filled in at load time */
static const struct firmware card_fw[] = {
	{0, "loader_dsp.fw"},
	{0, "indigo_iox_dsp.fw"}
};

/* PCI match: vendor 0x1057 (Motorola DSP), subsystem 0xECC0:0x00D0 */
static DEFINE_PCI_DEVICE_TABLE(snd_echo_ids) = {
	{0x1057, 0x3410, 0xECC0, 0x00D0, 0, 0, 0},	/* Indigo IOx */
	{0,}
};

/* PCM capability template handed to ALSA for this card */
static struct snd_pcm_hardware pcm_hardware_skel = {
	.info = SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_BLOCK_TRANSFER |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_PAUSE |
		SNDRV_PCM_INFO_SYNC_START,
	.formats =	SNDRV_PCM_FMTBIT_U8 |
			SNDRV_PCM_FMTBIT_S16_LE |
			SNDRV_PCM_FMTBIT_S24_3LE |
			SNDRV_PCM_FMTBIT_S32_LE |
			SNDRV_PCM_FMTBIT_S32_BE,
	.rates =	SNDRV_PCM_RATE_32000 |
			SNDRV_PCM_RATE_44100 |
			SNDRV_PCM_RATE_48000 |
			SNDRV_PCM_RATE_64000 |
			SNDRV_PCM_RATE_88200 |
			SNDRV_PCM_RATE_96000,
	.rate_min = 32000,
	.rate_max = 96000,
	.channels_min = 1,
	.channels_max = 8,
	.buffer_bytes_max = 262144,
	.period_bytes_min = 32,
	.period_bytes_max = 131072,
	.periods_min = 2,
	.periods_max = 220,
};

/* pull in the shared implementation, specialised by the macros above */
#include "indigoiox_dsp.c"
#include "indigo_express_dsp.c"
#include "echoaudio_dsp.c"
#include "echoaudio.c"
gpl-2.0
futuretekinc/cortina-kernel-2.6.36
arch/arm/mach-s5pv210/init.c
3829
1076
/* linux/arch/arm/mach-s5pv210/init.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> #include <linux/serial_core.h> #include <plat/cpu.h> #include <plat/devs.h> #include <plat/s5pv210.h> #include <plat/regs-serial.h> static struct s3c24xx_uart_clksrc s5pv210_serial_clocks[] = { [0] = { .name = "pclk", .divisor = 1, .min_baud = 0, .max_baud = 0, }, }; /* uart registration process */ void __init s5pv210_common_init_uarts(struct s3c2410_uartcfg *cfg, int no) { struct s3c2410_uartcfg *tcfg = cfg; u32 ucnt; for (ucnt = 0; ucnt < no; ucnt++, tcfg++) { if (!tcfg->clocks) { tcfg->clocks = s5pv210_serial_clocks; tcfg->clocks_size = ARRAY_SIZE(s5pv210_serial_clocks); } } s3c24xx_init_uartdevs("s5pv210-uart", s5p_uart_resources, cfg, no); }
gpl-2.0
motley-git/kernel-Nexus4
fs/cifs/cifs_dfs_ref.c
3829
9722
/* * Contains the CIFS DFS referral mounting routines used for handling * traversal via DFS junction point * * Copyright (c) 2007 Igor Mammedov * Copyright (C) International Business Machines Corp., 2008 * Author(s): Igor Mammedov (niallain@gmail.com) * Steve French (sfrench@us.ibm.com) * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/dcache.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/slab.h> #include <linux/vfs.h> #include <linux/fs.h> #include "cifsglob.h" #include "cifsproto.h" #include "cifsfs.h" #include "dns_resolve.h" #include "cifs_debug.h" static LIST_HEAD(cifs_dfs_automount_list); static void cifs_dfs_expire_automounts(struct work_struct *work); static DECLARE_DELAYED_WORK(cifs_dfs_automount_task, cifs_dfs_expire_automounts); static int cifs_dfs_mountpoint_expiry_timeout = 500 * HZ; static void cifs_dfs_expire_automounts(struct work_struct *work) { struct list_head *list = &cifs_dfs_automount_list; mark_mounts_for_expiry(list); if (!list_empty(list)) schedule_delayed_work(&cifs_dfs_automount_task, cifs_dfs_mountpoint_expiry_timeout); } void cifs_dfs_release_automount_timer(void) { BUG_ON(!list_empty(&cifs_dfs_automount_list)); cancel_delayed_work_sync(&cifs_dfs_automount_task); } /** * cifs_get_share_name - extracts share name from UNC * @node_name: pointer to UNC string * * Extracts sharename form full UNC. * i.e. strips from UNC trailing path that is not part of share * name and fixup missing '\' in the beginning of DFS node refferal * if necessary. * Returns pointer to share name on success or ERR_PTR on error. * Caller is responsible for freeing returned string. 
*/ static char *cifs_get_share_name(const char *node_name) { int len; char *UNC; char *pSep; len = strlen(node_name); UNC = kmalloc(len+2 /*for term null and additional \ if it's missed */, GFP_KERNEL); if (!UNC) return ERR_PTR(-ENOMEM); /* get share name and server name */ if (node_name[1] != '\\') { UNC[0] = '\\'; strncpy(UNC+1, node_name, len); len++; UNC[len] = 0; } else { strncpy(UNC, node_name, len); UNC[len] = 0; } /* find server name end */ pSep = memchr(UNC+2, '\\', len-2); if (!pSep) { cERROR(1, "%s: no server name end in node name: %s", __func__, node_name); kfree(UNC); return ERR_PTR(-EINVAL); } /* find sharename end */ pSep++; pSep = memchr(UNC+(pSep-UNC), '\\', len-(pSep-UNC)); if (pSep) { /* trim path up to sharename end * now we have share name in UNC */ *pSep = 0; } return UNC; } /** * cifs_compose_mount_options - creates mount options for refferral * @sb_mountdata: parent/root DFS mount options (template) * @fullpath: full path in UNC format * @ref: server's referral * @devname: pointer for saving device name * * creates mount options for submount based on template options sb_mountdata * and replacing unc,ip,prefixpath options with ones we've got form ref_unc. * * Returns: pointer to new mount options or ERR_PTR. * Caller is responcible for freeing retunrned value if it is not error. */ char *cifs_compose_mount_options(const char *sb_mountdata, const char *fullpath, const struct dfs_info3_param *ref, char **devname) { int rc; char *mountdata = NULL; int md_len; char *tkn_e; char *srvIP = NULL; char sep = ','; int off, noff; if (sb_mountdata == NULL) return ERR_PTR(-EINVAL); *devname = cifs_get_share_name(ref->node_name); if (IS_ERR(*devname)) { rc = PTR_ERR(*devname); *devname = NULL; goto compose_mount_options_err; } rc = dns_resolve_server_name_to_ip(*devname, &srvIP); if (rc < 0) { cFYI(1, "%s: Failed to resolve server part of %s to IP: %d", __func__, *devname, rc); goto compose_mount_options_err; } /* md_len = strlen(...) 
+ 12 for 'sep+prefixpath=' * assuming that we have 'unc=' and 'ip=' in * the original sb_mountdata */ md_len = strlen(sb_mountdata) + rc + strlen(ref->node_name) + 12; mountdata = kzalloc(md_len+1, GFP_KERNEL); if (mountdata == NULL) { rc = -ENOMEM; goto compose_mount_options_err; } /* copy all options except of unc,ip,prefixpath */ off = 0; if (strncmp(sb_mountdata, "sep=", 4) == 0) { sep = sb_mountdata[4]; strncpy(mountdata, sb_mountdata, 5); off += 5; } do { tkn_e = strchr(sb_mountdata + off, sep); if (tkn_e == NULL) noff = strlen(sb_mountdata + off); else noff = tkn_e - (sb_mountdata + off) + 1; if (strnicmp(sb_mountdata + off, "unc=", 4) == 0) { off += noff; continue; } if (strnicmp(sb_mountdata + off, "ip=", 3) == 0) { off += noff; continue; } if (strnicmp(sb_mountdata + off, "prefixpath=", 11) == 0) { off += noff; continue; } strncat(mountdata, sb_mountdata + off, noff); off += noff; } while (tkn_e); strcat(mountdata, sb_mountdata + off); mountdata[md_len] = '\0'; /* copy new IP and ref share name */ if (mountdata[strlen(mountdata) - 1] != sep) strncat(mountdata, &sep, 1); strcat(mountdata, "ip="); strcat(mountdata, srvIP); strncat(mountdata, &sep, 1); strcat(mountdata, "unc="); strcat(mountdata, *devname); /* find & copy prefixpath */ tkn_e = strchr(ref->node_name + 2, '\\'); if (tkn_e == NULL) { /* invalid unc, missing share name*/ rc = -EINVAL; goto compose_mount_options_err; } tkn_e = strchr(tkn_e + 1, '\\'); if (tkn_e || (strlen(fullpath) - ref->path_consumed)) { strncat(mountdata, &sep, 1); strcat(mountdata, "prefixpath="); if (tkn_e) strcat(mountdata, tkn_e + 1); strcat(mountdata, fullpath + ref->path_consumed); } /*cFYI(1, "%s: parent mountdata: %s", __func__,sb_mountdata);*/ /*cFYI(1, "%s: submount mountdata: %s", __func__, mountdata );*/ compose_mount_options_out: kfree(srvIP); return mountdata; compose_mount_options_err: kfree(mountdata); mountdata = ERR_PTR(rc); goto compose_mount_options_out; } /** * cifs_dfs_do_refmount - mounts specified path 
using provided referral
 * @cifs_sb:	parent/root superblock
 * @fullpath:	full path in UNC format
 * @ref:	server's referral
 */
static struct vfsmount *cifs_dfs_do_refmount(struct cifs_sb_info *cifs_sb,
		const char *fullpath, const struct dfs_info3_param *ref)
{
	struct vfsmount *mnt;
	char *mountdata;
	char *devname = NULL;

	/* strip first '\' from fullpath so the compose helper sees a
	 * single-backslash-prefixed path (see cifs_compose_mount_options) */
	mountdata = cifs_compose_mount_options(cifs_sb->mountdata,
			fullpath + 1, ref, &devname);

	/* cifs_compose_mount_options() returns ERR_PTR on failure;
	 * propagate it as an ERR_PTR-encoded vfsmount */
	if (IS_ERR(mountdata))
		return (struct vfsmount *)mountdata;

	mnt = vfs_kern_mount(&cifs_fs_type, 0, devname, mountdata);
	/* vfs_kern_mount copies what it needs; both buffers are ours to free */
	kfree(mountdata);
	kfree(devname);
	return mnt;
}

/* Log every field of a DFS referral at FYI verbosity (debug aid only). */
static void dump_referral(const struct dfs_info3_param *ref)
{
	cFYI(1, "DFS: ref path: %s", ref->path_name);
	cFYI(1, "DFS: node path: %s", ref->node_name);
	cFYI(1, "DFS: fl: %hd, srv_type: %hd", ref->flags, ref->server_type);
	cFYI(1, "DFS: ref_flags: %hd, path_consumed: %hd", ref->ref_flag,
				ref->path_consumed);
}

/*
 * Create a vfsmount that we can automount
 *
 * Fetches the DFS referral list for the dentry's path and tries each
 * referral node in order, returning the first successful submount.
 * On total failure, prefers the error from get_dfs_path() over a
 * generic -ENOENT.  Returns a vfsmount or an ERR_PTR.
 */
static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
{
	struct dfs_info3_param *referrals = NULL;
	unsigned int num_referrals = 0;
	struct cifs_sb_info *cifs_sb;
	struct cifs_ses *ses;
	char *full_path;
	int xid, i;
	int rc;
	struct vfsmount *mnt;
	struct tcon_link *tlink;

	cFYI(1, "in %s", __func__);
	BUG_ON(IS_ROOT(mntpt));

	/*
	 * The MSDFS spec states that paths in DFS referral requests and
	 * responses must be prefixed by a single '\' character instead of
	 * the double backslashes usually used in the UNC. This function
	 * gives us the latter, so we must adjust the result.
	 */
	mnt = ERR_PTR(-ENOMEM);
	full_path = build_path_from_dentry(mntpt);
	if (full_path == NULL)
		goto cdda_exit;

	cifs_sb = CIFS_SB(mntpt->d_inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		mnt = ERR_CAST(tlink);
		goto free_full_path;
	}
	ses = tlink_tcon(tlink)->ses;

	/* full_path + 1 skips the leading '\' per the MSDFS note above */
	xid = GetXid();
	rc = get_dfs_path(xid, ses, full_path + 1, cifs_sb->local_nls,
		&num_referrals, &referrals,
		cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
	FreeXid(xid);

	cifs_put_tlink(tlink);

	mnt = ERR_PTR(-ENOENT);
	/* NOTE(review): 'i' is int while num_referrals is unsigned int;
	 * harmless for realistic referral counts but a mixed-sign compare */
	for (i = 0; i < num_referrals; i++) {
		int len;
		dump_referral(referrals + i);
		/* connect to a node; node_name must at least hold "\\x" */
		len = strlen(referrals[i].node_name);
		if (len < 2) {
			cERROR(1, "%s: Net Address path too short: %s",
				__func__, referrals[i].node_name);
			mnt = ERR_PTR(-EINVAL);
			break;
		}
		mnt = cifs_dfs_do_refmount(cifs_sb, full_path, referrals + i);
		cFYI(1, "%s: cifs_dfs_do_refmount:%s , mnt:%p", __func__,
					referrals[i].node_name, mnt);
		/* first referral that mounts wins */
		if (!IS_ERR(mnt))
			goto success;
	}

	/* no valid submounts were found; return error from get_dfs_path() by
	 * preference */
	if (rc != 0)
		mnt = ERR_PTR(rc);

success:
	free_dfs_info_array(referrals, num_referrals);
free_full_path:
	kfree(full_path);
cdda_exit:
	cFYI(1, "leaving %s" , __func__);
	return mnt;
}

/*
 * Attempt to automount the referral
 *
 * d_automount callback: on success the new mount is pinned (mntget) and
 * placed on the expiry list so it is torn down when idle.
 */
struct vfsmount *cifs_dfs_d_automount(struct path *path)
{
	struct vfsmount *newmnt;

	cFYI(1, "in %s", __func__);

	newmnt = cifs_dfs_do_automount(path->dentry);
	if (IS_ERR(newmnt)) {
		cFYI(1, "leaving %s [automount failed]" , __func__);
		return newmnt;
	}

	mntget(newmnt); /* prevent immediate expiration */
	mnt_set_expiry(newmnt, &cifs_dfs_automount_list);
	schedule_delayed_work(&cifs_dfs_automount_task,
			      cifs_dfs_mountpoint_expiry_timeout);
	cFYI(1, "leaving %s [ok]" , __func__);
	return newmnt;
}

/* DFS referral inodes expose no operations of their own */
const struct inode_operations cifs_dfs_referral_inode_operations = {
};
gpl-2.0
Kurre/kernel_msm
drivers/staging/vt6655/bssdb.c
4853
58720
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * File: bssdb.c * * Purpose: Handles the Basic Service Set & Node Database functions * * Functions: * BSSpSearchBSSList - Search known BSS list for Desire SSID or BSSID * BSSvClearBSSList - Clear BSS List * BSSbInsertToBSSList - Insert a BSS set into known BSS list * BSSbUpdateToBSSList - Update BSS set in known BSS list * BSSDBbIsSTAInNodeDB - Search Node DB table to find the index of matched DstAddr * BSSvCreateOneNode - Allocate an Node for Node DB * BSSvUpdateAPNode - Update AP Node content in Index 0 of KnownNodeDB * BSSvSecondCallBack - One second timer callback function to update Node DB info & AP link status * BSSvUpdateNodeTxCounter - Update Tx attemps, Tx failure counter in Node DB for auto-fall back rate control * * Revision History: * * Author: Lyndon Chen * * Date: July 17, 2002 * */ #include "ttype.h" #include "tmacro.h" #include "tether.h" #include "device.h" #include "80211hdr.h" #include "bssdb.h" #include "wmgr.h" #include "datarate.h" #include "desc.h" #include "wcmd.h" #include "wpa.h" #include "baseband.h" #include "rf.h" #include "card.h" #include "channel.h" #include "mac.h" #include "wpa2.h" #include "iowpa.h" //#define PLICE_DEBUG /*--------------------- Static 
Definitions -------------------------*/ /*--------------------- Static Classes ----------------------------*/ /*--------------------- Static Variables --------------------------*/ static int msglevel =MSG_LEVEL_INFO; //static int msglevel =MSG_LEVEL_DEBUG; const unsigned short awHWRetry0[5][5] = { {RATE_18M, RATE_18M, RATE_12M, RATE_12M, RATE_12M}, {RATE_24M, RATE_24M, RATE_18M, RATE_12M, RATE_12M}, {RATE_36M, RATE_36M, RATE_24M, RATE_18M, RATE_18M}, {RATE_48M, RATE_48M, RATE_36M, RATE_24M, RATE_24M}, {RATE_54M, RATE_54M, RATE_48M, RATE_36M, RATE_36M} }; const unsigned short awHWRetry1[5][5] = { {RATE_18M, RATE_18M, RATE_12M, RATE_6M, RATE_6M}, {RATE_24M, RATE_24M, RATE_18M, RATE_6M, RATE_6M}, {RATE_36M, RATE_36M, RATE_24M, RATE_12M, RATE_12M}, {RATE_48M, RATE_48M, RATE_24M, RATE_12M, RATE_12M}, {RATE_54M, RATE_54M, RATE_36M, RATE_18M, RATE_18M} }; /*--------------------- Static Functions --------------------------*/ void s_vCheckSensitivity( void *hDeviceContext ); #ifdef Calcu_LinkQual void s_uCalculateLinkQual( void *hDeviceContext ); #endif void s_vCheckPreEDThreshold( void *hDeviceContext ); /*--------------------- Export Variables --------------------------*/ /*--------------------- Export Functions --------------------------*/ /*+ * * Routine Description: * Search known BSS list for Desire SSID or BSSID. 
 *
 * Return Value:
 *    PTR to KnownBSS or NULL
 *
-*/
/*
 * Two search strategies:
 *  - a usable (non-zero, non-broadcast) desired BSSID is matched exactly,
 *    optionally filtered by SSID and config mode;
 *  - otherwise every active entry is scanned and the strongest-RSSI entry
 *    that passes the SSID / BSS-type / PHY-type filters is selected.
 * The chosen entry is flagged bSelected so a rescan skips it.
 */
PKnownBSS
BSSpSearchBSSList(
    void *hDeviceContext,
    unsigned char *pbyDesireBSSID,
    unsigned char *pbyDesireSSID,
    CARD_PHY_TYPE  ePhyType
    )
{
    PSDevice        pDevice = (PSDevice)hDeviceContext;
    PSMgmtObject    pMgmt = pDevice->pMgmt;
    unsigned char *pbyBSSID = NULL;
    PWLAN_IE_SSID   pSSID = NULL;
    PKnownBSS       pCurrBSS = NULL;
    PKnownBSS       pSelect = NULL;
    unsigned char ZeroBSSID[WLAN_BSSID_LEN]={0x00,0x00,0x00,0x00,0x00,0x00};
    unsigned int ii = 0;

    if (pbyDesireBSSID != NULL) {
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BSSpSearchBSSList BSSID[%02X %02X %02X-%02X %02X %02X]\n",
                *pbyDesireBSSID,*(pbyDesireBSSID+1),*(pbyDesireBSSID+2),
                *(pbyDesireBSSID+3),*(pbyDesireBSSID+4),*(pbyDesireBSSID+5));
        /* only honour the BSSID if it is neither broadcast nor all-zero */
        if ((!is_broadcast_ether_addr(pbyDesireBSSID)) &&
            (memcmp(pbyDesireBSSID, ZeroBSSID, 6)!= 0)){
            pbyBSSID = pbyDesireBSSID;
        }
    }
    if (pbyDesireSSID != NULL) {
        /* zero-length SSID means "any"; treat as no SSID filter */
        if (((PWLAN_IE_SSID)pbyDesireSSID)->len != 0) {
            pSSID = (PWLAN_IE_SSID) pbyDesireSSID;
        }
    }

    if (pbyBSSID != NULL) {
        // match BSSID first
        for (ii = 0; ii <MAX_BSS_NUM; ii++) {
            pCurrBSS = &(pMgmt->sBSSList[ii]);
            /* while not associated, clear previous selections so every
             * entry is a candidate again */
            if(pDevice->bLinkPass==false) pCurrBSS->bSelected = false;
            if ((pCurrBSS->bActive) &&
                (pCurrBSS->bSelected == false)) {
                if (!compare_ether_addr(pCurrBSS->abyBSSID, pbyBSSID)) {
                    if (pSSID != NULL) {
                        // compare ssid
                        if ( !memcmp(pSSID->abySSID,
                            ((PWLAN_IE_SSID)pCurrBSS->abySSID)->abySSID,
                            pSSID->len)) {
                            /* BSS type must agree with the configured mode */
                            if ((pMgmt->eConfigMode == WMAC_CONFIG_AUTO) ||
                                ((pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) && WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo)) ||
                                ((pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA) && WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo))
                                ) {
                                pCurrBSS->bSelected = true;
                                return(pCurrBSS);
                            }
                        }
                    } else {
                        if ((pMgmt->eConfigMode == WMAC_CONFIG_AUTO) ||
                            ((pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) && WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo)) ||
                            ((pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA) && WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo))
                            ) {
                            pCurrBSS->bSelected = true;
                            return(pCurrBSS);
                        }
                    }
                }
            }
        }
    } else {
        // ignore BSSID
        for (ii = 0; ii <MAX_BSS_NUM; ii++) {
            pCurrBSS = &(pMgmt->sBSSList[ii]);
            //2007-0721-01<Add>by MikeLiu
            pCurrBSS->bSelected = false;
            if (pCurrBSS->bActive) {

                if (pSSID != NULL) {
                    // matched SSID
                    /* '! !memcmp' == (memcmp != 0), i.e. SSID bytes differ */
                    if (! !memcmp(pSSID->abySSID,
                        ((PWLAN_IE_SSID)pCurrBSS->abySSID)->abySSID,
                        pSSID->len) ||
                        (pSSID->len != ((PWLAN_IE_SSID)pCurrBSS->abySSID)->len)) {
                        // SSID not match skip this BSS
                        continue;
                    }
                }
                if (((pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) && WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo)) ||
                    ((pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA) && WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo))
                    ){
                    // Type not match skip this BSS
                    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BSS type mismatch.... Config[%d] BSS[0x%04x]\n", pMgmt->eConfigMode, pCurrBSS->wCapInfo);
                    continue;
                }

                if (ePhyType != PHY_TYPE_AUTO) {
                    /* 11A is a hard split from 11B/G: require both sides to
                     * be on the same band */
                    if (((ePhyType == PHY_TYPE_11A) && (PHY_TYPE_11A != pCurrBSS->eNetworkTypeInUse)) ||
                        ((ePhyType != PHY_TYPE_11A) && (PHY_TYPE_11A == pCurrBSS->eNetworkTypeInUse))) {
                        // PhyType not match skip this BSS
                        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Physical type mismatch.... ePhyType[%d] BSS[%d]\n", ePhyType, pCurrBSS->eNetworkTypeInUse);
                        continue;
                    }
                }
/*
                if (pMgmt->eAuthenMode < WMAC_AUTH_WPA) {
                    if (pCurrBSS->bWPAValid == true) {
                        // WPA AP will reject connection of station without WPA enable.
                        continue;
                    }
                } else if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA) ||
                           (pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK)) {
                    if (pCurrBSS->bWPAValid == false) {
                        // station with WPA enable can't join NonWPA AP.
                        continue;
                    }
                } else if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
                           (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) {
                    if (pCurrBSS->bWPA2Valid == false) {
                        // station with WPA2 enable can't join NonWPA2 AP.
                        continue;
                    }
                }
*/
                if (pSelect == NULL) {
                    pSelect = pCurrBSS;
                } else {
                    // compare RSSI, select signal strong one
                    if (pCurrBSS->uRSSI < pSelect->uRSSI) {
                        pSelect = pCurrBSS;
                    }
                }
            }
        }
        if (pSelect != NULL) {
            pSelect->bSelected = true;
/*
                        if (pDevice->bRoaming == false)  {
        //       Einsn Add @20070907
                                memset(pbyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
                          memcpy(pbyDesireSSID,pCurrBSS->abySSID,WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1) ;
                                                }*/

            return(pSelect);
        }
    }
    return(NULL);

}

/*+
 *
 * Routine Description:
 *    Clear BSS List
 *
 * Return Value:
 *    None.
 *
-*/
/*
 * Ages out scan results: each active entry gets BSS_CLEAR_COUNT grace
 * passes (uClearCount) before it is wiped.  When bKeepCurrBSSID is set,
 * the entry matching the current BSSID is never aged out.
 */
void
BSSvClearBSSList(
    void *hDeviceContext,
    bool bKeepCurrBSSID
    )
{
    PSDevice     pDevice = (PSDevice)hDeviceContext;
    PSMgmtObject    pMgmt = pDevice->pMgmt;
    unsigned int ii;

    for (ii = 0; ii < MAX_BSS_NUM; ii++) {
        if (bKeepCurrBSSID) {
            if (pMgmt->sBSSList[ii].bActive &&
                !compare_ether_addr(pMgmt->sBSSList[ii].abyBSSID, pMgmt->abyCurrBSSID)) {
               // bKeepCurrBSSID = false;
                continue;
            }
        }
        /* not yet expired: bump the age counter and keep the entry */
        if ((pMgmt->sBSSList[ii].bActive) && (pMgmt->sBSSList[ii].uClearCount < BSS_CLEAR_COUNT)) {
            pMgmt->sBSSList[ii].uClearCount ++;
            continue;
        }
        pMgmt->sBSSList[ii].bActive = false;
        memset(&pMgmt->sBSSList[ii], 0, sizeof(KnownBSS));
    }
    BSSvClearAnyBSSJoinRecord(pDevice);

    return;
}

/*+
 *
 * Routine Description:
 *    search BSS list by BSSID & SSID if matched
 *
 * Return Value:
 *    true if found.
 *
-*/
/* Returns the matching list entry (exact BSSID AND exact SSID), else NULL.
 * NOTE(review): pSSID is dereferenced unconditionally — callers must not
 * pass NULL (the commented-out NULL check below was never enabled). */
PKnownBSS
BSSpAddrIsInBSSList(
    void *hDeviceContext,
    unsigned char *abyBSSID,
    PWLAN_IE_SSID pSSID
    )
{
    PSDevice     pDevice = (PSDevice)hDeviceContext;
    PSMgmtObject    pMgmt = pDevice->pMgmt;
    PKnownBSS       pBSSList = NULL;
    unsigned int ii;

    for (ii = 0; ii < MAX_BSS_NUM; ii++) {
        pBSSList = &(pMgmt->sBSSList[ii]);
        if (pBSSList->bActive) {
            if (!compare_ether_addr(pBSSList->abyBSSID, abyBSSID)) {
//                if (pSSID == NULL)
//                    return pBSSList;
                if (pSSID->len == ((PWLAN_IE_SSID)pBSSList->abySSID)->len){
                    if (memcmp(pSSID->abySSID,
                            ((PWLAN_IE_SSID)pBSSList->abySSID)->abySSID,
                            pSSID->len) == 0)
                        return pBSSList;
                }
            }
        }
    }

    return NULL;
};

/*+
 *
 * Routine Description:
 *    Insert a BSS set into known BSS list
 *
 * Return Value:
 *    true if success.
 *
-*/
/*
 * Fills the first inactive slot with the parsed beacon/probe-response data:
 * timestamps, capability, SSID/rates (clamped to their maximum lengths),
 * ERP, WPA/WPA2 IEs, RSSI statistics, 802.11h country and quiet IEs.
 * Fails (returns false) only when the table is full.
 */
bool
BSSbInsertToBSSList (
    void *hDeviceContext,
    unsigned char *abyBSSIDAddr,
    QWORD qwTimestamp,
    unsigned short wBeaconInterval,
    unsigned short wCapInfo,
    unsigned char byCurrChannel,
    PWLAN_IE_SSID pSSID,
    PWLAN_IE_SUPP_RATES pSuppRates,
    PWLAN_IE_SUPP_RATES pExtSuppRates,
    PERPObject psERP,
    PWLAN_IE_RSN pRSN,
    PWLAN_IE_RSN_EXT pRSNWPA,
    PWLAN_IE_COUNTRY pIE_Country,
    PWLAN_IE_QUIET pIE_Quiet,
    unsigned int uIELength,
    unsigned char *pbyIEs,
    void *pRxPacketContext
    )
{

    PSDevice     pDevice = (PSDevice)hDeviceContext;
    PSMgmtObject    pMgmt = pDevice->pMgmt;
    PSRxMgmtPacket  pRxPacket = (PSRxMgmtPacket)pRxPacketContext;
    PKnownBSS       pBSSList = NULL;
    unsigned int ii;
    bool bParsingQuiet = false;
    PWLAN_IE_QUIET  pQuiet = NULL;

    pBSSList = (PKnownBSS)&(pMgmt->sBSSList[0]);

    /* find the first free slot */
    for (ii = 0; ii < MAX_BSS_NUM; ii++) {
        pBSSList = (PKnownBSS)&(pMgmt->sBSSList[ii]);
        if (!pBSSList->bActive)
                break;
    }

    if (ii == MAX_BSS_NUM){
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Get free KnowBSS node failed.\n");
        return false;
    }
    // save the BSS info
    pBSSList->bActive = true;
    memcpy( pBSSList->abyBSSID, abyBSSIDAddr, WLAN_BSSID_LEN);
    /* timestamps are stored little-endian, as they arrive on the air */
    HIDWORD(pBSSList->qwBSSTimestamp) = cpu_to_le32(HIDWORD(qwTimestamp));
    LODWORD(pBSSList->qwBSSTimestamp) = cpu_to_le32(LODWORD(qwTimestamp));
    pBSSList->wBeaconInterval = cpu_to_le16(wBeaconInterval);
    pBSSList->wCapInfo = cpu_to_le16(wCapInfo);
    pBSSList->uClearCount = 0;

    /* clamp the received IE length before copying (buffer protection);
     * note this mutates the caller's IE in place */
    if (pSSID->len > WLAN_SSID_MAXLEN)
        pSSID->len = WLAN_SSID_MAXLEN;
    memcpy( pBSSList->abySSID, pSSID, pSSID->len + WLAN_IEHDR_LEN);

    pBSSList->uChannel = byCurrChannel;

    if (pSuppRates->len > WLAN_RATES_MAXLEN)
        pSuppRates->len = WLAN_RATES_MAXLEN;
    memcpy( pBSSList->abySuppRates, pSuppRates, pSuppRates->len + WLAN_IEHDR_LEN);

    if (pExtSuppRates != NULL) {
        if (pExtSuppRates->len > WLAN_RATES_MAXLEN)
            pExtSuppRates->len = WLAN_RATES_MAXLEN;
        memcpy(pBSSList->abyExtSuppRates, pExtSuppRates, pExtSuppRates->len + WLAN_IEHDR_LEN);
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BSSbInsertToBSSList: pExtSuppRates->len = %d\n", pExtSuppRates->len);

    } else {
        memset(pBSSList->abyExtSuppRates, 0, WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
    }
    pBSSList->sERP.byERP = psERP->byERP;
    pBSSList->sERP.bERPExist = psERP->bERPExist;

    // Check if BSS is 802.11a/b/g
    /* channel > 14 means 5 GHz (11a); on 2.4 GHz an ERP IE implies 11g */
    if (pBSSList->uChannel > CB_MAX_CHANNEL_24G) {
        pBSSList->eNetworkTypeInUse = PHY_TYPE_11A;
    } else {
        if (pBSSList->sERP.bERPExist == true) {
            pBSSList->eNetworkTypeInUse = PHY_TYPE_11G;
        } else {
            pBSSList->eNetworkTypeInUse = PHY_TYPE_11B;
        }
    }

    pBSSList->byRxRate = pRxPacket->byRxRate;
    pBSSList->qwLocalTSF = pRxPacket->qwLocalTSF;
    pBSSList->uRSSI = pRxPacket->uRSSI;
    pBSSList->bySQ = pRxPacket->bySQ;

    if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
        (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
        // assoc with BSS
        /* only honour 802.11h quiet IEs from the BSS we are joined to */
        if (pBSSList == pMgmt->pCurrBSS) {
            bParsingQuiet = true;
        }
    }

    WPA_ClearRSN(pBSSList);

    if (pRSNWPA != NULL) {
        unsigned int uLen = pRSNWPA->len + 2;

        /* only parse the IE if it lies entirely inside the frame's IE area */
        if (uLen <= (uIELength - (unsigned int)((unsigned char *)pRSNWPA - pbyIEs))) {
            pBSSList->wWPALen = uLen;
            memcpy(pBSSList->byWPAIE, pRSNWPA, uLen);
            WPA_ParseRSN(pBSSList, pRSNWPA);
        }
    }

    WPA2_ClearRSN(pBSSList);

    if (pRSN != NULL) {
        unsigned int uLen = pRSN->len + 2;
        if (uLen <= (uIELength - (unsigned int)((unsigned char *)pRSN - pbyIEs))) {
            pBSSList->wRSNLen = uLen;
            memcpy(pBSSList->byRSNIE, pRSN, uLen);
            WPA2vParseRSN(pBSSList, pRSN);
        }
    }

    if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) || (pBSSList->bWPA2Valid == true)) {

        PSKeyItem  pTransmitKey = NULL;
        bool bIs802_1x = false;

        for (ii = 0; ii < pBSSList->wAKMSSAuthCount; ii ++) {
            if (pBSSList->abyAKMSSAuthType[ii] == WLAN_11i_AKMSS_802_1X) {
                bIs802_1x = true;
                break;
            }
        }
        /* record this AP as a PMKID candidate when it is an 802.1X BSS
         * matching our desired SSID (pre-auth / PMKSA caching support) */
        if ((bIs802_1x == true) && (pSSID->len == ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len) &&
            ( !memcmp(pSSID->abySSID, ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySSID, pSSID->len))) {

            bAdd_PMKID_Candidate((void *)pDevice, pBSSList->abyBSSID, &pBSSList->sRSNCapObj);

            if ((pDevice->bLinkPass == true) && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
                if ((KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, PAIRWISE_KEY, &pTransmitKey) == true) ||
                    (KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, GROUP_KEY, &pTransmitKey) == true)) {
                    pDevice->gsPMKIDCandidate.StatusType = Ndis802_11StatusType_PMKID_CandidateList;
                    pDevice->gsPMKIDCandidate.Version = 1;

                }

            }

        }
    }

    if (pDevice->bUpdateBBVGA) {
        // Moniter if RSSI is too strong.
        /* reset the rolling dBm statistics with this first sample */
        pBSSList->byRSSIStatCnt = 0;
        RFvRSSITodBm(pDevice, (unsigned char)(pRxPacket->uRSSI), &pBSSList->ldBmMAX);
        pBSSList->ldBmAverage[0] = pBSSList->ldBmMAX;
        for (ii = 1; ii < RSSI_STAT_COUNT; ii++)
            pBSSList->ldBmAverage[ii] = 0;
    }

    if ((pIE_Country != NULL) &&
        (pMgmt->b11hEnable == true)) {
        set_country_info(pMgmt->pAdapter, pBSSList->eNetworkTypeInUse,
                         pIE_Country);
    }

    if ((bParsingQuiet == true) && (pIE_Quiet != NULL)) {
        if ((((PWLAN_IE_QUIET)pIE_Quiet)->len == 8) &&
            (((PWLAN_IE_QUIET)pIE_Quiet)->byQuietCount != 0)) {
            // valid EID
            /* first quiet IE resets the schedule; later ones extend it */
            if (pQuiet == NULL) {
                pQuiet = (PWLAN_IE_QUIET)pIE_Quiet;
                CARDbSetQuiet(  pMgmt->pAdapter,
                                true,
                                pQuiet->byQuietCount,
                                pQuiet->byQuietPeriod,
                                *((unsigned short *)pQuiet->abyQuietDuration),
                                *((unsigned short *)pQuiet->abyQuietOffset)
                                );
            } else {
                pQuiet = (PWLAN_IE_QUIET)pIE_Quiet;
                CARDbSetQuiet(  pMgmt->pAdapter,
                                false,
                                pQuiet->byQuietCount,
                                pQuiet->byQuietPeriod,
                                *((unsigned short *)pQuiet->abyQuietDuration),
                                *((unsigned short *)pQuiet->abyQuietOffset)
                                );
            }
        }
    }

    if ((bParsingQuiet == true) &&
        (pQuiet != NULL)) {
        CARDbStartQuiet(pMgmt->pAdapter);
    }

    /* stash the raw IE blob (clamped) for later reporting */
    pBSSList->uIELength = uIELength;
    if (pBSSList->uIELength > WLAN_BEACON_FR_MAXLEN)
        pBSSList->uIELength = WLAN_BEACON_FR_MAXLEN;
    memcpy(pBSSList->abyIEs, pbyIEs, pBSSList->uIELength);

    return true;
}

/*+
 *
 * Routine Description:
 *    Update BSS set in known BSS list
 *
 * Return Value:
 *    true if success.
 *
-*/
// TODO: input structure modify
/*
 * Refreshes an existing BSS entry from a newly received beacon/probe
 * response: timestamps, capability, SSID (only when non-hidden), rates,
 * ERP/PHY type, WPA/WPA2 IEs, rolling RSSI statistics, 802.11h IEs and
 * the raw IE blob.  RSSI is only taken when the frame was received on
 * the BSS's own channel (bChannelHit).
 * NOTE(review): unlike the insert path, pSuppRates/pExtSuppRates lengths
 * are NOT clamped here before memcpy — confirm upstream callers clamp.
 */
bool
BSSbUpdateToBSSList (
    void *hDeviceContext,
    QWORD qwTimestamp,
    unsigned short wBeaconInterval,
    unsigned short wCapInfo,
    unsigned char byCurrChannel,
    bool bChannelHit,
    PWLAN_IE_SSID pSSID,
    PWLAN_IE_SUPP_RATES pSuppRates,
    PWLAN_IE_SUPP_RATES pExtSuppRates,
    PERPObject psERP,
    PWLAN_IE_RSN pRSN,
    PWLAN_IE_RSN_EXT pRSNWPA,
    PWLAN_IE_COUNTRY pIE_Country,
    PWLAN_IE_QUIET pIE_Quiet,
    PKnownBSS pBSSList,
    unsigned int uIELength,
    unsigned char *pbyIEs,
    void *pRxPacketContext
    )
{
    int ii;
    PSDevice     pDevice = (PSDevice)hDeviceContext;
    PSMgmtObject    pMgmt = pDevice->pMgmt;
    PSRxMgmtPacket  pRxPacket = (PSRxMgmtPacket)pRxPacketContext;
    long ldBm;
    bool bParsingQuiet = false;
    PWLAN_IE_QUIET  pQuiet = NULL;

    if (pBSSList == NULL)
        return false;

    HIDWORD(pBSSList->qwBSSTimestamp) = cpu_to_le32(HIDWORD(qwTimestamp));
    LODWORD(pBSSList->qwBSSTimestamp) = cpu_to_le32(LODWORD(qwTimestamp));
    pBSSList->wBeaconInterval = cpu_to_le16(wBeaconInterval);
    pBSSList->wCapInfo = cpu_to_le16(wCapInfo);
    pBSSList->uClearCount = 0;
    pBSSList->uChannel = byCurrChannel;
//    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BSSbUpdateToBSSList: pBSSList->uChannel: %d\n", pBSSList->uChannel);

    if (pSSID->len > WLAN_SSID_MAXLEN)
        pSSID->len = WLAN_SSID_MAXLEN;

    /* keep the stored SSID when the beacon carries a hidden (empty) one */
    if ((pSSID->len != 0) && (pSSID->abySSID[0] != 0))
        memcpy(pBSSList->abySSID, pSSID, pSSID->len + WLAN_IEHDR_LEN);
    memcpy(pBSSList->abySuppRates, pSuppRates,pSuppRates->len + WLAN_IEHDR_LEN);

    if (pExtSuppRates != NULL) {
        memcpy(pBSSList->abyExtSuppRates, pExtSuppRates,pExtSuppRates->len + WLAN_IEHDR_LEN);
    } else {
        memset(pBSSList->abyExtSuppRates, 0, WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
    }
    pBSSList->sERP.byERP = psERP->byERP;
    pBSSList->sERP.bERPExist = psERP->bERPExist;

    // Check if BSS is 802.11a/b/g
    /* channel > 14 means 5 GHz (11a); on 2.4 GHz an ERP IE implies 11g */
    if (pBSSList->uChannel > CB_MAX_CHANNEL_24G) {
        pBSSList->eNetworkTypeInUse = PHY_TYPE_11A;
    } else {
        if (pBSSList->sERP.bERPExist == true) {
            pBSSList->eNetworkTypeInUse = PHY_TYPE_11G;
        } else {
            pBSSList->eNetworkTypeInUse = PHY_TYPE_11B;
        }
    }

    pBSSList->byRxRate = pRxPacket->byRxRate;
    pBSSList->qwLocalTSF = pRxPacket->qwLocalTSF;
    if(bChannelHit)
        pBSSList->uRSSI = pRxPacket->uRSSI;
    pBSSList->bySQ = pRxPacket->bySQ;

    if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
        (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
        // assoc with BSS
        /* only honour 802.11h quiet IEs from the BSS we are joined to */
        if (pBSSList == pMgmt->pCurrBSS) {
            bParsingQuiet = true;
        }
    }

    WPA_ClearRSN(pBSSList);         //mike update

    if (pRSNWPA != NULL) {
        unsigned int uLen = pRSNWPA->len + 2;
        /* only parse the IE if it lies entirely inside the frame's IE area */
        if (uLen <= (uIELength - (unsigned int)((unsigned char *)pRSNWPA - pbyIEs))) {
            pBSSList->wWPALen = uLen;
            memcpy(pBSSList->byWPAIE, pRSNWPA, uLen);
            WPA_ParseRSN(pBSSList, pRSNWPA);
        }
    }

    WPA2_ClearRSN(pBSSList);  //mike update

    if (pRSN != NULL) {
        unsigned int uLen = pRSN->len + 2;
        if (uLen <= (uIELength - (unsigned int)((unsigned char *)pRSN - pbyIEs))) {
            pBSSList->wRSNLen = uLen;
            memcpy(pBSSList->byRSNIE, pRSN, uLen);
            WPA2vParseRSN(pBSSList, pRSN);
        }
    }

    if (pRxPacket->uRSSI != 0) {
        RFvRSSITodBm(pDevice, (unsigned char)(pRxPacket->uRSSI), &ldBm);
        // Moniter if RSSI is too strong.
        /* circular buffer of recent dBm samples; track the maximum */
        pBSSList->byRSSIStatCnt++;
        pBSSList->byRSSIStatCnt %= RSSI_STAT_COUNT;
        pBSSList->ldBmAverage[pBSSList->byRSSIStatCnt] = ldBm;
        for(ii=0;ii<RSSI_STAT_COUNT;ii++) {
            if (pBSSList->ldBmAverage[ii] != 0) {
                pBSSList->ldBmMAX = max(pBSSList->ldBmAverage[ii], ldBm);
            }
        }
    }

    if ((pIE_Country != NULL) &&
        (pMgmt->b11hEnable == true)) {
        set_country_info(pMgmt->pAdapter, pBSSList->eNetworkTypeInUse,
                         pIE_Country);
    }

    if ((bParsingQuiet == true) && (pIE_Quiet != NULL)) {
        if ((((PWLAN_IE_QUIET)pIE_Quiet)->len == 8) &&
            (((PWLAN_IE_QUIET)pIE_Quiet)->byQuietCount != 0)) {
            // valid EID
            /* first quiet IE resets the schedule; later ones extend it */
            if (pQuiet == NULL) {
                pQuiet = (PWLAN_IE_QUIET)pIE_Quiet;
                CARDbSetQuiet(  pMgmt->pAdapter,
                                true,
                                pQuiet->byQuietCount,
                                pQuiet->byQuietPeriod,
                                *((unsigned short *)pQuiet->abyQuietDuration),
                                *((unsigned short *)pQuiet->abyQuietOffset)
                                );
            } else {
                pQuiet = (PWLAN_IE_QUIET)pIE_Quiet;
                CARDbSetQuiet(  pMgmt->pAdapter,
                                false,
                                pQuiet->byQuietCount,
                                pQuiet->byQuietPeriod,
                                *((unsigned short *)pQuiet->abyQuietDuration),
                                *((unsigned short *)pQuiet->abyQuietOffset)
                                );
            }
        }
    }

    if ((bParsingQuiet == true) &&
        (pQuiet != NULL)) {
        CARDbStartQuiet(pMgmt->pAdapter);
    }

    /* stash the raw IE blob (clamped) for later reporting */
    pBSSList->uIELength = uIELength;
    if (pBSSList->uIELength > WLAN_BEACON_FR_MAXLEN)
        pBSSList->uIELength = WLAN_BEACON_FR_MAXLEN;
    memcpy(pBSSList->abyIEs, pbyIEs, pBSSList->uIELength);

    return true;
}

/*+
 *
 * Routine Description:
 *    Search Node DB table to find the index of matched DstAddr
 *
 * Return Value:
 *    None
 *
-*/
/* Looks up abyDstAddr in the node DB (slot 0 is reserved for the AP /
 * broadcast node).  On a hit, stores the slot index in *puNodeIndex and
 * returns true. */
bool
BSSDBbIsSTAInNodeDB(void *pMgmtObject, unsigned char *abyDstAddr,
		unsigned int *puNodeIndex)
{
    PSMgmtObject    pMgmt = (PSMgmtObject) pMgmtObject;
    unsigned int ii;

    // Index = 0 reserved for AP Node
    for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
        if (pMgmt->sNodeDBTable[ii].bActive) {
            if (!compare_ether_addr(abyDstAddr, pMgmt->sNodeDBTable[ii].abyMACAddr)) {
                *puNodeIndex = ii;
                return true;
            }
        }
    }

   return false;
};

/*+
 *
 * Routine Description:
 *    Find an empty node and allocate it; if none is free,
 *    instead reuse the most inactive one.
* * Return Value: * None * -*/ void BSSvCreateOneNode(void *hDeviceContext, unsigned int *puNodeIndex) { PSDevice pDevice = (PSDevice)hDeviceContext; PSMgmtObject pMgmt = pDevice->pMgmt; unsigned int ii; unsigned int BigestCount = 0; unsigned int SelectIndex; struct sk_buff *skb; // Index = 0 reserved for AP Node (In STA mode) // Index = 0 reserved for Broadcast/MultiCast (In AP mode) SelectIndex = 1; for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) { if (pMgmt->sNodeDBTable[ii].bActive) { if (pMgmt->sNodeDBTable[ii].uInActiveCount > BigestCount) { BigestCount = pMgmt->sNodeDBTable[ii].uInActiveCount; SelectIndex = ii; } } else { break; } } // if not found replace uInActiveCount is largest one. if ( ii == (MAX_NODE_NUM + 1)) { *puNodeIndex = SelectIndex; DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Replace inactive node = %d\n", SelectIndex); // clear ps buffer if (pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue.next != NULL) { while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue)) != NULL) dev_kfree_skb(skb); } } else { *puNodeIndex = ii; } memset(&pMgmt->sNodeDBTable[*puNodeIndex], 0, sizeof(KnownNodeDB)); pMgmt->sNodeDBTable[*puNodeIndex].bActive = true; pMgmt->sNodeDBTable[*puNodeIndex].uRatePollTimeout = FALLBACK_POLL_SECOND; // for AP mode PS queue skb_queue_head_init(&pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue); pMgmt->sNodeDBTable[*puNodeIndex].byAuthSequence = 0; pMgmt->sNodeDBTable[*puNodeIndex].wEnQueueCnt = 0; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Create node index = %d\n", ii); return; }; /*+ * * Routine Description: * Remove Node by NodeIndex * * * Return Value: * None * -*/ void BSSvRemoveOneNode( void *hDeviceContext, unsigned int uNodeIndex ) { PSDevice pDevice = (PSDevice)hDeviceContext; PSMgmtObject pMgmt = pDevice->pMgmt; unsigned char byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80}; struct sk_buff *skb; while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[uNodeIndex].sTxPSQueue)) != NULL) dev_kfree_skb(skb); // clear context 
memset(&pMgmt->sNodeDBTable[uNodeIndex], 0, sizeof(KnownNodeDB)); // clear tx bit map pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[uNodeIndex].wAID >> 3] &= ~byMask[pMgmt->sNodeDBTable[uNodeIndex].wAID & 7]; return; }; /*+ * * Routine Description: * Update AP Node content in Index 0 of KnownNodeDB * * * Return Value: * None * -*/ void BSSvUpdateAPNode( void *hDeviceContext, unsigned short *pwCapInfo, PWLAN_IE_SUPP_RATES pSuppRates, PWLAN_IE_SUPP_RATES pExtSuppRates ) { PSDevice pDevice = (PSDevice)hDeviceContext; PSMgmtObject pMgmt = pDevice->pMgmt; unsigned int uRateLen = WLAN_RATES_MAXLEN; memset(&pMgmt->sNodeDBTable[0], 0, sizeof(KnownNodeDB)); pMgmt->sNodeDBTable[0].bActive = true; if (pDevice->eCurrentPHYType == PHY_TYPE_11B) { uRateLen = WLAN_RATES_MAXLEN_11B; } pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)pSuppRates, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates, uRateLen); pMgmt->abyCurrExtSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)pExtSuppRates, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates, uRateLen); RATEvParseMaxRate((void *)pDevice, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates, true, &(pMgmt->sNodeDBTable[0].wMaxBasicRate), &(pMgmt->sNodeDBTable[0].wMaxSuppRate), &(pMgmt->sNodeDBTable[0].wSuppRate), &(pMgmt->sNodeDBTable[0].byTopCCKBasicRate), &(pMgmt->sNodeDBTable[0].byTopOFDMBasicRate) ); memcpy(pMgmt->sNodeDBTable[0].abyMACAddr, pMgmt->abyCurrBSSID, WLAN_ADDR_LEN); pMgmt->sNodeDBTable[0].wTxDataRate = pMgmt->sNodeDBTable[0].wMaxSuppRate; pMgmt->sNodeDBTable[0].bShortPreamble = WLAN_GET_CAP_INFO_SHORTPREAMBLE(*pwCapInfo); pMgmt->sNodeDBTable[0].uRatePollTimeout = FALLBACK_POLL_SECOND; #ifdef PLICE_DEBUG printk("BSSvUpdateAPNode:MaxSuppRate is %d\n",pMgmt->sNodeDBTable[0].wMaxSuppRate); #endif // Auto rate fallback function initiation. 
// RATEbInit(pDevice); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pMgmt->sNodeDBTable[0].wTxDataRate = %d \n", pMgmt->sNodeDBTable[0].wTxDataRate); }; /*+ * * Routine Description: * Add Multicast Node content in Index 0 of KnownNodeDB * * * Return Value: * None * -*/ void BSSvAddMulticastNode( void *hDeviceContext ) { PSDevice pDevice = (PSDevice)hDeviceContext; PSMgmtObject pMgmt = pDevice->pMgmt; if (!pDevice->bEnableHostWEP) memset(&pMgmt->sNodeDBTable[0], 0, sizeof(KnownNodeDB)); memset(pMgmt->sNodeDBTable[0].abyMACAddr, 0xff, WLAN_ADDR_LEN); pMgmt->sNodeDBTable[0].bActive = true; pMgmt->sNodeDBTable[0].bPSEnable = false; skb_queue_head_init(&pMgmt->sNodeDBTable[0].sTxPSQueue); RATEvParseMaxRate((void *)pDevice, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates, true, &(pMgmt->sNodeDBTable[0].wMaxBasicRate), &(pMgmt->sNodeDBTable[0].wMaxSuppRate), &(pMgmt->sNodeDBTable[0].wSuppRate), &(pMgmt->sNodeDBTable[0].byTopCCKBasicRate), &(pMgmt->sNodeDBTable[0].byTopOFDMBasicRate) ); pMgmt->sNodeDBTable[0].wTxDataRate = pMgmt->sNodeDBTable[0].wMaxBasicRate; #ifdef PLICE_DEBUG printk("BSSvAddMultiCastNode:pMgmt->sNodeDBTable[0].wTxDataRate is %d\n",pMgmt->sNodeDBTable[0].wTxDataRate); #endif pMgmt->sNodeDBTable[0].uRatePollTimeout = FALLBACK_POLL_SECOND; }; /*+ * * Routine Description: * * * Second call back function to update Node DB info & AP link status * * * Return Value: * none. 
 *
-*/
//2008-4-14 <add> by chester for led issue
#ifdef FOR_LED_ON_NOTEBOOK
/* file-scope latch for the GPIO radio-switch state machine below;
 * zero-initialised by C rules, so "status == 0" means "no state yet" */
bool cc = false;
unsigned int status;
#endif
/*
 * Once-per-second watchdog (re-armed at the bottom via RUN_AT(HZ)).
 * Under pDevice->lock it: polls the hardware radio switch (notebook LED
 * build), ages every node-DB entry and expires inactive stations, runs
 * tx-rate fallback, toggles 11g protection / short-slot / Barker
 * preamble in AP mode, detects lost AP or ad-hoc beacons, and kicks off
 * roaming re-scans.
 */
void BSSvSecondCallBack(void *hDeviceContext)
{
	PSDevice pDevice = (PSDevice)hDeviceContext;
	PSMgmtObject pMgmt = pDevice->pMgmt;
	unsigned int ii;
	PWLAN_IE_SSID pItemSSID, pCurrSSID;
	unsigned int uSleepySTACnt = 0;
	unsigned int uNonShortSlotSTACnt = 0;
	unsigned int uLongPreambleSTACnt = 0;
	viawget_wpa_header *wpahdr;  //DavidWang

	spin_lock_irq(&pDevice->lock);

	pDevice->uAssocCount = 0;

	/* assume no non-ERP stations until proven otherwise below */
	pDevice->byERPFlag &= ~(WLAN_SET_ERP_BARKER_MODE(1) | WLAN_SET_ERP_NONERP_PRESENT(1));
	//2008-4-14 <add> by chester for led issue
#ifdef FOR_LED_ON_NOTEBOOK
	/* Edge-detect the GPIO0 hardware radio switch; `status` remembers
	 * the last action (1..4) so each transition is handled only once. */
	MACvGPIOIn(pDevice->PortOffset, &pDevice->byGPIO);
	if (((!(pDevice->byGPIO & GPIO0_DATA) && (pDevice->bHWRadioOff == false)) || ((pDevice->byGPIO & GPIO0_DATA) && (pDevice->bHWRadioOff == true))) && (cc == false)) {
		cc = true;
	} else if (cc == true) {
		if (pDevice->bHWRadioOff == true) {
			if (!(pDevice->byGPIO & GPIO0_DATA)) {
				if (status == 1) goto start;
				status = 1;
				CARDbRadioPowerOff(pDevice);
				pMgmt->sNodeDBTable[0].bActive = false;
				pMgmt->eCurrMode = WMAC_MODE_STANDBY;
				pMgmt->eCurrState = WMAC_STATE_IDLE;
				//netif_stop_queue(pDevice->dev);
				pDevice->bLinkPass = false;
			}
			if (pDevice->byGPIO & GPIO0_DATA) {
				if (status == 2) goto start;
				status = 2;
				CARDbRadioPowerOn(pDevice);
			}
		} else {
			if (pDevice->byGPIO & GPIO0_DATA) {
				if (status == 3) goto start;
				status = 3;
				CARDbRadioPowerOff(pDevice);
				pMgmt->sNodeDBTable[0].bActive = false;
				pMgmt->eCurrMode = WMAC_MODE_STANDBY;
				pMgmt->eCurrState = WMAC_STATE_IDLE;
				//netif_stop_queue(pDevice->dev);
				pDevice->bLinkPass = false;
			}
			if (!(pDevice->byGPIO & GPIO0_DATA)) {
				if (status == 4) goto start;
				status = 4;
				CARDbRadioPowerOn(pDevice);
			}
		}
	}
start:
#endif

	/* protection-mode hold-off: count down, then drop USE_PROTECTION */
	if (pDevice->wUseProtectCntDown > 0) {
		pDevice->wUseProtectCntDown--;
	} else {
		// disable protect mode
		pDevice->byERPFlag &= ~(WLAN_SET_ERP_USE_PROTECTION(1));
	}

	{
		pDevice->byReAssocCount++;
		if ((pDevice->byReAssocCount > 10) && (pDevice->bLinkPass != true)) { //10 sec timeout
			printk("Re-association timeout!!!\n");
			pDevice->byReAssocCount = 0;
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
			// if(pDevice->bWPASuppWextEnabled == true)
			{
				union iwreq_data wrqu;
				memset(&wrqu, 0, sizeof(wrqu));
				wrqu.ap_addr.sa_family = ARPHRD_ETHER;
				PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated)\n");
				wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
			}
#endif
		} else if (pDevice->bLinkPass == true)
			pDevice->byReAssocCount = 0;
	}

#ifdef Calcu_LinkQual
	s_uCalculateLinkQual((void *)pDevice);
#endif

	/* age every node, expire dead stations, gather ERP/PS statistics */
	for (ii = 0; ii < (MAX_NODE_NUM + 1); ii++) {
		if (pMgmt->sNodeDBTable[ii].bActive) {
			// Increase in-activity counter
			pMgmt->sNodeDBTable[ii].uInActiveCount++;

			if (ii > 0) {
				if (pMgmt->sNodeDBTable[ii].uInActiveCount > MAX_INACTIVE_COUNT) {
					BSSvRemoveOneNode(pDevice, ii);
					DBG_PRT(MSG_LEVEL_DEBUG,
						KERN_INFO "Inactive timeout [%d] sec, STA index = [%d] remove\n", MAX_INACTIVE_COUNT, ii);
					continue;
				}

				if (pMgmt->sNodeDBTable[ii].eNodeState >= NODE_ASSOC) {
					pDevice->uAssocCount++;

					// check if Non ERP exist
					if (pMgmt->sNodeDBTable[ii].uInActiveCount < ERP_RECOVER_COUNT) {
						if (!pMgmt->sNodeDBTable[ii].bShortPreamble) {
							pDevice->byERPFlag |= WLAN_SET_ERP_BARKER_MODE(1);
							uLongPreambleSTACnt++;
						}
						if (!pMgmt->sNodeDBTable[ii].bERPExist) {
							pDevice->byERPFlag |= WLAN_SET_ERP_NONERP_PRESENT(1);
							pDevice->byERPFlag |= WLAN_SET_ERP_USE_PROTECTION(1);
						}
						if (!pMgmt->sNodeDBTable[ii].bShortSlotTime)
							uNonShortSlotSTACnt++;
					}
				}

				// check if any STA in PS mode
				if (pMgmt->sNodeDBTable[ii].bPSEnable)
					uSleepySTACnt++;
			}

			// Rate fallback check
			if (!pDevice->bFixRate) {
/*
				if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (ii == 0))
					RATEvTxRateFallBack(pDevice, &(pMgmt->sNodeDBTable[ii]));
*/
				if (ii > 0) {
					// ii = 0 for multicast node (AP & Adhoc)
					RATEvTxRateFallBack((void *)pDevice, &(pMgmt->sNodeDBTable[ii]));
				} else {
					// ii = 0 reserved for unicast AP node (Infra STA)
					// NOTE(review): the un-braced `if` below guards only the
					// next statement.  With PLICE_DEBUG defined, that is the
					// printk, and RATEvTxRateFallBack then runs regardless of
					// eCurrMode — looks unintended; confirm before changing.
					if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA)
#ifdef	PLICE_DEBUG
						printk("SecondCallback:Before:TxDataRate is %d\n", pMgmt->sNodeDBTable[0].wTxDataRate);
#endif
						RATEvTxRateFallBack((void *)pDevice, &(pMgmt->sNodeDBTable[ii]));
#ifdef	PLICE_DEBUG
					printk("SecondCallback:After:TxDataRate is %d\n", pMgmt->sNodeDBTable[0].wTxDataRate);
#endif
				}
			}

			// check if pending PS queue
			if (pMgmt->sNodeDBTable[ii].wEnQueueCnt != 0) {
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index= %d, Queue = %d pending \n",
					ii, pMgmt->sNodeDBTable[ii].wEnQueueCnt);
				if ((ii > 0) && (pMgmt->sNodeDBTable[ii].wEnQueueCnt > 15)) {
					BSSvRemoveOneNode(pDevice, ii);
					DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Pending many queues PS STA Index = %d remove \n", ii);
					continue;
				}
			}
		}
	}

	/* AP + 11g: apply protection / short-slot / Barker preamble based
	 * on the station census gathered above */
	if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->eCurrentPHYType == PHY_TYPE_11G)) {
		// on/off protect mode
		if (WLAN_GET_ERP_USE_PROTECTION(pDevice->byERPFlag)) {
			if (!pDevice->bProtectMode) {
				MACvEnableProtectMD(pDevice->PortOffset);
				pDevice->bProtectMode = true;
			}
		} else {
			if (pDevice->bProtectMode) {
				MACvDisableProtectMD(pDevice->PortOffset);
				pDevice->bProtectMode = false;
			}
		}
		// on/off short slot time
		if (uNonShortSlotSTACnt > 0) {
			if (pDevice->bShortSlotTime) {
				pDevice->bShortSlotTime = false;
				BBvSetShortSlotTime(pDevice);
				vUpdateIFS((void *)pDevice);
			}
		} else {
			if (!pDevice->bShortSlotTime) {
				pDevice->bShortSlotTime = true;
				BBvSetShortSlotTime(pDevice);
				vUpdateIFS((void *)pDevice);
			}
		}
		// on/off barker long preamble mode
		if (uLongPreambleSTACnt > 0) {
			if (!pDevice->bBarkerPreambleMd) {
				MACvEnableBarkerPreambleMd(pDevice->PortOffset);
				pDevice->bBarkerPreambleMd = true;
			}
		} else {
			if (pDevice->bBarkerPreambleMd) {
				MACvDisableBarkerPreambleMd(pDevice->PortOffset);
				pDevice->bBarkerPreambleMd = false;
			}
		}
	}

	// Check if any STA in PS mode, enable DTIM multicast deliver
	if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
		if (uSleepySTACnt > 0)
			pMgmt->sNodeDBTable[0].bPSEnable = true;
		else
			pMgmt->sNodeDBTable[0].bPSEnable = false;
	}

	pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
	pCurrSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;

	if ((pMgmt->eCurrMode == WMAC_MODE_STANDBY) ||
	    (pMgmt->eCurrMode == WMAC_MODE_ESS_STA)) {
		if (pMgmt->sNodeDBTable[0].bActive) { // Assoc with BSS
			// DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "Callback inactive Count = [%d]\n", pMgmt->sNodeDBTable[0].uInActiveCount);
			//if (pDevice->bUpdateBBVGA) {
			//	s_vCheckSensitivity((void *) pDevice);
			//}

			if (pDevice->bUpdateBBVGA) {
				// s_vCheckSensitivity((void *) pDevice);
				s_vCheckPreEDThreshold((void *)pDevice);
			}

			/* half of the lost-beacon window: fall back to max BB
			 * sensitivity to try to re-acquire the AP */
			if ((pMgmt->sNodeDBTable[0].uInActiveCount >= (LOST_BEACON_COUNT/2)) &&
			    (pDevice->byBBVGACurrent != pDevice->abyBBVGA[0])) {
				pDevice->byBBVGANew = pDevice->abyBBVGA[0];
				bScheduleCommand((void *) pDevice, WLAN_CMD_CHANGE_BBSENSITIVITY, NULL);
			}

			/* full window elapsed: declare the link dead and notify
			 * userspace (wpa device and/or wext) */
			if (pMgmt->sNodeDBTable[0].uInActiveCount >= LOST_BEACON_COUNT) {
				pMgmt->sNodeDBTable[0].bActive = false;
				pMgmt->eCurrMode = WMAC_MODE_STANDBY;
				pMgmt->eCurrState = WMAC_STATE_IDLE;
				netif_stop_queue(pDevice->dev);
				pDevice->bLinkPass = false;
				pDevice->bRoaming = true;
				DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Lost AP beacon [%d] sec, disconnected !\n", pMgmt->sNodeDBTable[0].uInActiveCount);
				if ((pDevice->bWPADEVUp) && (pDevice->skb != NULL)) {
					wpahdr = (viawget_wpa_header *)pDevice->skb->data;
					wpahdr->type = VIAWGET_DISASSOC_MSG;
					wpahdr->resp_ie_len = 0;
					wpahdr->req_ie_len = 0;
					skb_put(pDevice->skb, sizeof(viawget_wpa_header));
					pDevice->skb->dev = pDevice->wpadev;
					skb_reset_mac_header(pDevice->skb);
					pDevice->skb->pkt_type = PACKET_HOST;
					pDevice->skb->protocol = htons(ETH_P_802_2);
					memset(pDevice->skb->cb, 0, sizeof(pDevice->skb->cb));
					netif_rx(pDevice->skb);
					pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
				}
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
				// if(pDevice->bWPASuppWextEnabled == true)
				{
					union iwreq_data wrqu;
					memset(&wrqu, 0, sizeof(wrqu));
					wrqu.ap_addr.sa_family = ARPHRD_ETHER;
					PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated)\n");
					wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
				}
#endif
			}
		} else if (pItemSSID->len != 0) {
			/* not associated but a desired SSID exists: wait ~10 ticks
			 * then kick off a scan + (re)join */
			if (pDevice->uAutoReConnectTime < 10) {
				pDevice->uAutoReConnectTime++;
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
				//network manager support need not do Roaming scan???
				if (pDevice->bWPASuppWextEnabled == true)
					pDevice->uAutoReConnectTime = 0;
#endif
			} else {
				//mike use old encryption status for wpa reauthen
				if (pDevice->bWPADEVUp)
					pDevice->eEncryptionStatus = pDevice->eOldEncryptionStatus;

				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Roaming ...\n");
				BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass);
				pMgmt->eScanType = WMAC_SCAN_ACTIVE;
				bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
				bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, pMgmt->abyDesireSSID);
				pDevice->uAutoReConnectTime = 0;
			}
		}
	}

	if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
		// if adhoc started which essid is NULL string, rescanning.
		if ((pMgmt->eCurrState == WMAC_STATE_STARTED) && (pCurrSSID->len == 0)) {
			if (pDevice->uAutoReConnectTime < 10) {
				pDevice->uAutoReConnectTime++;
			} else {
				DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Adhoc re-scanning ...\n");
				pMgmt->eScanType = WMAC_SCAN_ACTIVE;
				bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
				bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
				pDevice->uAutoReConnectTime = 0;
			};
		}

		if (pMgmt->eCurrState == WMAC_STATE_JOINTED) {
			if (pDevice->bUpdateBBVGA) {
				//s_vCheckSensitivity((void *) pDevice);
				s_vCheckPreEDThreshold((void *)pDevice);
			}
			/* all peers silent: drop back to STARTED (beaconing alone) */
			if (pMgmt->sNodeDBTable[0].uInActiveCount >= ADHOC_LOST_BEACON_COUNT) {
				DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Lost other STA beacon [%d] sec, started !\n", pMgmt->sNodeDBTable[0].uInActiveCount);
				pMgmt->sNodeDBTable[0].uInActiveCount = 0;
				pMgmt->eCurrState = WMAC_STATE_STARTED;
				netif_stop_queue(pDevice->dev);
				pDevice->bLinkPass = false;
			}
		}
	}

	spin_unlock_irq(&pDevice->lock);

	/* re-arm: run again in one second */
	pMgmt->sTimerSecondCallback.expires = RUN_AT(HZ);
	add_timer(&pMgmt->sTimerSecondCallback);

	return;
}

/*+
 *
 * Routine Description:
 *
 *
 *  Update Tx attemps, Tx failure counter in Node DB
 *
 *
 * Return Value:
 *    none.
 *
-*/
/*
 * Account one completed transmission into the node-DB statistics.
 *
 * byTsr0/byTsr1 are the hardware transmit status registers: TSR0_NCR is
 * the retry count, TSR1_TERR the terminal-error flag.  pbyBuffer points
 * at the tx FIFO header (PSTxBufHead) followed, uFIFOHeaderSize bytes
 * in, by the 802.11 MAC header.  For rates >= 18M with auto-fallback
 * enabled, the rate actually used after N retries is looked up in the
 * awHWRetry0/awHWRetry1 hardware fallback tables (capped at index 4).
 * Infra-STA traffic is booked on node 0; IBSS/AP traffic on the node
 * matching the frame's Addr1.
 */
void BSSvUpdateNodeTxCounter(void *hDeviceContext, unsigned char byTsr0,
			     unsigned char byTsr1, unsigned char *pbyBuffer,
			     unsigned int uFIFOHeaderSize)
{
	PSDevice pDevice = (PSDevice)hDeviceContext;
	PSMgmtObject pMgmt = pDevice->pMgmt;
	unsigned int uNodeIndex = 0;
	unsigned char byTxRetry = (byTsr0 & TSR0_NCR);
	PSTxBufHead pTxBufHead;
	PS802_11Header pMACHeader;
	unsigned short wRate;
	unsigned short wFallBackRate = RATE_1M;
	unsigned char byFallBack;
	unsigned int ii;

	/* which auto-fallback table the hardware was told to use */
	pTxBufHead = (PSTxBufHead) pbyBuffer;
	if (pTxBufHead->wFIFOCtl & FIFOCTL_AUTO_FB_0) {
		byFallBack = AUTO_FB_0;
	} else if (pTxBufHead->wFIFOCtl & FIFOCTL_AUTO_FB_1) {
		byFallBack = AUTO_FB_1;
	} else {
		byFallBack = AUTO_FB_NONE;
	}
	wRate = pTxBufHead->wReserved; /* initial tx rate stashed here by the tx path */

	// Only Unicast using support rates
	if (pTxBufHead->wFIFOCtl & FIFOCTL_NEEDACK) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"wRate %04X, byTsr0 %02X, byTsr1 %02X\n", wRate, byTsr0, byTsr1);
		if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA) {
			pMgmt->sNodeDBTable[0].uTxAttempts += 1;
			if ((byTsr1 & TSR1_TERR) == 0) {
				// transmit success, TxAttempts at least plus one
				pMgmt->sNodeDBTable[0].uTxOk[MAX_RATE]++;
				if ((byFallBack == AUTO_FB_NONE) || (wRate < RATE_18M)) {
					wFallBackRate = wRate;
				} else if (byFallBack == AUTO_FB_0) {
					if (byTxRetry < 5)
						wFallBackRate = awHWRetry0[wRate-RATE_18M][byTxRetry];
					else
						wFallBackRate = awHWRetry0[wRate-RATE_18M][4];
				} else if (byFallBack == AUTO_FB_1) {
					if (byTxRetry < 5)
						wFallBackRate = awHWRetry1[wRate-RATE_18M][byTxRetry];
					else
						wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
				}
				pMgmt->sNodeDBTable[0].uTxOk[wFallBackRate]++;
			} else {
				pMgmt->sNodeDBTable[0].uTxFailures++;
			}
			pMgmt->sNodeDBTable[0].uTxRetry += byTxRetry;
			if (byTxRetry != 0) {
				/* each retry failed at the rate the fallback table
				 * says was in effect for that attempt */
				pMgmt->sNodeDBTable[0].uTxFail[MAX_RATE] += byTxRetry;
				if ((byFallBack == AUTO_FB_NONE) || (wRate < RATE_18M)) {
					pMgmt->sNodeDBTable[0].uTxFail[wRate] += byTxRetry;
				} else if (byFallBack == AUTO_FB_0) {
					for (ii = 0; ii < byTxRetry; ii++) {
						if (ii < 5) {
							wFallBackRate = awHWRetry0[wRate-RATE_18M][ii];
						} else {
							wFallBackRate = awHWRetry0[wRate-RATE_18M][4];
						}
						pMgmt->sNodeDBTable[0].uTxFail[wFallBackRate]++;
					}
				} else if (byFallBack == AUTO_FB_1) {
					for (ii = 0; ii < byTxRetry; ii++) {
						if (ii < 5)
							wFallBackRate = awHWRetry1[wRate-RATE_18M][ii];
						else
							wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
						pMgmt->sNodeDBTable[0].uTxFail[wFallBackRate]++;
					}
				}
			}
		}

		/* IBSS/AP: same accounting, but on the peer's own node entry */
		if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) ||
		    (pMgmt->eCurrMode == WMAC_MODE_ESS_AP)) {
			pMACHeader = (PS802_11Header)(pbyBuffer + uFIFOHeaderSize);
			if (BSSDBbIsSTAInNodeDB((void *)pMgmt, &(pMACHeader->abyAddr1[0]), &uNodeIndex)) {
				pMgmt->sNodeDBTable[uNodeIndex].uTxAttempts += 1;
				if ((byTsr1 & TSR1_TERR) == 0) {
					// transmit success, TxAttempts at least plus one
					pMgmt->sNodeDBTable[uNodeIndex].uTxOk[MAX_RATE]++;
					if ((byFallBack == AUTO_FB_NONE) || (wRate < RATE_18M)) {
						wFallBackRate = wRate;
					} else if (byFallBack == AUTO_FB_0) {
						if (byTxRetry < 5)
							wFallBackRate = awHWRetry0[wRate-RATE_18M][byTxRetry];
						else
							wFallBackRate = awHWRetry0[wRate-RATE_18M][4];
					} else if (byFallBack == AUTO_FB_1) {
						if (byTxRetry < 5)
							wFallBackRate = awHWRetry1[wRate-RATE_18M][byTxRetry];
						else
							wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
					}
					pMgmt->sNodeDBTable[uNodeIndex].uTxOk[wFallBackRate]++;
				} else {
					pMgmt->sNodeDBTable[uNodeIndex].uTxFailures++;
				}
				pMgmt->sNodeDBTable[uNodeIndex].uTxRetry += byTxRetry;
				if (byTxRetry != 0) {
					pMgmt->sNodeDBTable[uNodeIndex].uTxFail[MAX_RATE] += byTxRetry;
					if ((byFallBack == AUTO_FB_NONE) || (wRate < RATE_18M)) {
						pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wRate] += byTxRetry;
					} else if (byFallBack == AUTO_FB_0) {
						for (ii = 0; ii < byTxRetry; ii++) {
							if (ii < 5)
								wFallBackRate = awHWRetry0[wRate-RATE_18M][ii];
							else
								wFallBackRate = awHWRetry0[wRate-RATE_18M][4];
							pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wFallBackRate]++;
						}
					} else if (byFallBack == AUTO_FB_1) {
						for (ii = 0; ii < byTxRetry; ii++) {
							if (ii < 5)
								wFallBackRate = awHWRetry1[wRate-RATE_18M][ii];
							else
								wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
							pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wFallBackRate]++;
						}
					}
				}
			}
		}
	}

	return;
}

/*+
 *
 * Routine Description:
 *    Clear Nodes & skb in DB Table
 *
 *
 * Parameters:
 *  In:
 *      hDeviceContext  - The adapter context.
 *      uStartIndex     - starting index
 *  Out:
 *      none
 *
 * Return Value:
 *    None.
 *
-*/
/*
 * Wipe node-DB entries from uStartIndex upward, freeing any buffered
 * power-save frames first (the queue is only drained if it was ever
 * initialised, i.e. its next pointer is non-NULL).
 */
void BSSvClearNodeDBTable(void *hDeviceContext, unsigned int uStartIndex)
{
	PSDevice pDevice = (PSDevice)hDeviceContext;
	PSMgmtObject pMgmt = pDevice->pMgmt;
	struct sk_buff *skb;
	unsigned int ii;

	for (ii = uStartIndex; ii < (MAX_NODE_NUM + 1); ii++) {
		if (pMgmt->sNodeDBTable[ii].bActive) {
			// check if sTxPSQueue has been initialised
			if (pMgmt->sNodeDBTable[ii].sTxPSQueue.next != NULL) {
				while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) != NULL) {
					DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "PS skb != NULL %d\n", ii);
					dev_kfree_skb(skb);
				}
			}
			memset(&pMgmt->sNodeDBTable[ii], 0, sizeof(KnownNodeDB));
		}
	}

	return;
};

/*
 * Periodic RX-sensitivity tuner: averages the current BSS's recent dBm
 * samples and picks the matching BB VGA gain level; a sensitivity
 * change command is only scheduled after the new level has been stable
 * for BB_VGA_CHANGE_THRESHOLD consecutive calls.  Skipped entirely on
 * early VT3253/RFMD2959 parts in ad-hoc mode.
 */
void s_vCheckSensitivity(void *hDeviceContext)
{
	PSDevice pDevice = (PSDevice)hDeviceContext;
	PKnownBSS pBSSList = NULL;
	PSMgmtObject pMgmt = pDevice->pMgmt;
	int ii;

	if ((pDevice->byLocalID <= REV_ID_VT3253_A1) && (pDevice->byRFType == RF_RFMD2959) &&
	    (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)) {
		return;
	}

	if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) ||
	    ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
		pBSSList = BSSpAddrIsInBSSList(pDevice, pMgmt->abyCurrBSSID, (PWLAN_IE_SSID)pMgmt->abyCurrSSID);
		if (pBSSList != NULL) {
			// Update BB Reg if RSSI is too strong.
			long LocalldBmAverage = 0;
			long uNumofdBm = 0;
			/* average only the non-zero (valid) samples */
			for (ii = 0; ii < RSSI_STAT_COUNT; ii++) {
				if (pBSSList->ldBmAverage[ii] != 0) {
					uNumofdBm++;
					LocalldBmAverage += pBSSList->ldBmAverage[ii];
				}
			}
			if (uNumofdBm > 0) {
				LocalldBmAverage = LocalldBmAverage/uNumofdBm;
				for (ii = 0; ii < BB_VGA_LEVEL; ii++) {
					DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"LocalldBmAverage:%ld, %ld %02x\n", LocalldBmAverage, pDevice->ldBmThreshold[ii], pDevice->abyBBVGA[ii]);
					if (LocalldBmAverage < pDevice->ldBmThreshold[ii]) {
						pDevice->byBBVGANew = pDevice->abyBBVGA[ii];
						break;
					}
				}
				if (pDevice->byBBVGANew != pDevice->byBBVGACurrent) {
					pDevice->uBBVGADiffCount++;
					if (pDevice->uBBVGADiffCount >= BB_VGA_CHANGE_THRESHOLD)
						bScheduleCommand((void *) pDevice, WLAN_CMD_CHANGE_BBSENSITIVITY, NULL);
				} else {
					pDevice->uBBVGADiffCount = 0;
				}
			}
		}
	}
}

/* Drop the "selected" mark from every entry in the scanned-BSS list. */
void BSSvClearAnyBSSJoinRecord(void *hDeviceContext)
{
	PSDevice pDevice = (PSDevice)hDeviceContext;
	PSMgmtObject pMgmt = pDevice->pMgmt;
	unsigned int ii;

	for (ii = 0; ii < MAX_BSS_NUM; ii++) {
		pMgmt->sBSSList[ii].bSelected = false;
	}
	return;
}

#ifdef Calcu_LinkQual
/*
 * Derive LinkQuality (0..100) and SignalStren (0..100) from the last
 * second's tx/rx statistics and RSSI, then reset the counters.
 * Weighting: RSSI up to 4000, tx-success ratio up to 4000, rx-FCS-ok
 * ratio up to 2000; counts below 6 are treated as "no data" and score
 * full marks to avoid noise from tiny samples.
 */
void s_uCalculateLinkQual(void *hDeviceContext)
{
	PSDevice pDevice = (PSDevice)hDeviceContext;
	unsigned long TxOkRatio, TxCnt;
	unsigned long RxOkRatio, RxCnt;
	unsigned long RssiRatio;
	long ldBm;

	TxCnt = pDevice->scStatistic.TxNoRetryOkCount +
		pDevice->scStatistic.TxRetryOkCount +
		pDevice->scStatistic.TxFailCount;
	RxCnt = pDevice->scStatistic.RxFcsErrCnt +
		pDevice->scStatistic.RxOkCnt;
	TxOkRatio = (TxCnt < 6) ? 4000 : ((pDevice->scStatistic.TxNoRetryOkCount * 4000) / TxCnt);
	RxOkRatio = (RxCnt < 6) ? 2000 : ((pDevice->scStatistic.RxOkCnt * 2000) / RxCnt);
	//decide link quality
	if (pDevice->bLinkPass != true) {
		pDevice->scStatistic.LinkQuality = 0;
		pDevice->scStatistic.SignalStren = 0;
	} else {
		RFvRSSITodBm(pDevice, (unsigned char)(pDevice->uCurrRSSI), &ldBm);
		/* map -50 dBm (strong) .. -90 dBm (weak) onto 4000..0 */
		if (-ldBm < 50) {
			RssiRatio = 4000;
		} else if (-ldBm > 90) {
			RssiRatio = 0;
		} else {
			RssiRatio = (40-(-ldBm-50))*4000/40;
		}
		pDevice->scStatistic.SignalStren = RssiRatio/40;
		pDevice->scStatistic.LinkQuality = (RssiRatio+TxOkRatio+RxOkRatio)/100;
	}
	/* restart the one-second accounting window */
	pDevice->scStatistic.RxFcsErrCnt = 0;
	pDevice->scStatistic.RxOkCnt = 0;
	pDevice->scStatistic.TxFailCount = 0;
	pDevice->scStatistic.TxNoRetryOkCount = 0;
	pDevice->scStatistic.TxRetryOkCount = 0;
	return;
}
#endif

/*
 * While associated (or joined in ad-hoc), refresh the baseband
 * pre-energy-detect RSSI threshold from the current BSS's averaged
 * dBm range (two's-complement negation turns the negative dBm into
 * the positive register value).
 */
void s_vCheckPreEDThreshold(void *hDeviceContext)
{
	PSDevice pDevice = (PSDevice)hDeviceContext;
	PKnownBSS pBSSList = NULL;
	PSMgmtObject pMgmt = &(pDevice->sMgmtObj);

	if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) ||
	    ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
		pBSSList = BSSpAddrIsInBSSList(pDevice, pMgmt->abyCurrBSSID, (PWLAN_IE_SSID)pMgmt->abyCurrSSID);
		if (pBSSList != NULL) {
			pDevice->byBBPreEDRSSI = (unsigned char) (~(pBSSList->ldBmAverRange) + 1);
			//BBvUpdatePreEDThreshold(pDevice, false);
		}
	}
	return;
}
gpl-2.0
z8cpaul/lsikernel-3.14
crypto/michael_mic.c
12277
3701
/*
 * Cryptographic API
 *
 * Michael MIC (IEEE 802.11i/TKIP) keyed digest
 *
 * Copyright (c) 2004 Jouni Malinen <j@w1.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <crypto/internal/hash.h>
#include <asm/byteorder.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/types.h>

/* Per-tfm state: the 64-bit key split into two 32-bit little-endian halves. */
struct michael_mic_ctx {
	u32 l, r;
};

/* Per-request state: running L/R halves plus up to 3 buffered input
 * bytes (Michael consumes the message in 32-bit words). */
struct michael_mic_desc_ctx {
	u8 pending[4];
	size_t pending_len;

	u32 l, r;
};

/* Swap the two bytes within each 16-bit half of the word (the "XSWAP"
 * primitive from the Michael specification). */
static inline u32 xswap(u32 val)
{
	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
}

/* One Michael block function application: mixes one message word
 * (already XORed into l by the caller) into the (l, r) state. */
#define michael_block(l, r)	\
do {				\
	r ^= rol32(l, 17);	\
	l += r;			\
	r ^= xswap(l);		\
	l += r;			\
	r ^= rol32(l, 3);	\
	l += r;			\
	r ^= ror32(l, 2);	\
	l += r;			\
} while (0)

/* Start a new digest: copy the key halves into the running state. */
static int michael_init(struct shash_desc *desc)
{
	struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc);
	struct michael_mic_ctx *ctx = crypto_shash_ctx(desc->tfm);

	mctx->pending_len = 0;
	mctx->l = ctx->l;
	mctx->r = ctx->r;

	return 0;
}

/* Absorb message bytes: complete any partially-buffered word first,
 * then process whole 32-bit LE words, buffering the remainder. */
static int michael_update(struct shash_desc *desc, const u8 *data,
			   unsigned int len)
{
	struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc);
	const __le32 *src;

	if (mctx->pending_len) {
		int flen = 4 - mctx->pending_len;
		if (flen > len)
			flen = len;
		memcpy(&mctx->pending[mctx->pending_len], data, flen);
		mctx->pending_len += flen;
		data += flen;
		len -= flen;

		if (mctx->pending_len < 4)
			return 0;

		src = (const __le32 *)mctx->pending;
		mctx->l ^= le32_to_cpup(src);
		michael_block(mctx->l, mctx->r);
		mctx->pending_len = 0;
	}

	/* cra_alignmask = 3 guarantees data is 4-byte aligned here */
	src = (const __le32 *)data;

	while (len >= 4) {
		mctx->l ^= le32_to_cpup(src++);
		michael_block(mctx->l, mctx->r);
		len -= 4;
	}

	if (len > 0) {
		mctx->pending_len = len;
		memcpy(mctx->pending, src, len);
	}

	return 0;
}

/* Finish: append the 0x5a terminator byte into the last partial word,
 * run two final block applications (the second over an implicit zero
 * word), and emit the 8-byte MIC as two LE words. */
static int michael_final(struct shash_desc *desc, u8 *out)
{
	struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc);
	u8 *data = mctx->pending;
	__le32 *dst = (__le32 *)out;

	/* Last block and padding (0x5a, 4..7 x 0) */
	switch (mctx->pending_len) {
	case 0:
		mctx->l ^= 0x5a;
		break;
	case 1:
		mctx->l ^= data[0] | 0x5a00;
		break;
	case 2:
		mctx->l ^= data[0] | (data[1] << 8) | 0x5a0000;
		break;
	case 3:
		mctx->l ^= data[0] | (data[1] << 8) | (data[2] << 16) |
			0x5a000000;
		break;
	}
	michael_block(mctx->l, mctx->r);
	/* l ^= 0; */
	michael_block(mctx->l, mctx->r);

	dst[0] = cpu_to_le32(mctx->l);
	dst[1] = cpu_to_le32(mctx->r);

	return 0;
}

/* Install the 8-byte key as two little-endian 32-bit halves;
 * any other key length is rejected with -EINVAL. */
static int michael_setkey(struct crypto_shash *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct michael_mic_ctx *mctx = crypto_shash_ctx(tfm);

	const __le32 *data = (const __le32 *)key;

	if (keylen != 8) {
		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	mctx->l = le32_to_cpu(data[0]);
	mctx->r = le32_to_cpu(data[1]);
	return 0;
}

static struct shash_alg alg = {
	.digestsize		=	8,
	.setkey			=	michael_setkey,
	.init			=	michael_init,
	.update			=	michael_update,
	.final			=	michael_final,
	.descsize		=	sizeof(struct michael_mic_desc_ctx),
	.base			=	{
		.cra_name		=	"michael_mic",
		.cra_blocksize		=	8,
		.cra_alignmask		=	3,
		.cra_ctxsize		=	sizeof(struct michael_mic_ctx),
		.cra_module		=	THIS_MODULE,
	}
};

static int __init michael_mic_init(void)
{
	return crypto_register_shash(&alg);
}

static void __exit michael_mic_exit(void)
{
	crypto_unregister_shash(&alg);
}

module_init(michael_mic_init);
module_exit(michael_mic_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Michael MIC");
MODULE_AUTHOR("Jouni Malinen <j@w1.fi>");
gpl-2.0
ircncl/linux-grsec-incremental
drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
246
7416
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <core/gpuobj.h>

#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/vm.h>

#include "priv.h"

/* NV50-family BAR subdev: manages the GPU virtual-memory mappings that
 * back PCI BAR1 (user-visible framebuffer aperture) and BAR3 (kernel
 * aperture used for object access). */
struct nv50_bar_priv {
	struct nouveau_bar base;
	spinlock_t lock;
	struct nouveau_gpuobj *mem;	/* heap holding the objects below */
	struct nouveau_gpuobj *pad;
	struct nouveau_gpuobj *pgd;	/* page directory for both BAR VMs */
	struct nouveau_vm *bar1_vm;
	struct nouveau_gpuobj *bar1;	/* BAR1 DMA object */
	struct nouveau_vm *bar3_vm;
	struct nouveau_gpuobj *bar3;	/* BAR3 DMA object */
};

/* Map `mem` into the kernel (BAR3) aperture; fills in *vma. */
static int
nv50_bar_kmap(struct nouveau_bar *bar, struct nouveau_mem *mem,
	      u32 flags, struct nouveau_vma *vma)
{
	struct nv50_bar_priv *priv = (void *)bar;
	int ret;

	/* mem->size is in pages; VM sizes are in bytes (12-bit shift) */
	ret = nouveau_vm_get(priv->bar3_vm, mem->size << 12, 12, flags, vma);
	if (ret)
		return ret;

	nouveau_vm_map(vma, mem);
	return 0;
}

/* Map `mem` into the user (BAR1) aperture; fills in *vma. */
static int
nv50_bar_umap(struct nouveau_bar *bar, struct nouveau_mem *mem,
	      u32 flags, struct nouveau_vma *vma)
{
	struct nv50_bar_priv *priv = (void *)bar;
	int ret;

	ret = nouveau_vm_get(priv->bar1_vm, mem->size << 12, 12, flags, vma);
	if (ret)
		return ret;

	nouveau_vm_map(vma, mem);
	return 0;
}

/* Undo a kmap/umap: unmap the pages and release the VM range. */
static void
nv50_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma)
{
	nouveau_vm_unmap(vma);
	nouveau_vm_put(vma);
}

/* Flush BAR writes on original NV50 (register 0x00330c). */
static void
nv50_bar_flush(struct nouveau_bar *bar)
{
	struct nv50_bar_priv *priv = (void *)bar;
	unsigned long flags;
	spin_lock_irqsave(&priv->lock, flags);
	nv_wr32(priv, 0x00330c, 0x00000001);
	if (!nv_wait(priv, 0x00330c, 0x00000002, 0x00000000))
		nv_warn(priv, "flush timeout\n");
	spin_unlock_irqrestore(&priv->lock, flags);
}

/* Flush BAR writes on NV84 and later (register moved to 0x070000);
 * shared with other subdevs, hence non-static. */
void
nv84_bar_flush(struct nouveau_bar *bar)
{
	struct nv50_bar_priv *priv = (void *)bar;
	unsigned long flags;
	spin_lock_irqsave(&priv->lock, flags);
	nv_wr32(bar, 0x070000, 0x00000001);
	if (!nv_wait(priv, 0x070000, 0x00000002, 0x00000000))
		nv_warn(priv, "flush timeout\n");
	spin_unlock_irqrestore(&priv->lock, flags);
}

/* Construct the BAR subdev: build the shared heap/page directory, then
 * a VM + DMA object for BAR3 (at VA 0x0100000000, sized from the PCI
 * resource) and for BAR1 (at VA 0).  `limit` is decremented to the
 * inclusive end address before being written into the DMA objects. */
static int
nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	      struct nouveau_oclass *oclass, void *data, u32 size,
	      struct nouveau_object **pobject)
{
	struct nouveau_device *device = nv_device(parent);
	struct nouveau_object *heap;
	struct nouveau_vm *vm;
	struct nv50_bar_priv *priv;
	u64 start, limit;
	int ret;

	ret = nouveau_bar_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
				 NVOBJ_FLAG_HEAP, &priv->mem);
	heap = nv_object(priv->mem);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(priv), heap,
				 (device->chipset == 0x50) ? 0x1400 : 0x0200,
				 0, 0, &priv->pad);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(priv), heap, 0x4000, 0, 0, &priv->pgd);
	if (ret)
		return ret;

	/* BAR3 */
	start = 0x0100000000ULL;
	limit = start + pci_resource_len(device->pdev, 3);

	ret = nouveau_vm_new(device, start, limit, start, &vm);
	if (ret)
		return ret;

	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

	/* one 8-byte PTE per 4KiB page; limit-- converts to inclusive end
	 * for the DMA-object writes below */
	ret = nouveau_gpuobj_new(nv_object(priv), heap,
				 ((limit-- - start) >> 12) * 8, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &vm->pgt[0].obj[0]);
	vm->pgt[0].refcount[0] = 1;
	if (ret)
		return ret;

	ret = nouveau_vm_ref(vm, &priv->bar3_vm, priv->pgd);
	nouveau_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar3);
	if (ret)
		return ret;

	nv_wo32(priv->bar3, 0x00, 0x7fc00000);
	nv_wo32(priv->bar3, 0x04, lower_32_bits(limit));
	nv_wo32(priv->bar3, 0x08, lower_32_bits(start));
	nv_wo32(priv->bar3, 0x0c, upper_32_bits(limit) << 24 |
				  upper_32_bits(start));
	nv_wo32(priv->bar3, 0x10, 0x00000000);
	nv_wo32(priv->bar3, 0x14, 0x00000000);

	/* BAR1 */
	start = 0x0000000000ULL;
	limit = start + pci_resource_len(device->pdev, 1);

	ret = nouveau_vm_new(device, start, limit--, start, &vm);
	if (ret)
		return ret;

	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

	ret = nouveau_vm_ref(vm, &priv->bar1_vm, priv->pgd);
	nouveau_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar1);
	if (ret)
		return ret;

	nv_wo32(priv->bar1, 0x00, 0x7fc00000);
	nv_wo32(priv->bar1, 0x04, lower_32_bits(limit));
	nv_wo32(priv->bar1, 0x08, lower_32_bits(start));
	nv_wo32(priv->bar1, 0x0c, upper_32_bits(limit) << 24 |
				  upper_32_bits(start));
	nv_wo32(priv->bar1, 0x10, 0x00000000);
	nv_wo32(priv->bar1, 0x14, 0x00000000);

	priv->base.alloc = nouveau_bar_alloc;
	priv->base.kmap = nv50_bar_kmap;
	priv->base.umap = nv50_bar_umap;
	priv->base.unmap = nv50_bar_unmap;
	/* the flush register moved on NV84+ */
	if (device->chipset == 0x50)
		priv->base.flush = nv50_bar_flush;
	else
		priv->base.flush = nv84_bar_flush;
	spin_lock_init(&priv->lock);
	return 0;
}

/* Destructor: release objects/VM refs in reverse construction order. */
static void
nv50_bar_dtor(struct nouveau_object *object)
{
	struct nv50_bar_priv *priv = (void *)object;
	nouveau_gpuobj_ref(NULL, &priv->bar1);
	nouveau_vm_ref(NULL, &priv->bar1_vm, priv->pgd);
	nouveau_gpuobj_ref(NULL, &priv->bar3);
	if (priv->bar3_vm) {
		nouveau_gpuobj_ref(NULL, &priv->bar3_vm->pgt[0].obj[0]);
		nouveau_vm_ref(NULL, &priv->bar3_vm, priv->pgd);
	}
	nouveau_gpuobj_ref(NULL, &priv->pgd);
	nouveau_gpuobj_ref(NULL, &priv->pad);
	nouveau_gpuobj_ref(NULL, &priv->mem);
	nouveau_bar_destroy(&priv->base);
}

/* Hardware init: reset the BAR unit, flush the VM, then point the
 * hardware at the instance memory and the BAR1/BAR3 DMA objects. */
static int
nv50_bar_init(struct nouveau_object *object)
{
	struct nv50_bar_priv *priv = (void *)object;
	int ret;

	ret = nouveau_bar_init(&priv->base);
	if (ret)
		return ret;

	nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
	nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
	nv_wr32(priv, 0x100c80, 0x00060001);
	if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000)) {
		nv_error(priv, "vm flush timeout\n");
		return -EBUSY;
	}

	nv_wr32(priv, 0x001704, 0x00000000 | priv->mem->addr >> 12);
	nv_wr32(priv, 0x001704, 0x40000000 | priv->mem->addr >> 12);
	nv_wr32(priv, 0x001708, 0x80000000 | priv->bar1->node->offset >> 4);
	nv_wr32(priv, 0x00170c, 0x80000000 | priv->bar3->node->offset >> 4);
	return 0;
}

static int
nv50_bar_fini(struct nouveau_object *object, bool suspend)
{
	struct nv50_bar_priv *priv = (void *)object;
	return nouveau_bar_fini(&priv->base, suspend);
}

struct nouveau_oclass
nv50_bar_oclass = {
	.handle = NV_SUBDEV(BAR, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_bar_ctor,
		.dtor = nv50_bar_dtor,
		.init = nv50_bar_init,
		.fini = nv50_bar_fini,
	},
};
gpl-2.0
rbauduin/mptcp
drivers/s390/block/dcssblk.c
246
26859
/*
 * dcssblk.c -- the S/390 block driver for dcss memory
 *
 * Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer
 */

#define KMSG_COMPONENT "dcssblk"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <asm/extmem.h>
#include <asm/io.h>

#define DCSSBLK_NAME "dcssblk"
#define DCSSBLK_MINORS_PER_DISK 1
#define DCSSBLK_PARM_LEN 400
#define DCSS_BUS_ID_SIZE 20

static int dcssblk_open(struct block_device *bdev, fmode_t mode);
static void dcssblk_release(struct gendisk *disk, fmode_t mode);
static void dcssblk_make_request(struct request_queue *q, struct bio *bio);
static int dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
				 void **kaddr, unsigned long *pfn);

/* "segment(segment)..." module parameter, parsed at init time */
static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";

static int dcssblk_major;
static const struct block_device_operations dcssblk_devops = {
	.owner   	= THIS_MODULE,
	.open    	= dcssblk_open,
	.release 	= dcssblk_release,
	.direct_access 	= dcssblk_direct_access,
};

/* One block device, possibly spanning several contiguous DCSS segments. */
struct dcssblk_dev_info {
	struct list_head lh;	/* link in dcssblk_devices */
	struct device dev;
	char segment_name[DCSS_BUS_ID_SIZE];
	atomic_t use_count;	/* open() refcount */
	struct gendisk *gd;
	unsigned long start;	/* lowest mapped address */
	unsigned long end;	/* highest mapped address */
	int segment_type;
	unsigned char save_pending;
	unsigned char is_shared;
	struct request_queue *dcssblk_queue;
	int num_of_segments;
	struct list_head seg_list;	/* the segment_info members */
};

/* One underlying DCSS segment of a (possibly multi-segment) device. */
struct segment_info {
	struct list_head lh;	/* link in dcssblk_dev_info.seg_list */
	char segment_name[DCSS_BUS_ID_SIZE];
	unsigned long start;
	unsigned long end;
	int segment_type;
};

static ssize_t dcssblk_add_store(struct device * dev, struct device_attribute *attr, const char * buf,
				  size_t count);
static ssize_t dcssblk_remove_store(struct device * dev, struct device_attribute *attr, const char * buf,
				  size_t count);

static DEVICE_ATTR(add, S_IWUSR, NULL, dcssblk_add_store);
static DEVICE_ATTR(remove, S_IWUSR, NULL, dcssblk_remove_store);

static struct device *dcssblk_root_dev;

static LIST_HEAD(dcssblk_devices);
static struct rw_semaphore dcssblk_devices_sem;

/*
 * release function for segment device.
 */
static void
dcssblk_release_segment(struct device *dev)
{
	struct dcssblk_dev_info *dev_info;
	struct segment_info *entry, *temp;

	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
	/* free the per-segment records before the device itself */
	list_for_each_entry_safe(entry, temp, &dev_info->seg_list, lh) {
		list_del(&entry->lh);
		kfree(entry);
	}
	kfree(dev_info);
	module_put(THIS_MODULE);
}

/*
 * get a minor number. needs to be called with
 * down_write(&dcssblk_devices_sem) and the
 * device needs to be enqueued before the semaphore is
 * freed.
 */
static int
dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info)
{
	int minor, found;
	struct dcssblk_dev_info *entry;

	if (dev_info == NULL)
		return -EINVAL;
	/* linear scan for the first minor no existing device uses */
	for (minor = 0; minor < (1<<MINORBITS); minor++) {
		found = 0;
		// test if minor available
		list_for_each_entry(entry, &dcssblk_devices, lh)
			if (minor == entry->gd->first_minor)
				found++;
		if (!found) break; // got unused minor
	}
	if (found)
		return -EBUSY;
	dev_info->gd->first_minor = minor;
	return 0;
}

/*
 * get the struct dcssblk_dev_info from dcssblk_devices
 * for the given name.
 * down_read(&dcssblk_devices_sem) must be held.
 */
static struct dcssblk_dev_info *
dcssblk_get_device_by_name(char *name)
{
	struct dcssblk_dev_info *entry;

	list_for_each_entry(entry, &dcssblk_devices, lh) {
		if (!strcmp(name, entry->segment_name)) {
			return entry;
		}
	}
	return NULL;
}

/*
 * get the struct segment_info from seg_list
 * for the given name.
 * down_read(&dcssblk_devices_sem) must be held.
 */
static struct segment_info *
dcssblk_get_segment_by_name(char *name)
{
	struct dcssblk_dev_info *dev_info;
	struct segment_info *entry;

	/* search across every device's segment list */
	list_for_each_entry(dev_info, &dcssblk_devices, lh) {
		list_for_each_entry(entry, &dev_info->seg_list, lh) {
			if (!strcmp(name, entry->segment_name))
				return entry;
		}
	}
	return NULL;
}

/*
 * get the highest address of the multi-segment block.
 */
static unsigned long
dcssblk_find_highest_addr(struct dcssblk_dev_info *dev_info)
{
	unsigned long highest_addr;
	struct segment_info *entry;

	highest_addr = 0;
	list_for_each_entry(entry, &dev_info->seg_list, lh) {
		if (highest_addr < entry->end)
			highest_addr = entry->end;
	}
	return highest_addr;
}

/*
 * get the lowest address of the multi-segment block.
 */
static unsigned long
dcssblk_find_lowest_addr(struct dcssblk_dev_info *dev_info)
{
	int set_first;
	unsigned long lowest_addr;
	struct segment_info *entry;

	/* set_first distinguishes "no entry seen yet" from a real 0 start */
	set_first = 0;
	lowest_addr = 0;
	list_for_each_entry(entry, &dev_info->seg_list, lh) {
		if (set_first == 0) {
			lowest_addr = entry->start;
			set_first = 1;
		} else {
			if (lowest_addr > entry->start)
				lowest_addr = entry->start;
		}
	}
	return lowest_addr;
}

/*
 * Check continuity of segments.
*/ static int dcssblk_is_continuous(struct dcssblk_dev_info *dev_info) { int i, j, rc; struct segment_info *sort_list, *entry, temp; if (dev_info->num_of_segments <= 1) return 0; sort_list = kzalloc( sizeof(struct segment_info) * dev_info->num_of_segments, GFP_KERNEL); if (sort_list == NULL) return -ENOMEM; i = 0; list_for_each_entry(entry, &dev_info->seg_list, lh) { memcpy(&sort_list[i], entry, sizeof(struct segment_info)); i++; } /* sort segments */ for (i = 0; i < dev_info->num_of_segments; i++) for (j = 0; j < dev_info->num_of_segments; j++) if (sort_list[j].start > sort_list[i].start) { memcpy(&temp, &sort_list[i], sizeof(struct segment_info)); memcpy(&sort_list[i], &sort_list[j], sizeof(struct segment_info)); memcpy(&sort_list[j], &temp, sizeof(struct segment_info)); } /* check continuity */ for (i = 0; i < dev_info->num_of_segments - 1; i++) { if ((sort_list[i].end + 1) != sort_list[i+1].start) { pr_err("Adjacent DCSSs %s and %s are not " "contiguous\n", sort_list[i].segment_name, sort_list[i+1].segment_name); rc = -EINVAL; goto out; } /* EN and EW are allowed in a block device */ if (sort_list[i].segment_type != sort_list[i+1].segment_type) { if (!(sort_list[i].segment_type & SEGMENT_EXCLUSIVE) || (sort_list[i].segment_type == SEG_TYPE_ER) || !(sort_list[i+1].segment_type & SEGMENT_EXCLUSIVE) || (sort_list[i+1].segment_type == SEG_TYPE_ER)) { pr_err("DCSS %s and DCSS %s have " "incompatible types\n", sort_list[i].segment_name, sort_list[i+1].segment_name); rc = -EINVAL; goto out; } } } rc = 0; out: kfree(sort_list); return rc; } /* * Load a segment */ static int dcssblk_load_segment(char *name, struct segment_info **seg_info) { int rc; /* already loaded? 
*/ down_read(&dcssblk_devices_sem); *seg_info = dcssblk_get_segment_by_name(name); up_read(&dcssblk_devices_sem); if (*seg_info != NULL) return -EEXIST; /* get a struct segment_info */ *seg_info = kzalloc(sizeof(struct segment_info), GFP_KERNEL); if (*seg_info == NULL) return -ENOMEM; strcpy((*seg_info)->segment_name, name); /* load the segment */ rc = segment_load(name, SEGMENT_SHARED, &(*seg_info)->start, &(*seg_info)->end); if (rc < 0) { segment_warning(rc, (*seg_info)->segment_name); kfree(*seg_info); } else { INIT_LIST_HEAD(&(*seg_info)->lh); (*seg_info)->segment_type = rc; } return rc; } static void dcssblk_unregister_callback(struct device *dev) { device_unregister(dev); put_device(dev); } /* * device attribute for switching shared/nonshared (exclusive) * operation (show + store) */ static ssize_t dcssblk_shared_show(struct device *dev, struct device_attribute *attr, char *buf) { struct dcssblk_dev_info *dev_info; dev_info = container_of(dev, struct dcssblk_dev_info, dev); return sprintf(buf, dev_info->is_shared ? 
"1\n" : "0\n"); } static ssize_t dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count) { struct dcssblk_dev_info *dev_info; struct segment_info *entry, *temp; int rc; if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) return -EINVAL; down_write(&dcssblk_devices_sem); dev_info = container_of(dev, struct dcssblk_dev_info, dev); if (atomic_read(&dev_info->use_count)) { rc = -EBUSY; goto out; } if (inbuf[0] == '1') { /* reload segments in shared mode */ list_for_each_entry(entry, &dev_info->seg_list, lh) { rc = segment_modify_shared(entry->segment_name, SEGMENT_SHARED); if (rc < 0) { BUG_ON(rc == -EINVAL); if (rc != -EAGAIN) goto removeseg; } } dev_info->is_shared = 1; switch (dev_info->segment_type) { case SEG_TYPE_SR: case SEG_TYPE_ER: case SEG_TYPE_SC: set_disk_ro(dev_info->gd, 1); } } else if (inbuf[0] == '0') { /* reload segments in exclusive mode */ if (dev_info->segment_type == SEG_TYPE_SC) { pr_err("DCSS %s is of type SC and cannot be " "loaded as exclusive-writable\n", dev_info->segment_name); rc = -EINVAL; goto out; } list_for_each_entry(entry, &dev_info->seg_list, lh) { rc = segment_modify_shared(entry->segment_name, SEGMENT_EXCLUSIVE); if (rc < 0) { BUG_ON(rc == -EINVAL); if (rc != -EAGAIN) goto removeseg; } } dev_info->is_shared = 0; set_disk_ro(dev_info->gd, 0); } else { rc = -EINVAL; goto out; } rc = count; goto out; removeseg: pr_err("DCSS device %s is removed after a failed access mode " "change\n", dev_info->segment_name); temp = entry; list_for_each_entry(entry, &dev_info->seg_list, lh) { if (entry != temp) segment_unload(entry->segment_name); } list_del(&dev_info->lh); del_gendisk(dev_info->gd); blk_cleanup_queue(dev_info->dcssblk_queue); dev_info->gd->queue = NULL; put_disk(dev_info->gd); rc = device_schedule_callback(dev, dcssblk_unregister_callback); out: up_write(&dcssblk_devices_sem); return rc; } static DEVICE_ATTR(shared, S_IWUSR | S_IRUSR, dcssblk_shared_show, 
dcssblk_shared_store); /* * device attribute for save operation on current copy * of the segment. If the segment is busy, saving will * become pending until it gets released, which can be * undone by storing a non-true value to this entry. * (show + store) */ static ssize_t dcssblk_save_show(struct device *dev, struct device_attribute *attr, char *buf) { struct dcssblk_dev_info *dev_info; dev_info = container_of(dev, struct dcssblk_dev_info, dev); return sprintf(buf, dev_info->save_pending ? "1\n" : "0\n"); } static ssize_t dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count) { struct dcssblk_dev_info *dev_info; struct segment_info *entry; if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) return -EINVAL; dev_info = container_of(dev, struct dcssblk_dev_info, dev); down_write(&dcssblk_devices_sem); if (inbuf[0] == '1') { if (atomic_read(&dev_info->use_count) == 0) { // device is idle => we save immediately pr_info("All DCSSs that map to device %s are " "saved\n", dev_info->segment_name); list_for_each_entry(entry, &dev_info->seg_list, lh) { segment_save(entry->segment_name); } } else { // device is busy => we save it when it becomes // idle in dcssblk_release pr_info("Device %s is in use, its DCSSs will be " "saved when it becomes idle\n", dev_info->segment_name); dev_info->save_pending = 1; } } else if (inbuf[0] == '0') { if (dev_info->save_pending) { // device is busy & the user wants to undo his save // request dev_info->save_pending = 0; pr_info("A pending save request for device %s " "has been canceled\n", dev_info->segment_name); } } else { up_write(&dcssblk_devices_sem); return -EINVAL; } up_write(&dcssblk_devices_sem); return count; } static DEVICE_ATTR(save, S_IWUSR | S_IRUSR, dcssblk_save_show, dcssblk_save_store); /* * device attribute for showing all segments in a device */ static ssize_t dcssblk_seglist_show(struct device *dev, struct device_attribute *attr, char *buf) { int i; struct 
dcssblk_dev_info *dev_info; struct segment_info *entry; down_read(&dcssblk_devices_sem); dev_info = container_of(dev, struct dcssblk_dev_info, dev); i = 0; buf[0] = '\0'; list_for_each_entry(entry, &dev_info->seg_list, lh) { strcpy(&buf[i], entry->segment_name); i += strlen(entry->segment_name); buf[i] = '\n'; i++; } up_read(&dcssblk_devices_sem); return i; } static DEVICE_ATTR(seglist, S_IRUSR, dcssblk_seglist_show, NULL); static struct attribute *dcssblk_dev_attrs[] = { &dev_attr_shared.attr, &dev_attr_save.attr, &dev_attr_seglist.attr, NULL, }; static struct attribute_group dcssblk_dev_attr_group = { .attrs = dcssblk_dev_attrs, }; static const struct attribute_group *dcssblk_dev_attr_groups[] = { &dcssblk_dev_attr_group, NULL, }; /* * device attribute for adding devices */ static ssize_t dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int rc, i, j, num_of_segments; struct dcssblk_dev_info *dev_info; struct segment_info *seg_info, *temp; char *local_buf; unsigned long seg_byte_size; dev_info = NULL; seg_info = NULL; if (dev != dcssblk_root_dev) { rc = -EINVAL; goto out_nobuf; } if ((count < 1) || (buf[0] == '\0') || (buf[0] == '\n')) { rc = -ENAMETOOLONG; goto out_nobuf; } local_buf = kmalloc(count + 1, GFP_KERNEL); if (local_buf == NULL) { rc = -ENOMEM; goto out_nobuf; } /* * parse input */ num_of_segments = 0; for (i = 0; ((buf[i] != '\0') && (buf[i] != '\n') && i < count); i++) { for (j = i; (buf[j] != ':') && (buf[j] != '\0') && (buf[j] != '\n') && j < count; j++) { local_buf[j-i] = toupper(buf[j]); } local_buf[j-i] = '\0'; if (((j - i) == 0) || ((j - i) > 8)) { rc = -ENAMETOOLONG; goto seg_list_del; } rc = dcssblk_load_segment(local_buf, &seg_info); if (rc < 0) goto seg_list_del; /* * get a struct dcssblk_dev_info */ if (num_of_segments == 0) { dev_info = kzalloc(sizeof(struct dcssblk_dev_info), GFP_KERNEL); if (dev_info == NULL) { rc = -ENOMEM; goto out; } strcpy(dev_info->segment_name, local_buf); 
dev_info->segment_type = seg_info->segment_type; INIT_LIST_HEAD(&dev_info->seg_list); } list_add_tail(&seg_info->lh, &dev_info->seg_list); num_of_segments++; i = j; if ((buf[j] == '\0') || (buf[j] == '\n')) break; } /* no trailing colon at the end of the input */ if ((i > 0) && (buf[i-1] == ':')) { rc = -ENAMETOOLONG; goto seg_list_del; } strlcpy(local_buf, buf, i + 1); dev_info->num_of_segments = num_of_segments; rc = dcssblk_is_continuous(dev_info); if (rc < 0) goto seg_list_del; dev_info->start = dcssblk_find_lowest_addr(dev_info); dev_info->end = dcssblk_find_highest_addr(dev_info); dev_set_name(&dev_info->dev, dev_info->segment_name); dev_info->dev.release = dcssblk_release_segment; dev_info->dev.groups = dcssblk_dev_attr_groups; INIT_LIST_HEAD(&dev_info->lh); dev_info->gd = alloc_disk(DCSSBLK_MINORS_PER_DISK); if (dev_info->gd == NULL) { rc = -ENOMEM; goto seg_list_del; } dev_info->gd->major = dcssblk_major; dev_info->gd->fops = &dcssblk_devops; dev_info->dcssblk_queue = blk_alloc_queue(GFP_KERNEL); dev_info->gd->queue = dev_info->dcssblk_queue; dev_info->gd->private_data = dev_info; dev_info->gd->driverfs_dev = &dev_info->dev; blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request); blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096); seg_byte_size = (dev_info->end - dev_info->start + 1); set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors pr_info("Loaded %s with total size %lu bytes and capacity %lu " "sectors\n", local_buf, seg_byte_size, seg_byte_size >> 9); dev_info->save_pending = 0; dev_info->is_shared = 1; dev_info->dev.parent = dcssblk_root_dev; /* *get minor, add to list */ down_write(&dcssblk_devices_sem); if (dcssblk_get_segment_by_name(local_buf)) { rc = -EEXIST; goto release_gd; } rc = dcssblk_assign_free_minor(dev_info); if (rc) goto release_gd; sprintf(dev_info->gd->disk_name, "dcssblk%d", dev_info->gd->first_minor); list_add_tail(&dev_info->lh, &dcssblk_devices); if (!try_module_get(THIS_MODULE)) { rc = 
-ENODEV; goto dev_list_del; } /* * register the device */ rc = device_register(&dev_info->dev); if (rc) goto put_dev; get_device(&dev_info->dev); add_disk(dev_info->gd); switch (dev_info->segment_type) { case SEG_TYPE_SR: case SEG_TYPE_ER: case SEG_TYPE_SC: set_disk_ro(dev_info->gd,1); break; default: set_disk_ro(dev_info->gd,0); break; } up_write(&dcssblk_devices_sem); rc = count; goto out; put_dev: list_del(&dev_info->lh); blk_cleanup_queue(dev_info->dcssblk_queue); dev_info->gd->queue = NULL; put_disk(dev_info->gd); list_for_each_entry(seg_info, &dev_info->seg_list, lh) { segment_unload(seg_info->segment_name); } put_device(&dev_info->dev); up_write(&dcssblk_devices_sem); goto out; dev_list_del: list_del(&dev_info->lh); release_gd: blk_cleanup_queue(dev_info->dcssblk_queue); dev_info->gd->queue = NULL; put_disk(dev_info->gd); up_write(&dcssblk_devices_sem); seg_list_del: if (dev_info == NULL) goto out; list_for_each_entry_safe(seg_info, temp, &dev_info->seg_list, lh) { list_del(&seg_info->lh); segment_unload(seg_info->segment_name); kfree(seg_info); } kfree(dev_info); out: kfree(local_buf); out_nobuf: return rc; } /* * device attribute for removing devices */ static ssize_t dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct dcssblk_dev_info *dev_info; struct segment_info *entry; int rc, i; char *local_buf; if (dev != dcssblk_root_dev) { return -EINVAL; } local_buf = kmalloc(count + 1, GFP_KERNEL); if (local_buf == NULL) { return -ENOMEM; } /* * parse input */ for (i = 0; ((*(buf+i)!='\0') && (*(buf+i)!='\n') && i < count); i++) { local_buf[i] = toupper(buf[i]); } local_buf[i] = '\0'; if ((i == 0) || (i > 8)) { rc = -ENAMETOOLONG; goto out_buf; } down_write(&dcssblk_devices_sem); dev_info = dcssblk_get_device_by_name(local_buf); if (dev_info == NULL) { up_write(&dcssblk_devices_sem); pr_warning("Device %s cannot be removed because it is not a " "known device\n", local_buf); rc = -ENODEV; goto out_buf; } 
if (atomic_read(&dev_info->use_count) != 0) { up_write(&dcssblk_devices_sem); pr_warning("Device %s cannot be removed while it is in " "use\n", local_buf); rc = -EBUSY; goto out_buf; } list_del(&dev_info->lh); del_gendisk(dev_info->gd); blk_cleanup_queue(dev_info->dcssblk_queue); dev_info->gd->queue = NULL; put_disk(dev_info->gd); device_unregister(&dev_info->dev); /* unload all related segments */ list_for_each_entry(entry, &dev_info->seg_list, lh) segment_unload(entry->segment_name); put_device(&dev_info->dev); up_write(&dcssblk_devices_sem); rc = count; out_buf: kfree(local_buf); return rc; } static int dcssblk_open(struct block_device *bdev, fmode_t mode) { struct dcssblk_dev_info *dev_info; int rc; dev_info = bdev->bd_disk->private_data; if (NULL == dev_info) { rc = -ENODEV; goto out; } atomic_inc(&dev_info->use_count); bdev->bd_block_size = 4096; rc = 0; out: return rc; } static void dcssblk_release(struct gendisk *disk, fmode_t mode) { struct dcssblk_dev_info *dev_info = disk->private_data; struct segment_info *entry; if (!dev_info) { WARN_ON(1); return; } down_write(&dcssblk_devices_sem); if (atomic_dec_and_test(&dev_info->use_count) && (dev_info->save_pending)) { pr_info("Device %s has become idle and is being saved " "now\n", dev_info->segment_name); list_for_each_entry(entry, &dev_info->seg_list, lh) { segment_save(entry->segment_name); } dev_info->save_pending = 0; } up_write(&dcssblk_devices_sem); } static void dcssblk_make_request(struct request_queue *q, struct bio *bio) { struct dcssblk_dev_info *dev_info; struct bio_vec bvec; struct bvec_iter iter; unsigned long index; unsigned long page_addr; unsigned long source_addr; unsigned long bytes_done; bytes_done = 0; dev_info = bio->bi_bdev->bd_disk->private_data; if (dev_info == NULL) goto fail; if ((bio->bi_iter.bi_sector & 7) != 0 || (bio->bi_iter.bi_size & 4095) != 0) /* Request is not page-aligned. 
*/ goto fail; if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) { /* Request beyond end of DCSS segment. */ goto fail; } /* verify data transfer direction */ if (dev_info->is_shared) { switch (dev_info->segment_type) { case SEG_TYPE_SR: case SEG_TYPE_ER: case SEG_TYPE_SC: /* cannot write to these segments */ if (bio_data_dir(bio) == WRITE) { pr_warning("Writing to %s failed because it " "is a read-only device\n", dev_name(&dev_info->dev)); goto fail; } } } index = (bio->bi_iter.bi_sector >> 3); bio_for_each_segment(bvec, bio, iter) { page_addr = (unsigned long) page_address(bvec.bv_page) + bvec.bv_offset; source_addr = dev_info->start + (index<<12) + bytes_done; if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0) // More paranoia. goto fail; if (bio_data_dir(bio) == READ) { memcpy((void*)page_addr, (void*)source_addr, bvec.bv_len); } else { memcpy((void*)source_addr, (void*)page_addr, bvec.bv_len); } bytes_done += bvec.bv_len; } bio_endio(bio, 0); return; fail: bio_io_error(bio); } static int dcssblk_direct_access (struct block_device *bdev, sector_t secnum, void **kaddr, unsigned long *pfn) { struct dcssblk_dev_info *dev_info; unsigned long pgoff; dev_info = bdev->bd_disk->private_data; if (!dev_info) return -ENODEV; if (secnum % (PAGE_SIZE/512)) return -EINVAL; pgoff = secnum / (PAGE_SIZE / 512); if ((pgoff+1)*PAGE_SIZE-1 > dev_info->end - dev_info->start) return -ERANGE; *kaddr = (void *) (dev_info->start+pgoff*PAGE_SIZE); *pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT; return 0; } static void dcssblk_check_params(void) { int rc, i, j, k; char buf[DCSSBLK_PARM_LEN + 1]; struct dcssblk_dev_info *dev_info; for (i = 0; (i < DCSSBLK_PARM_LEN) && (dcssblk_segments[i] != '\0'); i++) { for (j = i; (dcssblk_segments[j] != ',') && (dcssblk_segments[j] != '\0') && (dcssblk_segments[j] != '(') && (j < DCSSBLK_PARM_LEN); j++) { buf[j-i] = dcssblk_segments[j]; } buf[j-i] = '\0'; rc = dcssblk_add_store(dcssblk_root_dev, NULL, buf, j-i); if ((rc >= 0) 
&& (dcssblk_segments[j] == '(')) { for (k = 0; (buf[k] != ':') && (buf[k] != '\0'); k++) buf[k] = toupper(buf[k]); buf[k] = '\0'; if (!strncmp(&dcssblk_segments[j], "(local)", 7)) { down_read(&dcssblk_devices_sem); dev_info = dcssblk_get_device_by_name(buf); up_read(&dcssblk_devices_sem); if (dev_info) dcssblk_shared_store(&dev_info->dev, NULL, "0\n", 2); } } while ((dcssblk_segments[j] != ',') && (dcssblk_segments[j] != '\0')) { j++; } if (dcssblk_segments[j] == '\0') break; i = j; } } /* * Suspend / Resume */ static int dcssblk_freeze(struct device *dev) { struct dcssblk_dev_info *dev_info; int rc = 0; list_for_each_entry(dev_info, &dcssblk_devices, lh) { switch (dev_info->segment_type) { case SEG_TYPE_SR: case SEG_TYPE_ER: case SEG_TYPE_SC: if (!dev_info->is_shared) rc = -EINVAL; break; default: rc = -EINVAL; break; } if (rc) break; } if (rc) pr_err("Suspending the system failed because DCSS device %s " "is writable\n", dev_info->segment_name); return rc; } static int dcssblk_restore(struct device *dev) { struct dcssblk_dev_info *dev_info; struct segment_info *entry; unsigned long start, end; int rc = 0; list_for_each_entry(dev_info, &dcssblk_devices, lh) { list_for_each_entry(entry, &dev_info->seg_list, lh) { segment_unload(entry->segment_name); rc = segment_load(entry->segment_name, SEGMENT_SHARED, &start, &end); if (rc < 0) { // TODO in_use check ? 
segment_warning(rc, entry->segment_name); goto out_panic; } if (start != entry->start || end != entry->end) { pr_err("The address range of DCSS %s changed " "while the system was suspended\n", entry->segment_name); goto out_panic; } } } return 0; out_panic: panic("fatal dcssblk resume error\n"); } static int dcssblk_thaw(struct device *dev) { return 0; } static const struct dev_pm_ops dcssblk_pm_ops = { .freeze = dcssblk_freeze, .thaw = dcssblk_thaw, .restore = dcssblk_restore, }; static struct platform_driver dcssblk_pdrv = { .driver = { .name = "dcssblk", .owner = THIS_MODULE, .pm = &dcssblk_pm_ops, }, }; static struct platform_device *dcssblk_pdev; /* * The init/exit functions. */ static void __exit dcssblk_exit(void) { platform_device_unregister(dcssblk_pdev); platform_driver_unregister(&dcssblk_pdrv); root_device_unregister(dcssblk_root_dev); unregister_blkdev(dcssblk_major, DCSSBLK_NAME); } static int __init dcssblk_init(void) { int rc; rc = platform_driver_register(&dcssblk_pdrv); if (rc) return rc; dcssblk_pdev = platform_device_register_simple("dcssblk", -1, NULL, 0); if (IS_ERR(dcssblk_pdev)) { rc = PTR_ERR(dcssblk_pdev); goto out_pdrv; } dcssblk_root_dev = root_device_register("dcssblk"); if (IS_ERR(dcssblk_root_dev)) { rc = PTR_ERR(dcssblk_root_dev); goto out_pdev; } rc = device_create_file(dcssblk_root_dev, &dev_attr_add); if (rc) goto out_root; rc = device_create_file(dcssblk_root_dev, &dev_attr_remove); if (rc) goto out_root; rc = register_blkdev(0, DCSSBLK_NAME); if (rc < 0) goto out_root; dcssblk_major = rc; init_rwsem(&dcssblk_devices_sem); dcssblk_check_params(); return 0; out_root: root_device_unregister(dcssblk_root_dev); out_pdev: platform_device_unregister(dcssblk_pdev); out_pdrv: platform_driver_unregister(&dcssblk_pdrv); return rc; } module_init(dcssblk_init); module_exit(dcssblk_exit); module_param_string(segments, dcssblk_segments, DCSSBLK_PARM_LEN, 0444); MODULE_PARM_DESC(segments, "Name of DCSS segment(s) to be loaded, " 
"comma-separated list, names in each set separated " "by commas are separated by colons, each set contains " "names of contiguous segments and each name max. 8 chars.\n" "Adding \"(local)\" to the end of each set equals echoing 0 " "to /sys/devices/dcssblk/<device name>/shared after loading " "the contiguous segments - \n" "e.g. segments=\"mydcss1,mydcss2:mydcss3,mydcss4(local)\""); MODULE_LICENSE("GPL");
gpl-2.0
dantes1984/linux
drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
502
3515
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include "dmacnv50.h" #include "rootnv50.h" #include <core/ramht.h> #include <subdev/timer.h> int gf119_disp_dmac_bind(struct nv50_disp_dmac *chan, struct nvkm_object *object, u32 handle) { return nvkm_ramht_insert(chan->base.root->ramht, object, chan->base.chid, -9, handle, chan->base.chid << 27 | 0x00000001); } static void gf119_disp_dmac_fini(struct nv50_disp_dmac *chan) { struct nv50_disp *disp = chan->base.root->disp; struct nvkm_subdev *subdev = &disp->base.engine.subdev; struct nvkm_device *device = subdev->device; int chid = chan->base.chid; /* deactivate channel */ nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000); nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000); if (nvkm_msec(device, 2000, if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x001e0000)) break; ) < 0) { nvkm_error(subdev, "ch %d fini: %08x\n", chid, nvkm_rd32(device, 0x610490 + (chid * 0x10))); } /* disable error reporting and completion notification */ nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000); nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000); } static int gf119_disp_dmac_init(struct nv50_disp_dmac *chan) { struct nv50_disp *disp = chan->base.root->disp; struct nvkm_subdev *subdev = &disp->base.engine.subdev; struct nvkm_device *device = subdev->device; int chid = chan->base.chid; /* enable error reporting */ nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid); /* initialise channel for dma command submission */ nvkm_wr32(device, 0x610494 + (chid * 0x0010), chan->push); nvkm_wr32(device, 0x610498 + (chid * 0x0010), 0x00010000); nvkm_wr32(device, 0x61049c + (chid * 0x0010), 0x00000001); nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010); nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000); nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013); /* wait for it to go inactive */ if (nvkm_msec(device, 2000, if (!(nvkm_rd32(device, 0x610490 + (chid * 
0x10)) & 0x80000000)) break; ) < 0) { nvkm_error(subdev, "ch %d init: %08x\n", chid, nvkm_rd32(device, 0x610490 + (chid * 0x10))); return -EBUSY; } return 0; } const struct nv50_disp_dmac_func gf119_disp_dmac_func = { .init = gf119_disp_dmac_init, .fini = gf119_disp_dmac_fini, .bind = gf119_disp_dmac_bind, };
gpl-2.0
pershoot/vision-2635
Documentation/lguest/lguest.c
758
59145
/*P:100 * This is the Launcher code, a simple program which lays out the "physical" * memory for the new Guest by mapping the kernel image and the virtual * devices, then opens /dev/lguest to tell the kernel about the Guest and * control it. :*/ #define _LARGEFILE64_SOURCE #define _GNU_SOURCE #include <stdio.h> #include <string.h> #include <unistd.h> #include <err.h> #include <stdint.h> #include <stdlib.h> #include <elf.h> #include <sys/mman.h> #include <sys/param.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/wait.h> #include <sys/eventfd.h> #include <fcntl.h> #include <stdbool.h> #include <errno.h> #include <ctype.h> #include <sys/socket.h> #include <sys/ioctl.h> #include <sys/time.h> #include <time.h> #include <netinet/in.h> #include <net/if.h> #include <linux/sockios.h> #include <linux/if_tun.h> #include <sys/uio.h> #include <termios.h> #include <getopt.h> #include <assert.h> #include <sched.h> #include <limits.h> #include <stddef.h> #include <signal.h> #include "linux/lguest_launcher.h" #include "linux/virtio_config.h" #include "linux/virtio_net.h" #include "linux/virtio_blk.h" #include "linux/virtio_console.h" #include "linux/virtio_rng.h" #include "linux/virtio_ring.h" #include "asm/bootparam.h" /*L:110 * We can ignore the 42 include files we need for this program, but I do want * to draw attention to the use of kernel-style types. * * As Linus said, "C is a Spartan language, and so should your naming be." I * like these abbreviations, so we define them here. Note that u64 is always * unsigned long long, which works on all Linux systems: this means that we can * use %llu in printf for any u64. */ typedef unsigned long long u64; typedef uint32_t u32; typedef uint16_t u16; typedef uint8_t u8; /*:*/ #define PAGE_PRESENT 0x7 /* Present, RW, Execute */ #define BRIDGE_PFX "bridge:" #ifndef SIOCBRADDIF #define SIOCBRADDIF 0x89a2 /* add interface to bridge */ #endif /* We can have up to 256 pages for devices. 
*/ #define DEVICE_PAGES 256 /* This will occupy 3 pages: it must be a power of 2. */ #define VIRTQUEUE_NUM 256 /*L:120 * verbose is both a global flag and a macro. The C preprocessor allows * this, and although I wouldn't recommend it, it works quite nicely here. */ static bool verbose; #define verbose(args...) \ do { if (verbose) printf(args); } while(0) /*:*/ /* The pointer to the start of guest memory. */ static void *guest_base; /* The maximum guest physical address allowed, and maximum possible. */ static unsigned long guest_limit, guest_max; /* The /dev/lguest file descriptor. */ static int lguest_fd; /* a per-cpu variable indicating whose vcpu is currently running */ static unsigned int __thread cpu_id; /* This is our list of devices. */ struct device_list { /* Counter to assign interrupt numbers. */ unsigned int next_irq; /* Counter to print out convenient device numbers. */ unsigned int device_num; /* The descriptor page for the devices. */ u8 *descpage; /* A single linked list of devices. */ struct device *dev; /* And a pointer to the last device for easy append. */ struct device *lastdev; }; /* The list of Guest devices, based on command line arguments. */ static struct device_list devices; /* The device structure describes a single device. */ struct device { /* The linked-list pointer. */ struct device *next; /* The device's descriptor, as mapped into the Guest. */ struct lguest_device_desc *desc; /* We can't trust desc values once Guest has booted: we use these. */ unsigned int feature_len; unsigned int num_vq; /* The name of this device, for --verbose. */ const char *name; /* Any queues attached to this device */ struct virtqueue *vq; /* Is it operational */ bool running; /* Does Guest want an intrrupt on empty? */ bool irq_on_empty; /* Device-specific data. */ void *priv; }; /* The virtqueue structure describes a queue attached to a device. */ struct virtqueue { struct virtqueue *next; /* Which device owns me. 
*/ struct device *dev; /* The configuration for this queue. */ struct lguest_vqconfig config; /* The actual ring of buffers. */ struct vring vring; /* Last available index we saw. */ u16 last_avail_idx; /* How many are used since we sent last irq? */ unsigned int pending_used; /* Eventfd where Guest notifications arrive. */ int eventfd; /* Function for the thread which is servicing this virtqueue. */ void (*service)(struct virtqueue *vq); pid_t thread; }; /* Remember the arguments to the program so we can "reboot" */ static char **main_args; /* The original tty settings to restore on exit. */ static struct termios orig_term; /* * We have to be careful with barriers: our devices are all run in separate * threads and so we need to make sure that changes visible to the Guest happen * in precise order. */ #define wmb() __asm__ __volatile__("" : : : "memory") #define mb() __asm__ __volatile__("" : : : "memory") /* * Convert an iovec element to the given type. * * This is a fairly ugly trick: we need to know the size of the type and * alignment requirement to check the pointer is kosher. It's also nice to * have the name of the type in case we report failure. * * Typing those three things all the time is cumbersome and error prone, so we * have a macro which sets them all up and passes to the real function. */ #define convert(iov, type) \ ((type *)_convert((iov), sizeof(type), __alignof__(type), #type)) static void *_convert(struct iovec *iov, size_t size, size_t align, const char *name) { if (iov->iov_len != size) errx(1, "Bad iovec size %zu for %s", iov->iov_len, name); if ((unsigned long)iov->iov_base % align != 0) errx(1, "Bad alignment %p for %s", iov->iov_base, name); return iov->iov_base; } /* Wrapper for the last available index. Makes it easier to change. */ #define lg_last_avail(vq) ((vq)->last_avail_idx) /* * The virtio configuration space is defined to be little-endian. x86 is * little-endian too, but it's nice to be explicit so we have these helpers. 
 */
#define cpu_to_le16(v16) (v16)
#define cpu_to_le32(v32) (v32)
#define cpu_to_le64(v64) (v64)
#define le16_to_cpu(v16) (v16)
#define le32_to_cpu(v32) (v32)
#define le64_to_cpu(v64) (v64)

/* Is this iovec empty? */
static bool iov_empty(const struct iovec iov[], unsigned int num_iov)
{
	unsigned int i;

	for (i = 0; i < num_iov; i++)
		if (iov[i].iov_len)
			return false;
	return true;
}

/*
 * Take len bytes from the front of this iovec.  The caller must have at
 * least len bytes in total across the elements (asserted at the end).
 */
static void iov_consume(struct iovec iov[], unsigned num_iov, unsigned len)
{
	unsigned int i;

	for (i = 0; i < num_iov; i++) {
		unsigned int used;

		used = iov[i].iov_len < len ? iov[i].iov_len : len;
		iov[i].iov_base += used;
		iov[i].iov_len -= used;
		len -= used;
	}
	assert(len == 0);
}

/* The device virtqueue descriptors are followed by feature bitmasks. */
static u8 *get_feature_bits(struct device *dev)
{
	return (u8 *)(dev->desc + 1)
		+ dev->num_vq * sizeof(struct lguest_vqconfig);
}

/*L:100
 * The Launcher code itself takes us out into userspace, that scary place where
 * pointers run wild and free!  Unfortunately, like most userspace programs,
 * it's quite boring (which is why everyone likes to hack on the kernel!).
 * Perhaps if you make up an Lguest Drinking Game at this point, it will get
 * you through this section.  Or, maybe not.
 *
 * The Launcher sets up a big chunk of memory to be the Guest's "physical"
 * memory and stores it in "guest_base".  In other words, Guest physical ==
 * Launcher virtual with an offset.
 *
 * This can be tough to get your head around, but usually it just means that we
 * use these trivial conversion functions when the Guest gives us its
 * "physical" addresses:
 */
static void *from_guest_phys(unsigned long addr)
{
	return guest_base + addr;
}

static unsigned long to_guest_phys(const void *addr)
{
	return (addr - guest_base);
}

/*L:130
 * Loading the Kernel.
 *
 * We start with a couple of simple helper routines.
 * open_or_die() avoids
 * error-checking code cluttering the callers:
 */
static int open_or_die(const char *name, int flags)
{
	int fd = open(name, flags);
	if (fd < 0)
		err(1, "Failed to open %s", name);
	return fd;
}

/* map_zeroed_pages() takes a number of pages. */
static void *map_zeroed_pages(unsigned int num)
{
	int fd = open_or_die("/dev/zero", O_RDONLY);
	void *addr;

	/*
	 * We use a private mapping (ie. if we write to the page, it will be
	 * copied).
	 */
	addr = mmap(NULL, getpagesize() * num,
		    PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, fd, 0);
	if (addr == MAP_FAILED)
		err(1, "Mmapping %u pages of /dev/zero", num);

	/*
	 * One neat mmap feature is that you can close the fd, and it
	 * stays mapped.
	 */
	close(fd);

	return addr;
}

/* Get some more pages for a device: bumps guest_limit, dies if we run out. */
static void *get_pages(unsigned int num)
{
	void *addr = from_guest_phys(guest_limit);

	guest_limit += num * getpagesize();
	if (guest_limit > guest_max)
		errx(1, "Not enough memory for devices");
	return addr;
}

/*
 * This routine is used to load the kernel or initrd.  It tries mmap, but if
 * that fails (Plan 9's kernel file isn't nicely aligned on page boundaries),
 * it falls back to reading the memory in.
 */
static void map_at(int fd, void *addr, unsigned long offset, unsigned long len)
{
	ssize_t r;

	/*
	 * We map writable even though some segments are marked read-only.
	 * The kernel really wants to be writable: it patches its own
	 * instructions.
	 *
	 * MAP_PRIVATE means that the page won't be copied until a write is
	 * done to it.  This allows us to share untouched memory between
	 * Guests.
	 */
	if (mmap(addr, len, PROT_READ|PROT_WRITE|PROT_EXEC,
		 MAP_FIXED|MAP_PRIVATE, fd, offset) != MAP_FAILED)
		return;

	/* pread does a seek and a read in one shot: saves a few lines. */
	r = pread(fd, addr, len, offset);
	if (r != len)
		err(1, "Reading offset %lu len %lu gave %zi", offset, len, r);
}

/*
 * This routine takes an open vmlinux image, which is in ELF, and maps it into
 * the Guest memory.
 * ELF = Executable and Linkable Format, which is the format used
 * by all modern binaries on Linux including the kernel.
 *
 * The ELF headers give *two* addresses: a physical address, and a virtual
 * address.  We use the physical address; the Guest will map itself to the
 * virtual address.
 *
 * We return the starting address.
 */
static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr)
{
	/* VLA: one program header per entry in the ELF header's table. */
	Elf32_Phdr phdr[ehdr->e_phnum];
	unsigned int i;

	/*
	 * Sanity checks on the main ELF header: an x86 executable with a
	 * reasonable number of correctly-sized program headers.
	 */
	if (ehdr->e_type != ET_EXEC
	    || ehdr->e_machine != EM_386
	    || ehdr->e_phentsize != sizeof(Elf32_Phdr)
	    || ehdr->e_phnum < 1 || ehdr->e_phnum > 65536U/sizeof(Elf32_Phdr))
		errx(1, "Malformed elf header");

	/*
	 * An ELF executable contains an ELF header and a number of "program"
	 * headers which indicate which parts ("segments") of the program to
	 * load where.
	 */

	/* We read in all the program headers at once: */
	if (lseek(elf_fd, ehdr->e_phoff, SEEK_SET) < 0)
		err(1, "Seeking to program headers");
	if (read(elf_fd, phdr, sizeof(phdr)) != sizeof(phdr))
		err(1, "Reading program headers");

	/*
	 * Try all the headers: there are usually only three.  A read-only one,
	 * a read-write one, and a "note" section which we don't load.
	 */
	for (i = 0; i < ehdr->e_phnum; i++) {
		/* If this isn't a loadable segment, we ignore it */
		if (phdr[i].p_type != PT_LOAD)
			continue;

		verbose("Section %i: size %i addr %p\n",
			i, phdr[i].p_memsz, (void *)phdr[i].p_paddr);

		/* We map this section of the file at its physical address. */
		map_at(elf_fd, from_guest_phys(phdr[i].p_paddr),
		       phdr[i].p_offset, phdr[i].p_filesz);
	}

	/* The entry point is given in the ELF header. */
	return ehdr->e_entry;
}

/*L:150
 * A bzImage, unlike an ELF file, is not meant to be loaded.  You're supposed
 * to jump into it and it will unpack itself.  We used to have to perform some
 * hairy magic because the unpacking code scared me.
* * Fortunately, Jeremy Fitzhardinge convinced me it wasn't that hard and wrote * a small patch to jump over the tricky bits in the Guest, so now we just read * the funky header so we know where in the file to load, and away we go! */ static unsigned long load_bzimage(int fd) { struct boot_params boot; int r; /* Modern bzImages get loaded at 1M. */ void *p = from_guest_phys(0x100000); /* * Go back to the start of the file and read the header. It should be * a Linux boot header (see Documentation/x86/i386/boot.txt) */ lseek(fd, 0, SEEK_SET); read(fd, &boot, sizeof(boot)); /* Inside the setup_hdr, we expect the magic "HdrS" */ if (memcmp(&boot.hdr.header, "HdrS", 4) != 0) errx(1, "This doesn't look like a bzImage to me"); /* Skip over the extra sectors of the header. */ lseek(fd, (boot.hdr.setup_sects+1) * 512, SEEK_SET); /* Now read everything into memory. in nice big chunks. */ while ((r = read(fd, p, 65536)) > 0) p += r; /* Finally, code32_start tells us where to enter the kernel. */ return boot.hdr.code32_start; } /*L:140 * Loading the kernel is easy when it's a "vmlinux", but most kernels * come wrapped up in the self-decompressing "bzImage" format. With a little * work, we can load those, too. */ static unsigned long load_kernel(int fd) { Elf32_Ehdr hdr; /* Read in the first few bytes. */ if (read(fd, &hdr, sizeof(hdr)) != sizeof(hdr)) err(1, "Reading kernel"); /* If it's an ELF file, it starts with "\177ELF" */ if (memcmp(hdr.e_ident, ELFMAG, SELFMAG) == 0) return map_elf(fd, &hdr); /* Otherwise we assume it's a bzImage, and try to load it. */ return load_bzimage(fd); } /* * This is a trivial little helper to align pages. Andi Kleen hated it because * it calls getpagesize() twice: "it's dumb code." * * Kernel guys get really het up about optimization, even when it's not * necessary. I leave this code as a reaction against that. */ static inline unsigned long page_align(unsigned long addr) { /* Add upwards and truncate downwards. 
*/ return ((addr + getpagesize()-1) & ~(getpagesize()-1)); } /*L:180 * An "initial ram disk" is a disk image loaded into memory along with the * kernel which the kernel can use to boot from without needing any drivers. * Most distributions now use this as standard: the initrd contains the code to * load the appropriate driver modules for the current machine. * * Importantly, James Morris works for RedHat, and Fedora uses initrds for its * kernels. He sent me this (and tells me when I break it). */ static unsigned long load_initrd(const char *name, unsigned long mem) { int ifd; struct stat st; unsigned long len; ifd = open_or_die(name, O_RDONLY); /* fstat() is needed to get the file size. */ if (fstat(ifd, &st) < 0) err(1, "fstat() on initrd '%s'", name); /* * We map the initrd at the top of memory, but mmap wants it to be * page-aligned, so we round the size up for that. */ len = page_align(st.st_size); map_at(ifd, from_guest_phys(mem - len), 0, st.st_size); /* * Once a file is mapped, you can close the file descriptor. It's a * little odd, but quite useful. */ close(ifd); verbose("mapped initrd %s size=%lu @ %p\n", name, len, (void*)mem-len); /* We return the initrd size. */ return len; } /*:*/ /* * Simple routine to roll all the commandline arguments together with spaces * between them. */ static void concat(char *dst, char *args[]) { unsigned int i, len = 0; for (i = 0; args[i]; i++) { if (i) { strcat(dst+len, " "); len++; } strcpy(dst+len, args[i]); len += strlen(args[i]); } /* In case it's empty. */ dst[len] = '\0'; } /*L:185 * This is where we actually tell the kernel to initialize the Guest. We * saw the arguments it expects when we looked at initialize() in lguest_user.c: * the base of Guest "physical" memory, the top physical page to allow and the * entry point for the Guest. 
*/ static void tell_kernel(unsigned long start) { unsigned long args[] = { LHREQ_INITIALIZE, (unsigned long)guest_base, guest_limit / getpagesize(), start }; verbose("Guest: %p - %p (%#lx)\n", guest_base, guest_base + guest_limit, guest_limit); lguest_fd = open_or_die("/dev/lguest", O_RDWR); if (write(lguest_fd, args, sizeof(args)) < 0) err(1, "Writing to /dev/lguest"); } /*:*/ /*L:200 * Device Handling. * * When the Guest gives us a buffer, it sends an array of addresses and sizes. * We need to make sure it's not trying to reach into the Launcher itself, so * we have a convenient routine which checks it and exits with an error message * if something funny is going on: */ static void *_check_pointer(unsigned long addr, unsigned int size, unsigned int line) { /* * We have to separately check addr and addr+size, because size could * be huge and addr + size might wrap around. */ if (addr >= guest_limit || addr + size >= guest_limit) errx(1, "%s:%i: Invalid address %#lx", __FILE__, line, addr); /* * We return a pointer for the caller's convenience, now we know it's * safe to use. */ return from_guest_phys(addr); } /* A macro which transparently hands the line number to the real function. */ #define check_pointer(addr,size) _check_pointer(addr, size, __LINE__) /* * Each buffer in the virtqueues is actually a chain of descriptors. This * function returns the next descriptor in the chain, or vq->vring.num if we're * at the end. */ static unsigned next_desc(struct vring_desc *desc, unsigned int i, unsigned int max) { unsigned int next; /* If this descriptor says it doesn't chain, we're done. */ if (!(desc[i].flags & VRING_DESC_F_NEXT)) return max; /* Check they're not leading us off end of descriptors. */ next = desc[i].next; /* Make sure compiler knows to grab that: we don't want it changing! */ wmb(); if (next >= max) errx(1, "Desc next is %u", next); return next; } /* * This actually sends the interrupt for this virtqueue, if we've used a * buffer. 
 */
static void trigger_irq(struct virtqueue *vq)
{
	unsigned long buf[] = { LHREQ_IRQ, vq->config.irq };

	/* Don't inform them if nothing used. */
	if (!vq->pending_used)
		return;
	vq->pending_used = 0;

	/* If they don't want an interrupt, don't send one... */
	if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) {
		/* ... unless they've asked us to force one on empty. */
		if (!vq->dev->irq_on_empty
		    || lg_last_avail(vq) != vq->vring.avail->idx)
			return;
	}

	/* Send the Guest an interrupt tell them we used something up. */
	if (write(lguest_fd, buf, sizeof(buf)) != 0)
		err(1, "Triggering irq %i", vq->config.irq);
}

/*
 * This looks in the virtqueue for the first available buffer, and converts
 * it to an iovec for convenient access.  Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function waits if necessary, and returns the descriptor number found.
 */
static unsigned wait_for_vq_desc(struct virtqueue *vq,
				 struct iovec iov[],
				 unsigned int *out_num, unsigned int *in_num)
{
	unsigned int i, head, max;
	struct vring_desc *desc;
	u16 last_avail = lg_last_avail(vq);

	/* There's nothing available? */
	while (last_avail == vq->vring.avail->idx) {
		u64 event;

		/*
		 * Since we're about to sleep, now is a good time to tell the
		 * Guest about what we've used up to now.
		 */
		trigger_irq(vq);

		/* OK, now we need to know about added descriptors. */
		vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;

		/*
		 * They could have slipped one in as we were doing that: make
		 * sure it's written, then check again.
		 */
		mb();
		if (last_avail != vq->vring.avail->idx) {
			vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
			break;
		}

		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (read(vq->eventfd, &event, sizeof(event)) != sizeof(event))
			errx(1, "Event read failed?");

		/* We don't need to be notified again. */
		vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
	}

	/* Check it isn't doing very strange things with descriptor numbers. */
	if ((u16)(vq->vring.avail->idx - last_avail) > vq->vring.num)
		errx(1, "Guest moved used index from %u to %u",
		     last_avail, vq->vring.avail->idx);

	/*
	 * Grab the next descriptor number they're advertising, and increment
	 * the index we've seen.
	 */
	head = vq->vring.avail->ring[last_avail % vq->vring.num];
	lg_last_avail(vq)++;

	/* If their number is silly, that's a fatal mistake. */
	if (head >= vq->vring.num)
		errx(1, "Guest says index %u is available", head);

	/* When we start there are none of either input nor output. */
	*out_num = *in_num = 0;

	max = vq->vring.num;
	desc = vq->vring.desc;
	i = head;

	/*
	 * If this is an indirect entry, then this buffer contains a descriptor
	 * table which we handle as if it's any normal descriptor chain.
	 */
	if (desc[i].flags & VRING_DESC_F_INDIRECT) {
		if (desc[i].len % sizeof(struct vring_desc))
			errx(1, "Invalid size for indirect buffer table");

		max = desc[i].len / sizeof(struct vring_desc);
		desc = check_pointer(desc[i].addr, desc[i].len);
		i = 0;
	}

	do {
		/* Grab the first descriptor, and check it's OK. */
		iov[*out_num + *in_num].iov_len = desc[i].len;
		iov[*out_num + *in_num].iov_base
			= check_pointer(desc[i].addr, desc[i].len);
		/* If this is an input descriptor, increment that count. */
		if (desc[i].flags & VRING_DESC_F_WRITE)
			(*in_num)++;
		else {
			/*
			 * If it's an output descriptor, they're all supposed
			 * to come before any input descriptors.
			 */
			if (*in_num)
				errx(1, "Descriptor has out after in");
			(*out_num)++;
		}

		/* If we've got too many, that implies a descriptor loop. */
		if (*out_num + *in_num > max)
			errx(1, "Looped descriptor");
	} while ((i = next_desc(desc, i, max)) != max);

	return head;
}

/*
 * After we've used one of their buffers, we tell the Guest about it.  Sometime
 * later we'll want to send them an interrupt using trigger_irq(); note that
 * wait_for_vq_desc() does that for us if it has to wait.
 */
static void add_used(struct virtqueue *vq, unsigned int head, int len)
{
	struct vring_used_elem *used;

	/*
	 * The virtqueue contains a ring of used buffers.  Get a pointer to the
	 * next entry in that used ring.
	 */
	used = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num];
	used->id = head;
	used->len = len;
	/* Make sure buffer is written before we update index. */
	wmb();
	vq->vring.used->idx++;
	vq->pending_used++;
}

/* And here's the combo meal deal.  Supersize me! */
static void add_used_and_trigger(struct virtqueue *vq, unsigned head, int len)
{
	add_used(vq, head, len);
	trigger_irq(vq);
}

/*
 * The Console
 *
 * We associate some data with the console for our exit hack.
 */
struct console_abort {
	/* How many times have they hit ^C? */
	int count;
	/* When did they start? */
	struct timeval start;
};

/* This is the routine which handles console input (ie. stdin). */
static void console_input(struct virtqueue *vq)
{
	int len;
	unsigned int head, in_num, out_num;
	struct console_abort *abort = vq->dev->priv;
	struct iovec iov[vq->vring.num];

	/* Make sure there's a descriptor available. */
	head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
	if (out_num)
		errx(1, "Output buffers in console in queue?");

	/* Read into it.  This is where we usually wait. */
	len = readv(STDIN_FILENO, iov, in_num);
	if (len <= 0) {
		/* Ran out of input? */
		warnx("Failed to get console input, ignoring console.");
		/*
		 * For simplicity, dying threads kill the whole Launcher.  So
		 * just nap here.
		 */
		for (;;)
			pause();
	}

	/* Tell the Guest we used a buffer. */
	add_used_and_trigger(vq, head, len);

	/*
	 * Three ^C within one second?  Exit.
	 *
	 * This is such a hack, but works surprisingly well.  Each ^C has to
	 * be in a buffer by itself, so they can't be too fast.  But we check
	 * that we get three within about a second, so they can't be too
	 * slow.
	 */
	if (len != 1 || ((char *)iov[0].iov_base)[0] != 3) {
		abort->count = 0;
		return;
	}

	abort->count++;
	if (abort->count == 1)
		gettimeofday(&abort->start, NULL);
	else if (abort->count == 3) {
		struct timeval now;
		gettimeofday(&now, NULL);
		/* Kill all Launcher processes with SIGINT, like normal ^C */
		if (now.tv_sec <= abort->start.tv_sec+1)
			kill(0, SIGINT);
		abort->count = 0;
	}
}

/* This is the routine which handles console output (ie. stdout). */
static void console_output(struct virtqueue *vq)
{
	unsigned int head, out, in;
	struct iovec iov[vq->vring.num];

	/* We usually wait in here, for the Guest to give us something. */
	head = wait_for_vq_desc(vq, iov, &out, &in);
	if (in)
		errx(1, "Input buffers in console output queue?");

	/* writev can return a partial write, so we loop here. */
	while (!iov_empty(iov, out)) {
		int len = writev(STDOUT_FILENO, iov, out);
		if (len <= 0)
			err(1, "Write to stdout gave %i", len);
		iov_consume(iov, out, len);
	}

	/*
	 * We're finished with that buffer: if we're going to sleep,
	 * wait_for_vq_desc() will prod the Guest with an interrupt.
	 */
	add_used(vq, head, 0);
}

/*
 * The Network
 *
 * Handling output for network is also simple: we get all the output buffers
 * and write them to /dev/net/tun.
 */
struct net_info {
	int tunfd;
};

static void net_output(struct virtqueue *vq)
{
	struct net_info *net_info = vq->dev->priv;
	unsigned int head, out, in;
	struct iovec iov[vq->vring.num];

	/* We usually wait in here for the Guest to give us a packet. */
	head = wait_for_vq_desc(vq, iov, &out, &in);
	if (in)
		errx(1, "Input buffers in net output queue?");
	/*
	 * Send the whole thing through to /dev/net/tun.  It expects the exact
	 * same format: what a coincidence!
	 */
	if (writev(net_info->tunfd, iov, out) < 0)
		errx(1, "Write to tun failed?");

	/*
	 * Done with that one; wait_for_vq_desc() will send the interrupt if
	 * all packets are processed.
	 */
	add_used(vq, head, 0);
}

/*
 * Handling network input is a bit trickier, because I've tried to optimize it.
* * First we have a helper routine which tells is if from this file descriptor * (ie. the /dev/net/tun device) will block: */ static bool will_block(int fd) { fd_set fdset; struct timeval zero = { 0, 0 }; FD_ZERO(&fdset); FD_SET(fd, &fdset); return select(fd+1, &fdset, NULL, NULL, &zero) != 1; } /* * This handles packets coming in from the tun device to our Guest. Like all * service routines, it gets called again as soon as it returns, so you don't * see a while(1) loop here. */ static void net_input(struct virtqueue *vq) { int len; unsigned int head, out, in; struct iovec iov[vq->vring.num]; struct net_info *net_info = vq->dev->priv; /* * Get a descriptor to write an incoming packet into. This will also * send an interrupt if they're out of descriptors. */ head = wait_for_vq_desc(vq, iov, &out, &in); if (out) errx(1, "Output buffers in net input queue?"); /* * If it looks like we'll block reading from the tun device, send them * an interrupt. */ if (vq->pending_used && will_block(net_info->tunfd)) trigger_irq(vq); /* * Read in the packet. This is where we normally wait (when there's no * incoming network traffic). */ len = readv(net_info->tunfd, iov, in); if (len <= 0) err(1, "Failed to read from tun."); /* * Mark that packet buffer as used, but don't interrupt here. We want * to wait until we've done as much work as we can. */ add_used(vq, head, len); } /*:*/ /* This is the helper to create threads: run the service routine in a loop. */ static int do_thread(void *_vq) { struct virtqueue *vq = _vq; for (;;) vq->service(vq); return 0; } /* * When a child dies, we kill our entire process group with SIGTERM. This * also has the side effect that the shell restores the console for us! */ static void kill_launcher(int signal) { kill(0, SIGTERM); } static void reset_device(struct device *dev) { struct virtqueue *vq; verbose("Resetting device %s\n", dev->name); /* Clear any features they've acked. 
*/ memset(get_feature_bits(dev) + dev->feature_len, 0, dev->feature_len); /* We're going to be explicitly killing threads, so ignore them. */ signal(SIGCHLD, SIG_IGN); /* Zero out the virtqueues, get rid of their threads */ for (vq = dev->vq; vq; vq = vq->next) { if (vq->thread != (pid_t)-1) { kill(vq->thread, SIGTERM); waitpid(vq->thread, NULL, 0); vq->thread = (pid_t)-1; } memset(vq->vring.desc, 0, vring_size(vq->config.num, LGUEST_VRING_ALIGN)); lg_last_avail(vq) = 0; } dev->running = false; /* Now we care if threads die. */ signal(SIGCHLD, (void *)kill_launcher); } /*L:216 * This actually creates the thread which services the virtqueue for a device. */ static void create_thread(struct virtqueue *vq) { /* * Create stack for thread. Since the stack grows upwards, we point * the stack pointer to the end of this region. */ char *stack = malloc(32768); unsigned long args[] = { LHREQ_EVENTFD, vq->config.pfn*getpagesize(), 0 }; /* Create a zero-initialized eventfd. */ vq->eventfd = eventfd(0, 0); if (vq->eventfd < 0) err(1, "Creating eventfd"); args[2] = vq->eventfd; /* * Attach an eventfd to this virtqueue: it will go off when the Guest * does an LHCALL_NOTIFY for this vq. */ if (write(lguest_fd, &args, sizeof(args)) != 0) err(1, "Attaching eventfd"); /* * CLONE_VM: because it has to access the Guest memory, and SIGCHLD so * we get a signal if it dies. */ vq->thread = clone(do_thread, stack + 32768, CLONE_VM | SIGCHLD, vq); if (vq->thread == (pid_t)-1) err(1, "Creating clone"); /* We close our local copy now the child has it. 
*/ close(vq->eventfd); } static bool accepted_feature(struct device *dev, unsigned int bit) { const u8 *features = get_feature_bits(dev) + dev->feature_len; if (dev->feature_len < bit / CHAR_BIT) return false; return features[bit / CHAR_BIT] & (1 << (bit % CHAR_BIT)); } static void start_device(struct device *dev) { unsigned int i; struct virtqueue *vq; verbose("Device %s OK: offered", dev->name); for (i = 0; i < dev->feature_len; i++) verbose(" %02x", get_feature_bits(dev)[i]); verbose(", accepted"); for (i = 0; i < dev->feature_len; i++) verbose(" %02x", get_feature_bits(dev) [dev->feature_len+i]); dev->irq_on_empty = accepted_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY); for (vq = dev->vq; vq; vq = vq->next) { if (vq->service) create_thread(vq); } dev->running = true; } static void cleanup_devices(void) { struct device *dev; for (dev = devices.dev; dev; dev = dev->next) reset_device(dev); /* If we saved off the original terminal settings, restore them now. */ if (orig_term.c_lflag & (ISIG|ICANON|ECHO)) tcsetattr(STDIN_FILENO, TCSANOW, &orig_term); } /* When the Guest tells us they updated the status field, we handle it. */ static void update_device_status(struct device *dev) { /* A zero status is a reset, otherwise it's a set of flags. */ if (dev->desc->status == 0) reset_device(dev); else if (dev->desc->status & VIRTIO_CONFIG_S_FAILED) { warnx("Device %s configuration FAILED", dev->name); if (dev->running) reset_device(dev); } else if (dev->desc->status & VIRTIO_CONFIG_S_DRIVER_OK) { if (!dev->running) start_device(dev); } } /*L:215 * This is the generic routine we call when the Guest uses LHCALL_NOTIFY. In * particular, it's used to notify us of device status changes during boot. */ static void handle_output(unsigned long addr) { struct device *i; /* Check each device. */ for (i = devices.dev; i; i = i->next) { struct virtqueue *vq; /* * Notifications to device descriptors mean they updated the * device status. 
*/ if (from_guest_phys(addr) == i->desc) { update_device_status(i); return; } /* * Devices *can* be used before status is set to DRIVER_OK. * The original plan was that they would never do this: they * would always finish setting up their status bits before * actually touching the virtqueues. In practice, we allowed * them to, and they do (eg. the disk probes for partition * tables as part of initialization). * * If we see this, we start the device: once it's running, we * expect the device to catch all the notifications. */ for (vq = i->vq; vq; vq = vq->next) { if (addr != vq->config.pfn*getpagesize()) continue; if (i->running) errx(1, "Notification on running %s", i->name); /* This just calls create_thread() for each virtqueue */ start_device(i); return; } } /* * Early console write is done using notify on a nul-terminated string * in Guest memory. It's also great for hacking debugging messages * into a Guest. */ if (addr >= guest_limit) errx(1, "Bad NOTIFY %#lx", addr); write(STDOUT_FILENO, from_guest_phys(addr), strnlen(from_guest_phys(addr), guest_limit - addr)); } /*L:190 * Device Setup * * All devices need a descriptor so the Guest knows it exists, and a "struct * device" so the Launcher can keep track of it. We have common helper * routines to allocate and manage them. */ /* * The layout of the device page is a "struct lguest_device_desc" followed by a * number of virtqueue descriptors, then two sets of feature bits, then an * array of configuration bytes. This routine returns the configuration * pointer. */ static u8 *device_config(const struct device *dev) { return (void *)(dev->desc + 1) + dev->num_vq * sizeof(struct lguest_vqconfig) + dev->feature_len * 2; } /* * This routine allocates a new "struct lguest_device_desc" from descriptor * table page just above the Guest's normal memory. It returns a pointer to * that descriptor. 
*/ static struct lguest_device_desc *new_dev_desc(u16 type) { struct lguest_device_desc d = { .type = type }; void *p; /* Figure out where the next device config is, based on the last one. */ if (devices.lastdev) p = device_config(devices.lastdev) + devices.lastdev->desc->config_len; else p = devices.descpage; /* We only have one page for all the descriptors. */ if (p + sizeof(d) > (void *)devices.descpage + getpagesize()) errx(1, "Too many devices"); /* p might not be aligned, so we memcpy in. */ return memcpy(p, &d, sizeof(d)); } /* * Each device descriptor is followed by the description of its virtqueues. We * specify how many descriptors the virtqueue is to have. */ static void add_virtqueue(struct device *dev, unsigned int num_descs, void (*service)(struct virtqueue *)) { unsigned int pages; struct virtqueue **i, *vq = malloc(sizeof(*vq)); void *p; /* First we need some memory for this virtqueue. */ pages = (vring_size(num_descs, LGUEST_VRING_ALIGN) + getpagesize() - 1) / getpagesize(); p = get_pages(pages); /* Initialize the virtqueue */ vq->next = NULL; vq->last_avail_idx = 0; vq->dev = dev; /* * This is the routine the service thread will run, and its Process ID * once it's running. */ vq->service = service; vq->thread = (pid_t)-1; /* Initialize the configuration. */ vq->config.num = num_descs; vq->config.irq = devices.next_irq++; vq->config.pfn = to_guest_phys(p) / getpagesize(); /* Initialize the vring. */ vring_init(&vq->vring, num_descs, p, LGUEST_VRING_ALIGN); /* * Append virtqueue to this device's descriptor. We use * device_config() to get the end of the device's current virtqueues; * we check that we haven't added any config or feature information * yet, otherwise we'd be overwriting them. 
*/ assert(dev->desc->config_len == 0 && dev->desc->feature_len == 0); memcpy(device_config(dev), &vq->config, sizeof(vq->config)); dev->num_vq++; dev->desc->num_vq++; verbose("Virtqueue page %#lx\n", to_guest_phys(p)); /* * Add to tail of list, so dev->vq is first vq, dev->vq->next is * second. */ for (i = &dev->vq; *i; i = &(*i)->next); *i = vq; } /* * The first half of the feature bitmask is for us to advertise features. The * second half is for the Guest to accept features. */ static void add_feature(struct device *dev, unsigned bit) { u8 *features = get_feature_bits(dev); /* We can't extend the feature bits once we've added config bytes */ if (dev->desc->feature_len <= bit / CHAR_BIT) { assert(dev->desc->config_len == 0); dev->feature_len = dev->desc->feature_len = (bit/CHAR_BIT) + 1; } features[bit / CHAR_BIT] |= (1 << (bit % CHAR_BIT)); } /* * This routine sets the configuration fields for an existing device's * descriptor. It only works for the last device, but that's OK because that's * how we use it. */ static void set_config(struct device *dev, unsigned len, const void *conf) { /* Check we haven't overflowed our single page. */ if (device_config(dev) + len > devices.descpage + getpagesize()) errx(1, "Too many devices"); /* Copy in the config information, and store the length. */ memcpy(device_config(dev), conf, len); dev->desc->config_len = len; /* Size must fit in config_len field (8 bits)! */ assert(dev->desc->config_len == len); } /* * This routine does all the creation and setup of a new device, including * calling new_dev_desc() to allocate the descriptor and device memory. We * don't actually start the service threads until later. * * See what I mean about userspace being boring? */ static struct device *new_device(const char *name, u16 type) { struct device *dev = malloc(sizeof(*dev)); /* Now we populate the fields one at a time. 
*/ dev->desc = new_dev_desc(type); dev->name = name; dev->vq = NULL; dev->feature_len = 0; dev->num_vq = 0; dev->running = false; /* * Append to device list. Prepending to a single-linked list is * easier, but the user expects the devices to be arranged on the bus * in command-line order. The first network device on the command line * is eth0, the first block device /dev/vda, etc. */ if (devices.lastdev) devices.lastdev->next = dev; else devices.dev = dev; devices.lastdev = dev; return dev; } /* * Our first setup routine is the console. It's a fairly simple device, but * UNIX tty handling makes it uglier than it could be. */ static void setup_console(void) { struct device *dev; /* If we can save the initial standard input settings... */ if (tcgetattr(STDIN_FILENO, &orig_term) == 0) { struct termios term = orig_term; /* * Then we turn off echo, line buffering and ^C etc: We want a * raw input stream to the Guest. */ term.c_lflag &= ~(ISIG|ICANON|ECHO); tcsetattr(STDIN_FILENO, TCSANOW, &term); } dev = new_device("console", VIRTIO_ID_CONSOLE); /* We store the console state in dev->priv, and initialize it. */ dev->priv = malloc(sizeof(struct console_abort)); ((struct console_abort *)dev->priv)->count = 0; /* * The console needs two virtqueues: the input then the output. When * they put something the input queue, we make sure we're listening to * stdin. When they put something in the output queue, we write it to * stdout. */ add_virtqueue(dev, VIRTQUEUE_NUM, console_input); add_virtqueue(dev, VIRTQUEUE_NUM, console_output); verbose("device %u: console\n", ++devices.device_num); } /*:*/ /*M:010 * Inter-guest networking is an interesting area. Simplest is to have a * --sharenet=<name> option which opens or creates a named pipe. This can be * used to send packets to another guest in a 1:1 manner. * * More sopisticated is to use one of the tools developed for project like UML * to do networking. * * Faster is to do virtio bonding in kernel. 
Doing this 1:1 would be
 * completely generic ("here's my vring, attach to your vring") and would work
 * for any traffic.  Of course, namespace and permissions issues need to be
 * dealt with.  A more sophisticated "multi-channel" virtio_net.c could hide
 * multiple inter-guest channels behind one interface, although it would
 * require some manner of hotplugging new virtio channels.
 *
 * Finally, we could implement a virtio network switch in the kernel.
:*/

/*
 * Parse a dotted-quad IPv4 address string into a host-order u32.
 * Exits the Launcher via errx() if the string does not scan as four
 * unsigned integers.
 * NOTE(review): the four octets are not range-checked, so e.g. "300.1.1.1"
 * is accepted and the high bits are silently shifted away — confirm this
 * leniency is intended.
 */
static u32 str2ip(const char *ipaddr)
{
	unsigned int b[4];

	if (sscanf(ipaddr, "%u.%u.%u.%u", &b[0], &b[1], &b[2], &b[3]) != 4)
		errx(1, "Failed to parse IP address '%s'", ipaddr);
	return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
}

/*
 * Parse a colon-separated MAC address string ("aa:bb:cc:dd:ee:ff") into the
 * caller-supplied 6-byte array.  Exits via errx() on a malformed string.
 * NOTE(review): like str2ip(), each %02x field is not range-checked; values
 * above 0xff are truncated on assignment — confirm intended.
 */
static void str2mac(const char *macaddr, unsigned char mac[6])
{
	unsigned int m[6];

	if (sscanf(macaddr, "%02x:%02x:%02x:%02x:%02x:%02x",
		   &m[0], &m[1], &m[2], &m[3], &m[4], &m[5]) != 6)
		errx(1, "Failed to parse mac address '%s'", macaddr);
	/* Copy each parsed field into the output byte array. */
	mac[0] = m[0];
	mac[1] = m[1];
	mac[2] = m[2];
	mac[3] = m[3];
	mac[4] = m[4];
	mac[5] = m[5];
}

/*
 * This code is "adapted" from libbridge: it attaches the Host end of the
 * network device to the bridge device specified by the command line.
 *
 * This is yet another James Morris contribution (I'm an IP-level guy, so I
 * dislike bridging), and I just try not to break it.
 *
 * fd is any open socket usable for network ioctls; if_name is the tap
 * interface to enslave; br_name is the bridge to add it to.  Any failure
 * terminates the Launcher via err()/errx().
 */
static void add_to_bridge(int fd, const char *if_name, const char *br_name)
{
	int ifidx;
	struct ifreq ifr;

	if (!*br_name)
		errx(1, "must specify bridge name");

	/* Translate the interface name to its kernel index. */
	ifidx = if_nametoindex(if_name);
	if (!ifidx)
		errx(1, "interface %s does not exist!", if_name);

	/* strncpy does not guarantee termination, so terminate explicitly. */
	strncpy(ifr.ifr_name, br_name, IFNAMSIZ);
	ifr.ifr_name[IFNAMSIZ-1] = '\0';
	ifr.ifr_ifindex = ifidx;
	if (ioctl(fd, SIOCBRADDIF, &ifr) < 0)
		err(1, "can't add %s to bridge %s", if_name, br_name);
}

/*
 * This sets up the Host end of the network device with an IP address, brings
 * it up so packets will flow, then copies the MAC address into the hwaddr
 * pointer.
*/
/*
 * Assign ipaddr (host-order IPv4) to the tap interface named tapif and set
 * IFF_UP on it.  fd is any open socket usable for interface ioctls.  Any
 * ioctl failure terminates the Launcher via err().
 */
static void configure_device(int fd, const char *tapif, u32 ipaddr)
{
	struct ifreq ifr;
	struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;

	memset(&ifr, 0, sizeof(ifr));
	strcpy(ifr.ifr_name, tapif);

	/* Don't read these incantations.  Just cut & paste them like I did! */
	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = htonl(ipaddr);
	if (ioctl(fd, SIOCSIFADDR, &ifr) != 0)
		err(1, "Setting %s interface address", tapif);
	ifr.ifr_flags = IFF_UP;
	if (ioctl(fd, SIOCSIFFLAGS, &ifr) != 0)
		err(1, "Bringing interface %s up", tapif);
}

/*
 * Open /dev/net/tun, create a tap interface with virtio-net header support,
 * enable TSO offloads on it, and return the open fd.  The kernel-chosen
 * interface name ("tapN") is copied into the caller's tapif buffer.
 */
static int get_tun_device(char tapif[IFNAMSIZ])
{
	struct ifreq ifr;
	int netfd;

	/* Start with this zeroed.  Messy but sure. */
	memset(&ifr, 0, sizeof(ifr));
	/*
	 * We open the /dev/net/tun device and tell it we want a tap device.  A
	 * tap device is like a tun device, only somehow different.  To tell
	 * the truth, I completely blundered my way through this code, but it
	 * works now!
	 */
	netfd = open_or_die("/dev/net/tun", O_RDWR);
	/* IFF_NO_PI: no packet-info prefix; IFF_VNET_HDR: virtio-net header. */
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
	/* "tap%d": the kernel picks the first free tap number for us. */
	strcpy(ifr.ifr_name, "tap%d");
	if (ioctl(netfd, TUNSETIFF, &ifr) != 0)
		err(1, "configuring /dev/net/tun");

	/* Allow the Guest to hand us checksum-less and segmented packets. */
	if (ioctl(netfd, TUNSETOFFLOAD,
		  TUN_F_CSUM|TUN_F_TSO4|TUN_F_TSO6|TUN_F_TSO_ECN) != 0)
		err(1, "Could not set features for tun device");

	/*
	 * We don't need checksums calculated for packets coming in this
	 * device: trust us!
	 */
	ioctl(netfd, TUNSETNOCSUM, 1);
	memcpy(tapif, ifr.ifr_name, IFNAMSIZ);
	return netfd;
}

/*L:195
 * Our network is a Host<->Guest network.  This can either use bridging or
 * routing, but the principle is the same: it uses the "tun" device to inject
 * packets into the Host as if they came in from a normal network card.  We
 * just shunt packets between the Guest and the tun device.
*/ static void setup_tun_net(char *arg) { struct device *dev; struct net_info *net_info = malloc(sizeof(*net_info)); int ipfd; u32 ip = INADDR_ANY; bool bridging = false; char tapif[IFNAMSIZ], *p; struct virtio_net_config conf; net_info->tunfd = get_tun_device(tapif); /* First we create a new network device. */ dev = new_device("net", VIRTIO_ID_NET); dev->priv = net_info; /* Network devices need a recv and a send queue, just like console. */ add_virtqueue(dev, VIRTQUEUE_NUM, net_input); add_virtqueue(dev, VIRTQUEUE_NUM, net_output); /* * We need a socket to perform the magic network ioctls to bring up the * tap interface, connect to the bridge etc. Any socket will do! */ ipfd = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP); if (ipfd < 0) err(1, "opening IP socket"); /* If the command line was --tunnet=bridge:<name> do bridging. */ if (!strncmp(BRIDGE_PFX, arg, strlen(BRIDGE_PFX))) { arg += strlen(BRIDGE_PFX); bridging = true; } /* A mac address may follow the bridge name or IP address */ p = strchr(arg, ':'); if (p) { str2mac(p+1, conf.mac); add_feature(dev, VIRTIO_NET_F_MAC); *p = '\0'; } /* arg is now either an IP address or a bridge name */ if (bridging) add_to_bridge(ipfd, tapif, arg); else ip = str2ip(arg); /* Set up the tun device. */ configure_device(ipfd, tapif, ip); add_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY); /* Expect Guest to handle everything except UFO */ add_feature(dev, VIRTIO_NET_F_CSUM); add_feature(dev, VIRTIO_NET_F_GUEST_CSUM); add_feature(dev, VIRTIO_NET_F_GUEST_TSO4); add_feature(dev, VIRTIO_NET_F_GUEST_TSO6); add_feature(dev, VIRTIO_NET_F_GUEST_ECN); add_feature(dev, VIRTIO_NET_F_HOST_TSO4); add_feature(dev, VIRTIO_NET_F_HOST_TSO6); add_feature(dev, VIRTIO_NET_F_HOST_ECN); /* We handle indirect ring entries */ add_feature(dev, VIRTIO_RING_F_INDIRECT_DESC); set_config(dev, sizeof(conf), &conf); /* We don't need the socket any more; setup is done. 
*/ close(ipfd); devices.device_num++; if (bridging) verbose("device %u: tun %s attached to bridge: %s\n", devices.device_num, tapif, arg); else verbose("device %u: tun %s: %s\n", devices.device_num, tapif, arg); } /*:*/ /* This hangs off device->priv. */ struct vblk_info { /* The size of the file. */ off64_t len; /* The file descriptor for the file. */ int fd; }; /*L:210 * The Disk * * The disk only has one virtqueue, so it only has one thread. It is really * simple: the Guest asks for a block number and we read or write that position * in the file. * * Before we serviced each virtqueue in a separate thread, that was unacceptably * slow: the Guest waits until the read is finished before running anything * else, even if it could have been doing useful work. * * We could have used async I/O, except it's reputed to suck so hard that * characters actually go missing from your code when you try to use it. */ static void blk_request(struct virtqueue *vq) { struct vblk_info *vblk = vq->dev->priv; unsigned int head, out_num, in_num, wlen; int ret; u8 *in; struct virtio_blk_outhdr *out; struct iovec iov[vq->vring.num]; off64_t off; /* * Get the next request, where we normally wait. It triggers the * interrupt to acknowledge previously serviced requests (if any). */ head = wait_for_vq_desc(vq, iov, &out_num, &in_num); /* * Every block request should contain at least one output buffer * (detailing the location on disk and the type of request) and one * input buffer (to hold the result). */ if (out_num == 0 || in_num == 0) errx(1, "Bad virtblk cmd %u out=%u in=%u", head, out_num, in_num); out = convert(&iov[0], struct virtio_blk_outhdr); in = convert(&iov[out_num+in_num-1], u8); /* * For historical reasons, block operations are expressed in 512 byte * "sectors". */ off = out->sector * 512; /* * The block device implements "barriers", where the Guest indicates * that it wants all previous writes to occur before this write. 
We * don't have a way of asking our kernel to do a barrier, so we just * synchronize all the data in the file. Pretty poor, no? */ if (out->type & VIRTIO_BLK_T_BARRIER) fdatasync(vblk->fd); /* * In general the virtio block driver is allowed to try SCSI commands. * It'd be nice if we supported eject, for example, but we don't. */ if (out->type & VIRTIO_BLK_T_SCSI_CMD) { fprintf(stderr, "Scsi commands unsupported\n"); *in = VIRTIO_BLK_S_UNSUPP; wlen = sizeof(*in); } else if (out->type & VIRTIO_BLK_T_OUT) { /* * Write * * Move to the right location in the block file. This can fail * if they try to write past end. */ if (lseek64(vblk->fd, off, SEEK_SET) != off) err(1, "Bad seek to sector %llu", out->sector); ret = writev(vblk->fd, iov+1, out_num-1); verbose("WRITE to sector %llu: %i\n", out->sector, ret); /* * Grr... Now we know how long the descriptor they sent was, we * make sure they didn't try to write over the end of the block * file (possibly extending it). */ if (ret > 0 && off + ret > vblk->len) { /* Trim it back to the correct length */ ftruncate64(vblk->fd, vblk->len); /* Die, bad Guest, die. */ errx(1, "Write past end %llu+%u", off, ret); } wlen = sizeof(*in); *in = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR); } else { /* * Read * * Move to the right location in the block file. This can fail * if they try to read past end. */ if (lseek64(vblk->fd, off, SEEK_SET) != off) err(1, "Bad seek to sector %llu", out->sector); ret = readv(vblk->fd, iov+1, in_num-1); verbose("READ from sector %llu: %i\n", out->sector, ret); if (ret >= 0) { wlen = sizeof(*in) + ret; *in = VIRTIO_BLK_S_OK; } else { wlen = sizeof(*in); *in = VIRTIO_BLK_S_IOERR; } } /* * OK, so we noted that it was pretty poor to use an fdatasync as a * barrier. But Christoph Hellwig points out that we need a sync * *afterwards* as well: "Barriers specify no reordering to the front * or the back." 
And Jens Axboe confirmed it, so here we are: */ if (out->type & VIRTIO_BLK_T_BARRIER) fdatasync(vblk->fd); /* Finished that request. */ add_used(vq, head, wlen); } /*L:198 This actually sets up a virtual block device. */ static void setup_block_file(const char *filename) { struct device *dev; struct vblk_info *vblk; struct virtio_blk_config conf; /* Creat the device. */ dev = new_device("block", VIRTIO_ID_BLOCK); /* The device has one virtqueue, where the Guest places requests. */ add_virtqueue(dev, VIRTQUEUE_NUM, blk_request); /* Allocate the room for our own bookkeeping */ vblk = dev->priv = malloc(sizeof(*vblk)); /* First we open the file and store the length. */ vblk->fd = open_or_die(filename, O_RDWR|O_LARGEFILE); vblk->len = lseek64(vblk->fd, 0, SEEK_END); /* We support barriers. */ add_feature(dev, VIRTIO_BLK_F_BARRIER); /* Tell Guest how many sectors this device has. */ conf.capacity = cpu_to_le64(vblk->len / 512); /* * Tell Guest not to put in too many descriptors at once: two are used * for the in and out elements. */ add_feature(dev, VIRTIO_BLK_F_SEG_MAX); conf.seg_max = cpu_to_le32(VIRTQUEUE_NUM - 2); /* Don't try to put whole struct: we have 8 bit limit. */ set_config(dev, offsetof(struct virtio_blk_config, geometry), &conf); verbose("device %u: virtblock %llu sectors\n", ++devices.device_num, le64_to_cpu(conf.capacity)); } /*L:211 * Our random number generator device reads from /dev/random into the Guest's * input buffers. The usual case is that the Guest doesn't want random numbers * and so has no buffers although /dev/random is still readable, whereas * console is the reverse. * * The same logic applies, however. */ struct rng_info { int rfd; }; static void rng_input(struct virtqueue *vq) { int len; unsigned int head, in_num, out_num, totlen = 0; struct rng_info *rng_info = vq->dev->priv; struct iovec iov[vq->vring.num]; /* First we need a buffer from the Guests's virtqueue. 
*/
	head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
	/* The rng device only consumes input buffers from the Guest. */
	if (out_num)
		errx(1, "Output buffers in rng?");

	/*
	 * Just like the console write, we loop to cover the whole iovec.
	 * In this case, short reads actually happen quite a bit.
	 */
	while (!iov_empty(iov, in_num)) {
		len = readv(rng_info->rfd, iov, in_num);
		if (len <= 0)
			err(1, "Read from /dev/random gave %i", len);
		iov_consume(iov, in_num, len);
		totlen += len;
	}

	/* Tell the Guest about the new input. */
	add_used(vq, head, totlen);
}

/*L:199
 * This creates a "hardware" random number device for the Guest.
 */
static void setup_rng(void)
{
	struct device *dev;
	struct rng_info *rng_info = malloc(sizeof(*rng_info));

	/* Our device's private info simply contains the /dev/random fd. */
	rng_info->rfd = open_or_die("/dev/random", O_RDONLY);

	/* Create the new device. */
	dev = new_device("rng", VIRTIO_ID_RNG);
	dev->priv = rng_info;

	/* The device has one virtqueue, where the Guest places inbufs. */
	add_virtqueue(dev, VIRTQUEUE_NUM, rng_input);

	verbose("device %u: rng\n", devices.device_num++);
}
/* That's the end of device setup. */

/*L:230 Reboot is pretty easy: clean up and exec() the Launcher afresh. */
static void __attribute__((noreturn)) restart_guest(void)
{
	unsigned int i;

	/*
	 * Since we don't track all open fds, we simply close everything beyond
	 * stderr.
	 */
	for (i = 3; i < FD_SETSIZE; i++)
		close(i);

	/* Reset all the devices (kills all threads). */
	cleanup_devices();
	/* Re-exec ourselves with the saved original argv. */
	execv(main_args[0], main_args);
	err(1, "Could not exec %s", main_args[0]);
}

/*L:220
 * Finally we reach the core of the Launcher which runs the Guest, serves
 * its input and output, and finally, lays it to rest.
 */
static void __attribute__((noreturn)) run_guest(void)
{
	for (;;) {
		unsigned long notify_addr;
		int readval;

		/* We read from the /dev/lguest device to run the Guest.
*/ readval = pread(lguest_fd, &notify_addr, sizeof(notify_addr), cpu_id); /* One unsigned long means the Guest did HCALL_NOTIFY */ if (readval == sizeof(notify_addr)) { verbose("Notify on address %#lx\n", notify_addr); handle_output(notify_addr); /* ENOENT means the Guest died. Reading tells us why. */ } else if (errno == ENOENT) { char reason[1024] = { 0 }; pread(lguest_fd, reason, sizeof(reason)-1, cpu_id); errx(1, "%s", reason); /* ERESTART means that we need to reboot the guest */ } else if (errno == ERESTART) { restart_guest(); /* Anything else means a bug or incompatible change. */ } else err(1, "Running guest failed"); } } /*L:240 * This is the end of the Launcher. The good news: we are over halfway * through! The bad news: the most fiendish part of the code still lies ahead * of us. * * Are you ready? Take a deep breath and join me in the core of the Host, in * "make Host". :*/ static struct option opts[] = { { "verbose", 0, NULL, 'v' }, { "tunnet", 1, NULL, 't' }, { "block", 1, NULL, 'b' }, { "rng", 0, NULL, 'r' }, { "initrd", 1, NULL, 'i' }, { NULL }, }; static void usage(void) { errx(1, "Usage: lguest [--verbose] " "[--tunnet=(<ipaddr>:<macaddr>|bridge:<bridgename>:<macaddr>)\n" "|--block=<filename>|--initrd=<filename>]...\n" "<mem-in-mb> vmlinux [args...]"); } /*L:105 The main routine is where the real work begins: */ int main(int argc, char *argv[]) { /* Memory, code startpoint and size of the (optional) initrd. */ unsigned long mem = 0, start, initrd_size = 0; /* Two temporaries. */ int i, c; /* The boot information for the Guest. */ struct boot_params *boot; /* If they specify an initrd file to load. */ const char *initrd_name = NULL; /* Save the args: we "reboot" by execing ourselves again. */ main_args = argv; /* * First we initialize the device list. We keep a pointer to the last * device, and the next interrupt number to use for devices (1: * remember that 0 is used by the timer). */ devices.lastdev = NULL; devices.next_irq = 1; /* We're CPU 0. 
In fact, that's the only CPU possible right now. */ cpu_id = 0; /* * We need to know how much memory so we can set up the device * descriptor and memory pages for the devices as we parse the command * line. So we quickly look through the arguments to find the amount * of memory now. */ for (i = 1; i < argc; i++) { if (argv[i][0] != '-') { mem = atoi(argv[i]) * 1024 * 1024; /* * We start by mapping anonymous pages over all of * guest-physical memory range. This fills it with 0, * and ensures that the Guest won't be killed when it * tries to access it. */ guest_base = map_zeroed_pages(mem / getpagesize() + DEVICE_PAGES); guest_limit = mem; guest_max = mem + DEVICE_PAGES*getpagesize(); devices.descpage = get_pages(1); break; } } /* The options are fairly straight-forward */ while ((c = getopt_long(argc, argv, "v", opts, NULL)) != EOF) { switch (c) { case 'v': verbose = true; break; case 't': setup_tun_net(optarg); break; case 'b': setup_block_file(optarg); break; case 'r': setup_rng(); break; case 'i': initrd_name = optarg; break; default: warnx("Unknown argument %s", argv[optind]); usage(); } } /* * After the other arguments we expect memory and kernel image name, * followed by command line arguments for the kernel. */ if (optind + 2 > argc) usage(); verbose("Guest base is at %p\n", guest_base); /* We always have a console device */ setup_console(); /* Now we load the kernel */ start = load_kernel(open_or_die(argv[optind+1], O_RDONLY)); /* Boot information is stashed at physical address 0 */ boot = from_guest_phys(0); /* Map the initrd image if requested (at top of physical memory) */ if (initrd_name) { initrd_size = load_initrd(initrd_name, mem); /* * These are the location in the Linux boot header where the * start and size of the initrd are expected to be found. */ boot->hdr.ramdisk_image = mem - initrd_size; boot->hdr.ramdisk_size = initrd_size; /* The bootloader type 0xFF means "unknown"; that's OK. 
*/ boot->hdr.type_of_loader = 0xFF; } /* * The Linux boot header contains an "E820" memory map: ours is a * simple, single region. */ boot->e820_entries = 1; boot->e820_map[0] = ((struct e820entry) { 0, mem, E820_RAM }); /* * The boot header contains a command line pointer: we put the command * line after the boot header. */ boot->hdr.cmd_line_ptr = to_guest_phys(boot + 1); /* We use a simple helper to copy the arguments separated by spaces. */ concat((char *)(boot + 1), argv+optind+2); /* Boot protocol version: 2.07 supports the fields for lguest. */ boot->hdr.version = 0x207; /* The hardware_subarch value of "1" tells the Guest it's an lguest. */ boot->hdr.hardware_subarch = 1; /* Tell the entry path not to try to reload segment registers. */ boot->hdr.loadflags |= KEEP_SEGMENTS; /* * We tell the kernel to initialize the Guest: this returns the open * /dev/lguest file descriptor. */ tell_kernel(start); /* Ensure that we terminate if a device-servicing child dies. */ signal(SIGCHLD, kill_launcher); /* If we exit via err(), this kills all the threads, restores tty. */ atexit(cleanup_devices); /* Finally, run the Guest. This doesn't return. */ run_guest(); } /*:*/ /*M:999 * Mastery is done: you now know everything I do. * * But surely you have seen code, features and bugs in your wanderings which * you now yearn to attack? That is the real game, and I look forward to you * patching and forking lguest into the Your-Name-Here-visor. * * Farewell, and good coding! * Rusty Russell. */
gpl-2.0
shorelinedev/aosp_kernel_hammerhead
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
3318
99874
/* * Copyright (c) 2010 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* Toplevel file. Relies on dhd_linux.c to send commands to the dongle. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/if_arp.h> #include <linux/sched.h> #include <linux/kthread.h> #include <linux/netdevice.h> #include <linux/bitops.h> #include <linux/etherdevice.h> #include <linux/ieee80211.h> #include <linux/uaccess.h> #include <net/cfg80211.h> #include <brcmu_utils.h> #include <defs.h> #include <brcmu_wifi.h> #include "dhd.h" #include "wl_cfg80211.h" #define BRCMF_ASSOC_PARAMS_FIXED_SIZE \ (sizeof(struct brcmf_assoc_params_le) - sizeof(u16)) static const u8 ether_bcast[ETH_ALEN] = {255, 255, 255, 255, 255, 255}; static u32 brcmf_dbg_level = WL_DBG_ERR; static void brcmf_set_drvdata(struct brcmf_cfg80211_dev *dev, void *data) { dev->driver_data = data; } static void *brcmf_get_drvdata(struct brcmf_cfg80211_dev *dev) { void *data = NULL; if (dev) data = dev->driver_data; return data; } static struct brcmf_cfg80211_priv *brcmf_priv_get(struct brcmf_cfg80211_dev *cfg_dev) { struct brcmf_cfg80211_iface *ci = brcmf_get_drvdata(cfg_dev); return ci->cfg_priv; } static bool check_sys_up(struct wiphy *wiphy) { struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); if 
(!test_bit(WL_STATUS_READY, &cfg_priv->status)) { WL_INFO("device is not ready : status (%d)\n", (int)cfg_priv->status); return false; } return true; } #define CHAN2G(_channel, _freq, _flags) { \ .band = IEEE80211_BAND_2GHZ, \ .center_freq = (_freq), \ .hw_value = (_channel), \ .flags = (_flags), \ .max_antenna_gain = 0, \ .max_power = 30, \ } #define CHAN5G(_channel, _flags) { \ .band = IEEE80211_BAND_5GHZ, \ .center_freq = 5000 + (5 * (_channel)), \ .hw_value = (_channel), \ .flags = (_flags), \ .max_antenna_gain = 0, \ .max_power = 30, \ } #define RATE_TO_BASE100KBPS(rate) (((rate) * 10) / 2) #define RATETAB_ENT(_rateid, _flags) \ { \ .bitrate = RATE_TO_BASE100KBPS(_rateid), \ .hw_value = (_rateid), \ .flags = (_flags), \ } static struct ieee80211_rate __wl_rates[] = { RATETAB_ENT(BRCM_RATE_1M, 0), RATETAB_ENT(BRCM_RATE_2M, IEEE80211_RATE_SHORT_PREAMBLE), RATETAB_ENT(BRCM_RATE_5M5, IEEE80211_RATE_SHORT_PREAMBLE), RATETAB_ENT(BRCM_RATE_11M, IEEE80211_RATE_SHORT_PREAMBLE), RATETAB_ENT(BRCM_RATE_6M, 0), RATETAB_ENT(BRCM_RATE_9M, 0), RATETAB_ENT(BRCM_RATE_12M, 0), RATETAB_ENT(BRCM_RATE_18M, 0), RATETAB_ENT(BRCM_RATE_24M, 0), RATETAB_ENT(BRCM_RATE_36M, 0), RATETAB_ENT(BRCM_RATE_48M, 0), RATETAB_ENT(BRCM_RATE_54M, 0), }; #define wl_a_rates (__wl_rates + 4) #define wl_a_rates_size 8 #define wl_g_rates (__wl_rates + 0) #define wl_g_rates_size 12 static struct ieee80211_channel __wl_2ghz_channels[] = { CHAN2G(1, 2412, 0), CHAN2G(2, 2417, 0), CHAN2G(3, 2422, 0), CHAN2G(4, 2427, 0), CHAN2G(5, 2432, 0), CHAN2G(6, 2437, 0), CHAN2G(7, 2442, 0), CHAN2G(8, 2447, 0), CHAN2G(9, 2452, 0), CHAN2G(10, 2457, 0), CHAN2G(11, 2462, 0), CHAN2G(12, 2467, 0), CHAN2G(13, 2472, 0), CHAN2G(14, 2484, 0), }; static struct ieee80211_channel __wl_5ghz_a_channels[] = { CHAN5G(34, 0), CHAN5G(36, 0), CHAN5G(38, 0), CHAN5G(40, 0), CHAN5G(42, 0), CHAN5G(44, 0), CHAN5G(46, 0), CHAN5G(48, 0), CHAN5G(52, 0), CHAN5G(56, 0), CHAN5G(60, 0), CHAN5G(64, 0), CHAN5G(100, 0), CHAN5G(104, 0), CHAN5G(108, 0), 
CHAN5G(112, 0), CHAN5G(116, 0), CHAN5G(120, 0), CHAN5G(124, 0), CHAN5G(128, 0), CHAN5G(132, 0), CHAN5G(136, 0), CHAN5G(140, 0), CHAN5G(149, 0), CHAN5G(153, 0), CHAN5G(157, 0), CHAN5G(161, 0), CHAN5G(165, 0), CHAN5G(184, 0), CHAN5G(188, 0), CHAN5G(192, 0), CHAN5G(196, 0), CHAN5G(200, 0), CHAN5G(204, 0), CHAN5G(208, 0), CHAN5G(212, 0), CHAN5G(216, 0), }; static struct ieee80211_channel __wl_5ghz_n_channels[] = { CHAN5G(32, 0), CHAN5G(34, 0), CHAN5G(36, 0), CHAN5G(38, 0), CHAN5G(40, 0), CHAN5G(42, 0), CHAN5G(44, 0), CHAN5G(46, 0), CHAN5G(48, 0), CHAN5G(50, 0), CHAN5G(52, 0), CHAN5G(54, 0), CHAN5G(56, 0), CHAN5G(58, 0), CHAN5G(60, 0), CHAN5G(62, 0), CHAN5G(64, 0), CHAN5G(66, 0), CHAN5G(68, 0), CHAN5G(70, 0), CHAN5G(72, 0), CHAN5G(74, 0), CHAN5G(76, 0), CHAN5G(78, 0), CHAN5G(80, 0), CHAN5G(82, 0), CHAN5G(84, 0), CHAN5G(86, 0), CHAN5G(88, 0), CHAN5G(90, 0), CHAN5G(92, 0), CHAN5G(94, 0), CHAN5G(96, 0), CHAN5G(98, 0), CHAN5G(100, 0), CHAN5G(102, 0), CHAN5G(104, 0), CHAN5G(106, 0), CHAN5G(108, 0), CHAN5G(110, 0), CHAN5G(112, 0), CHAN5G(114, 0), CHAN5G(116, 0), CHAN5G(118, 0), CHAN5G(120, 0), CHAN5G(122, 0), CHAN5G(124, 0), CHAN5G(126, 0), CHAN5G(128, 0), CHAN5G(130, 0), CHAN5G(132, 0), CHAN5G(134, 0), CHAN5G(136, 0), CHAN5G(138, 0), CHAN5G(140, 0), CHAN5G(142, 0), CHAN5G(144, 0), CHAN5G(145, 0), CHAN5G(146, 0), CHAN5G(147, 0), CHAN5G(148, 0), CHAN5G(149, 0), CHAN5G(150, 0), CHAN5G(151, 0), CHAN5G(152, 0), CHAN5G(153, 0), CHAN5G(154, 0), CHAN5G(155, 0), CHAN5G(156, 0), CHAN5G(157, 0), CHAN5G(158, 0), CHAN5G(159, 0), CHAN5G(160, 0), CHAN5G(161, 0), CHAN5G(162, 0), CHAN5G(163, 0), CHAN5G(164, 0), CHAN5G(165, 0), CHAN5G(166, 0), CHAN5G(168, 0), CHAN5G(170, 0), CHAN5G(172, 0), CHAN5G(174, 0), CHAN5G(176, 0), CHAN5G(178, 0), CHAN5G(180, 0), CHAN5G(182, 0), CHAN5G(184, 0), CHAN5G(186, 0), CHAN5G(188, 0), CHAN5G(190, 0), CHAN5G(192, 0), CHAN5G(194, 0), CHAN5G(196, 0), CHAN5G(198, 0), CHAN5G(200, 0), CHAN5G(202, 0), CHAN5G(204, 0), CHAN5G(206, 0), CHAN5G(208, 0), CHAN5G(210, 0), 
CHAN5G(212, 0), CHAN5G(214, 0), CHAN5G(216, 0), CHAN5G(218, 0), CHAN5G(220, 0), CHAN5G(222, 0), CHAN5G(224, 0), CHAN5G(226, 0), CHAN5G(228, 0), }; static struct ieee80211_supported_band __wl_band_2ghz = { .band = IEEE80211_BAND_2GHZ, .channels = __wl_2ghz_channels, .n_channels = ARRAY_SIZE(__wl_2ghz_channels), .bitrates = wl_g_rates, .n_bitrates = wl_g_rates_size, }; static struct ieee80211_supported_band __wl_band_5ghz_a = { .band = IEEE80211_BAND_5GHZ, .channels = __wl_5ghz_a_channels, .n_channels = ARRAY_SIZE(__wl_5ghz_a_channels), .bitrates = wl_a_rates, .n_bitrates = wl_a_rates_size, }; static struct ieee80211_supported_band __wl_band_5ghz_n = { .band = IEEE80211_BAND_5GHZ, .channels = __wl_5ghz_n_channels, .n_channels = ARRAY_SIZE(__wl_5ghz_n_channels), .bitrates = wl_a_rates, .n_bitrates = wl_a_rates_size, }; static const u32 __wl_cipher_suites[] = { WLAN_CIPHER_SUITE_WEP40, WLAN_CIPHER_SUITE_WEP104, WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, WLAN_CIPHER_SUITE_AES_CMAC, }; /* tag_ID/length/value_buffer tuple */ struct brcmf_tlv { u8 id; u8 len; u8 data[1]; }; /* Quarter dBm units to mW * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153 * Table is offset so the last entry is largest mW value that fits in * a u16. */ #define QDBM_OFFSET 153 /* Offset for first entry */ #define QDBM_TABLE_LEN 40 /* Table size */ /* Smallest mW value that will round up to the first table entry, QDBM_OFFSET. * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2 */ #define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */ /* Largest mW value that will round down to the last table entry, * QDBM_OFFSET + QDBM_TABLE_LEN-1. * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + * mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2. 
*/ #define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */ static const u16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = { /* qdBm: +0 +1 +2 +3 +4 +5 +6 +7 */ /* 153: */ 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000, /* 161: */ 10593, 11220, 11885, 12589, 13335, 14125, 14962, 15849, /* 169: */ 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119, /* 177: */ 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811, /* 185: */ 42170, 44668, 47315, 50119, 53088, 56234, 59566, 63096 }; static u16 brcmf_qdbm_to_mw(u8 qdbm) { uint factor = 1; int idx = qdbm - QDBM_OFFSET; if (idx >= QDBM_TABLE_LEN) /* clamp to max u16 mW value */ return 0xFFFF; /* scale the qdBm index up to the range of the table 0-40 * where an offset of 40 qdBm equals a factor of 10 mW. */ while (idx < 0) { idx += 40; factor *= 10; } /* return the mW value scaled down to the correct factor of 10, * adding in factor/2 to get proper rounding. */ return (nqdBm_to_mW_map[idx] + factor / 2) / factor; } static u8 brcmf_mw_to_qdbm(u16 mw) { u8 qdbm; int offset; uint mw_uint = mw; uint boundary; /* handle boundary case */ if (mw_uint <= 1) return 0; offset = QDBM_OFFSET; /* move mw into the range of the table */ while (mw_uint < QDBM_TABLE_LOW_BOUND) { mw_uint *= 10; offset -= 40; } for (qdbm = 0; qdbm < QDBM_TABLE_LEN - 1; qdbm++) { boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm + 1] - nqdBm_to_mW_map[qdbm]) / 2; if (mw_uint < boundary) break; } qdbm += (u8) offset; return qdbm; } /* function for reading/writing a single u32 from/to the dongle */ static int brcmf_exec_dcmd_u32(struct net_device *ndev, u32 cmd, u32 *par) { int err; __le32 par_le = cpu_to_le32(*par); err = brcmf_exec_dcmd(ndev, cmd, &par_le, sizeof(__le32)); *par = le32_to_cpu(par_le); return err; } static void convert_key_from_CPU(struct brcmf_wsec_key *key, struct brcmf_wsec_key_le *key_le) { key_le->index = cpu_to_le32(key->index); key_le->len = cpu_to_le32(key->len); key_le->algo = cpu_to_le32(key->algo); key_le->flags = 
cpu_to_le32(key->flags); key_le->rxiv.hi = cpu_to_le32(key->rxiv.hi); key_le->rxiv.lo = cpu_to_le16(key->rxiv.lo); key_le->iv_initialized = cpu_to_le32(key->iv_initialized); memcpy(key_le->data, key->data, sizeof(key->data)); memcpy(key_le->ea, key->ea, sizeof(key->ea)); } static int send_key_to_dongle(struct net_device *ndev, struct brcmf_wsec_key *key) { int err; struct brcmf_wsec_key_le key_le; convert_key_from_CPU(key, &key_le); err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_KEY, &key_le, sizeof(key_le)); if (err) WL_ERR("WLC_SET_KEY error (%d)\n", err); return err; } static s32 brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev, enum nl80211_iftype type, u32 *flags, struct vif_params *params) { struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); struct wireless_dev *wdev; s32 infra = 0; s32 err = 0; WL_TRACE("Enter\n"); if (!check_sys_up(wiphy)) return -EIO; switch (type) { case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_WDS: WL_ERR("type (%d) : currently we do not support this type\n", type); return -EOPNOTSUPP; case NL80211_IFTYPE_ADHOC: cfg_priv->conf->mode = WL_MODE_IBSS; infra = 0; break; case NL80211_IFTYPE_STATION: cfg_priv->conf->mode = WL_MODE_BSS; infra = 1; break; default: err = -EINVAL; goto done; } err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &infra); if (err) { WL_ERR("WLC_SET_INFRA error (%d)\n", err); err = -EAGAIN; } else { wdev = ndev->ieee80211_ptr; wdev->iftype = type; } WL_INFO("IF Type = %s\n", (cfg_priv->conf->mode == WL_MODE_IBSS) ? 
"Adhoc" : "Infra"); done: WL_TRACE("Exit\n"); return err; } static s32 brcmf_dev_intvar_set(struct net_device *ndev, s8 *name, s32 val) { s8 buf[BRCMF_DCMD_SMLEN]; u32 len; s32 err = 0; __le32 val_le; val_le = cpu_to_le32(val); len = brcmf_c_mkiovar(name, (char *)(&val_le), sizeof(val_le), buf, sizeof(buf)); BUG_ON(!len); err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, buf, len); if (err) WL_ERR("error (%d)\n", err); return err; } static s32 brcmf_dev_intvar_get(struct net_device *ndev, s8 *name, s32 *retval) { union { s8 buf[BRCMF_DCMD_SMLEN]; __le32 val; } var; u32 len; u32 data_null; s32 err = 0; len = brcmf_c_mkiovar(name, (char *)(&data_null), 0, (char *)(&var), sizeof(var.buf)); BUG_ON(!len); err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, &var, len); if (err) WL_ERR("error (%d)\n", err); *retval = le32_to_cpu(var.val); return err; } static void brcmf_set_mpc(struct net_device *ndev, int mpc) { s32 err = 0; struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); if (test_bit(WL_STATUS_READY, &cfg_priv->status)) { err = brcmf_dev_intvar_set(ndev, "mpc", mpc); if (err) { WL_ERR("fail to set mpc\n"); return; } WL_INFO("MPC : %d\n", mpc); } } static void wl_iscan_prep(struct brcmf_scan_params_le *params_le, struct brcmf_ssid *ssid) { memcpy(params_le->bssid, ether_bcast, ETH_ALEN); params_le->bss_type = DOT11_BSSTYPE_ANY; params_le->scan_type = 0; params_le->channel_num = 0; params_le->nprobes = cpu_to_le32(-1); params_le->active_time = cpu_to_le32(-1); params_le->passive_time = cpu_to_le32(-1); params_le->home_time = cpu_to_le32(-1); if (ssid && ssid->SSID_len) memcpy(&params_le->ssid_le, ssid, sizeof(struct brcmf_ssid)); } static s32 brcmf_dev_iovar_setbuf(struct net_device *ndev, s8 * iovar, void *param, s32 paramlen, void *bufptr, s32 buflen) { s32 iolen; iolen = brcmf_c_mkiovar(iovar, param, paramlen, bufptr, buflen); BUG_ON(!iolen); return brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, bufptr, iolen); } static s32 brcmf_dev_iovar_getbuf(struct net_device *ndev, s8 * 
iovar, void *param, s32 paramlen, void *bufptr, s32 buflen) { s32 iolen; iolen = brcmf_c_mkiovar(iovar, param, paramlen, bufptr, buflen); BUG_ON(!iolen); return brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, bufptr, buflen); } static s32 brcmf_run_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan, struct brcmf_ssid *ssid, u16 action) { s32 params_size = BRCMF_SCAN_PARAMS_FIXED_SIZE + offsetof(struct brcmf_iscan_params_le, params_le); struct brcmf_iscan_params_le *params; s32 err = 0; if (ssid && ssid->SSID_len) params_size += sizeof(struct brcmf_ssid); params = kzalloc(params_size, GFP_KERNEL); if (!params) return -ENOMEM; BUG_ON(params_size >= BRCMF_DCMD_SMLEN); wl_iscan_prep(&params->params_le, ssid); params->version = cpu_to_le32(BRCMF_ISCAN_REQ_VERSION); params->action = cpu_to_le16(action); params->scan_duration = cpu_to_le16(0); err = brcmf_dev_iovar_setbuf(iscan->ndev, "iscan", params, params_size, iscan->dcmd_buf, BRCMF_DCMD_SMLEN); if (err) { if (err == -EBUSY) WL_INFO("system busy : iscan canceled\n"); else WL_ERR("error (%d)\n", err); } kfree(params); return err; } static s32 brcmf_do_iscan(struct brcmf_cfg80211_priv *cfg_priv) { struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv); struct net_device *ndev = cfg_to_ndev(cfg_priv); struct brcmf_ssid ssid; __le32 passive_scan; s32 err = 0; /* Broadcast scan by default */ memset(&ssid, 0, sizeof(ssid)); iscan->state = WL_ISCAN_STATE_SCANING; passive_scan = cfg_priv->active_scan ? 
/* Remainder of brcmf_do_iscan (head in previous chunk). */
		       0 : cpu_to_le32(1);
	err = brcmf_exec_dcmd(cfg_to_ndev(cfg_priv), BRCMF_C_SET_PASSIVE_SCAN,
			      &passive_scan, sizeof(passive_scan));
	if (err) {
		WL_ERR("error (%d)\n", err);
		return err;
	}
	brcmf_set_mpc(ndev, 0);		/* keep radio awake while scanning */
	cfg_priv->iscan_kickstart = true;
	err = brcmf_run_iscan(iscan, &ssid, BRCMF_SCAN_ACTION_START);
	if (err) {
		brcmf_set_mpc(ndev, 1);	/* undo on failure */
		cfg_priv->iscan_kickstart = false;
		return err;
	}
	mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
	iscan->timer_on = 1;
	return err;
}

/*
 * __brcmf_cfg80211_scan - common scan worker for cfg80211 scan requests
 * and internal IBSS scans (@request == NULL, @this_ssid used instead).
 *
 * Rejects the request with -EAGAIN while a scan is running/aborting or a
 * connect is in progress.  Uses incremental scan (iscan) for broadcast
 * requests when enabled; otherwise falls back to a one-shot BRCMF_C_SCAN,
 * directed when an SSID is given.  Body continues in the next chunk.
 */
static s32
__brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
		      struct cfg80211_scan_request *request,
		      struct cfg80211_ssid *this_ssid)
{
	struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
	struct cfg80211_ssid *ssids;
	struct brcmf_cfg80211_scan_req *sr = cfg_priv->scan_req_int;
	__le32 passive_scan;
	bool iscan_req;
	bool spec_scan;
	s32 err = 0;
	u32 SSID_len;

	if (test_bit(WL_STATUS_SCANNING, &cfg_priv->status)) {
		WL_ERR("Scanning already : status (%lu)\n", cfg_priv->status);
		return -EAGAIN;
	}
	if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status)) {
		WL_ERR("Scanning being aborted : status (%lu)\n",
		       cfg_priv->status);
		return -EAGAIN;
	}
	if (test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) {
		WL_ERR("Connecting : status (%lu)\n", cfg_priv->status);
		return -EAGAIN;
	}

	iscan_req = false;
	spec_scan = false;
	if (request) {
		/* scan bss */
		ssids = request->ssids;
		if (cfg_priv->iscan_on && (!ssids || !ssids->ssid_len))
			iscan_req = true;
	} else {
		/* scan in ibss */
		/* we don't do iscan in ibss */
		ssids = this_ssid;
	}

	cfg_priv->scan_request = request;
	set_bit(WL_STATUS_SCANNING, &cfg_priv->status);
	if (iscan_req) {
		err = brcmf_do_iscan(cfg_priv);
		if (!err)
			return err;
		else
			goto scan_out;
	} else {
		/* NOTE(review): ssids may be NULL here when request->ssids
		 * was NULL and iscan is disabled — verify against callers. */
		WL_SCAN("ssid \"%s\", ssid_len (%d)\n",
			ssids->ssid, ssids->ssid_len);
		memset(&sr->ssid_le, 0, sizeof(sr->ssid_le));
		SSID_len = min_t(u8, sizeof(sr->ssid_le.SSID), ssids->ssid_len);
		sr->ssid_le.SSID_len = cpu_to_le32(0);
		if (SSID_len) {
			memcpy(sr->ssid_le.SSID, ssids->ssid, SSID_len);
/* Remainder of __brcmf_cfg80211_scan (head in previous chunk): issue the
 * one-shot firmware scan, restoring MPC on failure. */
			sr->ssid_le.SSID_len = cpu_to_le32(SSID_len);
			spec_scan = true;
		} else {
			WL_SCAN("Broadcast scan\n");
		}

		passive_scan = cfg_priv->active_scan ? 0 : cpu_to_le32(1);
		err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN,
				      &passive_scan, sizeof(passive_scan));
		if (err) {
			WL_ERR("WLC_SET_PASSIVE_SCAN error (%d)\n", err);
			goto scan_out;
		}
		brcmf_set_mpc(ndev, 0);
		err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &sr->ssid_le,
				      sizeof(sr->ssid_le));
		if (err) {
			if (err == -EBUSY)
				WL_INFO("system busy : scan for \"%s\" "
					"canceled\n", sr->ssid_le.SSID);
			else
				WL_ERR("WLC_SCAN error (%d)\n", err);

			brcmf_set_mpc(ndev, 1);
			goto scan_out;
		}
	}

	return 0;

scan_out:
	/* roll back the SCANNING state set above */
	clear_bit(WL_STATUS_SCANNING, &cfg_priv->status);
	cfg_priv->scan_request = NULL;
	return err;
}

/*
 * brcmf_cfg80211_scan - cfg80211 .scan callback; thin wrapper that checks
 * the device is up and delegates to __brcmf_cfg80211_scan().
 */
static s32
brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
		    struct cfg80211_scan_request *request)
{
	s32 err = 0;

	WL_TRACE("Enter\n");

	if (!check_sys_up(wiphy))
		return -EIO;

	err = __brcmf_cfg80211_scan(wiphy, ndev, request, NULL);
	if (err)
		WL_ERR("scan error (%d)\n", err);

	WL_TRACE("Exit\n");
	return err;
}

/* brcmf_set_rts - program the firmware RTS threshold iovar. */
static s32 brcmf_set_rts(struct net_device *ndev, u32 rts_threshold)
{
	s32 err = 0;

	err = brcmf_dev_intvar_set(ndev, "rtsthresh", rts_threshold);
	if (err)
		WL_ERR("Error (%d)\n", err);

	return err;
}

/* brcmf_set_frag - program the firmware fragmentation threshold iovar. */
static s32 brcmf_set_frag(struct net_device *ndev, u32 frag_threshold)
{
	s32 err = 0;

	err = brcmf_dev_intvar_set(ndev, "fragthresh", frag_threshold);
	if (err)
		WL_ERR("Error (%d)\n", err);

	return err;
}

/* brcmf_set_retry - set long (@l true) or short retry limit;
 * continues in the next chunk. */
static s32 brcmf_set_retry(struct net_device *ndev, u32 retry, bool l)
{
	s32 err = 0;
	u32 cmd = (l ?
BRCM_SET_LRL : BRCM_SET_SRL); err = brcmf_exec_dcmd_u32(ndev, cmd, &retry); if (err) { WL_ERR("cmd (%d) , error (%d)\n", cmd, err); return err; } return err; } static s32 brcmf_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) { struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); struct net_device *ndev = cfg_to_ndev(cfg_priv); s32 err = 0; WL_TRACE("Enter\n"); if (!check_sys_up(wiphy)) return -EIO; if (changed & WIPHY_PARAM_RTS_THRESHOLD && (cfg_priv->conf->rts_threshold != wiphy->rts_threshold)) { cfg_priv->conf->rts_threshold = wiphy->rts_threshold; err = brcmf_set_rts(ndev, cfg_priv->conf->rts_threshold); if (!err) goto done; } if (changed & WIPHY_PARAM_FRAG_THRESHOLD && (cfg_priv->conf->frag_threshold != wiphy->frag_threshold)) { cfg_priv->conf->frag_threshold = wiphy->frag_threshold; err = brcmf_set_frag(ndev, cfg_priv->conf->frag_threshold); if (!err) goto done; } if (changed & WIPHY_PARAM_RETRY_LONG && (cfg_priv->conf->retry_long != wiphy->retry_long)) { cfg_priv->conf->retry_long = wiphy->retry_long; err = brcmf_set_retry(ndev, cfg_priv->conf->retry_long, true); if (!err) goto done; } if (changed & WIPHY_PARAM_RETRY_SHORT && (cfg_priv->conf->retry_short != wiphy->retry_short)) { cfg_priv->conf->retry_short = wiphy->retry_short; err = brcmf_set_retry(ndev, cfg_priv->conf->retry_short, false); if (!err) goto done; } done: WL_TRACE("Exit\n"); return err; } static void *brcmf_read_prof(struct brcmf_cfg80211_priv *cfg_priv, s32 item) { switch (item) { case WL_PROF_SEC: return &cfg_priv->profile->sec; case WL_PROF_BSSID: return &cfg_priv->profile->bssid; case WL_PROF_SSID: return &cfg_priv->profile->ssid; } WL_ERR("invalid item (%d)\n", item); return NULL; } static s32 brcmf_update_prof(struct brcmf_cfg80211_priv *cfg_priv, const struct brcmf_event_msg *e, void *data, s32 item) { s32 err = 0; struct brcmf_ssid *ssid; switch (item) { case WL_PROF_SSID: ssid = (struct brcmf_ssid *) data; memset(cfg_priv->profile->ssid.SSID, 0, 
/* Remainder of brcmf_update_prof (head in previous chunk). */
		       sizeof(cfg_priv->profile->ssid.SSID));
		memcpy(cfg_priv->profile->ssid.SSID,
		       ssid->SSID, ssid->SSID_len);
		cfg_priv->profile->ssid.SSID_len = ssid->SSID_len;
		break;
	case WL_PROF_BSSID:
		if (data)
			memcpy(cfg_priv->profile->bssid, data, ETH_ALEN);
		else
			memset(cfg_priv->profile->bssid, 0, ETH_ALEN);
		break;
	case WL_PROF_SEC:
		memcpy(&cfg_priv->profile->sec, data,
		       sizeof(cfg_priv->profile->sec));
		break;
	case WL_PROF_BEACONINT:
		cfg_priv->profile->beacon_interval = *(u16 *)data;
		break;
	case WL_PROF_DTIMPERIOD:
		cfg_priv->profile->dtim_period = *(u8 *)data;
		break;
	default:
		WL_ERR("unsupported item (%d)\n", item);
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

/* brcmf_init_prof - reset the cached connection profile to zero. */
static void brcmf_init_prof(struct brcmf_cfg80211_profile *prof)
{
	memset(prof, 0, sizeof(*prof));
}

/*
 * brcmf_ch_to_chanspec - append a 20 MHz chanspec for channel @ch to the
 * join parameters.  @ch == 0 means "no fixed channel" and leaves the
 * parameters untouched.  Band is chosen by channel number (<=14 => 2.4 GHz).
 */
static void brcmf_ch_to_chanspec(int ch,
	struct brcmf_join_params *join_params, size_t *join_params_size)
{
	u16 chanspec = 0;

	if (ch != 0) {
		if (ch <= CH_MAX_2G_CHANNEL)
			chanspec |= WL_CHANSPEC_BAND_2G;
		else
			chanspec |= WL_CHANSPEC_BAND_5G;

		chanspec |= WL_CHANSPEC_BW_20;
		chanspec |= WL_CHANSPEC_CTL_SB_NONE;

		/* grow the join request to include the assoc params +
		 * one chanspec entry */
		*join_params_size += BRCMF_ASSOC_PARAMS_FIXED_SIZE +
				     sizeof(u16);

		chanspec |= (ch & WL_CHANSPEC_CHAN_MASK);
		join_params->params_le.chanspec_list[0] = cpu_to_le16(chanspec);
		join_params->params_le.chanspec_num = cpu_to_le32(1);

		WL_CONN("join_params->params.chanspec_list[0]= %#X,"
			"channel %d, chanspec %#X\n",
			chanspec, ch, chanspec);
	}
}

/*
 * brcmf_link_down - if a link is up, send BRCMF_C_DISASSOC to the
 * firmware (stops roaming attempts) and clear the link_up flag.
 */
static void brcmf_link_down(struct brcmf_cfg80211_priv *cfg_priv)
{
	struct net_device *ndev = NULL;
	s32 err = 0;

	WL_TRACE("Enter\n");

	if (cfg_priv->link_up) {
		ndev = cfg_to_ndev(cfg_priv);
		WL_INFO("Call WLC_DISASSOC to stop excess roaming\n ");
		err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, NULL, 0);
		if (err)
			WL_ERR("WLC_DISASSOC failed (%d)\n", err);
		cfg_priv->link_up = false;
	}

	WL_TRACE("Exit\n");
}

/* brcmf_cfg80211_join_ibss - cfg80211 .join_ibss callback;
 * continues in the next chunk. */
static s32
brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
			 struct cfg80211_ibss_params *params)
{
	struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
	struct
/* Body of brcmf_cfg80211_join_ibss (signature in previous chunk): logs
 * the requested IBSS parameters, configures WEP privacy and the beacon
 * period, then builds the join request.  Requires an SSID; a missing one
 * is rejected with -EOPNOTSUPP.  Continues in the next chunk. */
	brcmf_join_params join_params;
	size_t join_params_size = 0;
	s32 err = 0;
	s32 wsec = 0;
	s32 bcnprd;
	struct brcmf_ssid ssid;

	WL_TRACE("Enter\n");
	if (!check_sys_up(wiphy))
		return -EIO;

	if (params->ssid)
		WL_CONN("SSID: %s\n", params->ssid);
	else {
		WL_CONN("SSID: NULL, Not supported\n");
		return -EOPNOTSUPP;
	}

	set_bit(WL_STATUS_CONNECTING, &cfg_priv->status);

	if (params->bssid)
		WL_CONN("BSSID: %02X %02X %02X %02X %02X %02X\n",
		params->bssid[0], params->bssid[1], params->bssid[2],
		params->bssid[3], params->bssid[4], params->bssid[5]);
	else
		WL_CONN("No BSSID specified\n");

	if (params->channel)
		WL_CONN("channel: %d\n", params->channel->center_freq);
	else
		WL_CONN("no channel specified\n");

	if (params->channel_fixed)
		WL_CONN("fixed channel required\n");
	else
		WL_CONN("no fixed channel required\n");

	if (params->ie && params->ie_len)
		WL_CONN("ie len: %d\n", params->ie_len);
	else
		WL_CONN("no ie specified\n");

	if (params->beacon_interval)
		WL_CONN("beacon interval: %d\n", params->beacon_interval);
	else
		WL_CONN("no beacon interval specified\n");

	if (params->basic_rates)
		WL_CONN("basic rates: %08X\n", params->basic_rates);
	else
		WL_CONN("no basic rates specified\n");

	if (params->privacy)
		WL_CONN("privacy required\n");
	else
		WL_CONN("no privacy required\n");

	/* Configure Privacy for starter */
	if (params->privacy)
		wsec |= WEP_ENABLED;

	err = brcmf_dev_intvar_set(ndev, "wsec", wsec);
	if (err) {
		WL_ERR("wsec failed (%d)\n", err);
		goto done;
	}

	/* Configure Beacon Interval for starter (100 TU default) */
	if (params->beacon_interval)
		bcnprd = params->beacon_interval;
	else
		bcnprd = 100;

	err = brcmf_exec_dcmd_u32(ndev, BRCM_SET_BCNPRD, &bcnprd);
	if (err) {
		WL_ERR("WLC_SET_BCNPRD failed (%d)\n", err);
		goto done;
	}

	/* Configure required join parameter */
	memset(&join_params, 0, sizeof(struct brcmf_join_params));

	/* SSID (capped at 32, the 802.11 maximum SSID length) */
	ssid.SSID_len = min_t(u32, params->ssid_len, 32);
	memcpy(ssid.SSID, params->ssid, ssid.SSID_len);
	memcpy(join_params.ssid_le.SSID, params->ssid, ssid.SSID_len);
	join_params.ssid_le.SSID_len =
/* Remainder of brcmf_cfg80211_join_ibss (head in previous chunks):
 * fill BSSID (broadcast when unspecified), optional fixed chanspec,
 * program the channel, then issue BRCMF_C_SET_SSID to join/create. */
		cpu_to_le32(ssid.SSID_len);
	join_params_size = sizeof(join_params.ssid_le);
	brcmf_update_prof(cfg_priv, NULL, &ssid, WL_PROF_SSID);

	/* BSSID */
	if (params->bssid) {
		memcpy(join_params.params_le.bssid, params->bssid, ETH_ALEN);
		join_params_size = sizeof(join_params.ssid_le) +
				   BRCMF_ASSOC_PARAMS_FIXED_SIZE;
	} else {
		memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN);
	}
	brcmf_update_prof(cfg_priv, NULL,
			  &join_params.params_le.bssid, WL_PROF_BSSID);

	/* Channel */
	if (params->channel) {
		u32 target_channel;

		cfg_priv->channel =
			ieee80211_frequency_to_channel(
				params->channel->center_freq);
		if (params->channel_fixed) {
			/* adding chanspec */
			brcmf_ch_to_chanspec(cfg_priv->channel,
				&join_params, &join_params_size);
		}

		/* set channel for starter */
		target_channel = cfg_priv->channel;
		err = brcmf_exec_dcmd_u32(ndev, BRCM_SET_CHANNEL,
					  &target_channel);
		if (err) {
			WL_ERR("WLC_SET_CHANNEL failed (%d)\n", err);
			goto done;
		}
	} else
		cfg_priv->channel = 0;

	cfg_priv->ibss_starter = false;

	err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID, &join_params,
			      join_params_size);
	if (err) {
		WL_ERR("WLC_SET_SSID failed (%d)\n", err);
		goto done;
	}

done:
	if (err)
		clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
	WL_TRACE("Exit\n");
	return err;
}

/*
 * brcmf_cfg80211_leave_ibss - cfg80211 .leave_ibss callback; simply
 * tears the link down via brcmf_link_down().
 */
static s32
brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
{
	struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
	s32 err = 0;

	WL_TRACE("Enter\n");
	if (!check_sys_up(wiphy))
		return -EIO;

	brcmf_link_down(cfg_priv);

	WL_TRACE("Exit\n");
	return err;
}

/*
 * brcmf_set_wpa_version - map the connect request's WPA version to the
 * firmware "wpa_auth" iovar and cache it in the security profile.
 */
static s32 brcmf_set_wpa_version(struct net_device *ndev,
				 struct cfg80211_connect_params *sme)
{
	struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
	struct brcmf_cfg80211_security *sec;
	s32 val = 0;
	s32 err = 0;

	if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1)
		val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
	else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)
		val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
	else
		val = WPA_AUTH_DISABLED;
	WL_CONN("setting wpa_auth to 0x%0x\n", val);
	err = brcmf_dev_intvar_set(ndev, "wpa_auth", val);
	if (err) {
		WL_ERR("set wpa_auth failed (%d)\n", err);
		return err;
	}
	sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
	sec->wpa_versions = sme->crypto.wpa_versions;
	return err;
}

/*
 * brcmf_set_auth_type - map the cfg80211 auth type to the firmware
 * "auth" iovar (0 = open, 1 = shared key, 2 = automatic).  NETWORK_EAP
 * deliberately falls through to the automatic default.
 */
static s32 brcmf_set_auth_type(struct net_device *ndev,
			       struct cfg80211_connect_params *sme)
{
	struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
	struct brcmf_cfg80211_security *sec;
	s32 val = 0;
	s32 err = 0;

	switch (sme->auth_type) {
	case NL80211_AUTHTYPE_OPEN_SYSTEM:
		val = 0;
		WL_CONN("open system\n");
		break;
	case NL80211_AUTHTYPE_SHARED_KEY:
		val = 1;
		WL_CONN("shared key\n");
		break;
	case NL80211_AUTHTYPE_AUTOMATIC:
		val = 2;
		WL_CONN("automatic\n");
		break;
	case NL80211_AUTHTYPE_NETWORK_EAP:
		WL_CONN("network eap\n");
		/* fallthrough */
	default:
		val = 2;
		WL_ERR("invalid auth type (%d)\n", sme->auth_type);
		break;
	}

	err = brcmf_dev_intvar_set(ndev, "auth", val);
	if (err) {
		WL_ERR("set auth failed (%d)\n", err);
		return err;
	}
	sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
	sec->auth_type = sme->auth_type;
	return err;
}

/*
 * brcmf_set_set_cipher - translate pairwise/group cipher suites to the
 * firmware "wsec" bitmask and cache them.  Continues in the next chunk.
 */
static s32
brcmf_set_set_cipher(struct net_device *ndev,
		     struct cfg80211_connect_params *sme)
{
	struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
	struct brcmf_cfg80211_security *sec;
	s32 pval = 0;
	s32 gval = 0;
	s32 err = 0;

	if (sme->crypto.n_ciphers_pairwise) {
		switch (sme->crypto.ciphers_pairwise[0]) {
		case WLAN_CIPHER_SUITE_WEP40:
		case WLAN_CIPHER_SUITE_WEP104:
			pval = WEP_ENABLED;
			break;
		case WLAN_CIPHER_SUITE_TKIP:
			pval = TKIP_ENABLED;
			break;
		case WLAN_CIPHER_SUITE_CCMP:
			pval = AES_ENABLED;
			break;
		case WLAN_CIPHER_SUITE_AES_CMAC:
			pval = AES_ENABLED;
			break;
		default:
			WL_ERR("invalid cipher pairwise (%d)\n",
			       sme->crypto.ciphers_pairwise[0]);
			return -EINVAL;
		}
	}
	if (sme->crypto.cipher_group) {
		switch (sme->crypto.cipher_group) {
		case WLAN_CIPHER_SUITE_WEP40:
		case WLAN_CIPHER_SUITE_WEP104:
			gval = WEP_ENABLED;
			break;
		case WLAN_CIPHER_SUITE_TKIP:
			gval = TKIP_ENABLED;
			break;
		case WLAN_CIPHER_SUITE_CCMP:
			gval = AES_ENABLED;
			break;
/* Remainder of brcmf_set_set_cipher (head in previous chunk). */
		case WLAN_CIPHER_SUITE_AES_CMAC:
			gval = AES_ENABLED;
			break;
		default:
			WL_ERR("invalid cipher group (%d)\n",
			       sme->crypto.cipher_group);
			return -EINVAL;
		}
	}

	WL_CONN("pval (%d) gval (%d)\n", pval, gval);
	err = brcmf_dev_intvar_set(ndev, "wsec", pval | gval);
	if (err) {
		WL_ERR("error (%d)\n", err);
		return err;
	}

	sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
	sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0];
	sec->cipher_group = sme->crypto.cipher_group;

	return err;
}

/*
 * brcmf_set_key_mgmt - refine the firmware "wpa_auth" iovar using the
 * first AKM suite of the connect request (802.1X vs PSK), preserving the
 * WPA/WPA2 generation previously selected by brcmf_set_wpa_version().
 * Caches the AKM in the security profile.
 */
static s32
brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
{
	struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
	struct brcmf_cfg80211_security *sec;
	s32 val = 0;
	s32 err = 0;

	if (sme->crypto.n_akm_suites) {
		err = brcmf_dev_intvar_get(ndev, "wpa_auth", &val);
		if (err) {
			WL_ERR("could not get wpa_auth (%d)\n", err);
			return err;
		}
		if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
			switch (sme->crypto.akm_suites[0]) {
			case WLAN_AKM_SUITE_8021X:
				val = WPA_AUTH_UNSPECIFIED;
				break;
			case WLAN_AKM_SUITE_PSK:
				val = WPA_AUTH_PSK;
				break;
			default:
				WL_ERR("invalid cipher group (%d)\n",
				       sme->crypto.cipher_group);
				return -EINVAL;
			}
		} else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
			switch (sme->crypto.akm_suites[0]) {
			case WLAN_AKM_SUITE_8021X:
				val = WPA2_AUTH_UNSPECIFIED;
				break;
			case WLAN_AKM_SUITE_PSK:
				val = WPA2_AUTH_PSK;
				break;
			default:
				WL_ERR("invalid cipher group (%d)\n",
				       sme->crypto.cipher_group);
				return -EINVAL;
			}
		}

		WL_CONN("setting wpa_auth to %d\n", val);
		err = brcmf_dev_intvar_set(ndev, "wpa_auth", val);
		if (err) {
			WL_ERR("could not set wpa_auth (%d)\n", err);
			return err;
		}
	}
	sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
	sec->wpa_auth = sme->crypto.akm_suites[0];

	return err;
}

/*
 * brcmf_set_wep_sharedkey - install a static WEP key from the connect
 * request (non-WPA connections only) and switch the firmware to
 * shared-key auth when the caller asked for open.  Continues in the
 * next chunk.
 */
static s32
brcmf_set_wep_sharedkey(struct net_device *ndev,
			struct cfg80211_connect_params *sme)
{
	struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
	struct brcmf_cfg80211_security *sec;
	struct brcmf_wsec_key key;
	s32 val;
	s32 err = 0;

	WL_CONN("key len (%d)\n", sme->key_len);

	if
/* Remainder of brcmf_set_wep_sharedkey (head in previous chunk). */
	   (sme->key_len == 0)
		return 0;

	sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
	WL_CONN("wpa_versions 0x%x cipher_pairwise 0x%x\n",
		sec->wpa_versions, sec->cipher_pairwise);

	/* WPA/WPA2 key material is handled by the supplicant, not here */
	if (sec->wpa_versions & (NL80211_WPA_VERSION_1 |
				 NL80211_WPA_VERSION_2))
		return 0;

	if (sec->cipher_pairwise &
	    (WLAN_CIPHER_SUITE_WEP40 | WLAN_CIPHER_SUITE_WEP104)) {
		memset(&key, 0, sizeof(key));
		key.len = (u32) sme->key_len;
		key.index = (u32) sme->key_idx;
		if (key.len > sizeof(key.data)) {
			WL_ERR("Too long key length (%u)\n", key.len);
			return -EINVAL;
		}
		memcpy(key.data, sme->key, key.len);
		key.flags = BRCMF_PRIMARY_KEY;
		switch (sec->cipher_pairwise) {
		case WLAN_CIPHER_SUITE_WEP40:
			key.algo = CRYPTO_ALGO_WEP1;
			break;
		case WLAN_CIPHER_SUITE_WEP104:
			key.algo = CRYPTO_ALGO_WEP128;
			break;
		default:
			WL_ERR("Invalid algorithm (%d)\n",
			       sme->crypto.ciphers_pairwise[0]);
			return -EINVAL;
		}
		/* Set the new key/index */
		WL_CONN("key length (%d) key index (%d) algo (%d)\n",
			key.len, key.index, key.algo);
		/* NOTE(review): this logs raw key material — debug-only
		 * macro presumably, but worth auditing. */
		WL_CONN("key \"%s\"\n", key.data);
		err = send_key_to_dongle(ndev, &key);
		if (err)
			return err;

		if (sec->auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM) {
			WL_CONN("set auth_type to shared key\n");
			val = 1;	/* shared key */
			err = brcmf_dev_intvar_set(ndev, "auth", val);
			if (err) {
				WL_ERR("set auth failed (%d)\n", err);
				return err;
			}
		}
	}
	return err;
}

/*
 * brcmf_cfg80211_connect - cfg80211 .connect callback: configure WPA
 * version, auth type, ciphers, AKM and optional static WEP key, then
 * issue BRCMF_C_SET_SSID to start the association.  Continues in the
 * next chunk.
 */
static s32
brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
		       struct cfg80211_connect_params *sme)
{
	struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
	struct ieee80211_channel *chan = sme->channel;
	struct brcmf_join_params join_params;
	size_t join_params_size;
	struct brcmf_ssid ssid;
	s32 err = 0;

	WL_TRACE("Enter\n");
	if (!check_sys_up(wiphy))
		return -EIO;

	if (!sme->ssid) {
		WL_ERR("Invalid ssid\n");
		return -EOPNOTSUPP;
	}

	set_bit(WL_STATUS_CONNECTING, &cfg_priv->status);

	if (chan) {
		cfg_priv->channel =
			ieee80211_frequency_to_channel(chan->center_freq);
		WL_CONN("channel (%d), center_req (%d)\n",
			cfg_priv->channel, chan->center_freq);
	} else
/* Remainder of brcmf_cfg80211_connect (head in previous chunk). */
		cfg_priv->channel = 0;

	WL_INFO("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len);

	err = brcmf_set_wpa_version(ndev, sme);
	if (err) {
		WL_ERR("wl_set_wpa_version failed (%d)\n", err);
		goto done;
	}

	err = brcmf_set_auth_type(ndev, sme);
	if (err) {
		WL_ERR("wl_set_auth_type failed (%d)\n", err);
		goto done;
	}

	err = brcmf_set_set_cipher(ndev, sme);
	if (err) {
		WL_ERR("wl_set_set_cipher failed (%d)\n", err);
		goto done;
	}

	err = brcmf_set_key_mgmt(ndev, sme);
	if (err) {
		WL_ERR("wl_set_key_mgmt failed (%d)\n", err);
		goto done;
	}

	err = brcmf_set_wep_sharedkey(ndev, sme);
	if (err) {
		WL_ERR("brcmf_set_wep_sharedkey failed (%d)\n", err);
		goto done;
	}

	memset(&join_params, 0, sizeof(join_params));
	join_params_size = sizeof(join_params.ssid_le);

	ssid.SSID_len = min_t(u32, sizeof(ssid.SSID), (u32)sme->ssid_len);
	memcpy(&join_params.ssid_le.SSID, sme->ssid, ssid.SSID_len);
	memcpy(&ssid.SSID, sme->ssid, ssid.SSID_len);
	join_params.ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len);
	brcmf_update_prof(cfg_priv, NULL, &ssid, WL_PROF_SSID);

	memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN);

	if (ssid.SSID_len < IEEE80211_MAX_SSID_LEN)
		WL_CONN("ssid \"%s\", len (%d)\n",
		       ssid.SSID, ssid.SSID_len);

	brcmf_ch_to_chanspec(cfg_priv->channel,
			     &join_params, &join_params_size);
	err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID,
			   &join_params, join_params_size);
	if (err)
		WL_ERR("WLC_SET_SSID failed (%d)\n", err);

done:
	if (err)
		clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
	WL_TRACE("Exit\n");
	return err;
}

/*
 * brcmf_cfg80211_disconnect - cfg80211 .disconnect callback; sends
 * BRCMF_C_DISASSOC with the current BSSID and @reason_code, clearing
 * CONNECTED/link state regardless of the firmware result.
 */
static s32
brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
			  u16 reason_code)
{
	struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
	struct brcmf_scb_val_le scbval;
	s32 err = 0;

	WL_TRACE("Enter. Reason code = %d\n", reason_code);
	if (!check_sys_up(wiphy))
		return -EIO;

	clear_bit(WL_STATUS_CONNECTED, &cfg_priv->status);

	memcpy(&scbval.ea, brcmf_read_prof(cfg_priv, WL_PROF_BSSID), ETH_ALEN);
	scbval.val = cpu_to_le32(reason_code);
	err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, &scbval,
			      sizeof(struct brcmf_scb_val_le));
	if (err)
		WL_ERR("error (%d)\n", err);

	cfg_priv->link_up = false;

	WL_TRACE("Exit\n");
	return err;
}

/*
 * brcmf_cfg80211_set_tx_power - cfg80211 .set_tx_power callback.
 * Converts @mbm to dBm, ensures the radio soft-switch state, caps the
 * value at 0xffff and programs the "qtxpower" iovar in quarter-dBm.
 */
static s32
brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
			    enum nl80211_tx_power_setting type, s32 mbm)
{
	struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
	struct net_device *ndev = cfg_to_ndev(cfg_priv);
	u16 txpwrmw;
	s32 err = 0;
	s32 disable = 0;
	s32 dbm = MBM_TO_DBM(mbm);

	WL_TRACE("Enter\n");
	if (!check_sys_up(wiphy))
		return -EIO;

	switch (type) {
	case NL80211_TX_POWER_AUTOMATIC:
		break;
	case NL80211_TX_POWER_LIMITED:
	case NL80211_TX_POWER_FIXED:
		if (dbm < 0) {
			WL_ERR("TX_POWER_FIXED - dbm is negative\n");
			err = -EINVAL;
			goto done;
		}
		break;
	}
	/* Make sure radio is off or on as far as software is concerned */
	disable = WL_RADIO_SW_DISABLE << 16;
	err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_RADIO, &disable);
	if (err)
		WL_ERR("WLC_SET_RADIO error (%d)\n", err);

	if (dbm > 0xffff)
		txpwrmw = 0xffff;
	else
		txpwrmw = (u16) dbm;
	err = brcmf_dev_intvar_set(ndev, "qtxpower",
			(s32) (brcmf_mw_to_qdbm(txpwrmw)));
	if (err)
		WL_ERR("qtxpower error (%d)\n", err);
	cfg_priv->conf->tx_power = dbm;

done:
	WL_TRACE("Exit\n");
	return err;
}

/*
 * brcmf_cfg80211_get_tx_power - cfg80211 .get_tx_power callback; reads
 * "qtxpower", strips the override flag and converts back for @dbm.
 * NOTE(review): the conversion mirrors set_tx_power's mw/qdbm helpers —
 * confirm units against the firmware iovar definition.
 */
static s32 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
{
	struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
	struct net_device *ndev = cfg_to_ndev(cfg_priv);
	s32 txpwrdbm;
	u8 result;
	s32 err = 0;

	WL_TRACE("Enter\n");
	if (!check_sys_up(wiphy))
		return -EIO;

	err = brcmf_dev_intvar_get(ndev, "qtxpower", &txpwrdbm);
	if (err) {
		WL_ERR("error (%d)\n", err);
		goto done;
	}

	result = (u8) (txpwrdbm & ~WL_TXPWR_OVERRIDE);
	*dbm = (s32) brcmf_qdbm_to_mw(result);

done:
	WL_TRACE("Exit\n");
	return err;
}

/* continues in the next chunk */
static s32
/* brcmf_cfg80211_config_default_key - cfg80211 .set_default_key
 * callback (signature keyword on previous chunk).  If WEP is enabled,
 * makes @key_idx the primary transmit key via BRCMF_C_SET_KEY_PRIMARY. */
brcmf_cfg80211_config_default_key(struct wiphy *wiphy,
				  struct net_device *ndev,
				  u8 key_idx, bool unicast, bool multicast)
{
	u32 index;
	u32 wsec;
	s32 err = 0;

	WL_TRACE("Enter\n");
	WL_CONN("key index (%d)\n", key_idx);
	if (!check_sys_up(wiphy))
		return -EIO;

	err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_WSEC, &wsec);
	if (err) {
		WL_ERR("WLC_GET_WSEC error (%d)\n", err);
		goto done;
	}

	if (wsec & WEP_ENABLED) {
		/* Just select a new current key */
		index = key_idx;
		err = brcmf_exec_dcmd_u32(ndev,
					  BRCMF_C_SET_KEY_PRIMARY, &index);
		if (err)
			WL_ERR("error (%d)\n", err);
	}
done:
	WL_TRACE("Exit\n");
	return err;
}

/*
 * brcmf_add_keyext - install a per-peer (pairwise) key in the dongle.
 * A zero-length key deletes the entry.  For TKIP the TX/RX MIC halves
 * are swapped (supplicant byte order differs from firmware).  Continues
 * in the next chunk.
 */
static s32
brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
	      u8 key_idx, const u8 *mac_addr, struct key_params *params)
{
	struct brcmf_wsec_key key;
	struct brcmf_wsec_key_le key_le;
	s32 err = 0;

	memset(&key, 0, sizeof(key));
	key.index = (u32) key_idx;
	/* Instead of bcast for ea address for default wep keys,
		 driver needs it to be Null */
	if (!is_multicast_ether_addr(mac_addr))
		memcpy((char *)&key.ea, (void *)mac_addr, ETH_ALEN);
	key.len = (u32) params->key_len;
	/* check for key index change */
	if (key.len == 0) {
		/* key delete */
		err = send_key_to_dongle(ndev, &key);
		if (err)
			return err;
	} else {
		if (key.len > sizeof(key.data)) {
			WL_ERR("Invalid key length (%d)\n", key.len);
			return -EINVAL;
		}

		WL_CONN("Setting the key index %d\n", key.index);
		memcpy(key.data, params->key, key.len);

		if (params->cipher == WLAN_CIPHER_SUITE_TKIP) {
			u8 keybuf[8];
			/* swap TKIP TX and RX MIC keys (bytes 16-23/24-31) */
			memcpy(keybuf, &key.data[24], sizeof(keybuf));
			memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
			memcpy(&key.data[16], keybuf, sizeof(keybuf));
		}

		/* if IW_ENCODE_EXT_RX_SEQ_VALID set */
		if (params->seq && params->seq_len == 6) {
			/* rx iv */
			u8 *ivptr;
			ivptr = (u8 *) params->seq;
			key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
			    (ivptr[3] << 8) | ivptr[2];
			key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
			key.iv_initialized = true;
		}

		switch (params->cipher) {
		case WLAN_CIPHER_SUITE_WEP40:
			key.algo = CRYPTO_ALGO_WEP1;
/* Remainder of brcmf_add_keyext's cipher switch (head in previous
 * chunk), then the key is byte-swapped and pushed to the dongle after
 * pending 802.1x frames drain. */
			WL_CONN("WLAN_CIPHER_SUITE_WEP40\n");
			break;
		case WLAN_CIPHER_SUITE_WEP104:
			key.algo = CRYPTO_ALGO_WEP128;
			WL_CONN("WLAN_CIPHER_SUITE_WEP104\n");
			break;
		case WLAN_CIPHER_SUITE_TKIP:
			key.algo = CRYPTO_ALGO_TKIP;
			WL_CONN("WLAN_CIPHER_SUITE_TKIP\n");
			break;
		case WLAN_CIPHER_SUITE_AES_CMAC:
			key.algo = CRYPTO_ALGO_AES_CCM;
			WL_CONN("WLAN_CIPHER_SUITE_AES_CMAC\n");
			break;
		case WLAN_CIPHER_SUITE_CCMP:
			key.algo = CRYPTO_ALGO_AES_CCM;
			WL_CONN("WLAN_CIPHER_SUITE_CCMP\n");
			break;
		default:
			WL_ERR("Invalid cipher (0x%x)\n", params->cipher);
			return -EINVAL;
		}
		convert_key_from_CPU(&key, &key_le);

		brcmf_netdev_wait_pend8021x(ndev);
		err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_KEY, &key_le,
				      sizeof(key_le));
		if (err) {
			WL_ERR("WLC_SET_KEY error (%d)\n", err);
			return err;
		}
	}
	return err;
}

/*
 * brcmf_cfg80211_add_key - cfg80211 .add_key callback.  Per-peer keys
 * (mac_addr set) go through brcmf_add_keyext(); group/default keys are
 * installed here, then "wsec" is updated and shared-key auth selected.
 * Continues in the next chunk.
 */
static s32
brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
		    u8 key_idx, bool pairwise, const u8 *mac_addr,
		    struct key_params *params)
{
	struct brcmf_wsec_key key;
	s32 val;
	s32 wsec;
	s32 err = 0;
	u8 keybuf[8];

	WL_TRACE("Enter\n");
	WL_CONN("key index (%d)\n", key_idx);
	if (!check_sys_up(wiphy))
		return -EIO;

	if (mac_addr) {
		WL_TRACE("Exit");
		return brcmf_add_keyext(wiphy, ndev, key_idx, mac_addr, params);
	}
	memset(&key, 0, sizeof(key));

	key.len = (u32) params->key_len;
	key.index = (u32) key_idx;

	if (key.len > sizeof(key.data)) {
		WL_ERR("Too long key length (%u)\n", key.len);
		err = -EINVAL;
		goto done;
	}
	memcpy(key.data, params->key, key.len);

	key.flags = BRCMF_PRIMARY_KEY;
	switch (params->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		key.algo = CRYPTO_ALGO_WEP1;
		WL_CONN("WLAN_CIPHER_SUITE_WEP40\n");
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key.algo = CRYPTO_ALGO_WEP128;
		WL_CONN("WLAN_CIPHER_SUITE_WEP104\n");
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		/* same TX/RX MIC swap as brcmf_add_keyext() */
		memcpy(keybuf, &key.data[24], sizeof(keybuf));
		memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
		memcpy(&key.data[16], keybuf, sizeof(keybuf));
		key.algo = CRYPTO_ALGO_TKIP;
		WL_CONN("WLAN_CIPHER_SUITE_TKIP\n");
		break;
	case WLAN_CIPHER_SUITE_AES_CMAC:
/* Remainder of brcmf_cfg80211_add_key (head in previous chunk). */
		key.algo = CRYPTO_ALGO_AES_CCM;
		WL_CONN("WLAN_CIPHER_SUITE_AES_CMAC\n");
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key.algo = CRYPTO_ALGO_AES_CCM;
		WL_CONN("WLAN_CIPHER_SUITE_CCMP\n");
		break;
	default:
		WL_ERR("Invalid cipher (0x%x)\n", params->cipher);
		err = -EINVAL;
		goto done;
	}

	err = send_key_to_dongle(ndev, &key); /* Set the new key/index */
	if (err)
		goto done;

	val = WEP_ENABLED;
	err = brcmf_dev_intvar_get(ndev, "wsec", &wsec);
	if (err) {
		WL_ERR("get wsec error (%d)\n", err);
		goto done;
	}
	wsec &= ~(WEP_ENABLED);
	wsec |= val;
	err = brcmf_dev_intvar_set(ndev, "wsec", wsec);
	if (err) {
		WL_ERR("set wsec error (%d)\n", err);
		goto done;
	}

	val = 1;	/* assume shared key. otherwise 0 */
	err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AUTH, &val);
	if (err)
		WL_ERR("WLC_SET_AUTH error (%d)\n", err);
done:
	WL_TRACE("Exit\n");
	return err;
}

/*
 * brcmf_cfg80211_del_key - cfg80211 .del_key callback.  Removes the key
 * at @key_idx (CRYPTO_ALGO_OFF), clears the WEP bit in "wsec" and resets
 * auth to open.  All firmware errors are reported as -EAGAIN because
 * they commonly occur during DISASSOC and should not be fatal.
 */
static s32
brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
		    u8 key_idx, bool pairwise, const u8 *mac_addr)
{
	struct brcmf_wsec_key key;
	s32 err = 0;
	s32 val;
	s32 wsec;

	WL_TRACE("Enter\n");
	if (!check_sys_up(wiphy))
		return -EIO;

	memset(&key, 0, sizeof(key));

	key.index = (u32) key_idx;
	key.flags = BRCMF_PRIMARY_KEY;
	key.algo = CRYPTO_ALGO_OFF;

	WL_CONN("key index (%d)\n", key_idx);

	/* Set the new key/index */
	err = send_key_to_dongle(ndev, &key);
	if (err) {
		if (err == -EINVAL) {
			if (key.index >= DOT11_MAX_DEFAULT_KEYS)
				/* we ignore this key index in this case */
				WL_ERR("invalid key index (%d)\n", key_idx);
		}
		/* Ignore this error, may happen during DISASSOC */
		err = -EAGAIN;
		goto done;
	}

	val = 0;
	err = brcmf_dev_intvar_get(ndev, "wsec", &wsec);
	if (err) {
		WL_ERR("get wsec error (%d)\n", err);
		/* Ignore this error, may happen during DISASSOC */
		err = -EAGAIN;
		goto done;
	}
	wsec &= ~(WEP_ENABLED);
	wsec |= val;
	err = brcmf_dev_intvar_set(ndev, "wsec", wsec);
	if (err) {
		WL_ERR("set wsec error (%d)\n", err);
		/* Ignore this error, may happen during DISASSOC */
		err = -EAGAIN;
		goto done;
	}

	val = 0;	/* assume open key. otherwise 1 */
	err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AUTH, &val);
	if (err) {
		WL_ERR("WLC_SET_AUTH error (%d)\n", err);
		/* Ignore this error, may happen during DISASSOC */
		err = -EAGAIN;
	}
done:
	WL_TRACE("Exit\n");
	return err;
}

/*
 * brcmf_cfg80211_get_key - cfg80211 .get_key callback.  Does not return
 * key material; only reconstructs the cipher from the current "wsec"
 * state (and, for WEP, the cached pairwise cipher) and reports it via
 * @callback.
 */
static s32
brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
		    u8 key_idx, bool pairwise, const u8 *mac_addr,
		    void *cookie,
		    void (*callback) (void *cookie, struct key_params * params))
{
	struct key_params params;
	struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
	struct brcmf_cfg80211_security *sec;
	s32 wsec;
	s32 err = 0;

	WL_TRACE("Enter\n");
	WL_CONN("key index (%d)\n", key_idx);
	if (!check_sys_up(wiphy))
		return -EIO;

	memset(&params, 0, sizeof(params));

	err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_WSEC, &wsec);
	if (err) {
		WL_ERR("WLC_GET_WSEC error (%d)\n", err);
		/* Ignore this error, may happen during DISASSOC */
		err = -EAGAIN;
		goto done;
	}
	switch (wsec) {
	case WEP_ENABLED:
		sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
		if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
			params.cipher = WLAN_CIPHER_SUITE_WEP40;
			WL_CONN("WLAN_CIPHER_SUITE_WEP40\n");
		} else if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP104) {
			params.cipher = WLAN_CIPHER_SUITE_WEP104;
			WL_CONN("WLAN_CIPHER_SUITE_WEP104\n");
		}
		break;
	case TKIP_ENABLED:
		params.cipher = WLAN_CIPHER_SUITE_TKIP;
		WL_CONN("WLAN_CIPHER_SUITE_TKIP\n");
		break;
	case AES_ENABLED:
		params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
		WL_CONN("WLAN_CIPHER_SUITE_AES_CMAC\n");
		break;
	default:
		WL_ERR("Invalid algo (0x%x)\n", wsec);
		err = -EINVAL;
		goto done;
	}
	callback(cookie, &params);

done:
	WL_TRACE("Exit\n");
	return err;
}

/* brcmf_cfg80211_config_default_mgmt_key - not implemented by firmware */
static s32
brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
				    struct net_device *ndev, u8 key_idx)
{
	WL_INFO("Not supported\n");

	return -EOPNOTSUPP;
}

/* brcmf_cfg80211_get_station - cfg80211 .get_station callback;
 * continues in the next chunk. */
static s32
brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
			u8 *mac, struct station_info *sinfo)
{
	struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
	struct brcmf_scb_val_le
/* Body of brcmf_cfg80211_get_station (signature in previous chunk):
 * only the current BSSID peer is supported; reports TX bitrate and,
 * when connected, RSSI. */
				scb_val;
	int rssi;
	s32 rate;
	s32 err = 0;
	u8 *bssid = brcmf_read_prof(cfg_priv, WL_PROF_BSSID);

	WL_TRACE("Enter\n");
	if (!check_sys_up(wiphy))
		return -EIO;

	if (memcmp(mac, bssid, ETH_ALEN)) {
		WL_ERR("Wrong Mac address cfg_mac-%X:%X:%X:%X:%X:%X"
			"wl_bssid-%X:%X:%X:%X:%X:%X\n",
			mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
			bssid[0], bssid[1], bssid[2], bssid[3],
			bssid[4], bssid[5]);
		err = -ENOENT;
		goto done;
	}

	/* Report the current tx rate (firmware units of 500 kbit/s) */
	err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_RATE, &rate);
	if (err) {
		WL_ERR("Could not get rate (%d)\n", err);
	} else {
		sinfo->filled |= STATION_INFO_TX_BITRATE;
		sinfo->txrate.legacy = rate * 5;	/* 100 kbit/s units */
		WL_CONN("Rate %d Mbps\n", rate / 2);
	}

	if (test_bit(WL_STATUS_CONNECTED, &cfg_priv->status)) {
		scb_val.val = cpu_to_le32(0);
		err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_RSSI, &scb_val,
				      sizeof(struct brcmf_scb_val_le));
		/* NOTE(review): on error the zero-initialized scb_val is
		 * still reported as signal; later mainline bails out here */
		if (err)
			WL_ERR("Could not get rssi (%d)\n", err);

		rssi = le32_to_cpu(scb_val.val);
		sinfo->filled |= STATION_INFO_SIGNAL;
		sinfo->signal = rssi;
		WL_CONN("RSSI %d dBm\n", rssi);
	}

done:
	WL_TRACE("Exit\n");
	return err;
}

/*
 * brcmf_cfg80211_set_power_mgmt - cfg80211 .set_power_mgmt callback.
 * Caches the preference in cfg_priv->pwr_save so it can be applied at
 * dongle init when the request arrives before the device is ready.
 * Continues in the next chunk.
 */
static s32
brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
			   bool enabled, s32 timeout)
{
	s32 pm;
	s32 err = 0;
	struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);

	WL_TRACE("Enter\n");

	/*
	 * Powersave enable/disable request is coming from the
	 * cfg80211 even before the interface is up. In that
	 * scenario, driver will be storing the power save
	 * preference in cfg_priv struct to apply this to
	 * FW later while initializing the dongle
	 */
	cfg_priv->pwr_save = enabled;
	if (!test_bit(WL_STATUS_READY, &cfg_priv->status)) {
		WL_INFO("Device is not ready,"
			"storing the value in cfg_priv struct\n");
		goto done;
	}

	pm = enabled ? PM_FAST : PM_OFF;
	WL_INFO("power save %s\n", (pm ?
/* Remainder of brcmf_cfg80211_set_power_mgmt (head in previous chunk). */
				    "enabled" : "disabled"));
	err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_PM, &pm);
	if (err) {
		if (err == -ENODEV)
			WL_ERR("net_device is not ready yet\n");
		else
			WL_ERR("error (%d)\n", err);
	}
done:
	WL_TRACE("Exit\n");
	return err;
}

/*
 * brcmf_cfg80211_set_bitrate_mask - cfg80211 .set_bitrate_mask callback.
 * Picks the lowest legacy rate set in the mask and forces it via the
 * band-blind "bg_rate"/"a_rate" iovars.  @addr is always NULL here.
 */
static s32
brcmf_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *ndev,
			     const u8 *addr,
			     const struct cfg80211_bitrate_mask *mask)
{
	struct brcm_rateset_le rateset_le;
	s32 rate;
	s32 val;
	s32 err_bg;
	s32 err_a;
	u32 legacy;
	s32 err = 0;

	WL_TRACE("Enter\n");
	if (!check_sys_up(wiphy))
		return -EIO;

	/* addr param is always NULL. ignore it */
	/* Get current rateset */
	err = brcmf_exec_dcmd(ndev, BRCM_GET_CURR_RATESET, &rateset_le,
			      sizeof(rateset_le));
	if (err) {
		WL_ERR("could not get current rateset (%d)\n", err);
		goto done;
	}

	legacy = ffs(mask->control[IEEE80211_BAND_2GHZ].legacy & 0xFFFF);
	if (!legacy)
		legacy = ffs(mask->control[IEEE80211_BAND_5GHZ].legacy &
			     0xFFFF);

	val = wl_g_rates[legacy - 1].bitrate * 100000;

	/* NOTE(review): comparing a bps value against the rateset COUNT
	 * looks suspect but matches the driver of this era — verify
	 * against mainline before changing. */
	if (val < le32_to_cpu(rateset_le.count))
		/* Select rate by rateset index */
		rate = rateset_le.rates[val] & 0x7f;
	else
		/* Specified rate in bps */
		rate = val / 500000;

	WL_CONN("rate %d mbps\n", rate / 2);

	/*
	 *
	 *	Set rate override,
	 *	Since the is a/b/g-blind, both a/bg_rate are enforced.
	 */
	err_bg = brcmf_dev_intvar_set(ndev, "bg_rate", rate);
	err_a = brcmf_dev_intvar_set(ndev, "a_rate", rate);
	if (err_bg && err_a) {
		WL_ERR("could not set fixed rate (%d) (%d)\n", err_bg, err_a);
		err = err_bg | err_a;
	}

done:
	WL_TRACE("Exit\n");
	return err;
}

/*
 * brcmf_inform_single_bss - translate one firmware BSS record into a
 * cfg80211_inform_bss() entry (channel, capability, beacon interval,
 * IEs, RSSI in centi-dBm).  Oversized records are dropped, returning 0.
 */
static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv,
				   struct brcmf_bss_info_le *bi)
{
	struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
	struct ieee80211_channel *notify_channel;
	struct cfg80211_bss *bss;
	struct ieee80211_supported_band *band;
	s32 err = 0;
	u16 channel;
	u32 freq;
	u16 notify_capability;
	u16 notify_interval;
	u8 *notify_ie;
	size_t notify_ielen;
	s32 notify_signal;

	if (le32_to_cpu(bi->length) > WL_BSS_INFO_MAX) {
		WL_ERR("Bss info is larger than buffer. Discarding\n");
		return 0;
	}

	channel = bi->ctl_ch ? bi->ctl_ch :
				CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec));

	if (channel <= CH_MAX_2G_CHANNEL)
		band = wiphy->bands[IEEE80211_BAND_2GHZ];
	else
		band = wiphy->bands[IEEE80211_BAND_5GHZ];

	freq = ieee80211_channel_to_frequency(channel, band->band);
	notify_channel = ieee80211_get_channel(wiphy, freq);

	notify_capability = le16_to_cpu(bi->capability);
	notify_interval = le16_to_cpu(bi->beacon_period);
	notify_ie = (u8 *)bi + le16_to_cpu(bi->ie_offset);
	notify_ielen = le32_to_cpu(bi->ie_length);
	notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100;

	WL_CONN("bssid: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
			bi->BSSID[0], bi->BSSID[1], bi->BSSID[2],
			bi->BSSID[3], bi->BSSID[4], bi->BSSID[5]);
	WL_CONN("Channel: %d(%d)\n", channel, freq);
	WL_CONN("Capability: %X\n", notify_capability);
	WL_CONN("Beacon interval: %d\n", notify_interval);
	WL_CONN("Signal: %d\n", notify_signal);

	bss = cfg80211_inform_bss(wiphy, notify_channel, (const u8 *)bi->BSSID,
		0, notify_capability, notify_interval, notify_ie,
		notify_ielen, notify_signal, GFP_KERNEL);

	if (!bss)
		return -ENOMEM;

	cfg80211_put_bss(bss);

	return err;
}

/* next_bss_le - iterate the variable-length firmware scan result list;
 * NULL @bss returns the first entry. */
static struct brcmf_bss_info_le *
next_bss_le(struct brcmf_scan_results *list, struct brcmf_bss_info_le *bss)
{
	if (bss == NULL)
		return list->bss_info_le;
	return (struct brcmf_bss_info_le *)((unsigned long)bss +
					    le32_to_cpu(bss->length));
}

/*
 * brcmf_inform_bss - push every BSS from the last scan result list
 * (capped at WL_AP_MAX) to cfg80211; rejects unknown list versions.
 */
static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv)
{
	struct brcmf_scan_results *bss_list;
	struct brcmf_bss_info_le *bi = NULL;	/* must be initialized */
	s32 err = 0;
	int i;

	bss_list = cfg_priv->bss_list;
	if (bss_list->version != BRCMF_BSS_INFO_VERSION) {
		WL_ERR("Version %d != WL_BSS_INFO_VERSION\n",
		       bss_list->version);
		return -EOPNOTSUPP;
	}
	WL_SCAN("scanned AP count (%d)\n", bss_list->count);
	for (i = 0; i < bss_list->count && i < WL_AP_MAX; i++) {
		bi = next_bss_le(bss_list, bi);
		err = brcmf_inform_single_bss(cfg_priv, bi);
		if (err)
			break;
	}
	return err;
}

/* wl_inform_ibss - continues in the next chunk */
static s32 wl_inform_ibss(struct
brcmf_cfg80211_priv *cfg_priv, struct net_device *ndev, const u8 *bssid) { struct wiphy *wiphy = cfg_to_wiphy(cfg_priv); struct ieee80211_channel *notify_channel; struct brcmf_bss_info_le *bi = NULL; struct ieee80211_supported_band *band; struct cfg80211_bss *bss; u8 *buf = NULL; s32 err = 0; u16 channel; u32 freq; u16 notify_capability; u16 notify_interval; u8 *notify_ie; size_t notify_ielen; s32 notify_signal; WL_TRACE("Enter\n"); buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL); if (buf == NULL) { err = -ENOMEM; goto CleanUp; } *(__le32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX); err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_BSS_INFO, buf, WL_BSS_INFO_MAX); if (err) { WL_ERR("WLC_GET_BSS_INFO failed: %d\n", err); goto CleanUp; } bi = (struct brcmf_bss_info_le *)(buf + 4); channel = bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec)); if (channel <= CH_MAX_2G_CHANNEL) band = wiphy->bands[IEEE80211_BAND_2GHZ]; else band = wiphy->bands[IEEE80211_BAND_5GHZ]; freq = ieee80211_channel_to_frequency(channel, band->band); notify_channel = ieee80211_get_channel(wiphy, freq); notify_capability = le16_to_cpu(bi->capability); notify_interval = le16_to_cpu(bi->beacon_period); notify_ie = (u8 *)bi + le16_to_cpu(bi->ie_offset); notify_ielen = le32_to_cpu(bi->ie_length); notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100; WL_CONN("channel: %d(%d)\n", channel, freq); WL_CONN("capability: %X\n", notify_capability); WL_CONN("beacon interval: %d\n", notify_interval); WL_CONN("signal: %d\n", notify_signal); bss = cfg80211_inform_bss(wiphy, notify_channel, bssid, 0, notify_capability, notify_interval, notify_ie, notify_ielen, notify_signal, GFP_KERNEL); if (!bss) { err = -ENOMEM; goto CleanUp; } cfg80211_put_bss(bss); CleanUp: kfree(buf); WL_TRACE("Exit\n"); return err; } static bool brcmf_is_ibssmode(struct brcmf_cfg80211_priv *cfg_priv) { return cfg_priv->conf->mode == WL_MODE_IBSS; } /* * Traverse a string of 1-byte tag/1-byte length/variable-length value * triples, returning a pointer 
to the substring whose first element * matches tag */ static struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key) { struct brcmf_tlv *elt; int totlen; elt = (struct brcmf_tlv *) buf; totlen = buflen; /* find tagged parameter */ while (totlen >= 2) { int len = elt->len; /* validate remaining totlen */ if ((elt->id == key) && (totlen >= (len + 2))) return elt; elt = (struct brcmf_tlv *) ((u8 *) elt + (len + 2)); totlen -= (len + 2); } return NULL; } static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv) { struct brcmf_bss_info_le *bi; struct brcmf_ssid *ssid; struct brcmf_tlv *tim; u16 beacon_interval; u8 dtim_period; size_t ie_len; u8 *ie; s32 err = 0; WL_TRACE("Enter\n"); if (brcmf_is_ibssmode(cfg_priv)) return err; ssid = (struct brcmf_ssid *)brcmf_read_prof(cfg_priv, WL_PROF_SSID); *(__le32 *)cfg_priv->extra_buf = cpu_to_le32(WL_EXTRA_BUF_MAX); err = brcmf_exec_dcmd(cfg_to_ndev(cfg_priv), BRCMF_C_GET_BSS_INFO, cfg_priv->extra_buf, WL_EXTRA_BUF_MAX); if (err) { WL_ERR("Could not get bss info %d\n", err); goto update_bss_info_out; } bi = (struct brcmf_bss_info_le *)(cfg_priv->extra_buf + 4); err = brcmf_inform_single_bss(cfg_priv, bi); if (err) goto update_bss_info_out; ie = ((u8 *)bi) + le16_to_cpu(bi->ie_offset); ie_len = le32_to_cpu(bi->ie_length); beacon_interval = le16_to_cpu(bi->beacon_period); tim = brcmf_parse_tlvs(ie, ie_len, WLAN_EID_TIM); if (tim) dtim_period = tim->data[1]; else { /* * active scan was done so we could not get dtim * information out of probe response. * so we speficially query dtim information to dongle. 
*/ u32 var; err = brcmf_dev_intvar_get(cfg_to_ndev(cfg_priv), "dtim_assoc", &var); if (err) { WL_ERR("wl dtim_assoc failed (%d)\n", err); goto update_bss_info_out; } dtim_period = (u8)var; } brcmf_update_prof(cfg_priv, NULL, &beacon_interval, WL_PROF_BEACONINT); brcmf_update_prof(cfg_priv, NULL, &dtim_period, WL_PROF_DTIMPERIOD); update_bss_info_out: WL_TRACE("Exit"); return err; } static void brcmf_term_iscan(struct brcmf_cfg80211_priv *cfg_priv) { struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv); struct brcmf_ssid ssid; if (cfg_priv->iscan_on) { iscan->state = WL_ISCAN_STATE_IDLE; if (iscan->timer_on) { del_timer_sync(&iscan->timer); iscan->timer_on = 0; } cancel_work_sync(&iscan->work); /* Abort iscan running in FW */ memset(&ssid, 0, sizeof(ssid)); brcmf_run_iscan(iscan, &ssid, WL_SCAN_ACTION_ABORT); } } static void brcmf_notify_iscan_complete(struct brcmf_cfg80211_iscan_ctrl *iscan, bool aborted) { struct brcmf_cfg80211_priv *cfg_priv = iscan_to_cfg(iscan); struct net_device *ndev = cfg_to_ndev(cfg_priv); if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg_priv->status)) { WL_ERR("Scan complete while device not scanning\n"); return; } if (cfg_priv->scan_request) { WL_SCAN("ISCAN Completed scan: %s\n", aborted ? 
"Aborted" : "Done");
		cfg80211_scan_done(cfg_priv->scan_request, aborted);
		/* scan finished: let the dongle re-enable power save (MPC) */
		brcmf_set_mpc(ndev, 1);
		cfg_priv->scan_request = NULL;
	}
	cfg_priv->iscan_kickstart = false;
}

/*
 * Schedule the iscan worker if an incremental scan is active.
 * Returns 0 when the worker was scheduled, -EIO when iscan is idle.
 */
static s32 brcmf_wakeup_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan)
{
	if (iscan->state != WL_ISCAN_STATE_IDLE) {
		WL_SCAN("wake up iscan\n");
		schedule_work(&iscan->work);
		return 0;
	}

	return -EIO;
}

/*
 * Fetch the iscan results accumulated so far from the firmware.
 *
 * @iscan: iscan control block; raw results land in iscan->scan_buf.
 * @status: out parameter, firmware scan status (BRCMF_SCAN_RESULTS_*).
 * @bss_list: out parameter, points into iscan->scan_buf on success.
 *
 * The result-list header is converted from little-endian wire order to
 * host order in place before *bss_list is handed back.
 */
static s32
brcmf_get_iscan_results(struct brcmf_cfg80211_iscan_ctrl *iscan, u32 *status,
			struct brcmf_scan_results **bss_list)
{
	struct brcmf_iscan_results list;
	struct brcmf_scan_results *results;
	struct brcmf_scan_results_le *results_le;
	struct brcmf_iscan_results *list_buf;
	s32 err = 0;

	memset(iscan->scan_buf, 0, WL_ISCAN_BUF_MAX);
	list_buf = (struct brcmf_iscan_results *)iscan->scan_buf;
	results = &list_buf->results;
	results_le = &list_buf->results_le;
	results->buflen = BRCMF_ISCAN_RESULTS_FIXED_SIZE;
	results->version = 0;
	results->count = 0;
	memset(&list, 0, sizeof(list));
	list.results_le.buflen = cpu_to_le32(WL_ISCAN_BUF_MAX);
	err = brcmf_dev_iovar_getbuf(iscan->ndev, "iscanresults", &list,
				     BRCMF_ISCAN_RESULTS_FIXED_SIZE,
				     iscan->scan_buf, WL_ISCAN_BUF_MAX);
	if (err) {
		WL_ERR("error (%d)\n", err);
		return err;
	}
	/* convert the wire (little-endian) header to host order in place */
	results->buflen = le32_to_cpu(results_le->buflen);
	results->version = le32_to_cpu(results_le->version);
	results->count = le32_to_cpu(results_le->count);
	WL_SCAN("results->count = %d\n", results_le->count);
	WL_SCAN("results->buflen = %d\n", results_le->buflen);
	*status = le32_to_cpu(list_buf->status_le);
	WL_SCAN("status = %d\n", *status);
	*bss_list = results;

	return err;
}

/* iscan state handler: firmware reported a complete result set */
static s32 brcmf_iscan_done(struct brcmf_cfg80211_priv *cfg_priv)
{
	struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
	s32 err = 0;

	iscan->state = WL_ISCAN_STATE_IDLE;
	brcmf_inform_bss(cfg_priv);
	brcmf_notify_iscan_complete(iscan, false);

	return err;
}

/* iscan state handler: results still pending, poll again later */
static s32 brcmf_iscan_pending(struct brcmf_cfg80211_priv *cfg_priv)
{
	struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
	s32 err = 0;

	/* Reschedule the timer */
	mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
	iscan->timer_on = 1;

	return err;
}

/* iscan state handler: partial results available; report and continue */
static s32 brcmf_iscan_inprogress(struct brcmf_cfg80211_priv *cfg_priv)
{
	struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
	s32 err = 0;

	brcmf_inform_bss(cfg_priv);
	brcmf_run_iscan(iscan, NULL, BRCMF_SCAN_ACTION_CONTINUE);
	/* Reschedule the timer */
	mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
	iscan->timer_on = 1;

	return err;
}

/* iscan state handler: scan aborted (also wired to the no-mem status) */
static s32 brcmf_iscan_aborted(struct brcmf_cfg80211_priv *cfg_priv)
{
	struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
	s32 err = 0;

	iscan->state = WL_ISCAN_STATE_IDLE;
	brcmf_notify_iscan_complete(iscan, true);

	return err;
}

/*
 * Worker: pull the current iscan results and dispatch to the handler
 * matching the firmware status.
 * NOTE(review): status comes from the firmware reply; it is assumed to
 * stay within the bounds of el->handler[] -- confirm against the
 * firmware interface definition.
 */
static void brcmf_cfg80211_iscan_handler(struct work_struct *work)
{
	struct brcmf_cfg80211_iscan_ctrl *iscan =
			container_of(work, struct brcmf_cfg80211_iscan_ctrl,
				     work);
	struct brcmf_cfg80211_priv *cfg_priv = iscan_to_cfg(iscan);
	struct brcmf_cfg80211_iscan_eloop *el = &iscan->el;
	u32 status = BRCMF_SCAN_RESULTS_PARTIAL;

	if (iscan->timer_on) {
		del_timer_sync(&iscan->timer);
		iscan->timer_on = 0;
	}

	if (brcmf_get_iscan_results(iscan, &status, &cfg_priv->bss_list)) {
		status = BRCMF_SCAN_RESULTS_ABORTED;
		WL_ERR("Abort iscan\n");
	}

	el->handler[status](cfg_priv);
}

/* Timer callback: wake the iscan worker to poll for new results */
static void brcmf_iscan_timer(unsigned long data)
{
	struct brcmf_cfg80211_iscan_ctrl *iscan =
			(struct brcmf_cfg80211_iscan_ctrl *)data;

	if (iscan) {
		iscan->timer_on = 0;
		WL_SCAN("timer expired\n");
		brcmf_wakeup_iscan(iscan);
	}
}

/* Reset iscan to idle and (re)initialize its work item */
static s32 brcmf_invoke_iscan(struct brcmf_cfg80211_priv *cfg_priv)
{
	struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv);

	if (cfg_priv->iscan_on) {
		iscan->state = WL_ISCAN_STATE_IDLE;
		INIT_WORK(&iscan->work, brcmf_cfg80211_iscan_handler);
	}

	return 0;
}

/* Populate the dispatch table used by brcmf_cfg80211_iscan_handler() */
static void brcmf_init_iscan_eloop(struct brcmf_cfg80211_iscan_eloop *el)
{
	memset(el, 0, sizeof(*el));
	el->handler[BRCMF_SCAN_RESULTS_SUCCESS] = brcmf_iscan_done;
	el->handler[BRCMF_SCAN_RESULTS_PARTIAL] = brcmf_iscan_inprogress;
el->handler[BRCMF_SCAN_RESULTS_PENDING] = brcmf_iscan_pending; el->handler[BRCMF_SCAN_RESULTS_ABORTED] = brcmf_iscan_aborted; el->handler[BRCMF_SCAN_RESULTS_NO_MEM] = brcmf_iscan_aborted; } static s32 brcmf_init_iscan(struct brcmf_cfg80211_priv *cfg_priv) { struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv); int err = 0; if (cfg_priv->iscan_on) { iscan->ndev = cfg_to_ndev(cfg_priv); brcmf_init_iscan_eloop(&iscan->el); iscan->timer_ms = WL_ISCAN_TIMER_INTERVAL_MS; init_timer(&iscan->timer); iscan->timer.data = (unsigned long) iscan; iscan->timer.function = brcmf_iscan_timer; err = brcmf_invoke_iscan(cfg_priv); if (!err) iscan->data = cfg_priv; } return err; } static __always_inline void brcmf_delay(u32 ms) { if (ms < 1000 / HZ) { cond_resched(); mdelay(ms); } else { msleep(ms); } } static s32 brcmf_cfg80211_resume(struct wiphy *wiphy) { struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); /* * Check for WL_STATUS_READY before any function call which * could result is bus access. Don't block the resume for * any driver error conditions */ WL_TRACE("Enter\n"); if (test_bit(WL_STATUS_READY, &cfg_priv->status)) brcmf_invoke_iscan(wiphy_to_cfg(wiphy)); WL_TRACE("Exit\n"); return 0; } static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow) { struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); struct net_device *ndev = cfg_to_ndev(cfg_priv); WL_TRACE("Enter\n"); /* * Check for WL_STATUS_READY before any function call which * could result is bus access. 
Don't block the suspend for * any driver error conditions */ /* * While going to suspend if associated with AP disassociate * from AP to save power while system is in suspended state */ if ((test_bit(WL_STATUS_CONNECTED, &cfg_priv->status) || test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) && test_bit(WL_STATUS_READY, &cfg_priv->status)) { WL_INFO("Disassociating from AP" " while entering suspend state\n"); brcmf_link_down(cfg_priv); /* * Make sure WPA_Supplicant receives all the event * generated due to DISASSOC call to the fw to keep * the state fw and WPA_Supplicant state consistent */ brcmf_delay(500); } set_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status); if (test_bit(WL_STATUS_READY, &cfg_priv->status)) brcmf_term_iscan(cfg_priv); if (cfg_priv->scan_request) { /* Indidate scan abort to cfg80211 layer */ WL_INFO("Terminating scan in progress\n"); cfg80211_scan_done(cfg_priv->scan_request, true); cfg_priv->scan_request = NULL; } clear_bit(WL_STATUS_SCANNING, &cfg_priv->status); clear_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status); /* Turn off watchdog timer */ if (test_bit(WL_STATUS_READY, &cfg_priv->status)) { WL_INFO("Enable MPC\n"); brcmf_set_mpc(ndev, 1); } WL_TRACE("Exit\n"); return 0; } static __used s32 brcmf_dev_bufvar_set(struct net_device *ndev, s8 *name, s8 *buf, s32 len) { struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); u32 buflen; buflen = brcmf_c_mkiovar(name, buf, len, cfg_priv->dcmd_buf, WL_DCMD_LEN_MAX); BUG_ON(!buflen); return brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, cfg_priv->dcmd_buf, buflen); } static s32 brcmf_dev_bufvar_get(struct net_device *ndev, s8 *name, s8 *buf, s32 buf_len) { struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); u32 len; s32 err = 0; len = brcmf_c_mkiovar(name, NULL, 0, cfg_priv->dcmd_buf, WL_DCMD_LEN_MAX); BUG_ON(!len); err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, cfg_priv->dcmd_buf, WL_DCMD_LEN_MAX); if (err) { WL_ERR("error (%d)\n", err); return err; } memcpy(buf, cfg_priv->dcmd_buf, buf_len); 
return err; } static __used s32 brcmf_update_pmklist(struct net_device *ndev, struct brcmf_cfg80211_pmk_list *pmk_list, s32 err) { int i, j; int pmkid_len; pmkid_len = le32_to_cpu(pmk_list->pmkids.npmkid); WL_CONN("No of elements %d\n", pmkid_len); for (i = 0; i < pmkid_len; i++) { WL_CONN("PMKID[%d]: %pM =\n", i, &pmk_list->pmkids.pmkid[i].BSSID); for (j = 0; j < WLAN_PMKID_LEN; j++) WL_CONN("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j]); } if (!err) brcmf_dev_bufvar_set(ndev, "pmkid_info", (char *)pmk_list, sizeof(*pmk_list)); return err; } static s32 brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_pmksa *pmksa) { struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); struct pmkid_list *pmkids = &cfg_priv->pmk_list->pmkids; s32 err = 0; int i; int pmkid_len; WL_TRACE("Enter\n"); if (!check_sys_up(wiphy)) return -EIO; pmkid_len = le32_to_cpu(pmkids->npmkid); for (i = 0; i < pmkid_len; i++) if (!memcmp(pmksa->bssid, pmkids->pmkid[i].BSSID, ETH_ALEN)) break; if (i < WL_NUM_PMKIDS_MAX) { memcpy(pmkids->pmkid[i].BSSID, pmksa->bssid, ETH_ALEN); memcpy(pmkids->pmkid[i].PMKID, pmksa->pmkid, WLAN_PMKID_LEN); if (i == pmkid_len) { pmkid_len++; pmkids->npmkid = cpu_to_le32(pmkid_len); } } else err = -EINVAL; WL_CONN("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n", pmkids->pmkid[pmkid_len].BSSID); for (i = 0; i < WLAN_PMKID_LEN; i++) WL_CONN("%02x\n", pmkids->pmkid[pmkid_len].PMKID[i]); err = brcmf_update_pmklist(ndev, cfg_priv->pmk_list, err); WL_TRACE("Exit\n"); return err; } static s32 brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_pmksa *pmksa) { struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); struct pmkid_list pmkid; s32 err = 0; int i, pmkid_len; WL_TRACE("Enter\n"); if (!check_sys_up(wiphy)) return -EIO; memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETH_ALEN); memcpy(&pmkid.pmkid[0].PMKID, pmksa->pmkid, WLAN_PMKID_LEN); WL_CONN("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n", 
&pmkid.pmkid[0].BSSID); for (i = 0; i < WLAN_PMKID_LEN; i++) WL_CONN("%02x\n", pmkid.pmkid[0].PMKID[i]); pmkid_len = le32_to_cpu(cfg_priv->pmk_list->pmkids.npmkid); for (i = 0; i < pmkid_len; i++) if (!memcmp (pmksa->bssid, &cfg_priv->pmk_list->pmkids.pmkid[i].BSSID, ETH_ALEN)) break; if ((pmkid_len > 0) && (i < pmkid_len)) { memset(&cfg_priv->pmk_list->pmkids.pmkid[i], 0, sizeof(struct pmkid)); for (; i < (pmkid_len - 1); i++) { memcpy(&cfg_priv->pmk_list->pmkids.pmkid[i].BSSID, &cfg_priv->pmk_list->pmkids.pmkid[i + 1].BSSID, ETH_ALEN); memcpy(&cfg_priv->pmk_list->pmkids.pmkid[i].PMKID, &cfg_priv->pmk_list->pmkids.pmkid[i + 1].PMKID, WLAN_PMKID_LEN); } cfg_priv->pmk_list->pmkids.npmkid = cpu_to_le32(pmkid_len - 1); } else err = -EINVAL; err = brcmf_update_pmklist(ndev, cfg_priv->pmk_list, err); WL_TRACE("Exit\n"); return err; } static s32 brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev) { struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); s32 err = 0; WL_TRACE("Enter\n"); if (!check_sys_up(wiphy)) return -EIO; memset(cfg_priv->pmk_list, 0, sizeof(*cfg_priv->pmk_list)); err = brcmf_update_pmklist(ndev, cfg_priv->pmk_list, err); WL_TRACE("Exit\n"); return err; } static struct cfg80211_ops wl_cfg80211_ops = { .change_virtual_intf = brcmf_cfg80211_change_iface, .scan = brcmf_cfg80211_scan, .set_wiphy_params = brcmf_cfg80211_set_wiphy_params, .join_ibss = brcmf_cfg80211_join_ibss, .leave_ibss = brcmf_cfg80211_leave_ibss, .get_station = brcmf_cfg80211_get_station, .set_tx_power = brcmf_cfg80211_set_tx_power, .get_tx_power = brcmf_cfg80211_get_tx_power, .add_key = brcmf_cfg80211_add_key, .del_key = brcmf_cfg80211_del_key, .get_key = brcmf_cfg80211_get_key, .set_default_key = brcmf_cfg80211_config_default_key, .set_default_mgmt_key = brcmf_cfg80211_config_default_mgmt_key, .set_power_mgmt = brcmf_cfg80211_set_power_mgmt, .set_bitrate_mask = brcmf_cfg80211_set_bitrate_mask, .connect = brcmf_cfg80211_connect, .disconnect = 
brcmf_cfg80211_disconnect, .suspend = brcmf_cfg80211_suspend, .resume = brcmf_cfg80211_resume, .set_pmksa = brcmf_cfg80211_set_pmksa, .del_pmksa = brcmf_cfg80211_del_pmksa, .flush_pmksa = brcmf_cfg80211_flush_pmksa }; static s32 brcmf_mode_to_nl80211_iftype(s32 mode) { s32 err = 0; switch (mode) { case WL_MODE_BSS: return NL80211_IFTYPE_STATION; case WL_MODE_IBSS: return NL80211_IFTYPE_ADHOC; default: return NL80211_IFTYPE_UNSPECIFIED; } return err; } static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface, struct device *ndev) { struct wireless_dev *wdev; s32 err = 0; wdev = kzalloc(sizeof(*wdev), GFP_KERNEL); if (!wdev) return ERR_PTR(-ENOMEM); wdev->wiphy = wiphy_new(&wl_cfg80211_ops, sizeof(struct brcmf_cfg80211_priv) + sizeof_iface); if (!wdev->wiphy) { WL_ERR("Could not allocate wiphy device\n"); err = -ENOMEM; goto wiphy_new_out; } set_wiphy_dev(wdev->wiphy, ndev); wdev->wiphy->max_scan_ssids = WL_NUM_SCAN_MAX; wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX; wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC); wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz; wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a; /* Set * it as 11a by default. 
* This will be updated with * 11n phy tables in * "ifconfig up" * if phy has 11n capability */ wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; wdev->wiphy->cipher_suites = __wl_cipher_suites; wdev->wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites); wdev->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; /* enable power * save mode * by default */ err = wiphy_register(wdev->wiphy); if (err < 0) { WL_ERR("Could not register wiphy device (%d)\n", err); goto wiphy_register_out; } return wdev; wiphy_register_out: wiphy_free(wdev->wiphy); wiphy_new_out: kfree(wdev); return ERR_PTR(err); } static void brcmf_free_wdev(struct brcmf_cfg80211_priv *cfg_priv) { struct wireless_dev *wdev = cfg_priv->wdev; if (!wdev) { WL_ERR("wdev is invalid\n"); return; } wiphy_unregister(wdev->wiphy); wiphy_free(wdev->wiphy); kfree(wdev); cfg_priv->wdev = NULL; } static bool brcmf_is_linkup(struct brcmf_cfg80211_priv *cfg_priv, const struct brcmf_event_msg *e) { u32 event = be32_to_cpu(e->event_type); u32 status = be32_to_cpu(e->status); if (event == BRCMF_E_SET_SSID && status == BRCMF_E_STATUS_SUCCESS) { WL_CONN("Processing set ssid\n"); cfg_priv->link_up = true; return true; } return false; } static bool brcmf_is_linkdown(struct brcmf_cfg80211_priv *cfg_priv, const struct brcmf_event_msg *e) { u32 event = be32_to_cpu(e->event_type); u16 flags = be16_to_cpu(e->flags); if (event == BRCMF_E_LINK && (!(flags & BRCMF_EVENT_MSG_LINK))) { WL_CONN("Processing link down\n"); return true; } return false; } static bool brcmf_is_nonetwork(struct brcmf_cfg80211_priv *cfg_priv, const struct brcmf_event_msg *e) { u32 event = be32_to_cpu(e->event_type); u32 status = be32_to_cpu(e->status); if (event == BRCMF_E_LINK && status == BRCMF_E_STATUS_NO_NETWORKS) { WL_CONN("Processing Link %s & no network found\n", be16_to_cpu(e->flags) & BRCMF_EVENT_MSG_LINK ? 
"up" : "down"); return true; } if (event == BRCMF_E_SET_SSID && status != BRCMF_E_STATUS_SUCCESS) { WL_CONN("Processing connecting & no network found\n"); return true; } return false; } static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv) { struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv); kfree(conn_info->req_ie); conn_info->req_ie = NULL; conn_info->req_ie_len = 0; kfree(conn_info->resp_ie); conn_info->resp_ie = NULL; conn_info->resp_ie_len = 0; } static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv) { struct net_device *ndev = cfg_to_ndev(cfg_priv); struct brcmf_cfg80211_assoc_ielen_le *assoc_info; struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv); u32 req_len; u32 resp_len; s32 err = 0; brcmf_clear_assoc_ies(cfg_priv); err = brcmf_dev_bufvar_get(ndev, "assoc_info", cfg_priv->extra_buf, WL_ASSOC_INFO_MAX); if (err) { WL_ERR("could not get assoc info (%d)\n", err); return err; } assoc_info = (struct brcmf_cfg80211_assoc_ielen_le *)cfg_priv->extra_buf; req_len = le32_to_cpu(assoc_info->req_len); resp_len = le32_to_cpu(assoc_info->resp_len); if (req_len) { err = brcmf_dev_bufvar_get(ndev, "assoc_req_ies", cfg_priv->extra_buf, WL_ASSOC_INFO_MAX); if (err) { WL_ERR("could not get assoc req (%d)\n", err); return err; } conn_info->req_ie_len = req_len; conn_info->req_ie = kmemdup(cfg_priv->extra_buf, conn_info->req_ie_len, GFP_KERNEL); } else { conn_info->req_ie_len = 0; conn_info->req_ie = NULL; } if (resp_len) { err = brcmf_dev_bufvar_get(ndev, "assoc_resp_ies", cfg_priv->extra_buf, WL_ASSOC_INFO_MAX); if (err) { WL_ERR("could not get assoc resp (%d)\n", err); return err; } conn_info->resp_ie_len = resp_len; conn_info->resp_ie = kmemdup(cfg_priv->extra_buf, conn_info->resp_ie_len, GFP_KERNEL); } else { conn_info->resp_ie_len = 0; conn_info->resp_ie = NULL; } WL_CONN("req len (%d) resp len (%d)\n", conn_info->req_ie_len, conn_info->resp_ie_len); return err; } static s32 
brcmf_bss_roaming_done(struct brcmf_cfg80211_priv *cfg_priv, struct net_device *ndev, const struct brcmf_event_msg *e) { struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv); struct wiphy *wiphy = cfg_to_wiphy(cfg_priv); struct brcmf_channel_info_le channel_le; struct ieee80211_channel *notify_channel; struct ieee80211_supported_band *band; u32 freq; s32 err = 0; u32 target_channel; WL_TRACE("Enter\n"); brcmf_get_assoc_ies(cfg_priv); brcmf_update_prof(cfg_priv, NULL, &e->addr, WL_PROF_BSSID); brcmf_update_bss_info(cfg_priv); brcmf_exec_dcmd(ndev, BRCMF_C_GET_CHANNEL, &channel_le, sizeof(channel_le)); target_channel = le32_to_cpu(channel_le.target_channel); WL_CONN("Roamed to channel %d\n", target_channel); if (target_channel <= CH_MAX_2G_CHANNEL) band = wiphy->bands[IEEE80211_BAND_2GHZ]; else band = wiphy->bands[IEEE80211_BAND_5GHZ]; freq = ieee80211_channel_to_frequency(target_channel, band->band); notify_channel = ieee80211_get_channel(wiphy, freq); cfg80211_roamed(ndev, notify_channel, (u8 *)brcmf_read_prof(cfg_priv, WL_PROF_BSSID), conn_info->req_ie, conn_info->req_ie_len, conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL); WL_CONN("Report roaming result\n"); set_bit(WL_STATUS_CONNECTED, &cfg_priv->status); WL_TRACE("Exit\n"); return err; } static s32 brcmf_bss_connect_done(struct brcmf_cfg80211_priv *cfg_priv, struct net_device *ndev, const struct brcmf_event_msg *e, bool completed) { struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv); s32 err = 0; WL_TRACE("Enter\n"); if (test_and_clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) { if (completed) { brcmf_get_assoc_ies(cfg_priv); brcmf_update_prof(cfg_priv, NULL, &e->addr, WL_PROF_BSSID); brcmf_update_bss_info(cfg_priv); } cfg80211_connect_result(ndev, (u8 *)brcmf_read_prof(cfg_priv, WL_PROF_BSSID), conn_info->req_ie, conn_info->req_ie_len, conn_info->resp_ie, conn_info->resp_ie_len, completed ? 
WLAN_STATUS_SUCCESS : WLAN_STATUS_AUTH_TIMEOUT, GFP_KERNEL); if (completed) set_bit(WL_STATUS_CONNECTED, &cfg_priv->status); WL_CONN("Report connect result - connection %s\n", completed ? "succeeded" : "failed"); } WL_TRACE("Exit\n"); return err; } static s32 brcmf_notify_connect_status(struct brcmf_cfg80211_priv *cfg_priv, struct net_device *ndev, const struct brcmf_event_msg *e, void *data) { s32 err = 0; if (brcmf_is_linkup(cfg_priv, e)) { WL_CONN("Linkup\n"); if (brcmf_is_ibssmode(cfg_priv)) { brcmf_update_prof(cfg_priv, NULL, (void *)e->addr, WL_PROF_BSSID); wl_inform_ibss(cfg_priv, ndev, e->addr); cfg80211_ibss_joined(ndev, e->addr, GFP_KERNEL); clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status); set_bit(WL_STATUS_CONNECTED, &cfg_priv->status); } else brcmf_bss_connect_done(cfg_priv, ndev, e, true); } else if (brcmf_is_linkdown(cfg_priv, e)) { WL_CONN("Linkdown\n"); if (brcmf_is_ibssmode(cfg_priv)) { clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status); if (test_and_clear_bit(WL_STATUS_CONNECTED, &cfg_priv->status)) brcmf_link_down(cfg_priv); } else { brcmf_bss_connect_done(cfg_priv, ndev, e, false); if (test_and_clear_bit(WL_STATUS_CONNECTED, &cfg_priv->status)) { cfg80211_disconnected(ndev, 0, NULL, 0, GFP_KERNEL); brcmf_link_down(cfg_priv); } } brcmf_init_prof(cfg_priv->profile); } else if (brcmf_is_nonetwork(cfg_priv, e)) { if (brcmf_is_ibssmode(cfg_priv)) clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status); else brcmf_bss_connect_done(cfg_priv, ndev, e, false); } return err; } static s32 brcmf_notify_roaming_status(struct brcmf_cfg80211_priv *cfg_priv, struct net_device *ndev, const struct brcmf_event_msg *e, void *data) { s32 err = 0; u32 event = be32_to_cpu(e->event_type); u32 status = be32_to_cpu(e->status); if (event == BRCMF_E_ROAM && status == BRCMF_E_STATUS_SUCCESS) { if (test_bit(WL_STATUS_CONNECTED, &cfg_priv->status)) brcmf_bss_roaming_done(cfg_priv, ndev, e); else brcmf_bss_connect_done(cfg_priv, ndev, e, true); } return err; } static s32 
brcmf_notify_mic_status(struct brcmf_cfg80211_priv *cfg_priv, struct net_device *ndev, const struct brcmf_event_msg *e, void *data) { u16 flags = be16_to_cpu(e->flags); enum nl80211_key_type key_type; if (flags & BRCMF_EVENT_MSG_GROUP) key_type = NL80211_KEYTYPE_GROUP; else key_type = NL80211_KEYTYPE_PAIRWISE; cfg80211_michael_mic_failure(ndev, (u8 *)&e->addr, key_type, -1, NULL, GFP_KERNEL); return 0; } static s32 brcmf_notify_scan_status(struct brcmf_cfg80211_priv *cfg_priv, struct net_device *ndev, const struct brcmf_event_msg *e, void *data) { struct brcmf_channel_info_le channel_inform_le; struct brcmf_scan_results_le *bss_list_le; u32 len = WL_SCAN_BUF_MAX; s32 err = 0; bool scan_abort = false; u32 scan_channel; WL_TRACE("Enter\n"); if (cfg_priv->iscan_on && cfg_priv->iscan_kickstart) { WL_TRACE("Exit\n"); return brcmf_wakeup_iscan(cfg_to_iscan(cfg_priv)); } if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg_priv->status)) { WL_ERR("Scan complete while device not scanning\n"); scan_abort = true; err = -EINVAL; goto scan_done_out; } err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_CHANNEL, &channel_inform_le, sizeof(channel_inform_le)); if (err) { WL_ERR("scan busy (%d)\n", err); scan_abort = true; goto scan_done_out; } scan_channel = le32_to_cpu(channel_inform_le.scan_channel); if (scan_channel) WL_CONN("channel_inform.scan_channel (%d)\n", scan_channel); cfg_priv->bss_list = cfg_priv->scan_results; bss_list_le = (struct brcmf_scan_results_le *) cfg_priv->bss_list; memset(cfg_priv->scan_results, 0, len); bss_list_le->buflen = cpu_to_le32(len); err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN_RESULTS, cfg_priv->scan_results, len); if (err) { WL_ERR("%s Scan_results error (%d)\n", ndev->name, err); err = -EINVAL; scan_abort = true; goto scan_done_out; } cfg_priv->scan_results->buflen = le32_to_cpu(bss_list_le->buflen); cfg_priv->scan_results->version = le32_to_cpu(bss_list_le->version); cfg_priv->scan_results->count = le32_to_cpu(bss_list_le->count); err = 
brcmf_inform_bss(cfg_priv); if (err) { scan_abort = true; goto scan_done_out; } scan_done_out: if (cfg_priv->scan_request) { WL_SCAN("calling cfg80211_scan_done\n"); cfg80211_scan_done(cfg_priv->scan_request, scan_abort); brcmf_set_mpc(ndev, 1); cfg_priv->scan_request = NULL; } WL_TRACE("Exit\n"); return err; } static void brcmf_init_conf(struct brcmf_cfg80211_conf *conf) { conf->mode = (u32)-1; conf->frag_threshold = (u32)-1; conf->rts_threshold = (u32)-1; conf->retry_short = (u32)-1; conf->retry_long = (u32)-1; conf->tx_power = -1; } static void brcmf_init_eloop_handler(struct brcmf_cfg80211_event_loop *el) { memset(el, 0, sizeof(*el)); el->handler[BRCMF_E_SCAN_COMPLETE] = brcmf_notify_scan_status; el->handler[BRCMF_E_LINK] = brcmf_notify_connect_status; el->handler[BRCMF_E_ROAM] = brcmf_notify_roaming_status; el->handler[BRCMF_E_MIC_ERROR] = brcmf_notify_mic_status; el->handler[BRCMF_E_SET_SSID] = brcmf_notify_connect_status; } static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_priv *cfg_priv) { kfree(cfg_priv->scan_results); cfg_priv->scan_results = NULL; kfree(cfg_priv->bss_info); cfg_priv->bss_info = NULL; kfree(cfg_priv->conf); cfg_priv->conf = NULL; kfree(cfg_priv->profile); cfg_priv->profile = NULL; kfree(cfg_priv->scan_req_int); cfg_priv->scan_req_int = NULL; kfree(cfg_priv->dcmd_buf); cfg_priv->dcmd_buf = NULL; kfree(cfg_priv->extra_buf); cfg_priv->extra_buf = NULL; kfree(cfg_priv->iscan); cfg_priv->iscan = NULL; kfree(cfg_priv->pmk_list); cfg_priv->pmk_list = NULL; } static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_priv *cfg_priv) { cfg_priv->scan_results = kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL); if (!cfg_priv->scan_results) goto init_priv_mem_out; cfg_priv->conf = kzalloc(sizeof(*cfg_priv->conf), GFP_KERNEL); if (!cfg_priv->conf) goto init_priv_mem_out; cfg_priv->profile = kzalloc(sizeof(*cfg_priv->profile), GFP_KERNEL); if (!cfg_priv->profile) goto init_priv_mem_out; cfg_priv->bss_info = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL); if 
(!cfg_priv->bss_info) goto init_priv_mem_out; cfg_priv->scan_req_int = kzalloc(sizeof(*cfg_priv->scan_req_int), GFP_KERNEL); if (!cfg_priv->scan_req_int) goto init_priv_mem_out; cfg_priv->dcmd_buf = kzalloc(WL_DCMD_LEN_MAX, GFP_KERNEL); if (!cfg_priv->dcmd_buf) goto init_priv_mem_out; cfg_priv->extra_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL); if (!cfg_priv->extra_buf) goto init_priv_mem_out; cfg_priv->iscan = kzalloc(sizeof(*cfg_priv->iscan), GFP_KERNEL); if (!cfg_priv->iscan) goto init_priv_mem_out; cfg_priv->pmk_list = kzalloc(sizeof(*cfg_priv->pmk_list), GFP_KERNEL); if (!cfg_priv->pmk_list) goto init_priv_mem_out; return 0; init_priv_mem_out: brcmf_deinit_priv_mem(cfg_priv); return -ENOMEM; } /* * retrieve first queued event from head */ static struct brcmf_cfg80211_event_q *brcmf_deq_event( struct brcmf_cfg80211_priv *cfg_priv) { struct brcmf_cfg80211_event_q *e = NULL; spin_lock_irq(&cfg_priv->evt_q_lock); if (!list_empty(&cfg_priv->evt_q_list)) { e = list_first_entry(&cfg_priv->evt_q_list, struct brcmf_cfg80211_event_q, evt_q_list); list_del(&e->evt_q_list); } spin_unlock_irq(&cfg_priv->evt_q_lock); return e; } /* * push event to tail of the queue * * remark: this function may not sleep as it is called in atomic context. 
*/ static s32 brcmf_enq_event(struct brcmf_cfg80211_priv *cfg_priv, u32 event, const struct brcmf_event_msg *msg) { struct brcmf_cfg80211_event_q *e; s32 err = 0; ulong flags; e = kzalloc(sizeof(struct brcmf_cfg80211_event_q), GFP_ATOMIC); if (!e) return -ENOMEM; e->etype = event; memcpy(&e->emsg, msg, sizeof(struct brcmf_event_msg)); spin_lock_irqsave(&cfg_priv->evt_q_lock, flags); list_add_tail(&e->evt_q_list, &cfg_priv->evt_q_list); spin_unlock_irqrestore(&cfg_priv->evt_q_lock, flags); return err; } static void brcmf_put_event(struct brcmf_cfg80211_event_q *e) { kfree(e); } static void brcmf_cfg80211_event_handler(struct work_struct *work) { struct brcmf_cfg80211_priv *cfg_priv = container_of(work, struct brcmf_cfg80211_priv, event_work); struct brcmf_cfg80211_event_q *e; e = brcmf_deq_event(cfg_priv); if (unlikely(!e)) { WL_ERR("event queue empty...\n"); return; } do { WL_INFO("event type (%d)\n", e->etype); if (cfg_priv->el.handler[e->etype]) cfg_priv->el.handler[e->etype](cfg_priv, cfg_to_ndev(cfg_priv), &e->emsg, e->edata); else WL_INFO("Unknown Event (%d): ignoring\n", e->etype); brcmf_put_event(e); } while ((e = brcmf_deq_event(cfg_priv))); } static void brcmf_init_eq(struct brcmf_cfg80211_priv *cfg_priv) { spin_lock_init(&cfg_priv->evt_q_lock); INIT_LIST_HEAD(&cfg_priv->evt_q_list); } static void brcmf_flush_eq(struct brcmf_cfg80211_priv *cfg_priv) { struct brcmf_cfg80211_event_q *e; spin_lock_irq(&cfg_priv->evt_q_lock); while (!list_empty(&cfg_priv->evt_q_list)) { e = list_first_entry(&cfg_priv->evt_q_list, struct brcmf_cfg80211_event_q, evt_q_list); list_del(&e->evt_q_list); kfree(e); } spin_unlock_irq(&cfg_priv->evt_q_lock); } static s32 wl_init_priv(struct brcmf_cfg80211_priv *cfg_priv) { s32 err = 0; cfg_priv->scan_request = NULL; cfg_priv->pwr_save = true; cfg_priv->iscan_on = true; /* iscan on & off switch. we enable iscan per default */ cfg_priv->roam_on = true; /* roam on & off switch. 
we enable roam per default */ cfg_priv->iscan_kickstart = false; cfg_priv->active_scan = true; /* we do active scan for specific scan per default */ cfg_priv->dongle_up = false; /* dongle is not up yet */ brcmf_init_eq(cfg_priv); err = brcmf_init_priv_mem(cfg_priv); if (err) return err; INIT_WORK(&cfg_priv->event_work, brcmf_cfg80211_event_handler); brcmf_init_eloop_handler(&cfg_priv->el); mutex_init(&cfg_priv->usr_sync); err = brcmf_init_iscan(cfg_priv); if (err) return err; brcmf_init_conf(cfg_priv->conf); brcmf_init_prof(cfg_priv->profile); brcmf_link_down(cfg_priv); return err; } static void wl_deinit_priv(struct brcmf_cfg80211_priv *cfg_priv) { cancel_work_sync(&cfg_priv->event_work); cfg_priv->dongle_up = false; /* dongle down */ brcmf_flush_eq(cfg_priv); brcmf_link_down(cfg_priv); brcmf_term_iscan(cfg_priv); brcmf_deinit_priv_mem(cfg_priv); } struct brcmf_cfg80211_dev *brcmf_cfg80211_attach(struct net_device *ndev, struct device *busdev, void *data) { struct wireless_dev *wdev; struct brcmf_cfg80211_priv *cfg_priv; struct brcmf_cfg80211_iface *ci; struct brcmf_cfg80211_dev *cfg_dev; s32 err = 0; if (!ndev) { WL_ERR("ndev is invalid\n"); return NULL; } cfg_dev = kzalloc(sizeof(struct brcmf_cfg80211_dev), GFP_KERNEL); if (!cfg_dev) return NULL; wdev = brcmf_alloc_wdev(sizeof(struct brcmf_cfg80211_iface), busdev); if (IS_ERR(wdev)) { kfree(cfg_dev); return NULL; } wdev->iftype = brcmf_mode_to_nl80211_iftype(WL_MODE_BSS); cfg_priv = wdev_to_cfg(wdev); cfg_priv->wdev = wdev; cfg_priv->pub = data; ci = (struct brcmf_cfg80211_iface *)&cfg_priv->ci; ci->cfg_priv = cfg_priv; ndev->ieee80211_ptr = wdev; SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy)); wdev->netdev = ndev; err = wl_init_priv(cfg_priv); if (err) { WL_ERR("Failed to init iwm_priv (%d)\n", err); goto cfg80211_attach_out; } brcmf_set_drvdata(cfg_dev, ci); return cfg_dev; cfg80211_attach_out: brcmf_free_wdev(cfg_priv); kfree(cfg_dev); return NULL; } void brcmf_cfg80211_detach(struct brcmf_cfg80211_dev 
*cfg_dev) { struct brcmf_cfg80211_priv *cfg_priv; cfg_priv = brcmf_priv_get(cfg_dev); wl_deinit_priv(cfg_priv); brcmf_free_wdev(cfg_priv); brcmf_set_drvdata(cfg_dev, NULL); kfree(cfg_dev); } void brcmf_cfg80211_event(struct net_device *ndev, const struct brcmf_event_msg *e, void *data) { u32 event_type = be32_to_cpu(e->event_type); struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); if (!brcmf_enq_event(cfg_priv, event_type, e)) schedule_work(&cfg_priv->event_work); } static s32 brcmf_dongle_mode(struct net_device *ndev, s32 iftype) { s32 infra = 0; s32 err = 0; switch (iftype) { case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_WDS: WL_ERR("type (%d) : currently we do not support this mode\n", iftype); err = -EINVAL; return err; case NL80211_IFTYPE_ADHOC: infra = 0; break; case NL80211_IFTYPE_STATION: infra = 1; break; default: err = -EINVAL; WL_ERR("invalid type (%d)\n", iftype); return err; } err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &infra); if (err) { WL_ERR("WLC_SET_INFRA error (%d)\n", err); return err; } return 0; } static s32 brcmf_dongle_eventmsg(struct net_device *ndev) { /* Room for "event_msgs" + '\0' + bitvec */ s8 iovbuf[BRCMF_EVENTING_MASK_LEN + 12]; s8 eventmask[BRCMF_EVENTING_MASK_LEN]; s32 err = 0; WL_TRACE("Enter\n"); /* Setup event_msgs */ brcmf_c_mkiovar("event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf)); err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, iovbuf, sizeof(iovbuf)); if (err) { WL_ERR("Get event_msgs error (%d)\n", err); goto dongle_eventmsg_out; } memcpy(eventmask, iovbuf, BRCMF_EVENTING_MASK_LEN); setbit(eventmask, BRCMF_E_SET_SSID); setbit(eventmask, BRCMF_E_ROAM); setbit(eventmask, BRCMF_E_PRUNE); setbit(eventmask, BRCMF_E_AUTH); setbit(eventmask, BRCMF_E_REASSOC); setbit(eventmask, BRCMF_E_REASSOC_IND); setbit(eventmask, BRCMF_E_DEAUTH_IND); setbit(eventmask, BRCMF_E_DISASSOC_IND); setbit(eventmask, BRCMF_E_DISASSOC); setbit(eventmask, BRCMF_E_JOIN); setbit(eventmask, BRCMF_E_ASSOC_IND); 
setbit(eventmask, BRCMF_E_PSK_SUP); setbit(eventmask, BRCMF_E_LINK); setbit(eventmask, BRCMF_E_NDIS_LINK); setbit(eventmask, BRCMF_E_MIC_ERROR); setbit(eventmask, BRCMF_E_PMKID_CACHE); setbit(eventmask, BRCMF_E_TXFAIL); setbit(eventmask, BRCMF_E_JOIN_START); setbit(eventmask, BRCMF_E_SCAN_COMPLETE); brcmf_c_mkiovar("event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf)); err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf)); if (err) { WL_ERR("Set event_msgs error (%d)\n", err); goto dongle_eventmsg_out; } dongle_eventmsg_out: WL_TRACE("Exit\n"); return err; } static s32 brcmf_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout) { s8 iovbuf[32]; s32 err = 0; __le32 roamtrigger[2]; __le32 roam_delta[2]; __le32 bcn_to_le; __le32 roamvar_le; /* * Setup timeout if Beacons are lost and roam is * off to report link down */ if (roamvar) { bcn_to_le = cpu_to_le32(bcn_timeout); brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_to_le, sizeof(bcn_to_le), iovbuf, sizeof(iovbuf)); err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf)); if (err) { WL_ERR("bcn_timeout error (%d)\n", err); goto dongle_rom_out; } } /* * Enable/Disable built-in roaming to allow supplicant * to take care of roaming */ WL_INFO("Internal Roaming = %s\n", roamvar ? 
"Off" : "On"); roamvar_le = cpu_to_le32(roamvar); brcmf_c_mkiovar("roam_off", (char *)&roamvar_le, sizeof(roamvar_le), iovbuf, sizeof(iovbuf)); err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf)); if (err) { WL_ERR("roam_off error (%d)\n", err); goto dongle_rom_out; } roamtrigger[0] = cpu_to_le32(WL_ROAM_TRIGGER_LEVEL); roamtrigger[1] = cpu_to_le32(BRCM_BAND_ALL); err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_ROAM_TRIGGER, (void *)roamtrigger, sizeof(roamtrigger)); if (err) { WL_ERR("WLC_SET_ROAM_TRIGGER error (%d)\n", err); goto dongle_rom_out; } roam_delta[0] = cpu_to_le32(WL_ROAM_DELTA); roam_delta[1] = cpu_to_le32(BRCM_BAND_ALL); err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_ROAM_DELTA, (void *)roam_delta, sizeof(roam_delta)); if (err) { WL_ERR("WLC_SET_ROAM_DELTA error (%d)\n", err); goto dongle_rom_out; } dongle_rom_out: return err; } static s32 brcmf_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time, s32 scan_unassoc_time, s32 scan_passive_time) { s32 err = 0; __le32 scan_assoc_tm_le = cpu_to_le32(scan_assoc_time); __le32 scan_unassoc_tm_le = cpu_to_le32(scan_unassoc_time); __le32 scan_passive_tm_le = cpu_to_le32(scan_passive_time); err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SCAN_CHANNEL_TIME, &scan_assoc_tm_le, sizeof(scan_assoc_tm_le)); if (err) { if (err == -EOPNOTSUPP) WL_INFO("Scan assoc time is not supported\n"); else WL_ERR("Scan assoc time error (%d)\n", err); goto dongle_scantime_out; } err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SCAN_UNASSOC_TIME, &scan_unassoc_tm_le, sizeof(scan_unassoc_tm_le)); if (err) { if (err == -EOPNOTSUPP) WL_INFO("Scan unassoc time is not supported\n"); else WL_ERR("Scan unassoc time error (%d)\n", err); goto dongle_scantime_out; } err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SCAN_PASSIVE_TIME, &scan_passive_tm_le, sizeof(scan_passive_tm_le)); if (err) { if (err == -EOPNOTSUPP) WL_INFO("Scan passive time is not supported\n"); else WL_ERR("Scan passive time error (%d)\n", err); goto dongle_scantime_out; } 
dongle_scantime_out: return err; } static s32 wl_update_wiphybands(struct brcmf_cfg80211_priv *cfg_priv) { struct wiphy *wiphy; s32 phy_list; s8 phy; s32 err = 0; err = brcmf_exec_dcmd(cfg_to_ndev(cfg_priv), BRCM_GET_PHYLIST, &phy_list, sizeof(phy_list)); if (err) { WL_ERR("error (%d)\n", err); return err; } phy = ((char *)&phy_list)[1]; WL_INFO("%c phy\n", phy); if (phy == 'n' || phy == 'a') { wiphy = cfg_to_wiphy(cfg_priv); wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_n; } return err; } static s32 brcmf_dongle_probecap(struct brcmf_cfg80211_priv *cfg_priv) { return wl_update_wiphybands(cfg_priv); } static s32 brcmf_config_dongle(struct brcmf_cfg80211_priv *cfg_priv) { struct net_device *ndev; struct wireless_dev *wdev; s32 power_mode; s32 err = 0; if (cfg_priv->dongle_up) return err; ndev = cfg_to_ndev(cfg_priv); wdev = ndev->ieee80211_ptr; brcmf_dongle_scantime(ndev, WL_SCAN_CHANNEL_TIME, WL_SCAN_UNASSOC_TIME, WL_SCAN_PASSIVE_TIME); err = brcmf_dongle_eventmsg(ndev); if (err) goto default_conf_out; power_mode = cfg_priv->pwr_save ? PM_FAST : PM_OFF; err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_PM, &power_mode); if (err) goto default_conf_out; WL_INFO("power save set to %s\n", (power_mode ? "enabled" : "disabled")); err = brcmf_dongle_roam(ndev, (cfg_priv->roam_on ? 
0 : 1), WL_BEACON_TIMEOUT); if (err) goto default_conf_out; err = brcmf_dongle_mode(ndev, wdev->iftype); if (err && err != -EINPROGRESS) goto default_conf_out; err = brcmf_dongle_probecap(cfg_priv); if (err) goto default_conf_out; /* -EINPROGRESS: Call commit handler */ default_conf_out: cfg_priv->dongle_up = true; return err; } static int brcmf_debugfs_add_netdev_params(struct brcmf_cfg80211_priv *cfg_priv) { char buf[10+IFNAMSIZ]; struct dentry *fd; s32 err = 0; sprintf(buf, "netdev:%s", cfg_to_ndev(cfg_priv)->name); cfg_priv->debugfsdir = debugfs_create_dir(buf, cfg_to_wiphy(cfg_priv)->debugfsdir); fd = debugfs_create_u16("beacon_int", S_IRUGO, cfg_priv->debugfsdir, (u16 *)&cfg_priv->profile->beacon_interval); if (!fd) { err = -ENOMEM; goto err_out; } fd = debugfs_create_u8("dtim_period", S_IRUGO, cfg_priv->debugfsdir, (u8 *)&cfg_priv->profile->dtim_period); if (!fd) { err = -ENOMEM; goto err_out; } err_out: return err; } static void brcmf_debugfs_remove_netdev(struct brcmf_cfg80211_priv *cfg_priv) { debugfs_remove_recursive(cfg_priv->debugfsdir); cfg_priv->debugfsdir = NULL; } static s32 __brcmf_cfg80211_up(struct brcmf_cfg80211_priv *cfg_priv) { s32 err = 0; set_bit(WL_STATUS_READY, &cfg_priv->status); brcmf_debugfs_add_netdev_params(cfg_priv); err = brcmf_config_dongle(cfg_priv); if (err) return err; brcmf_invoke_iscan(cfg_priv); return err; } static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_priv *cfg_priv) { /* * While going down, if associated with AP disassociate * from AP to save power */ if ((test_bit(WL_STATUS_CONNECTED, &cfg_priv->status) || test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) && test_bit(WL_STATUS_READY, &cfg_priv->status)) { WL_INFO("Disassociating from AP"); brcmf_link_down(cfg_priv); /* Make sure WPA_Supplicant receives all the event generated due to DISASSOC call to the fw to keep the state fw and WPA_Supplicant state consistent */ brcmf_delay(500); } set_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status); 
brcmf_term_iscan(cfg_priv); if (cfg_priv->scan_request) { cfg80211_scan_done(cfg_priv->scan_request, true); /* May need to perform this to cover rmmod */ /* wl_set_mpc(cfg_to_ndev(wl), 1); */ cfg_priv->scan_request = NULL; } clear_bit(WL_STATUS_READY, &cfg_priv->status); clear_bit(WL_STATUS_SCANNING, &cfg_priv->status); clear_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status); brcmf_debugfs_remove_netdev(cfg_priv); return 0; } s32 brcmf_cfg80211_up(struct brcmf_cfg80211_dev *cfg_dev) { struct brcmf_cfg80211_priv *cfg_priv; s32 err = 0; cfg_priv = brcmf_priv_get(cfg_dev); mutex_lock(&cfg_priv->usr_sync); err = __brcmf_cfg80211_up(cfg_priv); mutex_unlock(&cfg_priv->usr_sync); return err; } s32 brcmf_cfg80211_down(struct brcmf_cfg80211_dev *cfg_dev) { struct brcmf_cfg80211_priv *cfg_priv; s32 err = 0; cfg_priv = brcmf_priv_get(cfg_dev); mutex_lock(&cfg_priv->usr_sync); err = __brcmf_cfg80211_down(cfg_priv); mutex_unlock(&cfg_priv->usr_sync); return err; } static __used s32 brcmf_add_ie(struct brcmf_cfg80211_priv *cfg_priv, u8 t, u8 l, u8 *v) { struct brcmf_cfg80211_ie *ie = &cfg_priv->ie; s32 err = 0; if (ie->offset + l + 2 > WL_TLV_INFO_MAX) { WL_ERR("ei crosses buffer boundary\n"); return -ENOSPC; } ie->buf[ie->offset] = t; ie->buf[ie->offset + 1] = l; memcpy(&ie->buf[ie->offset + 2], v, l); ie->offset += l + 2; return err; }
gpl-2.0
Krabappel2548/apq8064_Revolution-_kernel
arch/arm/kernel/asm-offsets.c
4342
5950
/* * Copyright (C) 1995-2003 Russell King * 2001-2002 Keith Owens * * Generate definitions needed by assembly language modules. * This code generates raw asm output which is post-processed to extract * and format the required data. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/dma-mapping.h> #include <asm/cacheflush.h> #include <asm/glue-df.h> #include <asm/glue-pf.h> #include <asm/mach/arch.h> #include <asm/thread_info.h> #include <asm/memory.h> #include <asm/procinfo.h> #include <asm/hardware/cache-l2x0.h> #include <linux/kbuild.h> /* * Make sure that the compiler and target are compatible. */ #if defined(__APCS_26__) #error Sorry, your compiler targets APCS-26 but this kernel requires APCS-32 #endif /* * GCC 3.0, 3.1: general bad code generation. * GCC 3.2.0: incorrect function argument offset calculation. * GCC 3.2.x: miscompiles NEW_AUX_ENT in fs/binfmt_elf.c * (http://gcc.gnu.org/PR8896) and incorrect structure * initialisation in fs/jffs2/erase.c */ #if (__GNUC__ == 3 && __GNUC_MINOR__ < 3) #error Your compiler is too buggy; it is known to miscompile kernels. 
#error Known good compilers: 3.3 #endif int main(void) { DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); #ifdef CONFIG_CC_STACKPROTECTOR DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary)); #endif BLANK(); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); DEFINE(TI_TASK, offsetof(struct thread_info, task)); DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain)); DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); DEFINE(TI_CPU_DOMAIN, offsetof(struct thread_info, cpu_domain)); DEFINE(TI_CPU_SAVE, offsetof(struct thread_info, cpu_context)); DEFINE(TI_USED_CP, offsetof(struct thread_info, used_cp)); DEFINE(TI_TP_VALUE, offsetof(struct thread_info, tp_value)); DEFINE(TI_FPSTATE, offsetof(struct thread_info, fpstate)); DEFINE(TI_VFPSTATE, offsetof(struct thread_info, vfpstate)); #ifdef CONFIG_SMP DEFINE(VFP_CPU, offsetof(union vfp_state, hard.cpu)); #endif #ifdef CONFIG_ARM_THUMBEE DEFINE(TI_THUMBEE_STATE, offsetof(struct thread_info, thumbee_state)); #endif #ifdef CONFIG_IWMMXT DEFINE(TI_IWMMXT_STATE, offsetof(struct thread_info, fpstate.iwmmxt)); #endif #ifdef CONFIG_CRUNCH DEFINE(TI_CRUNCH_STATE, offsetof(struct thread_info, crunchstate)); #endif BLANK(); DEFINE(S_R0, offsetof(struct pt_regs, ARM_r0)); DEFINE(S_R1, offsetof(struct pt_regs, ARM_r1)); DEFINE(S_R2, offsetof(struct pt_regs, ARM_r2)); DEFINE(S_R3, offsetof(struct pt_regs, ARM_r3)); DEFINE(S_R4, offsetof(struct pt_regs, ARM_r4)); DEFINE(S_R5, offsetof(struct pt_regs, ARM_r5)); DEFINE(S_R6, offsetof(struct pt_regs, ARM_r6)); DEFINE(S_R7, offsetof(struct pt_regs, ARM_r7)); DEFINE(S_R8, offsetof(struct pt_regs, ARM_r8)); DEFINE(S_R9, offsetof(struct pt_regs, ARM_r9)); DEFINE(S_R10, offsetof(struct pt_regs, ARM_r10)); DEFINE(S_FP, offsetof(struct pt_regs, ARM_fp)); DEFINE(S_IP, offsetof(struct pt_regs, ARM_ip)); DEFINE(S_SP, 
offsetof(struct pt_regs, ARM_sp)); DEFINE(S_LR, offsetof(struct pt_regs, ARM_lr)); DEFINE(S_PC, offsetof(struct pt_regs, ARM_pc)); DEFINE(S_PSR, offsetof(struct pt_regs, ARM_cpsr)); DEFINE(S_OLD_R0, offsetof(struct pt_regs, ARM_ORIG_r0)); DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs)); BLANK(); #ifdef CONFIG_CACHE_L2X0 DEFINE(L2X0_R_PHY_BASE, offsetof(struct l2x0_regs, phy_base)); DEFINE(L2X0_R_AUX_CTRL, offsetof(struct l2x0_regs, aux_ctrl)); DEFINE(L2X0_R_TAG_LATENCY, offsetof(struct l2x0_regs, tag_latency)); DEFINE(L2X0_R_DATA_LATENCY, offsetof(struct l2x0_regs, data_latency)); DEFINE(L2X0_R_FILTER_START, offsetof(struct l2x0_regs, filter_start)); DEFINE(L2X0_R_FILTER_END, offsetof(struct l2x0_regs, filter_end)); DEFINE(L2X0_R_PREFETCH_CTRL, offsetof(struct l2x0_regs, prefetch_ctrl)); DEFINE(L2X0_R_PWR_CTRL, offsetof(struct l2x0_regs, pwr_ctrl)); BLANK(); #endif #ifdef CONFIG_CPU_HAS_ASID DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id)); BLANK(); #endif DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); DEFINE(VMA_VM_FLAGS, offsetof(struct vm_area_struct, vm_flags)); BLANK(); DEFINE(VM_EXEC, VM_EXEC); BLANK(); DEFINE(PAGE_SZ, PAGE_SIZE); BLANK(); DEFINE(SYS_ERROR0, 0x9f0000); BLANK(); DEFINE(SIZEOF_MACHINE_DESC, sizeof(struct machine_desc)); DEFINE(MACHINFO_TYPE, offsetof(struct machine_desc, nr)); DEFINE(MACHINFO_NAME, offsetof(struct machine_desc, name)); BLANK(); DEFINE(PROC_INFO_SZ, sizeof(struct proc_info_list)); DEFINE(PROCINFO_INITFUNC, offsetof(struct proc_info_list, __cpu_flush)); DEFINE(PROCINFO_MM_MMUFLAGS, offsetof(struct proc_info_list, __cpu_mm_mmu_flags)); DEFINE(PROCINFO_IO_MMUFLAGS, offsetof(struct proc_info_list, __cpu_io_mmu_flags)); BLANK(); #ifdef MULTI_DABORT DEFINE(PROCESSOR_DABT_FUNC, offsetof(struct processor, _data_abort)); #endif #ifdef MULTI_PABORT DEFINE(PROCESSOR_PABT_FUNC, offsetof(struct processor, _prefetch_abort)); #endif #ifdef MULTI_CPU DEFINE(CPU_SLEEP_SIZE, offsetof(struct processor, suspend_size)); 
DEFINE(CPU_DO_SUSPEND, offsetof(struct processor, do_suspend)); DEFINE(CPU_DO_RESUME, offsetof(struct processor, do_resume)); #endif #ifdef MULTI_CACHE DEFINE(CACHE_FLUSH_KERN_ALL, offsetof(struct cpu_cache_fns, flush_kern_all)); #endif BLANK(); DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); return 0; }
gpl-2.0
nsingh94/caf-7x30
drivers/net/ethernet/intel/igb/e1000_82575.c
4854
56655
/******************************************************************************* Intel(R) Gigabit Ethernet Linux driver Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Contact Information: e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *******************************************************************************/ /* e1000_82575 * e1000_82576 */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include <linux/if_ether.h> #include "e1000_mac.h" #include "e1000_82575.h" static s32 igb_get_invariants_82575(struct e1000_hw *); static s32 igb_acquire_phy_82575(struct e1000_hw *); static void igb_release_phy_82575(struct e1000_hw *); static s32 igb_acquire_nvm_82575(struct e1000_hw *); static void igb_release_nvm_82575(struct e1000_hw *); static s32 igb_check_for_link_82575(struct e1000_hw *); static s32 igb_get_cfg_done_82575(struct e1000_hw *); static s32 igb_init_hw_82575(struct e1000_hw *); static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *); static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *); static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *); static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16); static 
s32 igb_reset_hw_82575(struct e1000_hw *); static s32 igb_reset_hw_82580(struct e1000_hw *); static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool); static s32 igb_setup_copper_link_82575(struct e1000_hw *); static s32 igb_setup_serdes_link_82575(struct e1000_hw *); static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16); static void igb_clear_hw_cntrs_82575(struct e1000_hw *); static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16); static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *, u16 *); static s32 igb_get_phy_id_82575(struct e1000_hw *); static void igb_release_swfw_sync_82575(struct e1000_hw *, u16); static bool igb_sgmii_active_82575(struct e1000_hw *); static s32 igb_reset_init_script_82575(struct e1000_hw *); static s32 igb_read_mac_addr_82575(struct e1000_hw *); static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw); static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw); static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw); static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw); static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw); static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw); static const u16 e1000_82580_rxpbs_table[] = { 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 }; #define E1000_82580_RXPBS_TABLE_SIZE \ (sizeof(e1000_82580_rxpbs_table)/sizeof(u16)) /** * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO * @hw: pointer to the HW structure * * Called to determine if the I2C pins are being used for I2C or as an * external MDIO interface since the two options are mutually exclusive. 
**/ static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw) { u32 reg = 0; bool ext_mdio = false; switch (hw->mac.type) { case e1000_82575: case e1000_82576: reg = rd32(E1000_MDIC); ext_mdio = !!(reg & E1000_MDIC_DEST); break; case e1000_82580: case e1000_i350: reg = rd32(E1000_MDICNFG); ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); break; default: break; } return ext_mdio; } static s32 igb_get_invariants_82575(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; struct e1000_nvm_info *nvm = &hw->nvm; struct e1000_mac_info *mac = &hw->mac; struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575; u32 eecd; s32 ret_val; u16 size; u32 ctrl_ext = 0; switch (hw->device_id) { case E1000_DEV_ID_82575EB_COPPER: case E1000_DEV_ID_82575EB_FIBER_SERDES: case E1000_DEV_ID_82575GB_QUAD_COPPER: mac->type = e1000_82575; break; case E1000_DEV_ID_82576: case E1000_DEV_ID_82576_NS: case E1000_DEV_ID_82576_NS_SERDES: case E1000_DEV_ID_82576_FIBER: case E1000_DEV_ID_82576_SERDES: case E1000_DEV_ID_82576_QUAD_COPPER: case E1000_DEV_ID_82576_QUAD_COPPER_ET2: case E1000_DEV_ID_82576_SERDES_QUAD: mac->type = e1000_82576; break; case E1000_DEV_ID_82580_COPPER: case E1000_DEV_ID_82580_FIBER: case E1000_DEV_ID_82580_QUAD_FIBER: case E1000_DEV_ID_82580_SERDES: case E1000_DEV_ID_82580_SGMII: case E1000_DEV_ID_82580_COPPER_DUAL: case E1000_DEV_ID_DH89XXCC_SGMII: case E1000_DEV_ID_DH89XXCC_SERDES: case E1000_DEV_ID_DH89XXCC_BACKPLANE: case E1000_DEV_ID_DH89XXCC_SFP: mac->type = e1000_82580; break; case E1000_DEV_ID_I350_COPPER: case E1000_DEV_ID_I350_FIBER: case E1000_DEV_ID_I350_SERDES: case E1000_DEV_ID_I350_SGMII: mac->type = e1000_i350; break; default: return -E1000_ERR_MAC_INIT; break; } /* Set media type */ /* * The 82575 uses bits 22:23 for link mode. The mode can be changed * based on the EEPROM. We cannot rely upon device ID. There * is no distinguishable difference between fiber and internal * SerDes mode on the 82575. 
There can be an external PHY attached * on the SGMII interface. For this, we'll set sgmii_active to true. */ phy->media_type = e1000_media_type_copper; dev_spec->sgmii_active = false; ctrl_ext = rd32(E1000_CTRL_EXT); switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { case E1000_CTRL_EXT_LINK_MODE_SGMII: dev_spec->sgmii_active = true; break; case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: hw->phy.media_type = e1000_media_type_internal_serdes; break; default: break; } /* Set mta register count */ mac->mta_reg_count = 128; /* Set rar entry count */ mac->rar_entry_count = E1000_RAR_ENTRIES_82575; if (mac->type == e1000_82576) mac->rar_entry_count = E1000_RAR_ENTRIES_82576; if (mac->type == e1000_82580) mac->rar_entry_count = E1000_RAR_ENTRIES_82580; if (mac->type == e1000_i350) mac->rar_entry_count = E1000_RAR_ENTRIES_I350; /* reset */ if (mac->type >= e1000_82580) mac->ops.reset_hw = igb_reset_hw_82580; else mac->ops.reset_hw = igb_reset_hw_82575; /* Set if part includes ASF firmware */ mac->asf_firmware_present = true; /* Set if manageability features are enabled. */ mac->arc_subsystem_valid = (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK) ? true : false; /* enable EEE on i350 parts */ if (mac->type == e1000_i350) dev_spec->eee_disable = false; else dev_spec->eee_disable = true; /* physical interface link setup */ mac->ops.setup_physical_interface = (hw->phy.media_type == e1000_media_type_copper) ? igb_setup_copper_link_82575 : igb_setup_serdes_link_82575; /* NVM initialization */ eecd = rd32(E1000_EECD); nvm->opcode_bits = 8; nvm->delay_usec = 1; switch (nvm->override) { case e1000_nvm_override_spi_large: nvm->page_size = 32; nvm->address_bits = 16; break; case e1000_nvm_override_spi_small: nvm->page_size = 8; nvm->address_bits = 8; break; default: nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 
16 : 8; break; } nvm->type = e1000_nvm_eeprom_spi; size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> E1000_EECD_SIZE_EX_SHIFT); /* * Added to a constant, "size" becomes the left-shift value * for setting word_size. */ size += NVM_WORD_SIZE_BASE_SHIFT; /* * Check for invalid size */ if ((hw->mac.type == e1000_82576) && (size > 15)) { pr_notice("The NVM size is not valid, defaulting to 32K\n"); size = 15; } nvm->word_size = 1 << size; if (nvm->word_size == (1 << 15)) nvm->page_size = 128; /* NVM Function Pointers */ nvm->ops.acquire = igb_acquire_nvm_82575; if (nvm->word_size < (1 << 15)) nvm->ops.read = igb_read_nvm_eerd; else nvm->ops.read = igb_read_nvm_spi; nvm->ops.release = igb_release_nvm_82575; switch (hw->mac.type) { case e1000_82580: nvm->ops.validate = igb_validate_nvm_checksum_82580; nvm->ops.update = igb_update_nvm_checksum_82580; break; case e1000_i350: nvm->ops.validate = igb_validate_nvm_checksum_i350; nvm->ops.update = igb_update_nvm_checksum_i350; break; default: nvm->ops.validate = igb_validate_nvm_checksum; nvm->ops.update = igb_update_nvm_checksum; } nvm->ops.write = igb_write_nvm_spi; /* if part supports SR-IOV then initialize mailbox parameters */ switch (mac->type) { case e1000_82576: case e1000_i350: igb_init_mbx_params_pf(hw); break; default: break; } /* setup PHY parameters */ if (phy->media_type != e1000_media_type_copper) { phy->type = e1000_phy_none; return 0; } phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; phy->reset_delay_us = 100; ctrl_ext = rd32(E1000_CTRL_EXT); /* PHY function pointers */ if (igb_sgmii_active_82575(hw)) { phy->ops.reset = igb_phy_hw_reset_sgmii_82575; ctrl_ext |= E1000_CTRL_I2C_ENA; } else { phy->ops.reset = igb_phy_hw_reset; ctrl_ext &= ~E1000_CTRL_I2C_ENA; } wr32(E1000_CTRL_EXT, ctrl_ext); igb_reset_mdicnfg_82580(hw); if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) { phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; } else if (hw->mac.type 
>= e1000_82580) { phy->ops.read_reg = igb_read_phy_reg_82580; phy->ops.write_reg = igb_write_phy_reg_82580; } else { phy->ops.read_reg = igb_read_phy_reg_igp; phy->ops.write_reg = igb_write_phy_reg_igp; } /* set lan id */ hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; /* Set phy->phy_addr and phy->id. */ ret_val = igb_get_phy_id_82575(hw); if (ret_val) return ret_val; /* Verify phy id and set remaining function pointers */ switch (phy->id) { case I347AT4_E_PHY_ID: case M88E1112_E_PHY_ID: case M88E1111_I_PHY_ID: phy->type = e1000_phy_m88; phy->ops.get_phy_info = igb_get_phy_info_m88; if (phy->id == I347AT4_E_PHY_ID || phy->id == M88E1112_E_PHY_ID) phy->ops.get_cable_length = igb_get_cable_length_m88_gen2; else phy->ops.get_cable_length = igb_get_cable_length_m88; phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; break; case IGP03E1000_E_PHY_ID: phy->type = e1000_phy_igp_3; phy->ops.get_phy_info = igb_get_phy_info_igp; phy->ops.get_cable_length = igb_get_cable_length_igp_2; phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp; phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575; phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state; break; case I82580_I_PHY_ID: case I350_I_PHY_ID: phy->type = e1000_phy_82580; phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580; phy->ops.get_cable_length = igb_get_cable_length_82580; phy->ops.get_phy_info = igb_get_phy_info_82580; break; default: return -E1000_ERR_PHY; } return 0; } /** * igb_acquire_phy_82575 - Acquire rights to access PHY * @hw: pointer to the HW structure * * Acquire access rights to the correct PHY. This is a * function pointer entry point called by the api module. 
**/ static s32 igb_acquire_phy_82575(struct e1000_hw *hw) { u16 mask = E1000_SWFW_PHY0_SM; if (hw->bus.func == E1000_FUNC_1) mask = E1000_SWFW_PHY1_SM; else if (hw->bus.func == E1000_FUNC_2) mask = E1000_SWFW_PHY2_SM; else if (hw->bus.func == E1000_FUNC_3) mask = E1000_SWFW_PHY3_SM; return igb_acquire_swfw_sync_82575(hw, mask); } /** * igb_release_phy_82575 - Release rights to access PHY * @hw: pointer to the HW structure * * A wrapper to release access rights to the correct PHY. This is a * function pointer entry point called by the api module. **/ static void igb_release_phy_82575(struct e1000_hw *hw) { u16 mask = E1000_SWFW_PHY0_SM; if (hw->bus.func == E1000_FUNC_1) mask = E1000_SWFW_PHY1_SM; else if (hw->bus.func == E1000_FUNC_2) mask = E1000_SWFW_PHY2_SM; else if (hw->bus.func == E1000_FUNC_3) mask = E1000_SWFW_PHY3_SM; igb_release_swfw_sync_82575(hw, mask); } /** * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Reads the PHY register at offset using the serial gigabit media independent * interface and stores the retrieved information in data. **/ static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, u16 *data) { s32 ret_val = -E1000_ERR_PARAM; if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { hw_dbg("PHY Address %u is out of range\n", offset); goto out; } ret_val = hw->phy.ops.acquire(hw); if (ret_val) goto out; ret_val = igb_read_phy_reg_i2c(hw, offset, data); hw->phy.ops.release(hw); out: return ret_val; } /** * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Writes the data to PHY register at the offset using the serial gigabit * media independent interface. 
**/ static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, u16 data) { s32 ret_val = -E1000_ERR_PARAM; if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { hw_dbg("PHY Address %d is out of range\n", offset); goto out; } ret_val = hw->phy.ops.acquire(hw); if (ret_val) goto out; ret_val = igb_write_phy_reg_i2c(hw, offset, data); hw->phy.ops.release(hw); out: return ret_val; } /** * igb_get_phy_id_82575 - Retrieve PHY addr and id * @hw: pointer to the HW structure * * Retrieves the PHY address and ID for both PHY's which do and do not use * sgmi interface. **/ static s32 igb_get_phy_id_82575(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val = 0; u16 phy_id; u32 ctrl_ext; u32 mdic; /* * For SGMII PHYs, we try the list of possible addresses until * we find one that works. For non-SGMII PHYs * (e.g. integrated copper PHYs), an address of 1 should * work. The result of this function should mean phy->phy_addr * and phy->id are set correctly. */ if (!(igb_sgmii_active_82575(hw))) { phy->addr = 1; ret_val = igb_get_phy_id(hw); goto out; } if (igb_sgmii_uses_mdio_82575(hw)) { switch (hw->mac.type) { case e1000_82575: case e1000_82576: mdic = rd32(E1000_MDIC); mdic &= E1000_MDIC_PHY_MASK; phy->addr = mdic >> E1000_MDIC_PHY_SHIFT; break; case e1000_82580: case e1000_i350: mdic = rd32(E1000_MDICNFG); mdic &= E1000_MDICNFG_PHY_MASK; phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; break; default: ret_val = -E1000_ERR_PHY; goto out; break; } ret_val = igb_get_phy_id(hw); goto out; } /* Power on sgmii phy if it is disabled */ ctrl_ext = rd32(E1000_CTRL_EXT); wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); wrfl(); msleep(300); /* * The address field in the I2CCMD register is 3 bits and 0 is invalid. 
* Therefore, we need to test 1-7 */ for (phy->addr = 1; phy->addr < 8; phy->addr++) { ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); if (ret_val == 0) { hw_dbg("Vendor ID 0x%08X read at address %u\n", phy_id, phy->addr); /* * At the time of this writing, The M88 part is * the only supported SGMII PHY product. */ if (phy_id == M88_VENDOR) break; } else { hw_dbg("PHY address %u was unreadable\n", phy->addr); } } /* A valid PHY type couldn't be found. */ if (phy->addr == 8) { phy->addr = 0; ret_val = -E1000_ERR_PHY; goto out; } else { ret_val = igb_get_phy_id(hw); } /* restore previous sfp cage power state */ wr32(E1000_CTRL_EXT, ctrl_ext); out: return ret_val; } /** * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset * @hw: pointer to the HW structure * * Resets the PHY using the serial gigabit media independent interface. **/ static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) { s32 ret_val; /* * This isn't a true "hard" reset, but is the only reset * available to us at this time. */ hw_dbg("Soft resetting SGMII attached PHY...\n"); /* * SFP documentation requires the following to configure the SPF module * to work on SGMII. No further documentation is given. */ ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); if (ret_val) goto out; ret_val = igb_phy_sw_reset(hw); out: return ret_val; } /** * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state * @hw: pointer to the HW structure * @active: true to enable LPLU, false to disable * * Sets the LPLU D0 state according to the active flag. When * activating LPLU this function also disables smart speed * and vice versa. LPLU will not be activated unless the * device autonegotiation advertisement meets standards of * either 10 or 10/100 or 10/100/1000 at all duplexes. * This is a function pointer entry point only called by * PHY setup routines. 
**/
static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;

	ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
	if (ret_val)
		goto out;

	if (active) {
		data |= IGP02E1000_PM_D0_LPLU;
		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
					     data);
		if (ret_val)
			goto out;

		/* When LPLU is enabled, we should disable SmartSpeed */
		ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
					    &data);
		/* Bail out on a failed read; otherwise 'data' is stale and a
		 * bogus value would be written back to the PHY below. */
		if (ret_val)
			goto out;
		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
		ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
					     data);
		if (ret_val)
			goto out;
	} else {
		data &= ~IGP02E1000_PM_D0_LPLU;
		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
					     data);
		/*
		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
		 * during Dx states where the power conservation is most
		 * important.  During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on) {
			ret_val = phy->ops.read_reg(hw,
					IGP01E1000_PHY_PORT_CONFIG, &data);
			if (ret_val)
				goto out;

			data |= IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = phy->ops.write_reg(hw,
					IGP01E1000_PHY_PORT_CONFIG, data);
			if (ret_val)
				goto out;
		} else if (phy->smart_speed == e1000_smart_speed_off) {
			ret_val = phy->ops.read_reg(hw,
					IGP01E1000_PHY_PORT_CONFIG, &data);
			if (ret_val)
				goto out;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = phy->ops.write_reg(hw,
					IGP01E1000_PHY_PORT_CONFIG, data);
			if (ret_val)
				goto out;
		}
	}

out:
	return ret_val;
}

/**
 * igb_acquire_nvm_82575 - Request for access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Acquire the necessary semaphores for exclusive access to the EEPROM.
 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
 * Return successful if access grant bit set, else clear the request for
 * EEPROM access and return -E1000_ERR_NVM (-1).
**/
static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
{
	s32 ret_val;

	ret_val = igb_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
	if (ret_val)
		goto out;

	ret_val = igb_acquire_nvm(hw);

	/* Do not leak the SW/FW semaphore if the lower-level acquire fails. */
	if (ret_val)
		igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);

out:
	return ret_val;
}

/**
 * igb_release_nvm_82575 - Release exclusive access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
 * then release the semaphores acquired.
 **/
static void igb_release_nvm_82575(struct e1000_hw *hw)
{
	/* Release in reverse order of acquisition: NVM first, then the
	 * SW/FW semaphore taken in igb_acquire_nvm_82575(). */
	igb_release_nvm(hw);
	igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
}

/**
 * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 * will also specify which port we're acquiring the lock for.
 **/
static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;	/* firmware bits occupy the upper half */
	s32 ret_val = 0;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	while (i < timeout) {
		/* The HW semaphore serializes access to SW_FW_SYNC itself. */
		if (igb_get_hw_semaphore(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = rd32(E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/*
		 * Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		igb_put_hw_semaphore(hw);
		mdelay(5);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	/* Claim our bit, then drop the HW semaphore; the SW_FW_SYNC bit
	 * itself now protects the resource. */
	swfw_sync |= swmask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);

out:
	return ret_val;
}

/**
 * igb_release_swfw_sync_82575 - Release SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Release the SW/FW semaphore used to access the PHY or NVM.  The mask
 * will also specify which port we're releasing the lock for.
**/ static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) { u32 swfw_sync; while (igb_get_hw_semaphore(hw) != 0); /* Empty */ swfw_sync = rd32(E1000_SW_FW_SYNC); swfw_sync &= ~mask; wr32(E1000_SW_FW_SYNC, swfw_sync); igb_put_hw_semaphore(hw); } /** * igb_get_cfg_done_82575 - Read config done bit * @hw: pointer to the HW structure * * Read the management control register for the config done bit for * completion status. NOTE: silicon which is EEPROM-less will fail trying * to read the config done bit, so an error is *ONLY* logged and returns * 0. If we were to return with error, EEPROM-less silicon * would not be able to be reset or change link. **/ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw) { s32 timeout = PHY_CFG_TIMEOUT; s32 ret_val = 0; u32 mask = E1000_NVM_CFG_DONE_PORT_0; if (hw->bus.func == 1) mask = E1000_NVM_CFG_DONE_PORT_1; else if (hw->bus.func == E1000_FUNC_2) mask = E1000_NVM_CFG_DONE_PORT_2; else if (hw->bus.func == E1000_FUNC_3) mask = E1000_NVM_CFG_DONE_PORT_3; while (timeout) { if (rd32(E1000_EEMNGCTL) & mask) break; msleep(1); timeout--; } if (!timeout) hw_dbg("MNG configuration cycle has not completed.\n"); /* If EEPROM is not marked present, init the PHY manually */ if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) && (hw->phy.type == e1000_phy_igp_3)) igb_phy_init_script_igp3(hw); return ret_val; } /** * igb_check_for_link_82575 - Check for link * @hw: pointer to the HW structure * * If sgmii is enabled, then use the pcs register to determine link, otherwise * use the generic interface for determining link. **/ static s32 igb_check_for_link_82575(struct e1000_hw *hw) { s32 ret_val; u16 speed, duplex; if (hw->phy.media_type != e1000_media_type_copper) { ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, &duplex); /* * Use this flag to determine if link needs to be checked or * not. If we have link clear the flag so that we do not * continue to check for link. 
*/ hw->mac.get_link_status = !hw->mac.serdes_has_link; } else { ret_val = igb_check_for_copper_link(hw); } return ret_val; } /** * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown * @hw: pointer to the HW structure **/ void igb_power_up_serdes_link_82575(struct e1000_hw *hw) { u32 reg; if ((hw->phy.media_type != e1000_media_type_internal_serdes) && !igb_sgmii_active_82575(hw)) return; /* Enable PCS to turn on link */ reg = rd32(E1000_PCS_CFG0); reg |= E1000_PCS_CFG_PCS_EN; wr32(E1000_PCS_CFG0, reg); /* Power up the laser */ reg = rd32(E1000_CTRL_EXT); reg &= ~E1000_CTRL_EXT_SDP3_DATA; wr32(E1000_CTRL_EXT, reg); /* flush the write to verify completion */ wrfl(); msleep(1); } /** * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex * @hw: pointer to the HW structure * @speed: stores the current speed * @duplex: stores the current duplex * * Using the physical coding sub-layer (PCS), retrieve the current speed and * duplex, then store the values in the pointers provided. **/ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed, u16 *duplex) { struct e1000_mac_info *mac = &hw->mac; u32 pcs; /* Set up defaults for the return values of this function */ mac->serdes_has_link = false; *speed = 0; *duplex = 0; /* * Read the PCS Status register for link state. For non-copper mode, * the status register is not accurate. The PCS status register is * used instead. */ pcs = rd32(E1000_PCS_LSTAT); /* * The link up bit determines when link is up on autoneg. The sync ok * gets set once both sides sync up and agree upon link. 
Stable link * can be determined by checking for both link up and link sync ok */ if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) { mac->serdes_has_link = true; /* Detect and store PCS speed */ if (pcs & E1000_PCS_LSTS_SPEED_1000) { *speed = SPEED_1000; } else if (pcs & E1000_PCS_LSTS_SPEED_100) { *speed = SPEED_100; } else { *speed = SPEED_10; } /* Detect and store PCS duplex */ if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) { *duplex = FULL_DUPLEX; } else { *duplex = HALF_DUPLEX; } } return 0; } /** * igb_shutdown_serdes_link_82575 - Remove link during power down * @hw: pointer to the HW structure * * In the case of fiber serdes, shut down optics and PCS on driver unload * when management pass thru is not enabled. **/ void igb_shutdown_serdes_link_82575(struct e1000_hw *hw) { u32 reg; if (hw->phy.media_type != e1000_media_type_internal_serdes && igb_sgmii_active_82575(hw)) return; if (!igb_enable_mng_pass_thru(hw)) { /* Disable PCS to turn off link */ reg = rd32(E1000_PCS_CFG0); reg &= ~E1000_PCS_CFG_PCS_EN; wr32(E1000_PCS_CFG0, reg); /* shutdown the laser */ reg = rd32(E1000_CTRL_EXT); reg |= E1000_CTRL_EXT_SDP3_DATA; wr32(E1000_CTRL_EXT, reg); /* flush the write to verify completion */ wrfl(); msleep(1); } } /** * igb_reset_hw_82575 - Reset hardware * @hw: pointer to the HW structure * * This resets the hardware into a known state. This is a * function pointer entry point called by the api module. **/ static s32 igb_reset_hw_82575(struct e1000_hw *hw) { u32 ctrl, icr; s32 ret_val; /* * Prevent the PCI-E bus from sticking if there is no TLP connection * on the last TLP read/write transaction when MAC is reset. 
*/ ret_val = igb_disable_pcie_master(hw); if (ret_val) hw_dbg("PCI-E Master disable polling has failed.\n"); /* set the completion timeout for interface */ ret_val = igb_set_pcie_completion_timeout(hw); if (ret_val) { hw_dbg("PCI-E Set completion timeout has failed.\n"); } hw_dbg("Masking off all interrupts\n"); wr32(E1000_IMC, 0xffffffff); wr32(E1000_RCTL, 0); wr32(E1000_TCTL, E1000_TCTL_PSP); wrfl(); msleep(10); ctrl = rd32(E1000_CTRL); hw_dbg("Issuing a global reset to MAC\n"); wr32(E1000_CTRL, ctrl | E1000_CTRL_RST); ret_val = igb_get_auto_rd_done(hw); if (ret_val) { /* * When auto config read does not complete, do not * return with an error. This can happen in situations * where there is no eeprom and prevents getting link. */ hw_dbg("Auto Read Done did not complete\n"); } /* If EEPROM is not present, run manual init scripts */ if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) igb_reset_init_script_82575(hw); /* Clear any pending interrupt events. */ wr32(E1000_IMC, 0xffffffff); icr = rd32(E1000_ICR); /* Install any alternate MAC address into RAR0 */ ret_val = igb_check_alt_mac_addr(hw); return ret_val; } /** * igb_init_hw_82575 - Initialize hardware * @hw: pointer to the HW structure * * This inits the hardware readying it for operation. 
**/ static s32 igb_init_hw_82575(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; s32 ret_val; u16 i, rar_count = mac->rar_entry_count; /* Initialize identification LED */ ret_val = igb_id_led_init(hw); if (ret_val) { hw_dbg("Error initializing identification LED\n"); /* This is not fatal and we should not stop init due to this */ } /* Disabling VLAN filtering */ hw_dbg("Initializing the IEEE VLAN\n"); if (hw->mac.type == e1000_i350) igb_clear_vfta_i350(hw); else igb_clear_vfta(hw); /* Setup the receive address */ igb_init_rx_addrs(hw, rar_count); /* Zero out the Multicast HASH table */ hw_dbg("Zeroing the MTA\n"); for (i = 0; i < mac->mta_reg_count; i++) array_wr32(E1000_MTA, i, 0); /* Zero out the Unicast HASH table */ hw_dbg("Zeroing the UTA\n"); for (i = 0; i < mac->uta_reg_count; i++) array_wr32(E1000_UTA, i, 0); /* Setup link and flow control */ ret_val = igb_setup_link(hw); /* * Clear all of the statistics registers (clear on read). It is * important that we do this after we have tried to establish link * because the symbol error count will increment wildly if there * is no link. */ igb_clear_hw_cntrs_82575(hw); return ret_val; } /** * igb_setup_copper_link_82575 - Configure copper link settings * @hw: pointer to the HW structure * * Configures the link for auto-neg or forced speed and duplex. Then we check * for link, once link is established calls to configure collision distance * and flow control are called. 
**/ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) { u32 ctrl; s32 ret_val; ctrl = rd32(E1000_CTRL); ctrl |= E1000_CTRL_SLU; ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); wr32(E1000_CTRL, ctrl); ret_val = igb_setup_serdes_link_82575(hw); if (ret_val) goto out; if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) { /* allow time for SFP cage time to power up phy */ msleep(300); ret_val = hw->phy.ops.reset(hw); if (ret_val) { hw_dbg("Error resetting the PHY.\n"); goto out; } } switch (hw->phy.type) { case e1000_phy_m88: if (hw->phy.id == I347AT4_E_PHY_ID || hw->phy.id == M88E1112_E_PHY_ID) ret_val = igb_copper_link_setup_m88_gen2(hw); else ret_val = igb_copper_link_setup_m88(hw); break; case e1000_phy_igp_3: ret_val = igb_copper_link_setup_igp(hw); break; case e1000_phy_82580: ret_val = igb_copper_link_setup_82580(hw); break; default: ret_val = -E1000_ERR_PHY; break; } if (ret_val) goto out; ret_val = igb_setup_copper_link(hw); out: return ret_val; } /** * igb_setup_serdes_link_82575 - Setup link for serdes * @hw: pointer to the HW structure * * Configure the physical coding sub-layer (PCS) link. The PCS link is * used on copper connections where the serialized gigabit media independent * interface (sgmii), or serdes fiber is being used. Configures the link * for auto-negotiation or forces speed/duplex. **/ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) { u32 ctrl_ext, ctrl_reg, reg; bool pcs_autoneg; s32 ret_val = E1000_SUCCESS; u16 data; if ((hw->phy.media_type != e1000_media_type_internal_serdes) && !igb_sgmii_active_82575(hw)) return ret_val; /* * On the 82575, SerDes loopback mode persists until it is * explicitly turned off or a power cycle is performed. A read to * the register does not indicate its status. Therefore, we ensure * loopback mode is disabled during initialization. 
*/ wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); /* power on the sfp cage if present */ ctrl_ext = rd32(E1000_CTRL_EXT); ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; wr32(E1000_CTRL_EXT, ctrl_ext); ctrl_reg = rd32(E1000_CTRL); ctrl_reg |= E1000_CTRL_SLU; if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) { /* set both sw defined pins */ ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1; /* Set switch control to serdes energy detect */ reg = rd32(E1000_CONNSW); reg |= E1000_CONNSW_ENRGSRC; wr32(E1000_CONNSW, reg); } reg = rd32(E1000_PCS_LCTL); /* default pcs_autoneg to the same setting as mac autoneg */ pcs_autoneg = hw->mac.autoneg; switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { case E1000_CTRL_EXT_LINK_MODE_SGMII: /* sgmii mode lets the phy handle forcing speed/duplex */ pcs_autoneg = true; /* autoneg time out should be disabled for SGMII mode */ reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT); break; case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: /* disable PCS autoneg and support parallel detect only */ pcs_autoneg = false; default: if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) { ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data); if (ret_val) { printk(KERN_DEBUG "NVM Read Error\n\n"); return ret_val; } if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT) pcs_autoneg = false; } /* * non-SGMII modes only supports a speed of 1000/Full for the * link so it is best to just force the MAC and let the pcs * link either autoneg or be forced to 1000/Full */ ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | E1000_CTRL_FD | E1000_CTRL_FRCDPX; /* set speed of 1000/Full if speed/duplex is forced */ reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; break; } wr32(E1000_CTRL, ctrl_reg); /* * New SerDes mode allows for forcing speed or autonegotiating speed * at 1gb. Autoneg should be default set by most drivers. This is the * mode that will be compatible with older link partners and switches. 
* However, both are supported by the hardware and some drivers/tools. */ reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); /* * We force flow control to prevent the CTRL register values from being * overwritten by the autonegotiated flow control values */ reg |= E1000_PCS_LCTL_FORCE_FCTRL; if (pcs_autoneg) { /* Set PCS register for autoneg */ reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); } else { /* Set PCS register for forced link */ reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); } wr32(E1000_PCS_LCTL, reg); if (!igb_sgmii_active_82575(hw)) igb_force_mac_fc(hw); return ret_val; } /** * igb_sgmii_active_82575 - Return sgmii state * @hw: pointer to the HW structure * * 82575 silicon has a serialized gigabit media independent interface (sgmii) * which can be enabled for use in the embedded applications. Simply * return the current state of the sgmii interface. **/ static bool igb_sgmii_active_82575(struct e1000_hw *hw) { struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; return dev_spec->sgmii_active; } /** * igb_reset_init_script_82575 - Inits HW defaults after reset * @hw: pointer to the HW structure * * Inits recommended HW defaults after a reset when there is no EEPROM * detected. This is only for the 82575. 
**/
static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
{
	if (hw->mac.type == e1000_82575) {
		hw_dbg("Running reset init script for 82575\n");
		/* SerDes configuration via SERDESCTRL */
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);

		/* CCM configuration via CCMCTL register */
		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);

		/* PCIe lanes configuration */
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);

		/* PCIe PLL Configuration */
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
	}

	return 0;
}

/**
 * igb_read_mac_addr_82575 - Read device MAC address
 * @hw: pointer to the HW structure
 **/
static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/*
	 * If there's an alternate MAC address place it in RAR0
	 * so that it will override the Si installed default perm
	 * address.
	 */
	ret_val = igb_check_alt_mac_addr(hw);
	if (ret_val)
		goto out;

	ret_val = igb_read_mac_addr(hw);

out:
	return ret_val;
}

/**
 * igb_power_down_phy_copper_82575 - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * In the case of a PHY power down to save power, or to turn off link during a
 * driver unload, or wake on lan is not enabled, remove the link.
**/
void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
{
	/* If the management interface is not enabled, then power down */
	if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
		igb_power_down_phy_copper(hw);
}

/**
 * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
 * @hw: pointer to the HW structure
 *
 * Clears the hardware counters by reading the counter registers.
 **/
static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
{
	/* These statistics registers are clear-on-read; reading them (and
	 * discarding the values) resets them to zero. */
	igb_clear_hw_cntrs_base(hw);

	rd32(E1000_PRC64);
	rd32(E1000_PRC127);
	rd32(E1000_PRC255);
	rd32(E1000_PRC511);
	rd32(E1000_PRC1023);
	rd32(E1000_PRC1522);
	rd32(E1000_PTC64);
	rd32(E1000_PTC127);
	rd32(E1000_PTC255);
	rd32(E1000_PTC511);
	rd32(E1000_PTC1023);
	rd32(E1000_PTC1522);

	rd32(E1000_ALGNERRC);
	rd32(E1000_RXERRC);
	rd32(E1000_TNCRS);
	rd32(E1000_CEXTERR);
	rd32(E1000_TSCTC);
	rd32(E1000_TSCTFC);

	rd32(E1000_MGTPRC);
	rd32(E1000_MGTPDC);
	rd32(E1000_MGTPTC);

	rd32(E1000_IAC);
	rd32(E1000_ICRXOC);

	rd32(E1000_ICRXPTC);
	rd32(E1000_ICRXATC);
	rd32(E1000_ICTXPTC);
	rd32(E1000_ICTXATC);
	rd32(E1000_ICTXQEC);
	rd32(E1000_ICTXQMTC);
	rd32(E1000_ICRXDMTC);

	rd32(E1000_CBTMPC);
	rd32(E1000_HTDPMC);
	rd32(E1000_CBRMPC);
	rd32(E1000_RPTHC);
	rd32(E1000_HGPTC);
	rd32(E1000_HTCBDPC);
	rd32(E1000_HGORCL);
	rd32(E1000_HGORCH);
	rd32(E1000_HGOTCL);
	rd32(E1000_HGOTCH);
	rd32(E1000_LENERRS);

	/* This register should not be read in copper configurations */
	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
	    igb_sgmii_active_82575(hw))
		rd32(E1000_SCVPC);
}

/**
 * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
 * @hw: pointer to the HW structure
 *
 * After rx enable if managability is enabled then there is likely some
 * bad data at the start of the fifo and possibly in the DMA fifo. This
 * function clears the fifos and flushes any packets that came in as rx was
 * being enabled.
**/ void igb_rx_fifo_flush_82575(struct e1000_hw *hw) { u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; int i, ms_wait; if (hw->mac.type != e1000_82575 || !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN)) return; /* Disable all RX queues */ for (i = 0; i < 4; i++) { rxdctl[i] = rd32(E1000_RXDCTL(i)); wr32(E1000_RXDCTL(i), rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); } /* Poll all queues to verify they have shut down */ for (ms_wait = 0; ms_wait < 10; ms_wait++) { msleep(1); rx_enabled = 0; for (i = 0; i < 4; i++) rx_enabled |= rd32(E1000_RXDCTL(i)); if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE)) break; } if (ms_wait == 10) hw_dbg("Queue disable timed out after 10ms\n"); /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all * incoming packets are rejected. Set enable and wait 2ms so that * any packet that was coming in as RCTL.EN was set is flushed */ rfctl = rd32(E1000_RFCTL); wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); rlpml = rd32(E1000_RLPML); wr32(E1000_RLPML, 0); rctl = rd32(E1000_RCTL); temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP); temp_rctl |= E1000_RCTL_LPE; wr32(E1000_RCTL, temp_rctl); wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN); wrfl(); msleep(2); /* Enable RX queues that were previously enabled and restore our * previous state */ for (i = 0; i < 4; i++) wr32(E1000_RXDCTL(i), rxdctl[i]); wr32(E1000_RCTL, rctl); wrfl(); wr32(E1000_RLPML, rlpml); wr32(E1000_RFCTL, rfctl); /* Flush receive errors generated by workaround */ rd32(E1000_ROC); rd32(E1000_RNBC); rd32(E1000_MPC); } /** * igb_set_pcie_completion_timeout - set pci-e completion timeout * @hw: pointer to the HW structure * * The defaults for 82575 and 82576 should be in the range of 50us to 50ms, * however the hardware default for these parts is 500us to 1ms which is less * than the 10ms recommended by the pci-e spec. To address this we need to * increase the value to either 10ms to 200ms for capability version 1 config, * or 16ms to 55ms for version 2. 
**/ static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw) { u32 gcr = rd32(E1000_GCR); s32 ret_val = 0; u16 pcie_devctl2; /* only take action if timeout value is defaulted to 0 */ if (gcr & E1000_GCR_CMPL_TMOUT_MASK) goto out; /* * if capababilities version is type 1 we can write the * timeout of 10ms to 200ms through the GCR register */ if (!(gcr & E1000_GCR_CAP_VER2)) { gcr |= E1000_GCR_CMPL_TMOUT_10ms; goto out; } /* * for version 2 capabilities we need to write the config space * directly in order to set the completion timeout value for * 16ms to 55ms */ ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, &pcie_devctl2); if (ret_val) goto out; pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms; ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, &pcie_devctl2); out: /* disable completion timeout resend */ gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND; wr32(E1000_GCR, gcr); return ret_val; } /** * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing * @hw: pointer to the hardware struct * @enable: state to enter, either enabled or disabled * @pf: Physical Function pool - do not set anti-spoofing for the PF * * enables/disables L2 switch anti-spoofing functionality. **/ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) { u32 dtxswc; switch (hw->mac.type) { case e1000_82576: case e1000_i350: dtxswc = rd32(E1000_DTXSWC); if (enable) { dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK | E1000_DTXSWC_VLAN_SPOOF_MASK); /* The PF can spoof - it has to in order to * support emulation mode NICs */ dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS)); } else { dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | E1000_DTXSWC_VLAN_SPOOF_MASK); } wr32(E1000_DTXSWC, dtxswc); break; default: break; } } /** * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback * @hw: pointer to the hardware struct * @enable: state to enter, either enabled or disabled * * enables/disables L2 switch loopback functionality. 
**/
void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
{
	u32 dtxswc;

	switch (hw->mac.type) {
	case e1000_82576:
		dtxswc = rd32(E1000_DTXSWC);
		if (enable)
			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		else
			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		wr32(E1000_DTXSWC, dtxswc);
		break;
	case e1000_i350:
		/* i350 exposes the same enable bit via the TXSWC register
		 * rather than DTXSWC. */
		dtxswc = rd32(E1000_TXSWC);
		if (enable)
			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		else
			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		wr32(E1000_TXSWC, dtxswc);
		break;
	default:
		/* Currently no other hardware supports loopback */
		break;
	}
}

/**
 * igb_vmdq_set_replication_pf - enable or disable vmdq replication
 * @hw: pointer to the hardware struct
 * @enable: state to enter, either enabled or disabled
 *
 * enables/disables replication of packets across multiple pools.
 **/
void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
{
	u32 vt_ctl = rd32(E1000_VT_CTL);

	if (enable)
		vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
	else
		vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;

	wr32(E1000_VT_CTL, vt_ctl);
}

/**
 * igb_read_phy_reg_82580 - Read 82580 MDI control register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Reads the MDI control register in the PHY at offset and stores the
 * information read to data.
 **/
static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
{
	s32 ret_val;

	/* Serialize PHY access before touching the MDI interface. */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	ret_val = igb_read_phy_reg_mdic(hw, offset, data);

	hw->phy.ops.release(hw);

out:
	return ret_val;
}

/**
 * igb_write_phy_reg_82580 - Write 82580 MDI control register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write to register at offset
 *
 * Writes data to MDI control register in the PHY at offset.
**/ static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) { s32 ret_val; ret_val = hw->phy.ops.acquire(hw); if (ret_val) goto out; ret_val = igb_write_phy_reg_mdic(hw, offset, data); hw->phy.ops.release(hw); out: return ret_val; } /** * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits * @hw: pointer to the HW structure * * This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on * the values found in the EEPROM. This addresses an issue in which these * bits are not restored from EEPROM after reset. **/ static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw) { s32 ret_val = 0; u32 mdicnfg; u16 nvm_data = 0; if (hw->mac.type != e1000_82580) goto out; if (!igb_sgmii_active_82575(hw)) goto out; ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, &nvm_data); if (ret_val) { hw_dbg("NVM Read Error\n"); goto out; } mdicnfg = rd32(E1000_MDICNFG); if (nvm_data & NVM_WORD24_EXT_MDIO) mdicnfg |= E1000_MDICNFG_EXT_MDIO; if (nvm_data & NVM_WORD24_COM_MDIO) mdicnfg |= E1000_MDICNFG_COM_MDIO; wr32(E1000_MDICNFG, mdicnfg); out: return ret_val; } /** * igb_reset_hw_82580 - Reset hardware * @hw: pointer to the HW structure * * This resets function or entire device (all ports, etc.) * to a known state. **/ static s32 igb_reset_hw_82580(struct e1000_hw *hw) { s32 ret_val = 0; /* BH SW mailbox bit in SW_FW_SYNC */ u16 swmbsw_mask = E1000_SW_SYNCH_MB; u32 ctrl, icr; bool global_device_reset = hw->dev_spec._82575.global_device_reset; hw->dev_spec._82575.global_device_reset = false; /* Get current control state. */ ctrl = rd32(E1000_CTRL); /* * Prevent the PCI-E bus from sticking if there is no TLP connection * on the last TLP read/write transaction when MAC is reset. 
*/ ret_val = igb_disable_pcie_master(hw); if (ret_val) hw_dbg("PCI-E Master disable polling has failed.\n"); hw_dbg("Masking off all interrupts\n"); wr32(E1000_IMC, 0xffffffff); wr32(E1000_RCTL, 0); wr32(E1000_TCTL, E1000_TCTL_PSP); wrfl(); msleep(10); /* Determine whether or not a global dev reset is requested */ if (global_device_reset && igb_acquire_swfw_sync_82575(hw, swmbsw_mask)) global_device_reset = false; if (global_device_reset && !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET)) ctrl |= E1000_CTRL_DEV_RST; else ctrl |= E1000_CTRL_RST; wr32(E1000_CTRL, ctrl); wrfl(); /* Add delay to insure DEV_RST has time to complete */ if (global_device_reset) msleep(5); ret_val = igb_get_auto_rd_done(hw); if (ret_val) { /* * When auto config read does not complete, do not * return with an error. This can happen in situations * where there is no eeprom and prevents getting link. */ hw_dbg("Auto Read Done did not complete\n"); } /* If EEPROM is not present, run manual init scripts */ if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) igb_reset_init_script_82575(hw); /* clear global device reset status bit */ wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET); /* Clear any pending interrupt events. */ wr32(E1000_IMC, 0xffffffff); icr = rd32(E1000_ICR); ret_val = igb_reset_mdicnfg_82580(hw); if (ret_val) hw_dbg("Could not reset MDICNFG based on EEPROM\n"); /* Install any alternate MAC address into RAR0 */ ret_val = igb_check_alt_mac_addr(hw); /* Release semaphore */ if (global_device_reset) igb_release_swfw_sync_82575(hw, swmbsw_mask); return ret_val; } /** * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size * @data: data received by reading RXPBS register * * The 82580 uses a table based approach for packet buffer allocation sizes. 
* This function converts the retrieved value into the correct table value * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 * 0x0 36 72 144 1 2 4 8 16 * 0x8 35 70 140 rsv rsv rsv rsv rsv */ u16 igb_rxpbs_adjust_82580(u32 data) { u16 ret_val = 0; if (data < E1000_82580_RXPBS_TABLE_SIZE) ret_val = e1000_82580_rxpbs_table[data]; return ret_val; } /** * igb_validate_nvm_checksum_with_offset - Validate EEPROM * checksum * @hw: pointer to the HW structure * @offset: offset in words of the checksum protected region * * Calculates the EEPROM checksum by reading/adding each word of the EEPROM * and then verifies that the sum of the EEPROM is equal to 0xBABA. **/ static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) { s32 ret_val = 0; u16 checksum = 0; u16 i, nvm_data; for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); if (ret_val) { hw_dbg("NVM Read Error\n"); goto out; } checksum += nvm_data; } if (checksum != (u16) NVM_SUM) { hw_dbg("NVM Checksum Invalid\n"); ret_val = -E1000_ERR_NVM; goto out; } out: return ret_val; } /** * igb_update_nvm_checksum_with_offset - Update EEPROM * checksum * @hw: pointer to the HW structure * @offset: offset in words of the checksum protected region * * Updates the EEPROM checksum by reading/adding each word of the EEPROM * up to the checksum. Then calculates the EEPROM checksum and writes the * value to the EEPROM. 
**/ static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) { s32 ret_val; u16 checksum = 0; u16 i, nvm_data; for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); if (ret_val) { hw_dbg("NVM Read Error while updating checksum.\n"); goto out; } checksum += nvm_data; } checksum = (u16) NVM_SUM - checksum; ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, &checksum); if (ret_val) hw_dbg("NVM Write Error while updating checksum.\n"); out: return ret_val; } /** * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum * @hw: pointer to the HW structure * * Calculates the EEPROM section checksum by reading/adding each word of * the EEPROM and then verifies that the sum of the EEPROM is * equal to 0xBABA. **/ static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw) { s32 ret_val = 0; u16 eeprom_regions_count = 1; u16 j, nvm_data; u16 nvm_offset; ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); if (ret_val) { hw_dbg("NVM Read Error\n"); goto out; } if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { /* if checksums compatibility bit is set validate checksums * for all 4 ports. */ eeprom_regions_count = 4; } for (j = 0; j < eeprom_regions_count; j++) { nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); ret_val = igb_validate_nvm_checksum_with_offset(hw, nvm_offset); if (ret_val != 0) goto out; } out: return ret_val; } /** * igb_update_nvm_checksum_82580 - Update EEPROM checksum * @hw: pointer to the HW structure * * Updates the EEPROM section checksums for all 4 ports by reading/adding * each word of the EEPROM up to the checksum. Then calculates the EEPROM * checksum and writes the value to the EEPROM. 
**/ static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw) { s32 ret_val; u16 j, nvm_data; u16 nvm_offset; ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); if (ret_val) { hw_dbg("NVM Read Error while updating checksum" " compatibility bit.\n"); goto out; } if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) { /* set compatibility bit to validate checksums appropriately */ nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); if (ret_val) { hw_dbg("NVM Write Error while updating checksum" " compatibility bit.\n"); goto out; } } for (j = 0; j < 4; j++) { nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); if (ret_val) goto out; } out: return ret_val; } /** * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum * @hw: pointer to the HW structure * * Calculates the EEPROM section checksum by reading/adding each word of * the EEPROM and then verifies that the sum of the EEPROM is * equal to 0xBABA. **/ static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw) { s32 ret_val = 0; u16 j; u16 nvm_offset; for (j = 0; j < 4; j++) { nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); ret_val = igb_validate_nvm_checksum_with_offset(hw, nvm_offset); if (ret_val != 0) goto out; } out: return ret_val; } /** * igb_update_nvm_checksum_i350 - Update EEPROM checksum * @hw: pointer to the HW structure * * Updates the EEPROM section checksums for all 4 ports by reading/adding * each word of the EEPROM up to the checksum. Then calculates the EEPROM * checksum and writes the value to the EEPROM. 
**/ static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw) { s32 ret_val = 0; u16 j; u16 nvm_offset; for (j = 0; j < 4; j++) { nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); if (ret_val != 0) goto out; } out: return ret_val; } /** * igb_set_eee_i350 - Enable/disable EEE support * @hw: pointer to the HW structure * * Enable/disable EEE based on setting in dev_spec structure. * **/ s32 igb_set_eee_i350(struct e1000_hw *hw) { s32 ret_val = 0; u32 ipcnfg, eeer, ctrl_ext; ctrl_ext = rd32(E1000_CTRL_EXT); if ((hw->mac.type != e1000_i350) || (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK)) goto out; ipcnfg = rd32(E1000_IPCNFG); eeer = rd32(E1000_EEER); /* enable or disable per user setting */ if (!(hw->dev_spec._82575.eee_disable)) { ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | E1000_EEER_LPI_FC); } else { ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | E1000_EEER_LPI_FC); } wr32(E1000_IPCNFG, ipcnfg); wr32(E1000_EEER, eeer); out: return ret_val; } static struct e1000_mac_operations e1000_mac_ops_82575 = { .init_hw = igb_init_hw_82575, .check_for_link = igb_check_for_link_82575, .rar_set = igb_rar_set, .read_mac_addr = igb_read_mac_addr_82575, .get_speed_and_duplex = igb_get_speed_and_duplex_copper, }; static struct e1000_phy_operations e1000_phy_ops_82575 = { .acquire = igb_acquire_phy_82575, .get_cfg_done = igb_get_cfg_done_82575, .release = igb_release_phy_82575, }; static struct e1000_nvm_operations e1000_nvm_ops_82575 = { .acquire = igb_acquire_nvm_82575, .read = igb_read_nvm_eerd, .release = igb_release_nvm_82575, .write = igb_write_nvm_spi, }; const struct e1000_info e1000_82575_info = { .get_invariants = igb_get_invariants_82575, .mac_ops = &e1000_mac_ops_82575, .phy_ops = &e1000_phy_ops_82575, .nvm_ops = &e1000_nvm_ops_82575, };
gpl-2.0
ChaOSChriS/ChaOS-mako
drivers/char/ipmi/ipmi_watchdog.c
4854
35627
/* * ipmi_watchdog.c * * A watchdog timer based upon the IPMI interface. * * Author: MontaVista Software, Inc. * Corey Minyard <minyard@mvista.com> * source@mvista.com * * Copyright 2002 MontaVista Software Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/ipmi.h> #include <linux/ipmi_smi.h> #include <linux/mutex.h> #include <linux/watchdog.h> #include <linux/miscdevice.h> #include <linux/init.h> #include <linux/completion.h> #include <linux/kdebug.h> #include <linux/rwsem.h> #include <linux/errno.h> #include <asm/uaccess.h> #include <linux/notifier.h> #include <linux/nmi.h> #include <linux/reboot.h> #include <linux/wait.h> #include <linux/poll.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/delay.h> #include <linux/atomic.h> #ifdef CONFIG_X86 /* * This is ugly, but I've determined that x86 is the only architecture * that can reasonably support the IPMI NMI watchdog timeout at this * time. If another architecture adds this capability somehow, it * will have to be a somewhat different mechanism and I have no idea * how it will work. So in the unlikely event that another * architecture supports this, we can figure out a good generic * mechanism for it at that time. */ #include <asm/kdebug.h> #include <asm/nmi.h> #define HAVE_DIE_NMI #endif #define PFX "IPMI Watchdog: " /* * The IPMI command/response information for the watchdog timer. */ /* values for byte 1 of the set command, byte 2 of the get response. */ #define WDOG_DONT_LOG (1 << 7) #define WDOG_DONT_STOP_ON_SET (1 << 6) #define WDOG_SET_TIMER_USE(byte, use) \ byte = ((byte) & 0xf8) | ((use) & 0x7) #define WDOG_GET_TIMER_USE(byte) ((byte) & 0x7) #define WDOG_TIMER_USE_BIOS_FRB2 1 #define WDOG_TIMER_USE_BIOS_POST 2 #define WDOG_TIMER_USE_OS_LOAD 3 #define WDOG_TIMER_USE_SMS_OS 4 #define WDOG_TIMER_USE_OEM 5 /* values for byte 2 of the set command, byte 3 of the get response. 
*/ #define WDOG_SET_PRETIMEOUT_ACT(byte, use) \ byte = ((byte) & 0x8f) | (((use) & 0x7) << 4) #define WDOG_GET_PRETIMEOUT_ACT(byte) (((byte) >> 4) & 0x7) #define WDOG_PRETIMEOUT_NONE 0 #define WDOG_PRETIMEOUT_SMI 1 #define WDOG_PRETIMEOUT_NMI 2 #define WDOG_PRETIMEOUT_MSG_INT 3 /* Operations that can be performed on a pretimout. */ #define WDOG_PREOP_NONE 0 #define WDOG_PREOP_PANIC 1 /* Cause data to be available to read. Doesn't work in NMI mode. */ #define WDOG_PREOP_GIVE_DATA 2 /* Actions to perform on a full timeout. */ #define WDOG_SET_TIMEOUT_ACT(byte, use) \ byte = ((byte) & 0xf8) | ((use) & 0x7) #define WDOG_GET_TIMEOUT_ACT(byte) ((byte) & 0x7) #define WDOG_TIMEOUT_NONE 0 #define WDOG_TIMEOUT_RESET 1 #define WDOG_TIMEOUT_POWER_DOWN 2 #define WDOG_TIMEOUT_POWER_CYCLE 3 /* * Byte 3 of the get command, byte 4 of the get response is the * pre-timeout in seconds. */ /* Bits for setting byte 4 of the set command, byte 5 of the get response. */ #define WDOG_EXPIRE_CLEAR_BIOS_FRB2 (1 << 1) #define WDOG_EXPIRE_CLEAR_BIOS_POST (1 << 2) #define WDOG_EXPIRE_CLEAR_OS_LOAD (1 << 3) #define WDOG_EXPIRE_CLEAR_SMS_OS (1 << 4) #define WDOG_EXPIRE_CLEAR_OEM (1 << 5) /* * Setting/getting the watchdog timer value. This is for bytes 5 and * 6 (the timeout time) of the set command, and bytes 6 and 7 (the * timeout time) and 8 and 9 (the current countdown value) of the * response. The timeout value is given in seconds (in the command it * is 100ms intervals). */ #define WDOG_SET_TIMEOUT(byte1, byte2, val) \ (byte1) = (((val) * 10) & 0xff), (byte2) = (((val) * 10) >> 8) #define WDOG_GET_TIMEOUT(byte1, byte2) \ (((byte1) | ((byte2) << 8)) / 10) #define IPMI_WDOG_RESET_TIMER 0x22 #define IPMI_WDOG_SET_TIMER 0x24 #define IPMI_WDOG_GET_TIMER 0x25 #define IPMI_WDOG_TIMER_NOT_INIT_RESP 0x80 /* These are here until the real ones get into the watchdog.h interface. 
*/ #ifndef WDIOC_GETTIMEOUT #define WDIOC_GETTIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 20, int) #endif #ifndef WDIOC_SET_PRETIMEOUT #define WDIOC_SET_PRETIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 21, int) #endif #ifndef WDIOC_GET_PRETIMEOUT #define WDIOC_GET_PRETIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 22, int) #endif static DEFINE_MUTEX(ipmi_watchdog_mutex); static bool nowayout = WATCHDOG_NOWAYOUT; static ipmi_user_t watchdog_user; static int watchdog_ifnum; /* Default the timeout to 10 seconds. */ static int timeout = 10; /* The pre-timeout is disabled by default. */ static int pretimeout; /* Default action is to reset the board on a timeout. */ static unsigned char action_val = WDOG_TIMEOUT_RESET; static char action[16] = "reset"; static unsigned char preaction_val = WDOG_PRETIMEOUT_NONE; static char preaction[16] = "pre_none"; static unsigned char preop_val = WDOG_PREOP_NONE; static char preop[16] = "preop_none"; static DEFINE_SPINLOCK(ipmi_read_lock); static char data_to_read; static DECLARE_WAIT_QUEUE_HEAD(read_q); static struct fasync_struct *fasync_q; static char pretimeout_since_last_heartbeat; static char expect_close; static int ifnum_to_use = -1; /* Parameters to ipmi_set_timeout */ #define IPMI_SET_TIMEOUT_NO_HB 0 #define IPMI_SET_TIMEOUT_HB_IF_NECESSARY 1 #define IPMI_SET_TIMEOUT_FORCE_HB 2 static int ipmi_set_timeout(int do_heartbeat); static void ipmi_register_watchdog(int ipmi_intf); static void ipmi_unregister_watchdog(int ipmi_intf); /* * If true, the driver will start running as soon as it is configured * and ready. 
*/ static int start_now; static int set_param_timeout(const char *val, const struct kernel_param *kp) { char *endp; int l; int rv = 0; if (!val) return -EINVAL; l = simple_strtoul(val, &endp, 0); if (endp == val) return -EINVAL; *((int *)kp->arg) = l; if (watchdog_user) rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); return rv; } static struct kernel_param_ops param_ops_timeout = { .set = set_param_timeout, .get = param_get_int, }; #define param_check_timeout param_check_int typedef int (*action_fn)(const char *intval, char *outval); static int action_op(const char *inval, char *outval); static int preaction_op(const char *inval, char *outval); static int preop_op(const char *inval, char *outval); static void check_parms(void); static int set_param_str(const char *val, const struct kernel_param *kp) { action_fn fn = (action_fn) kp->arg; int rv = 0; char valcp[16]; char *s; strncpy(valcp, val, 16); valcp[15] = '\0'; s = strstrip(valcp); rv = fn(s, NULL); if (rv) goto out; check_parms(); if (watchdog_user) rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); out: return rv; } static int get_param_str(char *buffer, const struct kernel_param *kp) { action_fn fn = (action_fn) kp->arg; int rv; rv = fn(NULL, buffer); if (rv) return rv; return strlen(buffer); } static int set_param_wdog_ifnum(const char *val, const struct kernel_param *kp) { int rv = param_set_int(val, kp); if (rv) return rv; if ((ifnum_to_use < 0) || (ifnum_to_use == watchdog_ifnum)) return 0; ipmi_unregister_watchdog(watchdog_ifnum); ipmi_register_watchdog(ifnum_to_use); return 0; } static struct kernel_param_ops param_ops_wdog_ifnum = { .set = set_param_wdog_ifnum, .get = param_get_int, }; #define param_check_wdog_ifnum param_check_int static struct kernel_param_ops param_ops_str = { .set = set_param_str, .get = get_param_str, }; module_param(ifnum_to_use, wdog_ifnum, 0644); MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog " "timer. 
Setting to -1 defaults to the first registered " "interface"); module_param(timeout, timeout, 0644); MODULE_PARM_DESC(timeout, "Timeout value in seconds."); module_param(pretimeout, timeout, 0644); MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds."); module_param_cb(action, &param_ops_str, action_op, 0644); MODULE_PARM_DESC(action, "Timeout action. One of: " "reset, none, power_cycle, power_off."); module_param_cb(preaction, &param_ops_str, preaction_op, 0644); MODULE_PARM_DESC(preaction, "Pretimeout action. One of: " "pre_none, pre_smi, pre_nmi, pre_int."); module_param_cb(preop, &param_ops_str, preop_op, 0644); MODULE_PARM_DESC(preop, "Pretimeout driver operation. One of: " "preop_none, preop_panic, preop_give_data."); module_param(start_now, int, 0444); MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as" "soon as the driver is loaded."); module_param(nowayout, bool, 0644); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " "(default=CONFIG_WATCHDOG_NOWAYOUT)"); /* Default state of the timer. */ static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE; /* If shutting down via IPMI, we ignore the heartbeat. */ static int ipmi_ignore_heartbeat; /* Is someone using the watchdog? Only one user is allowed. */ static unsigned long ipmi_wdog_open; /* * If set to 1, the heartbeat command will set the state to reset and * start the timer. The timer doesn't normally run when the driver is * first opened until the heartbeat is set the first time, this * variable is used to accomplish this. */ static int ipmi_start_timer_on_heartbeat; /* IPMI version of the BMC. */ static unsigned char ipmi_version_major; static unsigned char ipmi_version_minor; /* If a pretimeout occurs, this is used to allow only one panic to happen. 
*/ static atomic_t preop_panic_excl = ATOMIC_INIT(-1); #ifdef HAVE_DIE_NMI static int testing_nmi; static int nmi_handler_registered; #endif static int ipmi_heartbeat(void); /* * We use a mutex to make sure that only one thing can send a set * timeout at one time, because we only have one copy of the data. * The mutex is claimed when the set_timeout is sent and freed * when both messages are free. */ static atomic_t set_timeout_tofree = ATOMIC_INIT(0); static DEFINE_MUTEX(set_timeout_lock); static DECLARE_COMPLETION(set_timeout_wait); static void set_timeout_free_smi(struct ipmi_smi_msg *msg) { if (atomic_dec_and_test(&set_timeout_tofree)) complete(&set_timeout_wait); } static void set_timeout_free_recv(struct ipmi_recv_msg *msg) { if (atomic_dec_and_test(&set_timeout_tofree)) complete(&set_timeout_wait); } static struct ipmi_smi_msg set_timeout_smi_msg = { .done = set_timeout_free_smi }; static struct ipmi_recv_msg set_timeout_recv_msg = { .done = set_timeout_free_recv }; static int i_ipmi_set_timeout(struct ipmi_smi_msg *smi_msg, struct ipmi_recv_msg *recv_msg, int *send_heartbeat_now) { struct kernel_ipmi_msg msg; unsigned char data[6]; int rv; struct ipmi_system_interface_addr addr; int hbnow = 0; /* These can be cleared as we are setting the timeout. */ pretimeout_since_last_heartbeat = 0; data[0] = 0; WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS); if ((ipmi_version_major > 1) || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) { /* This is an IPMI 1.5-only feature. */ data[0] |= WDOG_DONT_STOP_ON_SET; } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { /* * In ipmi 1.0, setting the timer stops the watchdog, we * need to start it back up again. 
*/ hbnow = 1; } data[1] = 0; WDOG_SET_TIMEOUT_ACT(data[1], ipmi_watchdog_state); if ((pretimeout > 0) && (ipmi_watchdog_state != WDOG_TIMEOUT_NONE)) { WDOG_SET_PRETIMEOUT_ACT(data[1], preaction_val); data[2] = pretimeout; } else { WDOG_SET_PRETIMEOUT_ACT(data[1], WDOG_PRETIMEOUT_NONE); data[2] = 0; /* No pretimeout. */ } data[3] = 0; WDOG_SET_TIMEOUT(data[4], data[5], timeout); addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; addr.channel = IPMI_BMC_CHANNEL; addr.lun = 0; msg.netfn = 0x06; msg.cmd = IPMI_WDOG_SET_TIMER; msg.data = data; msg.data_len = sizeof(data); rv = ipmi_request_supply_msgs(watchdog_user, (struct ipmi_addr *) &addr, 0, &msg, NULL, smi_msg, recv_msg, 1); if (rv) { printk(KERN_WARNING PFX "set timeout error: %d\n", rv); } if (send_heartbeat_now) *send_heartbeat_now = hbnow; return rv; } static int ipmi_set_timeout(int do_heartbeat) { int send_heartbeat_now; int rv; /* We can only send one of these at a time. */ mutex_lock(&set_timeout_lock); atomic_set(&set_timeout_tofree, 2); rv = i_ipmi_set_timeout(&set_timeout_smi_msg, &set_timeout_recv_msg, &send_heartbeat_now); if (rv) { mutex_unlock(&set_timeout_lock); goto out; } wait_for_completion(&set_timeout_wait); mutex_unlock(&set_timeout_lock); if ((do_heartbeat == IPMI_SET_TIMEOUT_FORCE_HB) || ((send_heartbeat_now) && (do_heartbeat == IPMI_SET_TIMEOUT_HB_IF_NECESSARY))) rv = ipmi_heartbeat(); out: return rv; } static atomic_t panic_done_count = ATOMIC_INIT(0); static void panic_smi_free(struct ipmi_smi_msg *msg) { atomic_dec(&panic_done_count); } static void panic_recv_free(struct ipmi_recv_msg *msg) { atomic_dec(&panic_done_count); } static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg = { .done = panic_smi_free }; static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg = { .done = panic_recv_free }; static void panic_halt_ipmi_heartbeat(void) { struct kernel_ipmi_msg msg; struct ipmi_system_interface_addr addr; int rv; /* * Don't reset the timer if we have the timer turned off, that * 
re-enables the watchdog. */ if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) return; addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; addr.channel = IPMI_BMC_CHANNEL; addr.lun = 0; msg.netfn = 0x06; msg.cmd = IPMI_WDOG_RESET_TIMER; msg.data = NULL; msg.data_len = 0; atomic_add(2, &panic_done_count); rv = ipmi_request_supply_msgs(watchdog_user, (struct ipmi_addr *) &addr, 0, &msg, NULL, &panic_halt_heartbeat_smi_msg, &panic_halt_heartbeat_recv_msg, 1); if (rv) atomic_sub(2, &panic_done_count); } static struct ipmi_smi_msg panic_halt_smi_msg = { .done = panic_smi_free }; static struct ipmi_recv_msg panic_halt_recv_msg = { .done = panic_recv_free }; /* * Special call, doesn't claim any locks. This is only to be called * at panic or halt time, in run-to-completion mode, when the caller * is the only CPU and the only thing that will be going is these IPMI * calls. */ static void panic_halt_ipmi_set_timeout(void) { int send_heartbeat_now; int rv; /* Wait for the messages to be free. */ while (atomic_read(&panic_done_count) != 0) ipmi_poll_interface(watchdog_user); atomic_add(2, &panic_done_count); rv = i_ipmi_set_timeout(&panic_halt_smi_msg, &panic_halt_recv_msg, &send_heartbeat_now); if (rv) { atomic_sub(2, &panic_done_count); printk(KERN_WARNING PFX "Unable to extend the watchdog timeout."); } else { if (send_heartbeat_now) panic_halt_ipmi_heartbeat(); } while (atomic_read(&panic_done_count) != 0) ipmi_poll_interface(watchdog_user); } /* * We use a mutex to make sure that only one thing can send a * heartbeat at one time, because we only have one copy of the data. * The semaphore is claimed when the set_timeout is sent and freed * when both messages are free. 
*/ static atomic_t heartbeat_tofree = ATOMIC_INIT(0); static DEFINE_MUTEX(heartbeat_lock); static DECLARE_COMPLETION(heartbeat_wait); static void heartbeat_free_smi(struct ipmi_smi_msg *msg) { if (atomic_dec_and_test(&heartbeat_tofree)) complete(&heartbeat_wait); } static void heartbeat_free_recv(struct ipmi_recv_msg *msg) { if (atomic_dec_and_test(&heartbeat_tofree)) complete(&heartbeat_wait); } static struct ipmi_smi_msg heartbeat_smi_msg = { .done = heartbeat_free_smi }; static struct ipmi_recv_msg heartbeat_recv_msg = { .done = heartbeat_free_recv }; static int ipmi_heartbeat(void) { struct kernel_ipmi_msg msg; int rv; struct ipmi_system_interface_addr addr; int timeout_retries = 0; if (ipmi_ignore_heartbeat) return 0; if (ipmi_start_timer_on_heartbeat) { ipmi_start_timer_on_heartbeat = 0; ipmi_watchdog_state = action_val; return ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); } else if (pretimeout_since_last_heartbeat) { /* * A pretimeout occurred, make sure we set the timeout. * We don't want to set the action, though, we want to * leave that alone (thus it can't be combined with the * above operation. */ return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); } mutex_lock(&heartbeat_lock); restart: atomic_set(&heartbeat_tofree, 2); /* * Don't reset the timer if we have the timer turned off, that * re-enables the watchdog. */ if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) { mutex_unlock(&heartbeat_lock); return 0; } addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; addr.channel = IPMI_BMC_CHANNEL; addr.lun = 0; msg.netfn = 0x06; msg.cmd = IPMI_WDOG_RESET_TIMER; msg.data = NULL; msg.data_len = 0; rv = ipmi_request_supply_msgs(watchdog_user, (struct ipmi_addr *) &addr, 0, &msg, NULL, &heartbeat_smi_msg, &heartbeat_recv_msg, 1); if (rv) { mutex_unlock(&heartbeat_lock); printk(KERN_WARNING PFX "heartbeat failure: %d\n", rv); return rv; } /* Wait for the heartbeat to be sent. 
*/ wait_for_completion(&heartbeat_wait); if (heartbeat_recv_msg.msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP) { timeout_retries++; if (timeout_retries > 3) { printk(KERN_ERR PFX ": Unable to restore the IPMI" " watchdog's settings, giving up.\n"); rv = -EIO; goto out_unlock; } /* * The timer was not initialized, that means the BMC was * probably reset and lost the watchdog information. Attempt * to restore the timer's info. Note that we still hold * the heartbeat lock, to keep a heartbeat from happening * in this process, so must say no heartbeat to avoid a * deadlock on this mutex. */ rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); if (rv) { printk(KERN_ERR PFX ": Unable to send the command to" " set the watchdog's settings, giving up.\n"); goto out_unlock; } /* We might need a new heartbeat, so do it now */ goto restart; } else if (heartbeat_recv_msg.msg.data[0] != 0) { /* * Got an error in the heartbeat response. It was already * reported in ipmi_wdog_msg_handler, but we should return * an error here. */ rv = -EINVAL; } out_unlock: mutex_unlock(&heartbeat_lock); return rv; } static struct watchdog_info ident = { .options = 0, /* WDIOF_SETTIMEOUT, */ .firmware_version = 1, .identity = "IPMI" }; static int ipmi_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; int i; int val; switch (cmd) { case WDIOC_GETSUPPORT: i = copy_to_user(argp, &ident, sizeof(ident)); return i ? 
-EFAULT : 0; case WDIOC_SETTIMEOUT: i = copy_from_user(&val, argp, sizeof(int)); if (i) return -EFAULT; timeout = val; return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); case WDIOC_GETTIMEOUT: i = copy_to_user(argp, &timeout, sizeof(timeout)); if (i) return -EFAULT; return 0; case WDIOC_SET_PRETIMEOUT: case WDIOC_SETPRETIMEOUT: i = copy_from_user(&val, argp, sizeof(int)); if (i) return -EFAULT; pretimeout = val; return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); case WDIOC_GET_PRETIMEOUT: case WDIOC_GETPRETIMEOUT: i = copy_to_user(argp, &pretimeout, sizeof(pretimeout)); if (i) return -EFAULT; return 0; case WDIOC_KEEPALIVE: return ipmi_heartbeat(); case WDIOC_SETOPTIONS: i = copy_from_user(&val, argp, sizeof(int)); if (i) return -EFAULT; if (val & WDIOS_DISABLECARD) { ipmi_watchdog_state = WDOG_TIMEOUT_NONE; ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); ipmi_start_timer_on_heartbeat = 0; } if (val & WDIOS_ENABLECARD) { ipmi_watchdog_state = action_val; ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); } return 0; case WDIOC_GETSTATUS: val = 0; i = copy_to_user(argp, &val, sizeof(val)); if (i) return -EFAULT; return 0; default: return -ENOIOCTLCMD; } } static long ipmi_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret; mutex_lock(&ipmi_watchdog_mutex); ret = ipmi_ioctl(file, cmd, arg); mutex_unlock(&ipmi_watchdog_mutex); return ret; } static ssize_t ipmi_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos) { int rv; if (len) { if (!nowayout) { size_t i; /* In case it was set long ago */ expect_close = 0; for (i = 0; i != len; i++) { char c; if (get_user(c, buf + i)) return -EFAULT; if (c == 'V') expect_close = 42; } } rv = ipmi_heartbeat(); if (rv) return rv; } return len; } static ssize_t ipmi_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { int rv = 0; wait_queue_t wait; if (count <= 0) return 0; /* * Reading returns if the pretimeout has gone off, and it only does * it once per 
pretimeout. */ spin_lock(&ipmi_read_lock); if (!data_to_read) { if (file->f_flags & O_NONBLOCK) { rv = -EAGAIN; goto out; } init_waitqueue_entry(&wait, current); add_wait_queue(&read_q, &wait); while (!data_to_read) { set_current_state(TASK_INTERRUPTIBLE); spin_unlock(&ipmi_read_lock); schedule(); spin_lock(&ipmi_read_lock); } remove_wait_queue(&read_q, &wait); if (signal_pending(current)) { rv = -ERESTARTSYS; goto out; } } data_to_read = 0; out: spin_unlock(&ipmi_read_lock); if (rv == 0) { if (copy_to_user(buf, &data_to_read, 1)) rv = -EFAULT; else rv = 1; } return rv; } static int ipmi_open(struct inode *ino, struct file *filep) { switch (iminor(ino)) { case WATCHDOG_MINOR: if (test_and_set_bit(0, &ipmi_wdog_open)) return -EBUSY; /* * Don't start the timer now, let it start on the * first heartbeat. */ ipmi_start_timer_on_heartbeat = 1; return nonseekable_open(ino, filep); default: return (-ENODEV); } } static unsigned int ipmi_poll(struct file *file, poll_table *wait) { unsigned int mask = 0; poll_wait(file, &read_q, wait); spin_lock(&ipmi_read_lock); if (data_to_read) mask |= (POLLIN | POLLRDNORM); spin_unlock(&ipmi_read_lock); return mask; } static int ipmi_fasync(int fd, struct file *file, int on) { int result; result = fasync_helper(fd, file, on, &fasync_q); return (result); } static int ipmi_close(struct inode *ino, struct file *filep) { if (iminor(ino) == WATCHDOG_MINOR) { if (expect_close == 42) { ipmi_watchdog_state = WDOG_TIMEOUT_NONE; ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); } else { printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n"); ipmi_heartbeat(); } clear_bit(0, &ipmi_wdog_open); } expect_close = 0; return 0; } static const struct file_operations ipmi_wdog_fops = { .owner = THIS_MODULE, .read = ipmi_read, .poll = ipmi_poll, .write = ipmi_write, .unlocked_ioctl = ipmi_unlocked_ioctl, .open = ipmi_open, .release = ipmi_close, .fasync = ipmi_fasync, .llseek = no_llseek, }; static struct miscdevice ipmi_wdog_miscdev = { .minor = 
WATCHDOG_MINOR, .name = "watchdog", .fops = &ipmi_wdog_fops }; static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg, void *handler_data) { if (msg->msg.cmd == IPMI_WDOG_RESET_TIMER && msg->msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP) printk(KERN_INFO PFX "response: The IPMI controller appears" " to have been reset, will attempt to reinitialize" " the watchdog timer\n"); else if (msg->msg.data[0] != 0) printk(KERN_ERR PFX "response: Error %x on cmd %x\n", msg->msg.data[0], msg->msg.cmd); ipmi_free_recv_msg(msg); } static void ipmi_wdog_pretimeout_handler(void *handler_data) { if (preaction_val != WDOG_PRETIMEOUT_NONE) { if (preop_val == WDOG_PREOP_PANIC) { if (atomic_inc_and_test(&preop_panic_excl)) panic("Watchdog pre-timeout"); } else if (preop_val == WDOG_PREOP_GIVE_DATA) { spin_lock(&ipmi_read_lock); data_to_read = 1; wake_up_interruptible(&read_q); kill_fasync(&fasync_q, SIGIO, POLL_IN); spin_unlock(&ipmi_read_lock); } } /* * On some machines, the heartbeat will give an error and not * work unless we re-enable the timer. So do so. 
*/ pretimeout_since_last_heartbeat = 1; } static struct ipmi_user_hndl ipmi_hndlrs = { .ipmi_recv_hndl = ipmi_wdog_msg_handler, .ipmi_watchdog_pretimeout = ipmi_wdog_pretimeout_handler }; static void ipmi_register_watchdog(int ipmi_intf) { int rv = -EBUSY; if (watchdog_user) goto out; if ((ifnum_to_use >= 0) && (ifnum_to_use != ipmi_intf)) goto out; watchdog_ifnum = ipmi_intf; rv = ipmi_create_user(ipmi_intf, &ipmi_hndlrs, NULL, &watchdog_user); if (rv < 0) { printk(KERN_CRIT PFX "Unable to register with ipmi\n"); goto out; } ipmi_get_version(watchdog_user, &ipmi_version_major, &ipmi_version_minor); rv = misc_register(&ipmi_wdog_miscdev); if (rv < 0) { ipmi_destroy_user(watchdog_user); watchdog_user = NULL; printk(KERN_CRIT PFX "Unable to register misc device\n"); } #ifdef HAVE_DIE_NMI if (nmi_handler_registered) { int old_pretimeout = pretimeout; int old_timeout = timeout; int old_preop_val = preop_val; /* * Set the pretimeout to go off in a second and give * ourselves plenty of time to stop the timer. */ ipmi_watchdog_state = WDOG_TIMEOUT_RESET; preop_val = WDOG_PREOP_NONE; /* Make sure nothing happens */ pretimeout = 99; timeout = 100; testing_nmi = 1; rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); if (rv) { printk(KERN_WARNING PFX "Error starting timer to" " test NMI: 0x%x. The NMI pretimeout will" " likely not work\n", rv); rv = 0; goto out_restore; } msleep(1500); if (testing_nmi != 2) { printk(KERN_WARNING PFX "IPMI NMI didn't seem to" " occur. The NMI pretimeout will" " likely not work\n"); } out_restore: testing_nmi = 0; preop_val = old_preop_val; pretimeout = old_pretimeout; timeout = old_timeout; } #endif out: if ((start_now) && (rv == 0)) { /* Run from startup, so start the timer now. */ start_now = 0; /* Disable this function after first startup. */ ipmi_watchdog_state = action_val; ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); printk(KERN_INFO PFX "Starting now!\n"); } else { /* Stop the timer now. 
*/ ipmi_watchdog_state = WDOG_TIMEOUT_NONE; ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); } } static void ipmi_unregister_watchdog(int ipmi_intf) { int rv; if (!watchdog_user) goto out; if (watchdog_ifnum != ipmi_intf) goto out; /* Make sure no one can call us any more. */ misc_deregister(&ipmi_wdog_miscdev); /* * Wait to make sure the message makes it out. The lower layer has * pointers to our buffers, we want to make sure they are done before * we release our memory. */ while (atomic_read(&set_timeout_tofree)) schedule_timeout_uninterruptible(1); /* Disconnect from IPMI. */ rv = ipmi_destroy_user(watchdog_user); if (rv) { printk(KERN_WARNING PFX "error unlinking from IPMI: %d\n", rv); } watchdog_user = NULL; out: return; } #ifdef HAVE_DIE_NMI static int ipmi_nmi(unsigned int val, struct pt_regs *regs) { /* * If we get here, it's an NMI that's not a memory or I/O * error. We can't truly tell if it's from IPMI or not * without sending a message, and sending a message is almost * impossible because of locking. */ if (testing_nmi) { testing_nmi = 2; return NMI_HANDLED; } /* If we are not expecting a timeout, ignore it. */ if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) return NMI_DONE; if (preaction_val != WDOG_PRETIMEOUT_NMI) return NMI_DONE; /* * If no one else handled the NMI, we assume it was the IPMI * watchdog. */ if (preop_val == WDOG_PREOP_PANIC) { /* On some machines, the heartbeat will give an error and not work unless we re-enable the timer. So do so. */ pretimeout_since_last_heartbeat = 1; if (atomic_inc_and_test(&preop_panic_excl)) panic(PFX "pre-timeout"); } return NMI_HANDLED; } #endif static int wdog_reboot_handler(struct notifier_block *this, unsigned long code, void *unused) { static int reboot_event_handled; if ((watchdog_user) && (!reboot_event_handled)) { /* Make sure we only do this once. */ reboot_event_handled = 1; if (code == SYS_POWER_OFF || code == SYS_HALT) { /* Disable the WDT if we are shutting down. 
*/ ipmi_watchdog_state = WDOG_TIMEOUT_NONE; ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { /* Set a long timer to let the reboot happens, but reboot if it hangs, but only if the watchdog timer was already running. */ timeout = 120; pretimeout = 0; ipmi_watchdog_state = WDOG_TIMEOUT_RESET; ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); } } return NOTIFY_OK; } static struct notifier_block wdog_reboot_notifier = { .notifier_call = wdog_reboot_handler, .next = NULL, .priority = 0 }; static int wdog_panic_handler(struct notifier_block *this, unsigned long event, void *unused) { static int panic_event_handled; /* On a panic, if we have a panic timeout, make sure to extend the watchdog timer to a reasonable value to complete the panic, if the watchdog timer is running. Plus the pretimeout is meaningless at panic time. */ if (watchdog_user && !panic_event_handled && ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { /* Make sure we do this only once. */ panic_event_handled = 1; timeout = 255; pretimeout = 0; panic_halt_ipmi_set_timeout(); } return NOTIFY_OK; } static struct notifier_block wdog_panic_notifier = { .notifier_call = wdog_panic_handler, .next = NULL, .priority = 150 /* priority: INT_MAX >= x >= 0 */ }; static void ipmi_new_smi(int if_num, struct device *device) { ipmi_register_watchdog(if_num); } static void ipmi_smi_gone(int if_num) { ipmi_unregister_watchdog(if_num); } static struct ipmi_smi_watcher smi_watcher = { .owner = THIS_MODULE, .new_smi = ipmi_new_smi, .smi_gone = ipmi_smi_gone }; static int action_op(const char *inval, char *outval) { if (outval) strcpy(outval, action); if (!inval) return 0; if (strcmp(inval, "reset") == 0) action_val = WDOG_TIMEOUT_RESET; else if (strcmp(inval, "none") == 0) action_val = WDOG_TIMEOUT_NONE; else if (strcmp(inval, "power_cycle") == 0) action_val = WDOG_TIMEOUT_POWER_CYCLE; else if (strcmp(inval, "power_off") == 0) action_val = WDOG_TIMEOUT_POWER_DOWN; else return -EINVAL; 
strcpy(action, inval); return 0; } static int preaction_op(const char *inval, char *outval) { if (outval) strcpy(outval, preaction); if (!inval) return 0; if (strcmp(inval, "pre_none") == 0) preaction_val = WDOG_PRETIMEOUT_NONE; else if (strcmp(inval, "pre_smi") == 0) preaction_val = WDOG_PRETIMEOUT_SMI; #ifdef HAVE_DIE_NMI else if (strcmp(inval, "pre_nmi") == 0) preaction_val = WDOG_PRETIMEOUT_NMI; #endif else if (strcmp(inval, "pre_int") == 0) preaction_val = WDOG_PRETIMEOUT_MSG_INT; else return -EINVAL; strcpy(preaction, inval); return 0; } static int preop_op(const char *inval, char *outval) { if (outval) strcpy(outval, preop); if (!inval) return 0; if (strcmp(inval, "preop_none") == 0) preop_val = WDOG_PREOP_NONE; else if (strcmp(inval, "preop_panic") == 0) preop_val = WDOG_PREOP_PANIC; else if (strcmp(inval, "preop_give_data") == 0) preop_val = WDOG_PREOP_GIVE_DATA; else return -EINVAL; strcpy(preop, inval); return 0; } static void check_parms(void) { #ifdef HAVE_DIE_NMI int do_nmi = 0; int rv; if (preaction_val == WDOG_PRETIMEOUT_NMI) { do_nmi = 1; if (preop_val == WDOG_PREOP_GIVE_DATA) { printk(KERN_WARNING PFX "Pretimeout op is to give data" " but NMI pretimeout is enabled, setting" " pretimeout op to none\n"); preop_op("preop_none", NULL); do_nmi = 0; } } if (do_nmi && !nmi_handler_registered) { rv = register_nmi_handler(NMI_UNKNOWN, ipmi_nmi, 0, "ipmi"); if (rv) { printk(KERN_WARNING PFX "Can't register nmi handler\n"); return; } else nmi_handler_registered = 1; } else if (!do_nmi && nmi_handler_registered) { unregister_nmi_handler(NMI_UNKNOWN, "ipmi"); nmi_handler_registered = 0; } #endif } static int __init ipmi_wdog_init(void) { int rv; if (action_op(action, NULL)) { action_op("reset", NULL); printk(KERN_INFO PFX "Unknown action '%s', defaulting to" " reset\n", action); } if (preaction_op(preaction, NULL)) { preaction_op("pre_none", NULL); printk(KERN_INFO PFX "Unknown preaction '%s', defaulting to" " none\n", preaction); } if (preop_op(preop, NULL)) 
{ preop_op("preop_none", NULL); printk(KERN_INFO PFX "Unknown preop '%s', defaulting to" " none\n", preop); } check_parms(); register_reboot_notifier(&wdog_reboot_notifier); atomic_notifier_chain_register(&panic_notifier_list, &wdog_panic_notifier); rv = ipmi_smi_watcher_register(&smi_watcher); if (rv) { #ifdef HAVE_DIE_NMI if (nmi_handler_registered) unregister_nmi_handler(NMI_UNKNOWN, "ipmi"); #endif atomic_notifier_chain_unregister(&panic_notifier_list, &wdog_panic_notifier); unregister_reboot_notifier(&wdog_reboot_notifier); printk(KERN_WARNING PFX "can't register smi watcher\n"); return rv; } printk(KERN_INFO PFX "driver initialized\n"); return 0; } static void __exit ipmi_wdog_exit(void) { ipmi_smi_watcher_unregister(&smi_watcher); ipmi_unregister_watchdog(watchdog_ifnum); #ifdef HAVE_DIE_NMI if (nmi_handler_registered) unregister_nmi_handler(NMI_UNKNOWN, "ipmi"); #endif atomic_notifier_chain_unregister(&panic_notifier_list, &wdog_panic_notifier); unregister_reboot_notifier(&wdog_reboot_notifier); } module_exit(ipmi_wdog_exit); module_init(ipmi_wdog_init); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); MODULE_DESCRIPTION("watchdog timer based upon the IPMI interface.");
gpl-2.0
androthan/android_kernel_samsung_aalto-eu
drivers/pps/kc.c
8438
3840
/* * PPS kernel consumer API * * Copyright (C) 2009-2010 Alexander Gordeev <lasaine@lvk.cs.msu.su> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/pps_kernel.h> #include "kc.h" /* * Global variables */ /* state variables to bind kernel consumer */ DEFINE_SPINLOCK(pps_kc_hardpps_lock); /* PPS API (RFC 2783): current source and mode for kernel consumer */ struct pps_device *pps_kc_hardpps_dev; /* unique pointer to device */ int pps_kc_hardpps_mode; /* mode bits for kernel consumer */ /* pps_kc_bind - control PPS kernel consumer binding * @pps: the PPS source * @bind_args: kernel consumer bind parameters * * This function is used to bind or unbind PPS kernel consumer according to * supplied parameters. Should not be called in interrupt context. 
*/ int pps_kc_bind(struct pps_device *pps, struct pps_bind_args *bind_args) { /* Check if another consumer is already bound */ spin_lock_irq(&pps_kc_hardpps_lock); if (bind_args->edge == 0) if (pps_kc_hardpps_dev == pps) { pps_kc_hardpps_mode = 0; pps_kc_hardpps_dev = NULL; spin_unlock_irq(&pps_kc_hardpps_lock); dev_info(pps->dev, "unbound kernel" " consumer\n"); } else { spin_unlock_irq(&pps_kc_hardpps_lock); dev_err(pps->dev, "selected kernel consumer" " is not bound\n"); return -EINVAL; } else if (pps_kc_hardpps_dev == NULL || pps_kc_hardpps_dev == pps) { pps_kc_hardpps_mode = bind_args->edge; pps_kc_hardpps_dev = pps; spin_unlock_irq(&pps_kc_hardpps_lock); dev_info(pps->dev, "bound kernel consumer: " "edge=0x%x\n", bind_args->edge); } else { spin_unlock_irq(&pps_kc_hardpps_lock); dev_err(pps->dev, "another kernel consumer" " is already bound\n"); return -EINVAL; } return 0; } /* pps_kc_remove - unbind kernel consumer on PPS source removal * @pps: the PPS source * * This function is used to disable kernel consumer on PPS source removal * if this source was bound to PPS kernel consumer. Can be called on any * source safely. Should not be called in interrupt context. */ void pps_kc_remove(struct pps_device *pps) { spin_lock_irq(&pps_kc_hardpps_lock); if (pps == pps_kc_hardpps_dev) { pps_kc_hardpps_mode = 0; pps_kc_hardpps_dev = NULL; spin_unlock_irq(&pps_kc_hardpps_lock); dev_info(pps->dev, "unbound kernel consumer" " on device removal\n"); } else spin_unlock_irq(&pps_kc_hardpps_lock); } /* pps_kc_event - call hardpps() on PPS event * @pps: the PPS source * @ts: PPS event timestamp * @event: PPS event edge * * This function calls hardpps() when an event from bound PPS source occurs. 
*/ void pps_kc_event(struct pps_device *pps, struct pps_event_time *ts, int event) { unsigned long flags; /* Pass some events to kernel consumer if activated */ spin_lock_irqsave(&pps_kc_hardpps_lock, flags); if (pps == pps_kc_hardpps_dev && event & pps_kc_hardpps_mode) hardpps(&ts->ts_real, &ts->ts_raw); spin_unlock_irqrestore(&pps_kc_hardpps_lock, flags); }
gpl-2.0
ivanmeler/android_kernel_samsung_n7100
net/rxrpc/ar-transport.c
10742
7236
/* RxRPC point-to-point transport session management * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/net.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <net/sock.h> #include <net/af_rxrpc.h> #include "ar-internal.h" static void rxrpc_transport_reaper(struct work_struct *work); static LIST_HEAD(rxrpc_transports); static DEFINE_RWLOCK(rxrpc_transport_lock); static unsigned long rxrpc_transport_timeout = 3600 * 24; static DECLARE_DELAYED_WORK(rxrpc_transport_reap, rxrpc_transport_reaper); /* * allocate a new transport session manager */ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local, struct rxrpc_peer *peer, gfp_t gfp) { struct rxrpc_transport *trans; _enter(""); trans = kzalloc(sizeof(struct rxrpc_transport), gfp); if (trans) { trans->local = local; trans->peer = peer; INIT_LIST_HEAD(&trans->link); trans->bundles = RB_ROOT; trans->client_conns = RB_ROOT; trans->server_conns = RB_ROOT; skb_queue_head_init(&trans->error_queue); spin_lock_init(&trans->client_lock); rwlock_init(&trans->conn_lock); atomic_set(&trans->usage, 1); trans->debug_id = atomic_inc_return(&rxrpc_debug_id); if (peer->srx.transport.family == AF_INET) { switch (peer->srx.transport_type) { case SOCK_DGRAM: INIT_WORK(&trans->error_handler, rxrpc_UDP_error_handler); break; default: BUG(); break; } } else { BUG(); } } _leave(" = %p", trans); return trans; } /* * obtain a transport session for the nominated endpoints */ struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *local, struct rxrpc_peer *peer, gfp_t gfp) { struct rxrpc_transport *trans, *candidate; const char *new = "old"; int usage; 
_enter("{%pI4+%hu},{%pI4+%hu},", &local->srx.transport.sin.sin_addr, ntohs(local->srx.transport.sin.sin_port), &peer->srx.transport.sin.sin_addr, ntohs(peer->srx.transport.sin.sin_port)); /* search the transport list first */ read_lock_bh(&rxrpc_transport_lock); list_for_each_entry(trans, &rxrpc_transports, link) { if (trans->local == local && trans->peer == peer) goto found_extant_transport; } read_unlock_bh(&rxrpc_transport_lock); /* not yet present - create a candidate for a new record and then * redo the search */ candidate = rxrpc_alloc_transport(local, peer, gfp); if (!candidate) { _leave(" = -ENOMEM"); return ERR_PTR(-ENOMEM); } write_lock_bh(&rxrpc_transport_lock); list_for_each_entry(trans, &rxrpc_transports, link) { if (trans->local == local && trans->peer == peer) goto found_extant_second; } /* we can now add the new candidate to the list */ trans = candidate; candidate = NULL; usage = atomic_read(&trans->usage); rxrpc_get_local(trans->local); atomic_inc(&trans->peer->usage); list_add_tail(&trans->link, &rxrpc_transports); write_unlock_bh(&rxrpc_transport_lock); new = "new"; success: _net("TRANSPORT %s %d local %d -> peer %d", new, trans->debug_id, trans->local->debug_id, trans->peer->debug_id); _leave(" = %p {u=%d}", trans, usage); return trans; /* we found the transport in the list immediately */ found_extant_transport: usage = atomic_inc_return(&trans->usage); read_unlock_bh(&rxrpc_transport_lock); goto success; /* we found the transport on the second time through the list */ found_extant_second: usage = atomic_inc_return(&trans->usage); write_unlock_bh(&rxrpc_transport_lock); kfree(candidate); goto success; } /* * find the transport connecting two endpoints */ struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *local, struct rxrpc_peer *peer) { struct rxrpc_transport *trans; _enter("{%pI4+%hu},{%pI4+%hu},", &local->srx.transport.sin.sin_addr, ntohs(local->srx.transport.sin.sin_port), &peer->srx.transport.sin.sin_addr, 
ntohs(peer->srx.transport.sin.sin_port)); /* search the transport list */ read_lock_bh(&rxrpc_transport_lock); list_for_each_entry(trans, &rxrpc_transports, link) { if (trans->local == local && trans->peer == peer) goto found_extant_transport; } read_unlock_bh(&rxrpc_transport_lock); _leave(" = NULL"); return NULL; found_extant_transport: atomic_inc(&trans->usage); read_unlock_bh(&rxrpc_transport_lock); _leave(" = %p", trans); return trans; } /* * release a transport session */ void rxrpc_put_transport(struct rxrpc_transport *trans) { _enter("%p{u=%d}", trans, atomic_read(&trans->usage)); ASSERTCMP(atomic_read(&trans->usage), >, 0); trans->put_time = get_seconds(); if (unlikely(atomic_dec_and_test(&trans->usage))) { _debug("zombie"); /* let the reaper determine the timeout to avoid a race with * overextending the timeout if the reaper is running at the * same time */ rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0); } _leave(""); } /* * clean up a transport session */ static void rxrpc_cleanup_transport(struct rxrpc_transport *trans) { _net("DESTROY TRANS %d", trans->debug_id); rxrpc_purge_queue(&trans->error_queue); rxrpc_put_local(trans->local); rxrpc_put_peer(trans->peer); kfree(trans); } /* * reap dead transports that have passed their expiry date */ static void rxrpc_transport_reaper(struct work_struct *work) { struct rxrpc_transport *trans, *_p; unsigned long now, earliest, reap_time; LIST_HEAD(graveyard); _enter(""); now = get_seconds(); earliest = ULONG_MAX; /* extract all the transports that have been dead too long */ write_lock_bh(&rxrpc_transport_lock); list_for_each_entry_safe(trans, _p, &rxrpc_transports, link) { _debug("reap TRANS %d { u=%d t=%ld }", trans->debug_id, atomic_read(&trans->usage), (long) now - (long) trans->put_time); if (likely(atomic_read(&trans->usage) > 0)) continue; reap_time = trans->put_time + rxrpc_transport_timeout; if (reap_time <= now) list_move_tail(&trans->link, &graveyard); else if (reap_time < earliest) earliest = 
reap_time; } write_unlock_bh(&rxrpc_transport_lock); if (earliest != ULONG_MAX) { _debug("reschedule reaper %ld", (long) earliest - now); ASSERTCMP(earliest, >, now); rxrpc_queue_delayed_work(&rxrpc_transport_reap, (earliest - now) * HZ); } /* then destroy all those pulled out */ while (!list_empty(&graveyard)) { trans = list_entry(graveyard.next, struct rxrpc_transport, link); list_del_init(&trans->link); ASSERTCMP(atomic_read(&trans->usage), ==, 0); rxrpc_cleanup_transport(trans); } _leave(""); } /* * preemptively destroy all the transport session records rather than waiting * for them to time out */ void __exit rxrpc_destroy_all_transports(void) { _enter(""); rxrpc_transport_timeout = 0; cancel_delayed_work(&rxrpc_transport_reap); rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0); _leave(""); }
gpl-2.0
maqiangddb/Android_kernel
drivers/md/faulty.c
247
9021
/* * faulty.c : Multiple Devices driver for Linux * * Copyright (C) 2004 Neil Brown * * fautly-device-simulator personality for md * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * You should have received a copy of the GNU General Public License * (for example /usr/src/linux/COPYING); if not, write to the Free * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * The "faulty" personality causes some requests to fail. * * Possible failure modes are: * reads fail "randomly" but succeed on retry * writes fail "randomly" but succeed on retry * reads for some address fail and then persist until a write * reads for some address fail and then persist irrespective of write * writes for some address fail and persist * all writes fail * * Different modes can be active at a time, but only * one can be set at array creation. Others can be added later. * A mode can be one-shot or recurrent with the recurrence being * once in every N requests. * The bottom 5 bits of the "layout" indicate the mode. The * remainder indicate a period, or 0 for one-shot. * * There is an implementation limit on the number of concurrently * persisting-faulty blocks. When a new fault is requested that would * exceed the limit, it is ignored. * All current faults can be clear using a layout of "0". * * Requests are always sent to the device. If they are to fail, * we clone the bio and insert a new b_end_io into the chain. 
*/ #define WriteTransient 0 #define ReadTransient 1 #define WritePersistent 2 #define ReadPersistent 3 #define WriteAll 4 /* doesn't go to device */ #define ReadFixable 5 #define Modes 6 #define ClearErrors 31 #define ClearFaults 30 #define AllPersist 100 /* internal use only */ #define NoPersist 101 #define ModeMask 0x1f #define ModeShift 5 #define MaxFault 50 #include <linux/blkdev.h> #include <linux/module.h> #include <linux/raid/md_u.h> #include <linux/slab.h> #include "md.h" #include <linux/seq_file.h> static void faulty_fail(struct bio *bio, int error) { struct bio *b = bio->bi_private; b->bi_size = bio->bi_size; b->bi_sector = bio->bi_sector; bio_put(bio); bio_io_error(b); } struct faulty_conf { int period[Modes]; atomic_t counters[Modes]; sector_t faults[MaxFault]; int modes[MaxFault]; int nfaults; struct md_rdev *rdev; }; static int check_mode(struct faulty_conf *conf, int mode) { if (conf->period[mode] == 0 && atomic_read(&conf->counters[mode]) <= 0) return 0; /* no failure, no decrement */ if (atomic_dec_and_test(&conf->counters[mode])) { if (conf->period[mode]) atomic_set(&conf->counters[mode], conf->period[mode]); return 1; } return 0; } static int check_sector(struct faulty_conf *conf, sector_t start, sector_t end, int dir) { /* If we find a ReadFixable sector, we fix it ... */ int i; for (i=0; i<conf->nfaults; i++) if (conf->faults[i] >= start && conf->faults[i] < end) { /* found it ... 
*/ switch (conf->modes[i] * 2 + dir) { case WritePersistent*2+WRITE: return 1; case ReadPersistent*2+READ: return 1; case ReadFixable*2+READ: return 1; case ReadFixable*2+WRITE: conf->modes[i] = NoPersist; return 0; case AllPersist*2+READ: case AllPersist*2+WRITE: return 1; default: return 0; } } return 0; } static void add_sector(struct faulty_conf *conf, sector_t start, int mode) { int i; int n = conf->nfaults; for (i=0; i<conf->nfaults; i++) if (conf->faults[i] == start) { switch(mode) { case NoPersist: conf->modes[i] = mode; return; case WritePersistent: if (conf->modes[i] == ReadPersistent || conf->modes[i] == ReadFixable) conf->modes[i] = AllPersist; else conf->modes[i] = WritePersistent; return; case ReadPersistent: if (conf->modes[i] == WritePersistent) conf->modes[i] = AllPersist; else conf->modes[i] = ReadPersistent; return; case ReadFixable: if (conf->modes[i] == WritePersistent || conf->modes[i] == ReadPersistent) conf->modes[i] = AllPersist; else conf->modes[i] = ReadFixable; return; } } else if (conf->modes[i] == NoPersist) n = i; if (n >= MaxFault) return; conf->faults[n] = start; conf->modes[n] = mode; if (conf->nfaults == n) conf->nfaults = n+1; } static void make_request(struct mddev *mddev, struct bio *bio) { struct faulty_conf *conf = mddev->private; int failit = 0; if (bio_data_dir(bio) == WRITE) { /* write request */ if (atomic_read(&conf->counters[WriteAll])) { /* special case - don't decrement, don't generic_make_request, * just fail immediately */ bio_endio(bio, -EIO); return; } if (check_sector(conf, bio->bi_sector, bio->bi_sector+(bio->bi_size>>9), WRITE)) failit = 1; if (check_mode(conf, WritePersistent)) { add_sector(conf, bio->bi_sector, WritePersistent); failit = 1; } if (check_mode(conf, WriteTransient)) failit = 1; } else { /* read request */ if (check_sector(conf, bio->bi_sector, bio->bi_sector + (bio->bi_size>>9), READ)) failit = 1; if (check_mode(conf, ReadTransient)) failit = 1; if (check_mode(conf, ReadPersistent)) { 
add_sector(conf, bio->bi_sector, ReadPersistent); failit = 1; } if (check_mode(conf, ReadFixable)) { add_sector(conf, bio->bi_sector, ReadFixable); failit = 1; } } if (failit) { struct bio *b = bio_clone_mddev(bio, GFP_NOIO, mddev); b->bi_bdev = conf->rdev->bdev; b->bi_private = bio; b->bi_end_io = faulty_fail; bio = b; } else bio->bi_bdev = conf->rdev->bdev; generic_make_request(bio); } static void status(struct seq_file *seq, struct mddev *mddev) { struct faulty_conf *conf = mddev->private; int n; if ((n=atomic_read(&conf->counters[WriteTransient])) != 0) seq_printf(seq, " WriteTransient=%d(%d)", n, conf->period[WriteTransient]); if ((n=atomic_read(&conf->counters[ReadTransient])) != 0) seq_printf(seq, " ReadTransient=%d(%d)", n, conf->period[ReadTransient]); if ((n=atomic_read(&conf->counters[WritePersistent])) != 0) seq_printf(seq, " WritePersistent=%d(%d)", n, conf->period[WritePersistent]); if ((n=atomic_read(&conf->counters[ReadPersistent])) != 0) seq_printf(seq, " ReadPersistent=%d(%d)", n, conf->period[ReadPersistent]); if ((n=atomic_read(&conf->counters[ReadFixable])) != 0) seq_printf(seq, " ReadFixable=%d(%d)", n, conf->period[ReadFixable]); if ((n=atomic_read(&conf->counters[WriteAll])) != 0) seq_printf(seq, " WriteAll"); seq_printf(seq, " nfaults=%d", conf->nfaults); } static int reshape(struct mddev *mddev) { int mode = mddev->new_layout & ModeMask; int count = mddev->new_layout >> ModeShift; struct faulty_conf *conf = mddev->private; if (mddev->new_layout < 0) return 0; /* new layout */ if (mode == ClearFaults) conf->nfaults = 0; else if (mode == ClearErrors) { int i; for (i=0 ; i < Modes ; i++) { conf->period[i] = 0; atomic_set(&conf->counters[i], 0); } } else if (mode < Modes) { conf->period[mode] = count; if (!count) count++; atomic_set(&conf->counters[mode], count); } else return -EINVAL; mddev->new_layout = -1; mddev->layout = -1; /* makes sure further changes come through */ return 0; } static sector_t faulty_size(struct mddev *mddev, sector_t 
sectors, int raid_disks) { WARN_ONCE(raid_disks, "%s does not support generic reshape\n", __func__); if (sectors == 0) return mddev->dev_sectors; return sectors; } static int run(struct mddev *mddev) { struct md_rdev *rdev; int i; struct faulty_conf *conf; if (md_check_no_bitmap(mddev)) return -EINVAL; conf = kmalloc(sizeof(*conf), GFP_KERNEL); if (!conf) return -ENOMEM; for (i=0; i<Modes; i++) { atomic_set(&conf->counters[i], 0); conf->period[i] = 0; } conf->nfaults = 0; list_for_each_entry(rdev, &mddev->disks, same_set) conf->rdev = rdev; md_set_array_sectors(mddev, faulty_size(mddev, 0, 0)); mddev->private = conf; reshape(mddev); return 0; } static int stop(struct mddev *mddev) { struct faulty_conf *conf = mddev->private; kfree(conf); mddev->private = NULL; return 0; } static struct md_personality faulty_personality = { .name = "faulty", .level = LEVEL_FAULTY, .owner = THIS_MODULE, .make_request = make_request, .run = run, .stop = stop, .status = status, .check_reshape = reshape, .size = faulty_size, }; static int __init raid_init(void) { return register_md_personality(&faulty_personality); } static void raid_exit(void) { unregister_md_personality(&faulty_personality); } module_init(raid_init); module_exit(raid_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Fault injection personality for MD"); MODULE_ALIAS("md-personality-10"); /* faulty */ MODULE_ALIAS("md-faulty"); MODULE_ALIAS("md-level--5");
gpl-2.0
Foxda-Tech/argo8-kernel
drivers/net/wireless/iwlwifi/mvm/bt-coex.c
503
18453
/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, * USA * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2013 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include <net/mac80211.h> #include "fw-api-bt-coex.h" #include "iwl-modparams.h" #include "mvm.h" #include "iwl-debug.h" #define EVENT_PRIO_ANT(_evt, _prio, _shrd_ant) \ [(_evt)] = (((_prio) << BT_COEX_PRIO_TBL_PRIO_POS) | \ ((_shrd_ant) << BT_COEX_PRIO_TBL_SHRD_ANT_POS)) static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = { EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB1, BT_COEX_PRIO_TBL_PRIO_BYPASS, 0), EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB2, BT_COEX_PRIO_TBL_PRIO_BYPASS, 1), EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1, BT_COEX_PRIO_TBL_PRIO_LOW, 0), EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2, BT_COEX_PRIO_TBL_PRIO_LOW, 1), EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1, BT_COEX_PRIO_TBL_PRIO_HIGH, 0), EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2, BT_COEX_PRIO_TBL_PRIO_HIGH, 1), EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_DTIM, BT_COEX_PRIO_TBL_DISABLED, 0), EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN52, BT_COEX_PRIO_TBL_PRIO_COEX_OFF, 0), EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN24, BT_COEX_PRIO_TBL_PRIO_COEX_ON, 0), EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_IDLE, 
BT_COEX_PRIO_TBL_PRIO_COEX_IDLE, 0), 0, 0, 0, 0, 0, 0, }; #undef EVENT_PRIO_ANT /* BT Antenna Coupling Threshold (dB) */ #define IWL_BT_ANTENNA_COUPLING_THRESHOLD (35) #define IWL_BT_LOAD_FORCE_SISO_THRESHOLD (3) #define BT_ENABLE_REDUCED_TXPOWER_THRESHOLD (-62) #define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD (-65) #define BT_REDUCED_TX_POWER_BIT BIT(7) static inline bool is_loose_coex(void) { return iwlwifi_mod_params.ant_coupling > IWL_BT_ANTENNA_COUPLING_THRESHOLD; } int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm) { return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, CMD_SYNC, sizeof(struct iwl_bt_coex_prio_tbl_cmd), &iwl_bt_prio_tbl); } static int iwl_send_bt_env(struct iwl_mvm *mvm, u8 action, u8 type) { struct iwl_bt_coex_prot_env_cmd env_cmd; int ret; env_cmd.action = action; env_cmd.type = type; ret = iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PROT_ENV, CMD_SYNC, sizeof(env_cmd), &env_cmd); if (ret) IWL_ERR(mvm, "failed to send BT env command\n"); return ret; } enum iwl_bt_kill_msk { BT_KILL_MSK_DEFAULT, BT_KILL_MSK_SCO_HID_A2DP, BT_KILL_MSK_REDUCED_TXPOW, BT_KILL_MSK_MAX, }; static const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX] = { [BT_KILL_MSK_DEFAULT] = 0xffff0000, [BT_KILL_MSK_SCO_HID_A2DP] = 0xffffffff, [BT_KILL_MSK_REDUCED_TXPOW] = 0, }; static const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX] = { [BT_KILL_MSK_DEFAULT] = 0xffff0000, [BT_KILL_MSK_SCO_HID_A2DP] = 0xffffffff, [BT_KILL_MSK_REDUCED_TXPOW] = 0, }; #define IWL_BT_DEFAULT_BOOST (0xf0f0f0f0) /* Tight Coex */ static const __le32 iwl_tight_lookup[BT_COEX_LUT_SIZE] = { cpu_to_le32(0xaaaaaaaa), cpu_to_le32(0xaaaaaaaa), cpu_to_le32(0xaeaaaaaa), cpu_to_le32(0xaaaaaaaa), cpu_to_le32(0xcc00ff28), cpu_to_le32(0x0000aaaa), cpu_to_le32(0xcc00aaaa), cpu_to_le32(0x0000aaaa), cpu_to_le32(0xc0004000), cpu_to_le32(0x00000000), cpu_to_le32(0xf0005000), cpu_to_le32(0xf0005000), }; /* Loose Coex */ static const __le32 iwl_loose_lookup[BT_COEX_LUT_SIZE] = { cpu_to_le32(0xaaaaaaaa), cpu_to_le32(0xaaaaaaaa), 
cpu_to_le32(0xaeaaaaaa), cpu_to_le32(0xaaaaaaaa), cpu_to_le32(0xcc00ff28), cpu_to_le32(0x0000aaaa), cpu_to_le32(0xcc00aaaa), cpu_to_le32(0x0000aaaa), cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), cpu_to_le32(0xf0005000), cpu_to_le32(0xf0005000), }; /* Full concurrency */ static const __le32 iwl_concurrent_lookup[BT_COEX_LUT_SIZE] = { cpu_to_le32(0xaaaaaaaa), cpu_to_le32(0xaaaaaaaa), cpu_to_le32(0xaaaaaaaa), cpu_to_le32(0xaaaaaaaa), cpu_to_le32(0xaaaaaaaa), cpu_to_le32(0xaaaaaaaa), cpu_to_le32(0xaaaaaaaa), cpu_to_le32(0xaaaaaaaa), cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), }; int iwl_send_bt_init_conf(struct iwl_mvm *mvm) { struct iwl_bt_coex_cmd cmd = { .max_kill = 5, .bt3_time_t7_value = 1, .bt3_prio_sample_time = 2, .bt3_timer_t2_value = 0xc, }; int ret; cmd.flags = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE; cmd.flags |= BT_CH_PRIMARY_EN | BT_SYNC_2_BT_DISABLE; cmd.valid_bit_msk = cpu_to_le16(BT_VALID_ENABLE | BT_VALID_BT_PRIO_BOOST | BT_VALID_MAX_KILL | BT_VALID_3W_TMRS | BT_VALID_KILL_ACK | BT_VALID_KILL_CTS | BT_VALID_REDUCED_TX_POWER | BT_VALID_LUT); if (is_loose_coex()) memcpy(&cmd.decision_lut, iwl_loose_lookup, sizeof(iwl_tight_lookup)); else memcpy(&cmd.decision_lut, iwl_tight_lookup, sizeof(iwl_tight_lookup)); cmd.bt_prio_boost = cpu_to_le32(IWL_BT_DEFAULT_BOOST); cmd.kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[BT_KILL_MSK_DEFAULT]); cmd.kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[BT_KILL_MSK_DEFAULT]); memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); /* go to CALIB state in internal BT-Coex state machine */ ret = iwl_send_bt_env(mvm, BT_COEX_ENV_OPEN, BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); if (ret) return ret; ret = iwl_send_bt_env(mvm, BT_COEX_ENV_CLOSE, BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); if (ret) return ret; return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_SYNC, sizeof(cmd), &cmd); } static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm, bool 
reduced_tx_power) { enum iwl_bt_kill_msk bt_kill_msk; struct iwl_bt_coex_cmd cmd = {}; struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif; lockdep_assert_held(&mvm->mutex); if (reduced_tx_power) { /* Reduced Tx power has precedence on the type of the profile */ bt_kill_msk = BT_KILL_MSK_REDUCED_TXPOW; } else { /* Low latency BT profile is active: give higher prio to BT */ if (BT_MBOX_MSG(notif, 3, SCO_STATE) || BT_MBOX_MSG(notif, 3, A2DP_STATE) || BT_MBOX_MSG(notif, 3, SNIFF_STATE)) bt_kill_msk = BT_KILL_MSK_SCO_HID_A2DP; else bt_kill_msk = BT_KILL_MSK_DEFAULT; } IWL_DEBUG_COEX(mvm, "Update kill_msk: %d - SCO %sactive A2DP %sactive SNIFF %sactive\n", bt_kill_msk, BT_MBOX_MSG(notif, 3, SCO_STATE) ? "" : "in", BT_MBOX_MSG(notif, 3, A2DP_STATE) ? "" : "in", BT_MBOX_MSG(notif, 3, SNIFF_STATE) ? "" : "in"); /* Don't send HCMD if there is no update */ if (bt_kill_msk == mvm->bt_kill_msk) return 0; mvm->bt_kill_msk = bt_kill_msk; cmd.kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]); cmd.kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]); cmd.valid_bit_msk = cpu_to_le16(BT_VALID_KILL_ACK | BT_VALID_KILL_CTS); IWL_DEBUG_COEX(mvm, "bt_kill_msk = %d\n", bt_kill_msk); return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_SYNC, sizeof(cmd), &cmd); } static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable) { struct iwl_bt_coex_cmd cmd = { .valid_bit_msk = cpu_to_le16(BT_VALID_REDUCED_TX_POWER), .bt_reduced_tx_power = sta_id, }; struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; /* This can happen if the station has been removed right now */ if (sta_id == IWL_MVM_STATION_COUNT) return 0; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); mvmsta = (void *)sta->drv_priv; /* nothing to do */ if (mvmsta->bt_reduced_txpower == enable) return 0; if (enable) cmd.bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT; IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n", enable ? 
"en" : "dis", sta_id); mvmsta->bt_reduced_txpower = enable; /* Send ASYNC since this can be sent from an atomic context */ return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_ASYNC, sizeof(cmd), &cmd); } struct iwl_bt_iterator_data { struct iwl_bt_coex_profile_notif *notif; struct iwl_mvm *mvm; u32 num_bss_ifaces; bool reduced_tx_power; }; static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_bt_iterator_data *data = _data; struct iwl_mvm *mvm = data->mvm; struct ieee80211_chanctx_conf *chanctx_conf; enum ieee80211_smps_mode smps_mode; enum ieee80211_band band; int ave_rssi; if (vif->type != NL80211_IFTYPE_STATION) return; rcu_read_lock(); chanctx_conf = rcu_dereference(vif->chanctx_conf); if (chanctx_conf && chanctx_conf->def.chan) band = chanctx_conf->def.chan->band; else band = -1; rcu_read_unlock(); smps_mode = IEEE80211_SMPS_AUTOMATIC; if (band != IEEE80211_BAND_2GHZ) { ieee80211_request_smps(vif, smps_mode); return; } if (data->notif->bt_status) smps_mode = IEEE80211_SMPS_DYNAMIC; if (data->notif->bt_traffic_load >= IWL_BT_LOAD_FORCE_SISO_THRESHOLD) smps_mode = IEEE80211_SMPS_STATIC; IWL_DEBUG_COEX(data->mvm, "mac %d: bt_status %d traffic_load %d smps_req %d\n", mvmvif->id, data->notif->bt_status, data->notif->bt_traffic_load, smps_mode); ieee80211_request_smps(vif, smps_mode); /* don't reduce the Tx power if in loose scheme */ if (is_loose_coex()) return; data->num_bss_ifaces++; /* reduced Txpower only if there are open BT connections, so ...*/ if (!BT_MBOX_MSG(data->notif, 3, OPEN_CON_2)) { /* ... cancel reduced Tx power ... */ if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false)) IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n"); data->reduced_tx_power = false; /* ... and there is no need to get reports on RSSI any more. 
*/ ieee80211_disable_rssi_reports(vif); return; } ave_rssi = ieee80211_ave_rssi(vif); /* if the RSSI isn't valid, fake it is very low */ if (!ave_rssi) ave_rssi = -100; if (ave_rssi > BT_ENABLE_REDUCED_TXPOWER_THRESHOLD) { if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true)) IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n"); /* * bt_kill_msk can be BT_KILL_MSK_REDUCED_TXPOW only if all the * BSS / P2P clients have rssi above threshold. * We set the bt_kill_msk to BT_KILL_MSK_REDUCED_TXPOW before * the iteration, if one interface's rssi isn't good enough, * bt_kill_msk will be set to default values. */ } else if (ave_rssi < BT_DISABLE_REDUCED_TXPOWER_THRESHOLD) { if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false)) IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n"); /* * One interface hasn't rssi above threshold, bt_kill_msk must * be set to default values. */ data->reduced_tx_power = false; } /* Begin to monitor the RSSI: it may influence the reduced Tx power */ ieee80211_enable_rssi_reports(vif, BT_DISABLE_REDUCED_TXPOWER_THRESHOLD, BT_ENABLE_REDUCED_TXPOWER_THRESHOLD); } static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm) { struct iwl_bt_iterator_data data = { .mvm = mvm, .notif = &mvm->last_bt_notif, .reduced_tx_power = true, }; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_bt_notif_iterator, &data); /* * If there are no BSS / P2P client interfaces, reduced Tx Power is * irrelevant since it is based on the RSSI coming from the beacon. * Use BT_KILL_MSK_DEFAULT in that case. 
*/ data.reduced_tx_power = data.reduced_tx_power && data.num_bss_ifaces; if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm, data.reduced_tx_power)) IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n"); } /* upon association, the fw will send in BT Coex notification */ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *dev_cmd) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data; IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n"); IWL_DEBUG_COEX(mvm, "\tBT %salive\n", notif->bt_status ? "" : "not "); IWL_DEBUG_COEX(mvm, "\tBT open conn %d\n", notif->bt_open_conn); IWL_DEBUG_COEX(mvm, "\tBT traffic load %d\n", notif->bt_traffic_load); IWL_DEBUG_COEX(mvm, "\tBT agg traffic load %d\n", notif->bt_agg_traffic_load); IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance); /* remember this notification for future use: rssi fluctuations */ memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif)); iwl_mvm_bt_coex_notif_handle(mvm); /* * This is an async handler for a notification, returning anything other * than 0 doesn't make sense even if HCMD failed. */ return 0; } static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv; struct iwl_bt_iterator_data *data = _data; struct iwl_mvm *mvm = data->mvm; struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; if (vif->type != NL80211_IFTYPE_STATION || mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) return; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], lockdep_is_held(&mvm->mutex)); mvmsta = (void *)sta->drv_priv; /* * This interface doesn't support reduced Tx power (because of low * RSSI probably), then set bt_kill_msk to default values. 
*/ if (!mvmsta->bt_reduced_txpower) data->reduced_tx_power = false; /* else - possibly leave it to BT_KILL_MSK_REDUCED_TXPOW */ } void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum ieee80211_rssi_event rssi_event) { struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv; struct iwl_bt_iterator_data data = { .mvm = mvm, .reduced_tx_power = true, }; int ret; mutex_lock(&mvm->mutex); /* Rssi update while not associated ?! */ if (WARN_ON_ONCE(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)) goto out_unlock; /* No open connection - reports should be disabled */ if (!BT_MBOX_MSG(&mvm->last_bt_notif, 3, OPEN_CON_2)) goto out_unlock; IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid, rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW"); /* * Check if rssi is good enough for reduced Tx power, but not in loose * scheme. */ if (rssi_event == RSSI_EVENT_LOW || is_loose_coex()) ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false); else ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true); if (ret) IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n"); ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_bt_rssi_iterator, &data); /* * If there are no BSS / P2P client interfaces, reduced Tx Power is * irrelevant since it is based on the RSSI coming from the beacon. * Use BT_KILL_MSK_DEFAULT in that case. 
*/ data.reduced_tx_power = data.reduced_tx_power && data.num_bss_ifaces; if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm, data.reduced_tx_power)) IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n"); out_unlock: mutex_unlock(&mvm->mutex); } void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct ieee80211_chanctx_conf *chanctx_conf; enum ieee80211_band band; rcu_read_lock(); chanctx_conf = rcu_dereference(vif->chanctx_conf); if (chanctx_conf && chanctx_conf->def.chan) band = chanctx_conf->def.chan->band; else band = -1; rcu_read_unlock(); /* if we are in 2GHz we will get a notification from the fw */ if (band == IEEE80211_BAND_2GHZ) return; /* else, we can remove all the constraints */ memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); iwl_mvm_bt_coex_notif_handle(mvm); }
gpl-2.0
go2ev-devteam/Gplus_2159_0801
openplatform/sdk/os/kernel-2.6.32/kernel/audit.c
503
40372
/* audit.c -- Auditing support * Gateway between the kernel (e.g., selinux) and the user-space audit daemon. * System-call specific features have moved to auditsc.c * * Copyright 2003-2007 Red Hat Inc., Durham, North Carolina. * All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Written by Rickard E. (Rik) Faith <faith@redhat.com> * * Goals: 1) Integrate fully with Security Modules. * 2) Minimal run-time overhead: * a) Minimal when syscall auditing is disabled (audit_enable=0). * b) Small when syscall auditing is enabled and no audit record * is generated (defer as much work as possible to record * generation time): * i) context is allocated, * ii) names from getname are stored without a copy, and * iii) inode information stored from path_lookup. * 3) Ability to disable syscall auditing at boot time (audit=0). * 4) Usable by other parts of the kernel (if audit_log* is called, * then a syscall record will be generated automatically for the * current syscall). * 5) Netlink interface to user-space. * 6) Support low-overhead kernel-based filtering to minimize the * information that must be passed to user-space. 
* * Example user-space utilities: http://people.redhat.com/sgrubb/audit/ */ #include <linux/init.h> #include <asm/types.h> #include <asm/atomic.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/err.h> #include <linux/kthread.h> #include <linux/audit.h> #include <net/sock.h> #include <net/netlink.h> #include <linux/skbuff.h> #include <linux/netlink.h> #include <linux/inotify.h> #include <linux/freezer.h> #include <linux/tty.h> #include "audit.h" /* No auditing will take place until audit_initialized == AUDIT_INITIALIZED. * (Initialization happens after skb_init is called.) */ #define AUDIT_DISABLED -1 #define AUDIT_UNINITIALIZED 0 #define AUDIT_INITIALIZED 1 static int audit_initialized; #define AUDIT_OFF 0 #define AUDIT_ON 1 #define AUDIT_LOCKED 2 int audit_enabled; int audit_ever_enabled; /* Default state when kernel boots without any parameters. */ static int audit_default; /* If auditing cannot proceed, audit_failure selects what happens. */ static int audit_failure = AUDIT_FAIL_PRINTK; /* * If audit records are to be written to the netlink socket, audit_pid * contains the pid of the auditd process and audit_nlk_pid contains * the pid to use to send netlink messages to that process. */ int audit_pid; static int audit_nlk_pid; /* If audit_rate_limit is non-zero, limit the rate of sending audit records * to that number per second. This prevents DoS attacks, but results in * audit records being dropped. */ static int audit_rate_limit; /* Number of outstanding audit_buffers allowed. */ static int audit_backlog_limit = 64; static int audit_backlog_wait_time = 60 * HZ; static int audit_backlog_wait_overflow = 0; /* The identity of the user shutting down the audit system. 
*/ uid_t audit_sig_uid = -1; pid_t audit_sig_pid = -1; u32 audit_sig_sid = 0; /* Records can be lost in several ways: 0) [suppressed in audit_alloc] 1) out of memory in audit_log_start [kmalloc of struct audit_buffer] 2) out of memory in audit_log_move [alloc_skb] 3) suppressed due to audit_rate_limit 4) suppressed due to audit_backlog_limit */ static atomic_t audit_lost = ATOMIC_INIT(0); /* The netlink socket. */ static struct sock *audit_sock; /* Hash for inode-based rules */ struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS]; /* The audit_freelist is a list of pre-allocated audit buffers (if more * than AUDIT_MAXFREE are in use, the audit buffer is freed instead of * being placed on the freelist). */ static DEFINE_SPINLOCK(audit_freelist_lock); static int audit_freelist_count; static LIST_HEAD(audit_freelist); static struct sk_buff_head audit_skb_queue; /* queue of skbs to send to auditd when/if it comes back */ static struct sk_buff_head audit_skb_hold_queue; static struct task_struct *kauditd_task; static DECLARE_WAIT_QUEUE_HEAD(kauditd_wait); static DECLARE_WAIT_QUEUE_HEAD(audit_backlog_wait); /* Serialize requests from userspace. */ DEFINE_MUTEX(audit_cmd_mutex); /* AUDIT_BUFSIZ is the size of the temporary buffer used for formatting * audit records. Since printk uses a 1024 byte buffer, this buffer * should be at least that large. */ #define AUDIT_BUFSIZ 1024 /* AUDIT_MAXFREE is the number of empty audit_buffers we keep on the * audit_freelist. Doing so eliminates many kmalloc/kfree calls. */ #define AUDIT_MAXFREE (2*NR_CPUS) /* The audit_buffer is used when formatting an audit record. The caller * locks briefly to get the record off the freelist or to allocate the * buffer, and locks briefly to send the buffer to the netlink layer or * to place it on a transmit queue. Multiple audit_buffers can be in * use simultaneously. 
*/ struct audit_buffer { struct list_head list; struct sk_buff *skb; /* formatted skb ready to send */ struct audit_context *ctx; /* NULL or associated context */ gfp_t gfp_mask; }; struct audit_reply { int pid; struct sk_buff *skb; }; static void audit_set_pid(struct audit_buffer *ab, pid_t pid) { if (ab) { struct nlmsghdr *nlh = nlmsg_hdr(ab->skb); nlh->nlmsg_pid = pid; } } void audit_panic(const char *message) { switch (audit_failure) { case AUDIT_FAIL_SILENT: break; case AUDIT_FAIL_PRINTK: if (printk_ratelimit()) printk(KERN_ERR "audit: %s\n", message); break; case AUDIT_FAIL_PANIC: /* test audit_pid since printk is always losey, why bother? */ if (audit_pid) panic("audit: %s\n", message); break; } } static inline int audit_rate_check(void) { static unsigned long last_check = 0; static int messages = 0; static DEFINE_SPINLOCK(lock); unsigned long flags; unsigned long now; unsigned long elapsed; int retval = 0; if (!audit_rate_limit) return 1; spin_lock_irqsave(&lock, flags); if (++messages < audit_rate_limit) { retval = 1; } else { now = jiffies; elapsed = now - last_check; if (elapsed > HZ) { last_check = now; messages = 0; retval = 1; } } spin_unlock_irqrestore(&lock, flags); return retval; } /** * audit_log_lost - conditionally log lost audit message event * @message: the message stating reason for lost audit message * * Emit at least 1 message per second, even if audit_rate_check is * throttling. * Always increment the lost messages counter. 
*/ void audit_log_lost(const char *message) { static unsigned long last_msg = 0; static DEFINE_SPINLOCK(lock); unsigned long flags; unsigned long now; int print; atomic_inc(&audit_lost); print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit); if (!print) { spin_lock_irqsave(&lock, flags); now = jiffies; if (now - last_msg > HZ) { print = 1; last_msg = now; } spin_unlock_irqrestore(&lock, flags); } if (print) { if (printk_ratelimit()) printk(KERN_WARNING "audit: audit_lost=%d audit_rate_limit=%d " "audit_backlog_limit=%d\n", atomic_read(&audit_lost), audit_rate_limit, audit_backlog_limit); audit_panic(message); } } static int audit_log_config_change(char *function_name, int new, int old, uid_t loginuid, u32 sessionid, u32 sid, int allow_changes) { struct audit_buffer *ab; int rc = 0; ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE); audit_log_format(ab, "%s=%d old=%d auid=%u ses=%u", function_name, new, old, loginuid, sessionid); if (sid) { char *ctx = NULL; u32 len; rc = security_secid_to_secctx(sid, &ctx, &len); if (rc) { audit_log_format(ab, " sid=%u", sid); allow_changes = 0; /* Something weird, deny request */ } else { audit_log_format(ab, " subj=%s", ctx); security_release_secctx(ctx, len); } } audit_log_format(ab, " res=%d", allow_changes); audit_log_end(ab); return rc; } static int audit_do_config_change(char *function_name, int *to_change, int new, uid_t loginuid, u32 sessionid, u32 sid) { int allow_changes, rc = 0, old = *to_change; /* check if we are locked */ if (audit_enabled == AUDIT_LOCKED) allow_changes = 0; else allow_changes = 1; if (audit_enabled != AUDIT_OFF) { rc = audit_log_config_change(function_name, new, old, loginuid, sessionid, sid, allow_changes); if (rc) allow_changes = 0; } /* If we are allowed, make the change */ if (allow_changes == 1) *to_change = new; /* Not allowed, update reason */ else if (rc == 0) rc = -EPERM; return rc; } static int audit_set_rate_limit(int limit, uid_t loginuid, u32 sessionid, u32 sid) { 
return audit_do_config_change("audit_rate_limit", &audit_rate_limit, limit, loginuid, sessionid, sid); } static int audit_set_backlog_limit(int limit, uid_t loginuid, u32 sessionid, u32 sid) { return audit_do_config_change("audit_backlog_limit", &audit_backlog_limit, limit, loginuid, sessionid, sid); } static int audit_set_enabled(int state, uid_t loginuid, u32 sessionid, u32 sid) { int rc; if (state < AUDIT_OFF || state > AUDIT_LOCKED) return -EINVAL; rc = audit_do_config_change("audit_enabled", &audit_enabled, state, loginuid, sessionid, sid); if (!rc) audit_ever_enabled |= !!state; return rc; } static int audit_set_failure(int state, uid_t loginuid, u32 sessionid, u32 sid) { if (state != AUDIT_FAIL_SILENT && state != AUDIT_FAIL_PRINTK && state != AUDIT_FAIL_PANIC) return -EINVAL; return audit_do_config_change("audit_failure", &audit_failure, state, loginuid, sessionid, sid); } /* * Queue skbs to be sent to auditd when/if it comes back. These skbs should * already have been sent via prink/syslog and so if these messages are dropped * it is not a huge concern since we already passed the audit_log_lost() * notification and stuff. This is just nice to get audit messages during * boot before auditd is running or messages generated while auditd is stopped. * This only holds messages is audit_default is set, aka booting with audit=1 * or building your kernel that way. */ static void audit_hold_skb(struct sk_buff *skb) { if (audit_default && skb_queue_len(&audit_skb_hold_queue) < audit_backlog_limit) skb_queue_tail(&audit_skb_hold_queue, skb); else kfree_skb(skb); } /* * For one reason or another this nlh isn't getting delivered to the userspace * audit daemon, just send it to printk. 
*/ static void audit_printk_skb(struct sk_buff *skb) { struct nlmsghdr *nlh = nlmsg_hdr(skb); char *data = NLMSG_DATA(nlh); if (nlh->nlmsg_type != AUDIT_EOE) { if (printk_ratelimit()) printk(KERN_NOTICE "type=%d %s\n", nlh->nlmsg_type, data); else audit_log_lost("printk limit exceeded\n"); } audit_hold_skb(skb); } static void kauditd_send_skb(struct sk_buff *skb) { int err; /* take a reference in case we can't send it and we want to hold it */ skb_get(skb); err = netlink_unicast(audit_sock, skb, audit_nlk_pid, 0); if (err < 0) { BUG_ON(err != -ECONNREFUSED); /* Shoudn't happen */ printk(KERN_ERR "audit: *NO* daemon at audit_pid=%d\n", audit_pid); audit_log_lost("auditd dissapeared\n"); audit_pid = 0; /* we might get lucky and get this in the next auditd */ audit_hold_skb(skb); } else /* drop the extra reference if sent ok */ kfree_skb(skb); } static int kauditd_thread(void *dummy) { struct sk_buff *skb; set_freezable(); while (!kthread_should_stop()) { /* * if auditd just started drain the queue of messages already * sent to syslog/printk. remember loss here is ok. we already * called audit_log_lost() if it didn't go out normally. so the * race between the skb_dequeue and the next check for audit_pid * doesn't matter. * * if you ever find kauditd to be too slow we can get a perf win * by doing our own locking and keeping better track if there * are messages in this queue. I don't see the need now, but * in 5 years when I want to play with this again I'll see this * note and still have no friggin idea what i'm thinking today. 
*/ if (audit_default && audit_pid) { skb = skb_dequeue(&audit_skb_hold_queue); if (unlikely(skb)) { while (skb && audit_pid) { kauditd_send_skb(skb); skb = skb_dequeue(&audit_skb_hold_queue); } } } skb = skb_dequeue(&audit_skb_queue); wake_up(&audit_backlog_wait); if (skb) { if (audit_pid) kauditd_send_skb(skb); else audit_printk_skb(skb); } else { DECLARE_WAITQUEUE(wait, current); set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&kauditd_wait, &wait); if (!skb_queue_len(&audit_skb_queue)) { try_to_freeze(); schedule(); } __set_current_state(TASK_RUNNING); remove_wait_queue(&kauditd_wait, &wait); } } return 0; } static int audit_prepare_user_tty(pid_t pid, uid_t loginuid, u32 sessionid) { struct task_struct *tsk; int err; read_lock(&tasklist_lock); tsk = find_task_by_vpid(pid); err = -ESRCH; if (!tsk) goto out; err = 0; spin_lock_irq(&tsk->sighand->siglock); if (!tsk->signal->audit_tty) err = -EPERM; spin_unlock_irq(&tsk->sighand->siglock); if (err) goto out; tty_audit_push_task(tsk, loginuid, sessionid); out: read_unlock(&tasklist_lock); return err; } int audit_send_list(void *_dest) { struct audit_netlink_list *dest = _dest; int pid = dest->pid; struct sk_buff *skb; /* wait for parent to finish and send an ACK */ mutex_lock(&audit_cmd_mutex); mutex_unlock(&audit_cmd_mutex); while ((skb = __skb_dequeue(&dest->q)) != NULL) netlink_unicast(audit_sock, skb, pid, 0); kfree(dest); return 0; } struct sk_buff *audit_make_reply(int pid, int seq, int type, int done, int multi, void *payload, int size) { struct sk_buff *skb; struct nlmsghdr *nlh; void *data; int flags = multi ? NLM_F_MULTI : 0; int t = done ? 
NLMSG_DONE : type; skb = nlmsg_new(size, GFP_KERNEL); if (!skb) return NULL; nlh = NLMSG_NEW(skb, pid, seq, t, size, flags); data = NLMSG_DATA(nlh); memcpy(data, payload, size); return skb; nlmsg_failure: /* Used by NLMSG_NEW */ if (skb) kfree_skb(skb); return NULL; } static int audit_send_reply_thread(void *arg) { struct audit_reply *reply = (struct audit_reply *)arg; mutex_lock(&audit_cmd_mutex); mutex_unlock(&audit_cmd_mutex); /* Ignore failure. It'll only happen if the sender goes away, because our timeout is set to infinite. */ netlink_unicast(audit_sock, reply->skb, reply->pid, 0); kfree(reply); return 0; } /** * audit_send_reply - send an audit reply message via netlink * @pid: process id to send reply to * @seq: sequence number * @type: audit message type * @done: done (last) flag * @multi: multi-part message flag * @payload: payload data * @size: payload size * * Allocates an skb, builds the netlink message, and sends it to the pid. * No failure notifications. */ void audit_send_reply(int pid, int seq, int type, int done, int multi, void *payload, int size) { struct sk_buff *skb; struct task_struct *tsk; struct audit_reply *reply = kmalloc(sizeof(struct audit_reply), GFP_KERNEL); if (!reply) return; skb = audit_make_reply(pid, seq, type, done, multi, payload, size); if (!skb) goto out; reply->pid = pid; reply->skb = skb; tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply"); if (!IS_ERR(tsk)) return; kfree_skb(skb); out: kfree(reply); } /* * Check for appropriate CAP_AUDIT_ capabilities on incoming audit * control messages. 
*/ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type) { int err = 0; switch (msg_type) { case AUDIT_GET: case AUDIT_LIST: case AUDIT_LIST_RULES: case AUDIT_SET: case AUDIT_ADD: case AUDIT_ADD_RULE: case AUDIT_DEL: case AUDIT_DEL_RULE: case AUDIT_SIGNAL_INFO: case AUDIT_TTY_GET: case AUDIT_TTY_SET: case AUDIT_TRIM: case AUDIT_MAKE_EQUIV: if (security_netlink_recv(skb, CAP_AUDIT_CONTROL)) err = -EPERM; break; case AUDIT_USER: case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG: case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2: if (security_netlink_recv(skb, CAP_AUDIT_WRITE)) err = -EPERM; break; default: /* bad msg */ err = -EINVAL; } return err; } static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type, u32 pid, u32 uid, uid_t auid, u32 ses, u32 sid) { int rc = 0; char *ctx = NULL; u32 len; if (!audit_enabled) { *ab = NULL; return rc; } *ab = audit_log_start(NULL, GFP_KERNEL, msg_type); audit_log_format(*ab, "user pid=%d uid=%u auid=%u ses=%u", pid, uid, auid, ses); if (sid) { rc = security_secid_to_secctx(sid, &ctx, &len); if (rc) audit_log_format(*ab, " ssid=%u", sid); else { audit_log_format(*ab, " subj=%s", ctx); security_release_secctx(ctx, len); } } return rc; } static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) { u32 uid, pid, seq, sid; void *data; struct audit_status *status_get, status_set; int err; struct audit_buffer *ab; u16 msg_type = nlh->nlmsg_type; uid_t loginuid; /* loginuid of sender */ u32 sessionid; struct audit_sig_info *sig_data; char *ctx = NULL; u32 len; err = audit_netlink_ok(skb, msg_type); if (err) return err; /* As soon as there's any sign of userspace auditd, * start kauditd to talk to it */ if (!kauditd_task) kauditd_task = kthread_run(kauditd_thread, NULL, "kauditd"); if (IS_ERR(kauditd_task)) { err = PTR_ERR(kauditd_task); kauditd_task = NULL; return err; } pid = NETLINK_CREDS(skb)->pid; uid = NETLINK_CREDS(skb)->uid; loginuid = NETLINK_CB(skb).loginuid; sessionid = 
NETLINK_CB(skb).sessionid; sid = NETLINK_CB(skb).sid; seq = nlh->nlmsg_seq; data = NLMSG_DATA(nlh); switch (msg_type) { case AUDIT_GET: status_set.enabled = audit_enabled; status_set.failure = audit_failure; status_set.pid = audit_pid; status_set.rate_limit = audit_rate_limit; status_set.backlog_limit = audit_backlog_limit; status_set.lost = atomic_read(&audit_lost); status_set.backlog = skb_queue_len(&audit_skb_queue); audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0, &status_set, sizeof(status_set)); break; case AUDIT_SET: if (nlh->nlmsg_len < sizeof(struct audit_status)) return -EINVAL; status_get = (struct audit_status *)data; if (status_get->mask & AUDIT_STATUS_ENABLED) { err = audit_set_enabled(status_get->enabled, loginuid, sessionid, sid); if (err < 0) return err; } if (status_get->mask & AUDIT_STATUS_FAILURE) { err = audit_set_failure(status_get->failure, loginuid, sessionid, sid); if (err < 0) return err; } if (status_get->mask & AUDIT_STATUS_PID) { int new_pid = status_get->pid; if (audit_enabled != AUDIT_OFF) audit_log_config_change("audit_pid", new_pid, audit_pid, loginuid, sessionid, sid, 1); audit_pid = new_pid; audit_nlk_pid = NETLINK_CB(skb).pid; } if (status_get->mask & AUDIT_STATUS_RATE_LIMIT) { err = audit_set_rate_limit(status_get->rate_limit, loginuid, sessionid, sid); if (err < 0) return err; } if (status_get->mask & AUDIT_STATUS_BACKLOG_LIMIT) err = audit_set_backlog_limit(status_get->backlog_limit, loginuid, sessionid, sid); break; case AUDIT_USER: case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG: case AUDIT_FIRST_USER_MSG2 ... 
AUDIT_LAST_USER_MSG2: if (!audit_enabled && msg_type != AUDIT_USER_AVC) return 0; err = audit_filter_user(&NETLINK_CB(skb)); if (err == 1) { err = 0; if (msg_type == AUDIT_USER_TTY) { err = audit_prepare_user_tty(pid, loginuid, sessionid); if (err) break; } audit_log_common_recv_msg(&ab, msg_type, pid, uid, loginuid, sessionid, sid); if (msg_type != AUDIT_USER_TTY) audit_log_format(ab, " msg='%.1024s'", (char *)data); else { int size; audit_log_format(ab, " msg="); size = nlmsg_len(nlh); if (size > 0 && ((unsigned char *)data)[size - 1] == '\0') size--; audit_log_n_untrustedstring(ab, data, size); } audit_set_pid(ab, pid); audit_log_end(ab); } break; case AUDIT_ADD: case AUDIT_DEL: if (nlmsg_len(nlh) < sizeof(struct audit_rule)) return -EINVAL; if (audit_enabled == AUDIT_LOCKED) { audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid, uid, loginuid, sessionid, sid); audit_log_format(ab, " audit_enabled=%d res=0", audit_enabled); audit_log_end(ab); return -EPERM; } /* fallthrough */ case AUDIT_LIST: err = audit_receive_filter(msg_type, NETLINK_CB(skb).pid, uid, seq, data, nlmsg_len(nlh), loginuid, sessionid, sid); break; case AUDIT_ADD_RULE: case AUDIT_DEL_RULE: if (nlmsg_len(nlh) < sizeof(struct audit_rule_data)) return -EINVAL; if (audit_enabled == AUDIT_LOCKED) { audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid, uid, loginuid, sessionid, sid); audit_log_format(ab, " audit_enabled=%d res=0", audit_enabled); audit_log_end(ab); return -EPERM; } /* fallthrough */ case AUDIT_LIST_RULES: err = audit_receive_filter(msg_type, NETLINK_CB(skb).pid, uid, seq, data, nlmsg_len(nlh), loginuid, sessionid, sid); break; case AUDIT_TRIM: audit_trim_trees(); audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid, uid, loginuid, sessionid, sid); audit_log_format(ab, " op=trim res=1"); audit_log_end(ab); break; case AUDIT_MAKE_EQUIV: { void *bufp = data; u32 sizes[2]; size_t msglen = nlmsg_len(nlh); char *old, *new; err = -EINVAL; if (msglen < 2 * sizeof(u32)) break; 
memcpy(sizes, bufp, 2 * sizeof(u32)); bufp += 2 * sizeof(u32); msglen -= 2 * sizeof(u32); old = audit_unpack_string(&bufp, &msglen, sizes[0]); if (IS_ERR(old)) { err = PTR_ERR(old); break; } new = audit_unpack_string(&bufp, &msglen, sizes[1]); if (IS_ERR(new)) { err = PTR_ERR(new); kfree(old); break; } /* OK, here comes... */ err = audit_tag_tree(old, new); audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid, uid, loginuid, sessionid, sid); audit_log_format(ab, " op=make_equiv old="); audit_log_untrustedstring(ab, old); audit_log_format(ab, " new="); audit_log_untrustedstring(ab, new); audit_log_format(ab, " res=%d", !err); audit_log_end(ab); kfree(old); kfree(new); break; } case AUDIT_SIGNAL_INFO: len = 0; if (audit_sig_sid) { err = security_secid_to_secctx(audit_sig_sid, &ctx, &len); if (err) return err; } sig_data = kmalloc(sizeof(*sig_data) + len, GFP_KERNEL); if (!sig_data) { if (audit_sig_sid) security_release_secctx(ctx, len); return -ENOMEM; } sig_data->uid = audit_sig_uid; sig_data->pid = audit_sig_pid; if (audit_sig_sid) { memcpy(sig_data->ctx, ctx, len); security_release_secctx(ctx, len); } audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_SIGNAL_INFO, 0, 0, sig_data, sizeof(*sig_data) + len); kfree(sig_data); break; case AUDIT_TTY_GET: { struct audit_tty_status s; struct task_struct *tsk; read_lock(&tasklist_lock); tsk = find_task_by_vpid(pid); if (!tsk) err = -ESRCH; else { spin_lock_irq(&tsk->sighand->siglock); s.enabled = tsk->signal->audit_tty != 0; spin_unlock_irq(&tsk->sighand->siglock); } read_unlock(&tasklist_lock); audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0, &s, sizeof(s)); break; } case AUDIT_TTY_SET: { struct audit_tty_status *s; struct task_struct *tsk; if (nlh->nlmsg_len < sizeof(struct audit_tty_status)) return -EINVAL; s = data; if (s->enabled != 0 && s->enabled != 1) return -EINVAL; read_lock(&tasklist_lock); tsk = find_task_by_vpid(pid); if (!tsk) err = -ESRCH; else { spin_lock_irq(&tsk->sighand->siglock); 
tsk->signal->audit_tty = s->enabled != 0; spin_unlock_irq(&tsk->sighand->siglock); } read_unlock(&tasklist_lock); break; } default: err = -EINVAL; break; } return err < 0 ? err : 0; } /* * Get message from skb. Each message is processed by audit_receive_msg. * Malformed skbs with wrong length are discarded silently. */ static void audit_receive_skb(struct sk_buff *skb) { struct nlmsghdr *nlh; /* * len MUST be signed for NLMSG_NEXT to be able to dec it below 0 * if the nlmsg_len was not aligned */ int len; int err; nlh = nlmsg_hdr(skb); len = skb->len; while (NLMSG_OK(nlh, len)) { err = audit_receive_msg(skb, nlh); /* if err or if this message says it wants a response */ if (err || (nlh->nlmsg_flags & NLM_F_ACK)) netlink_ack(skb, nlh, err); nlh = NLMSG_NEXT(nlh, len); } } /* Receive messages from netlink socket. */ static void audit_receive(struct sk_buff *skb) { mutex_lock(&audit_cmd_mutex); audit_receive_skb(skb); mutex_unlock(&audit_cmd_mutex); } /* Initialize audit support at boot time. */ static int __init audit_init(void) { int i; if (audit_initialized == AUDIT_DISABLED) return 0; printk(KERN_INFO "audit: initializing netlink socket (%s)\n", audit_default ? "enabled" : "disabled"); audit_sock = netlink_kernel_create(&init_net, NETLINK_AUDIT, 0, audit_receive, NULL, THIS_MODULE); if (!audit_sock) audit_panic("cannot initialize netlink socket"); else audit_sock->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; skb_queue_head_init(&audit_skb_queue); skb_queue_head_init(&audit_skb_hold_queue); audit_initialized = AUDIT_INITIALIZED; audit_enabled = audit_default; audit_ever_enabled |= !!audit_default; audit_log(NULL, GFP_KERNEL, AUDIT_KERNEL, "initialized"); for (i = 0; i < AUDIT_INODE_BUCKETS; i++) INIT_LIST_HEAD(&audit_inode_hash[i]); return 0; } __initcall(audit_init); /* Process kernel command-line parameter at boot time. audit=0 or audit=1. 
*/ static int __init audit_enable(char *str) { audit_default = !!simple_strtol(str, NULL, 0); if (!audit_default) audit_initialized = AUDIT_DISABLED; printk(KERN_INFO "audit: %s", audit_default ? "enabled" : "disabled"); if (audit_initialized == AUDIT_INITIALIZED) { audit_enabled = audit_default; audit_ever_enabled |= !!audit_default; } else if (audit_initialized == AUDIT_UNINITIALIZED) { printk(" (after initialization)"); } else { printk(" (until reboot)"); } printk("\n"); return 1; } __setup("audit=", audit_enable); static void audit_buffer_free(struct audit_buffer *ab) { unsigned long flags; if (!ab) return; if (ab->skb) kfree_skb(ab->skb); spin_lock_irqsave(&audit_freelist_lock, flags); if (audit_freelist_count > AUDIT_MAXFREE) kfree(ab); else { audit_freelist_count++; list_add(&ab->list, &audit_freelist); } spin_unlock_irqrestore(&audit_freelist_lock, flags); } static struct audit_buffer * audit_buffer_alloc(struct audit_context *ctx, gfp_t gfp_mask, int type) { unsigned long flags; struct audit_buffer *ab = NULL; struct nlmsghdr *nlh; spin_lock_irqsave(&audit_freelist_lock, flags); if (!list_empty(&audit_freelist)) { ab = list_entry(audit_freelist.next, struct audit_buffer, list); list_del(&ab->list); --audit_freelist_count; } spin_unlock_irqrestore(&audit_freelist_lock, flags); if (!ab) { ab = kmalloc(sizeof(*ab), gfp_mask); if (!ab) goto err; } ab->ctx = ctx; ab->gfp_mask = gfp_mask; ab->skb = nlmsg_new(AUDIT_BUFSIZ, gfp_mask); if (!ab->skb) goto nlmsg_failure; nlh = NLMSG_NEW(ab->skb, 0, 0, type, 0, 0); return ab; nlmsg_failure: /* Used by NLMSG_NEW */ kfree_skb(ab->skb); ab->skb = NULL; err: audit_buffer_free(ab); return NULL; } /** * audit_serial - compute a serial number for the audit record * * Compute a serial number for the audit record. Audit records are * written to user-space as soon as they are generated, so a complete * audit record may be written in several pieces. 
The timestamp of the * record and this serial number are used by the user-space tools to * determine which pieces belong to the same audit record. The * (timestamp,serial) tuple is unique for each syscall and is live from * syscall entry to syscall exit. * * NOTE: Another possibility is to store the formatted records off the * audit context (for those records that have a context), and emit them * all at syscall exit. However, this could delay the reporting of * significant errors until syscall exit (or never, if the system * halts). */ unsigned int audit_serial(void) { static DEFINE_SPINLOCK(serial_lock); static unsigned int serial = 0; unsigned long flags; unsigned int ret; spin_lock_irqsave(&serial_lock, flags); do { ret = ++serial; } while (unlikely(!ret)); spin_unlock_irqrestore(&serial_lock, flags); return ret; } static inline void audit_get_stamp(struct audit_context *ctx, struct timespec *t, unsigned int *serial) { if (!ctx || !auditsc_get_stamp(ctx, t, serial)) { *t = CURRENT_TIME; *serial = audit_serial(); } } /* Obtain an audit buffer. This routine does locking to obtain the * audit buffer, but then no locking is required for calls to * audit_log_*format. If the tsk is a task that is currently in a * syscall, then the syscall is marked as auditable and an audit record * will be written at syscall exit. If there is no associated task, tsk * should be NULL. */ /** * audit_log_start - obtain an audit buffer * @ctx: audit_context (may be NULL) * @gfp_mask: type of allocation * @type: audit message type * * Returns audit_buffer pointer on success or NULL on error. * * Obtain an audit buffer. This routine does locking to obtain the * audit buffer, but then no locking is required for calls to * audit_log_*format. If the task (ctx) is a task that is currently in a * syscall, then the syscall is marked as auditable and an audit record * will be written at syscall exit. If there is no associated task, then * task context (ctx) should be NULL. 
*/ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type) { struct audit_buffer *ab = NULL; struct timespec t; unsigned int uninitialized_var(serial); int reserve; unsigned long timeout_start = jiffies; if (audit_initialized != AUDIT_INITIALIZED) return NULL; if (unlikely(audit_filter_type(type))) return NULL; if (gfp_mask & __GFP_WAIT) reserve = 0; else reserve = 5; /* Allow atomic callers to go up to five entries over the normal backlog limit */ while (audit_backlog_limit && skb_queue_len(&audit_skb_queue) > audit_backlog_limit + reserve) { if (gfp_mask & __GFP_WAIT && audit_backlog_wait_time && time_before(jiffies, timeout_start + audit_backlog_wait_time)) { /* Wait for auditd to drain the queue a little */ DECLARE_WAITQUEUE(wait, current); set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&audit_backlog_wait, &wait); if (audit_backlog_limit && skb_queue_len(&audit_skb_queue) > audit_backlog_limit) schedule_timeout(timeout_start + audit_backlog_wait_time - jiffies); __set_current_state(TASK_RUNNING); remove_wait_queue(&audit_backlog_wait, &wait); continue; } if (audit_rate_check() && printk_ratelimit()) printk(KERN_WARNING "audit: audit_backlog=%d > " "audit_backlog_limit=%d\n", skb_queue_len(&audit_skb_queue), audit_backlog_limit); audit_log_lost("backlog limit exceeded"); audit_backlog_wait_time = audit_backlog_wait_overflow; wake_up(&audit_backlog_wait); return NULL; } ab = audit_buffer_alloc(ctx, gfp_mask, type); if (!ab) { audit_log_lost("out of memory in audit_log_start"); return NULL; } audit_get_stamp(ab->ctx, &t, &serial); audit_log_format(ab, "audit(%lu.%03lu:%u): ", t.tv_sec, t.tv_nsec/1000000, serial); return ab; } /** * audit_expand - expand skb in the audit buffer * @ab: audit_buffer * @extra: space to add at tail of the skb * * Returns 0 (no space) on failed expansion, or available space if * successful. 
*/ static inline int audit_expand(struct audit_buffer *ab, int extra) { struct sk_buff *skb = ab->skb; int oldtail = skb_tailroom(skb); int ret = pskb_expand_head(skb, 0, extra, ab->gfp_mask); int newtail = skb_tailroom(skb); if (ret < 0) { audit_log_lost("out of memory in audit_expand"); return 0; } skb->truesize += newtail - oldtail; return newtail; } /* * Format an audit message into the audit buffer. If there isn't enough * room in the audit buffer, more room will be allocated and vsnprint * will be called a second time. Currently, we assume that a printk * can't format message larger than 1024 bytes, so we don't either. */ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt, va_list args) { int len, avail; struct sk_buff *skb; va_list args2; if (!ab) return; BUG_ON(!ab->skb); skb = ab->skb; avail = skb_tailroom(skb); if (avail == 0) { avail = audit_expand(ab, AUDIT_BUFSIZ); if (!avail) goto out; } va_copy(args2, args); len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args); if (len >= avail) { /* The printk buffer is 1024 bytes long, so if we get * here and AUDIT_BUFSIZ is at least 1024, then we can * log everything that printk could have logged. */ avail = audit_expand(ab, max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail)); if (!avail) goto out; len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2); } va_end(args2); if (len > 0) skb_put(skb, len); out: return; } /** * audit_log_format - format a message into the audit buffer. * @ab: audit_buffer * @fmt: format string * @...: optional parameters matching @fmt string * * All the work is done in audit_log_vformat. */ void audit_log_format(struct audit_buffer *ab, const char *fmt, ...) 
{ va_list args; if (!ab) return; va_start(args, fmt); audit_log_vformat(ab, fmt, args); va_end(args); } /** * audit_log_hex - convert a buffer to hex and append it to the audit skb * @ab: the audit_buffer * @buf: buffer to convert to hex * @len: length of @buf to be converted * * No return value; failure to expand is silently ignored. * * This function will take the passed buf and convert it into a string of * ascii hex digits. The new string is placed onto the skb. */ void audit_log_n_hex(struct audit_buffer *ab, const unsigned char *buf, size_t len) { int i, avail, new_len; unsigned char *ptr; struct sk_buff *skb; static const unsigned char *hex = "0123456789ABCDEF"; if (!ab) return; BUG_ON(!ab->skb); skb = ab->skb; avail = skb_tailroom(skb); new_len = len<<1; if (new_len >= avail) { /* Round the buffer request up to the next multiple */ new_len = AUDIT_BUFSIZ*(((new_len-avail)/AUDIT_BUFSIZ) + 1); avail = audit_expand(ab, new_len); if (!avail) return; } ptr = skb_tail_pointer(skb); for (i=0; i<len; i++) { *ptr++ = hex[(buf[i] & 0xF0)>>4]; /* Upper nibble */ *ptr++ = hex[buf[i] & 0x0F]; /* Lower nibble */ } *ptr = 0; skb_put(skb, len << 1); /* new string is twice the old string */ } /* * Format a string of no more than slen characters into the audit buffer, * enclosed in quote marks. 
*/ void audit_log_n_string(struct audit_buffer *ab, const char *string, size_t slen) { int avail, new_len; unsigned char *ptr; struct sk_buff *skb; if (!ab) return; BUG_ON(!ab->skb); skb = ab->skb; avail = skb_tailroom(skb); new_len = slen + 3; /* enclosing quotes + null terminator */ if (new_len > avail) { avail = audit_expand(ab, new_len); if (!avail) return; } ptr = skb_tail_pointer(skb); *ptr++ = '"'; memcpy(ptr, string, slen); ptr += slen; *ptr++ = '"'; *ptr = 0; skb_put(skb, slen + 2); /* don't include null terminator */ } /** * audit_string_contains_control - does a string need to be logged in hex * @string: string to be checked * @len: max length of the string to check */ int audit_string_contains_control(const char *string, size_t len) { const unsigned char *p; for (p = string; p < (const unsigned char *)string + len; p++) { if (*p == '"' || *p < 0x21 || *p > 0x7e) return 1; } return 0; } /** * audit_log_n_untrustedstring - log a string that may contain random characters * @ab: audit_buffer * @len: length of string (not including trailing null) * @string: string to be logged * * This code will escape a string that is passed to it if the string * contains a control character, unprintable character, double quote mark, * or a space. Unescaped strings will start and end with a double quote mark. * Strings that are escaped are printed in hex (2 digits per char). * * The caller specifies the number of characters in the string to log, which may * or may not be the entire string. */ void audit_log_n_untrustedstring(struct audit_buffer *ab, const char *string, size_t len) { if (audit_string_contains_control(string, len)) audit_log_n_hex(ab, string, len); else audit_log_n_string(ab, string, len); } /** * audit_log_untrustedstring - log a string that may contain random characters * @ab: audit_buffer * @string: string to be logged * * Same as audit_log_n_untrustedstring(), except that strlen is used to * determine string length. 
*/ void audit_log_untrustedstring(struct audit_buffer *ab, const char *string) { audit_log_n_untrustedstring(ab, string, strlen(string)); } /* This is a helper-function to print the escaped d_path */ void audit_log_d_path(struct audit_buffer *ab, const char *prefix, struct path *path) { char *p, *pathname; if (prefix) audit_log_format(ab, " %s", prefix); /* We will allow 11 spaces for ' (deleted)' to be appended */ pathname = kmalloc(PATH_MAX+11, ab->gfp_mask); if (!pathname) { audit_log_string(ab, "<no_memory>"); return; } p = d_path(path, pathname, PATH_MAX+11); if (IS_ERR(p)) { /* Should never happen since we send PATH_MAX */ /* FIXME: can we save some information here? */ audit_log_string(ab, "<too_long>"); } else audit_log_untrustedstring(ab, p); kfree(pathname); } void audit_log_key(struct audit_buffer *ab, char *key) { audit_log_format(ab, " key="); if (key) audit_log_untrustedstring(ab, key); else audit_log_format(ab, "(null)"); } /** * audit_log_end - end one audit record * @ab: the audit_buffer * * The netlink_* functions cannot be called inside an irq context, so * the audit buffer is placed on a queue and a tasklet is scheduled to * remove them from the queue outside the irq context. May be called in * any context. */ void audit_log_end(struct audit_buffer *ab) { if (!ab) return; if (!audit_rate_check()) { audit_log_lost("rate limit exceeded"); } else { struct nlmsghdr *nlh = nlmsg_hdr(ab->skb); nlh->nlmsg_len = ab->skb->len - NLMSG_SPACE(0); if (audit_pid) { skb_queue_tail(&audit_skb_queue, ab->skb); wake_up_interruptible(&kauditd_wait); } else { audit_printk_skb(ab->skb); } ab->skb = NULL; } audit_buffer_free(ab); } /** * audit_log - Log an audit record * @ctx: audit context * @gfp_mask: type of allocation * @type: audit message type * @fmt: format string to use * @...: variable parameters matching the format string * * This is a convenience function that calls audit_log_start, * audit_log_vformat, and audit_log_end. It may be called * in any context. 
*/ void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, const char *fmt, ...) { struct audit_buffer *ab; va_list args; ab = audit_log_start(ctx, gfp_mask, type); if (ab) { va_start(args, fmt); audit_log_vformat(ab, fmt, args); va_end(args); audit_log_end(ab); } } EXPORT_SYMBOL(audit_log_start); EXPORT_SYMBOL(audit_log_end); EXPORT_SYMBOL(audit_log_format); EXPORT_SYMBOL(audit_log);
gpl-2.0
gurifuxi/gb_kernel_sc05d
net/ipv4/netfilter.c
759
6660
/* IPv4 specific functions of netfilter core */ #include <linux/kernel.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv4.h> #include <linux/ip.h> #include <linux/skbuff.h> #include <linux/gfp.h> #include <net/route.h> #include <net/xfrm.h> #include <net/ip.h> #include <net/netfilter/nf_queue.h> /* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) { struct net *net = dev_net(skb_dst(skb)->dev); const struct iphdr *iph = ip_hdr(skb); struct rtable *rt; struct flowi fl = {}; unsigned long orefdst; unsigned int hh_len; unsigned int type; type = inet_addr_type(net, iph->saddr); if (skb->sk && inet_sk(skb->sk)->transparent) type = RTN_LOCAL; if (addr_type == RTN_UNSPEC) addr_type = type; /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause * packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook. */ if (addr_type == RTN_LOCAL) { fl.nl_u.ip4_u.daddr = iph->daddr; if (type == RTN_LOCAL) fl.nl_u.ip4_u.saddr = iph->saddr; fl.nl_u.ip4_u.tos = RT_TOS(iph->tos); fl.oif = skb->sk ? skb->sk->sk_bound_dev_if : 0; fl.mark = skb->mark; fl.flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0; if (ip_route_output_key(net, &rt, &fl) != 0) return -1; /* Drop old route. */ skb_dst_drop(skb); skb_dst_set(skb, &rt->u.dst); } else { /* non-local src, find valid iif to satisfy * rp-filter when calling ip_route_input. 
*/ fl.nl_u.ip4_u.daddr = iph->saddr; if (ip_route_output_key(net, &rt, &fl) != 0) return -1; orefdst = skb->_skb_refdst; if (ip_route_input(skb, iph->daddr, iph->saddr, RT_TOS(iph->tos), rt->u.dst.dev) != 0) { dst_release(&rt->u.dst); return -1; } dst_release(&rt->u.dst); refdst_drop(orefdst); } if (skb_dst(skb)->error) return -1; #ifdef CONFIG_XFRM if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) && xfrm_decode_session(skb, &fl, AF_INET) == 0) { struct dst_entry *dst = skb_dst(skb); skb_dst_set(skb, NULL); if (xfrm_lookup(net, &dst, &fl, skb->sk, 0)) return -1; skb_dst_set(skb, dst); } #endif /* Change in oif may mean change in hh_len. */ hh_len = skb_dst(skb)->dev->hard_header_len; if (skb_headroom(skb) < hh_len && pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC)) return -1; return 0; } EXPORT_SYMBOL(ip_route_me_harder); #ifdef CONFIG_XFRM int ip_xfrm_me_harder(struct sk_buff *skb) { struct flowi fl; unsigned int hh_len; struct dst_entry *dst; if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) return 0; if (xfrm_decode_session(skb, &fl, AF_INET) < 0) return -1; dst = skb_dst(skb); if (dst->xfrm) dst = ((struct xfrm_dst *)dst)->route; dst_hold(dst); if (xfrm_lookup(dev_net(dst->dev), &dst, &fl, skb->sk, 0) < 0) return -1; skb_dst_drop(skb); skb_dst_set(skb, dst); /* Change in oif may mean change in hh_len. */ hh_len = skb_dst(skb)->dev->hard_header_len; if (skb_headroom(skb) < hh_len && pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC)) return -1; return 0; } EXPORT_SYMBOL(ip_xfrm_me_harder); #endif void (*ip_nat_decode_session)(struct sk_buff *, struct flowi *); EXPORT_SYMBOL(ip_nat_decode_session); /* * Extra routing may needed on local out, as the QUEUE target never * returns control to the table. 
*/ struct ip_rt_info { __be32 daddr; __be32 saddr; u_int8_t tos; u_int32_t mark; }; static void nf_ip_saveroute(const struct sk_buff *skb, struct nf_queue_entry *entry) { struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry); if (entry->hook == NF_INET_LOCAL_OUT) { const struct iphdr *iph = ip_hdr(skb); rt_info->tos = iph->tos; rt_info->daddr = iph->daddr; rt_info->saddr = iph->saddr; rt_info->mark = skb->mark; } } static int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry) { const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry); if (entry->hook == NF_INET_LOCAL_OUT) { const struct iphdr *iph = ip_hdr(skb); if (!(iph->tos == rt_info->tos && skb->mark == rt_info->mark && iph->daddr == rt_info->daddr && iph->saddr == rt_info->saddr)) return ip_route_me_harder(skb, RTN_UNSPEC); } return 0; } __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol) { const struct iphdr *iph = ip_hdr(skb); __sum16 csum = 0; switch (skb->ip_summed) { case CHECKSUM_COMPLETE: if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN) break; if ((protocol == 0 && !csum_fold(skb->csum)) || !csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len - dataoff, protocol, skb->csum)) { skb->ip_summed = CHECKSUM_UNNECESSARY; break; } /* fall through */ case CHECKSUM_NONE: if (protocol == 0) skb->csum = 0; else skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, skb->len - dataoff, protocol, 0); csum = __skb_checksum_complete(skb); } return csum; } EXPORT_SYMBOL(nf_ip_checksum); static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, unsigned int len, u_int8_t protocol) { const struct iphdr *iph = ip_hdr(skb); __sum16 csum = 0; switch (skb->ip_summed) { case CHECKSUM_COMPLETE: if (len == skb->len - dataoff) return nf_ip_checksum(skb, hook, dataoff, protocol); /* fall through */ case CHECKSUM_NONE: skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, protocol, skb->len - 
dataoff, 0); skb->ip_summed = CHECKSUM_NONE; csum = __skb_checksum_complete_head(skb, dataoff + len); if (!csum) skb->ip_summed = CHECKSUM_UNNECESSARY; } return csum; } static int nf_ip_route(struct dst_entry **dst, struct flowi *fl) { return ip_route_output_key(&init_net, (struct rtable **)dst, fl); } static const struct nf_afinfo nf_ip_afinfo = { .family = AF_INET, .checksum = nf_ip_checksum, .checksum_partial = nf_ip_checksum_partial, .route = nf_ip_route, .saveroute = nf_ip_saveroute, .reroute = nf_ip_reroute, .route_key_size = sizeof(struct ip_rt_info), }; static int ipv4_netfilter_init(void) { return nf_register_afinfo(&nf_ip_afinfo); } static void ipv4_netfilter_fini(void) { nf_unregister_afinfo(&nf_ip_afinfo); } module_init(ipv4_netfilter_init); module_exit(ipv4_netfilter_fini); #ifdef CONFIG_SYSCTL struct ctl_path nf_net_ipv4_netfilter_sysctl_path[] = { { .procname = "net", }, { .procname = "ipv4", }, { .procname = "netfilter", }, { } }; EXPORT_SYMBOL_GPL(nf_net_ipv4_netfilter_sysctl_path); #endif /* CONFIG_SYSCTL */
gpl-2.0
techomancer/kernel-galaxytab-gb
drivers/pci/pcie/aer/aerdrv_core.c
759
21210
/* * drivers/pci/pcie/aer/aerdrv_core.c * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * This file implements the core part of PCI-Express AER. When an pci-express * error is delivered, an error message will be collected and printed to * console, then, an error recovery procedure will be executed by following * the pci error recovery rules. * * Copyright (C) 2006 Intel Corp. * Tom Long Nguyen (tom.l.nguyen@intel.com) * Zhang Yanmin (yanmin.zhang@intel.com) * */ #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/pm.h> #include <linux/suspend.h> #include <linux/delay.h> #include <linux/slab.h> #include "aerdrv.h" static int forceload; static int nosourceid; module_param(forceload, bool, 0); module_param(nosourceid, bool, 0); int pci_enable_pcie_error_reporting(struct pci_dev *dev) { u16 reg16 = 0; int pos; if (pcie_aer_get_firmware_first(dev)) return -EIO; pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); if (!pos) return -EIO; pos = pci_pcie_cap(dev); if (!pos) return -EIO; pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16); reg16 |= (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE); pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16); return 0; } EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting); int pci_disable_pcie_error_reporting(struct pci_dev *dev) { u16 reg16 = 0; int pos; if (pcie_aer_get_firmware_first(dev)) return -EIO; pos = pci_pcie_cap(dev); if (!pos) return -EIO; pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16); reg16 &= ~(PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE); pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16); return 0; } EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting); int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) { int pos; 
u32 status; pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); if (!pos) return -EIO; pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); if (status) pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); return 0; } EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status); /** * add_error_device - list device to be handled * @e_info: pointer to error info * @dev: pointer to pci_dev to be added */ static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev) { if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) { e_info->dev[e_info->error_dev_num] = dev; e_info->error_dev_num++; return 0; } return -ENOSPC; } #define PCI_BUS(x) (((x) >> 8) & 0xff) /** * is_error_source - check whether the device is source of reported error * @dev: pointer to pci_dev to be checked * @e_info: pointer to reported error info */ static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info) { int pos; u32 status, mask; u16 reg16; /* * When bus id is equal to 0, it might be a bad id * reported by root port. */ if (!nosourceid && (PCI_BUS(e_info->id) != 0)) { /* Device ID match? */ if (e_info->id == ((dev->bus->number << 8) | dev->devfn)) return true; /* Continue id comparing if there is no multiple error */ if (!e_info->multi_error_valid) return false; } /* * When either * 1) nosourceid==y; * 2) bus id is equal to 0. Some ports might lose the bus * id of error source id; * 3) There are multiple errors and prior id comparing fails; * We check AER status registers to find possible reporter. 
*/ if (atomic_read(&dev->enable_cnt) == 0) return false; pos = pci_pcie_cap(dev); if (!pos) return false; /* Check if AER is enabled */ pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16); if (!(reg16 & ( PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE))) return false; pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); if (!pos) return false; /* Check if error is recorded */ if (e_info->severity == AER_CORRECTABLE) { pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status); pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask); } else { pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask); } if (status & ~mask) return true; return false; } static int find_device_iter(struct pci_dev *dev, void *data) { struct aer_err_info *e_info = (struct aer_err_info *)data; if (is_error_source(dev, e_info)) { /* List this device */ if (add_error_device(e_info, dev)) { /* We cannot handle more... Stop iteration */ /* TODO: Should print error message here? */ return 1; } /* If there is only a single error, stop iteration */ if (!e_info->multi_error_valid) return 1; } return 0; } /** * find_source_device - search through device hierarchy for source device * @parent: pointer to Root Port pci_dev data structure * @e_info: including detailed error information such like id * * Return true if found. * * Invoked by DPC when error is detected at the Root Port. * Caller of this function must set id, severity, and multi_error_valid of * struct aer_err_info pointed by @e_info properly. This function must fill * e_info->error_dev_num and e_info->dev[], based on the given information. */ static bool find_source_device(struct pci_dev *parent, struct aer_err_info *e_info) { struct pci_dev *dev = parent; int result; /* Must reset in this function */ e_info->error_dev_num = 0; /* Is Root Port an agent that sends error message? 
*/ result = find_device_iter(dev, e_info); if (result) return true; pci_walk_bus(parent->subordinate, find_device_iter, e_info); if (!e_info->error_dev_num) { dev_printk(KERN_DEBUG, &parent->dev, "can't find device of ID%04x\n", e_info->id); return false; } return true; } static int report_error_detected(struct pci_dev *dev, void *data) { pci_ers_result_t vote; struct pci_error_handlers *err_handler; struct aer_broadcast_data *result_data; result_data = (struct aer_broadcast_data *) data; dev->error_state = result_data->state; if (!dev->driver || !dev->driver->err_handler || !dev->driver->err_handler->error_detected) { if (result_data->state == pci_channel_io_frozen && !(dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)) { /* * In case of fatal recovery, if one of down- * stream device has no driver. We might be * unable to recover because a later insmod * of a driver for this device is unaware of * its hw state. */ dev_printk(KERN_DEBUG, &dev->dev, "device has %s\n", dev->driver ? "no AER-aware driver" : "no driver"); } return 0; } err_handler = dev->driver->err_handler; vote = err_handler->error_detected(dev, result_data->state); result_data->result = merge_result(result_data->result, vote); return 0; } static int report_mmio_enabled(struct pci_dev *dev, void *data) { pci_ers_result_t vote; struct pci_error_handlers *err_handler; struct aer_broadcast_data *result_data; result_data = (struct aer_broadcast_data *) data; if (!dev->driver || !dev->driver->err_handler || !dev->driver->err_handler->mmio_enabled) return 0; err_handler = dev->driver->err_handler; vote = err_handler->mmio_enabled(dev); result_data->result = merge_result(result_data->result, vote); return 0; } static int report_slot_reset(struct pci_dev *dev, void *data) { pci_ers_result_t vote; struct pci_error_handlers *err_handler; struct aer_broadcast_data *result_data; result_data = (struct aer_broadcast_data *) data; if (!dev->driver || !dev->driver->err_handler || !dev->driver->err_handler->slot_reset) return 
0; err_handler = dev->driver->err_handler; vote = err_handler->slot_reset(dev); result_data->result = merge_result(result_data->result, vote); return 0; } static int report_resume(struct pci_dev *dev, void *data) { struct pci_error_handlers *err_handler; dev->error_state = pci_channel_io_normal; if (!dev->driver || !dev->driver->err_handler || !dev->driver->err_handler->resume) return 0; err_handler = dev->driver->err_handler; err_handler->resume(dev); return 0; } /** * broadcast_error_message - handle message broadcast to downstream drivers * @dev: pointer to from where in a hierarchy message is broadcasted down * @state: error state * @error_mesg: message to print * @cb: callback to be broadcasted * * Invoked during error recovery process. Once being invoked, the content * of error severity will be broadcasted to all downstream drivers in a * hierarchy in question. */ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev, enum pci_channel_state state, char *error_mesg, int (*cb)(struct pci_dev *, void *)) { struct aer_broadcast_data result_data; dev_printk(KERN_DEBUG, &dev->dev, "broadcast %s message\n", error_mesg); result_data.state = state; if (cb == report_error_detected) result_data.result = PCI_ERS_RESULT_CAN_RECOVER; else result_data.result = PCI_ERS_RESULT_RECOVERED; if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) { /* * If the error is reported by a bridge, we think this error * is related to the downstream link of the bridge, so we * do error recovery on all subordinates of the bridge instead * of the bridge and clear the error status of the bridge. */ if (cb == report_error_detected) dev->error_state = state; pci_walk_bus(dev->subordinate, cb, &result_data); if (cb == report_resume) { pci_cleanup_aer_uncorrect_error_status(dev); dev->error_state = pci_channel_io_normal; } } else { /* * If the error is reported by an end point, we think this * error is related to the upstream link of the end point. 
*/ pci_walk_bus(dev->bus, cb, &result_data); } return result_data.result; } /** * aer_do_secondary_bus_reset - perform secondary bus reset * @dev: pointer to bridge's pci_dev data structure * * Invoked when performing link reset at Root Port or Downstream Port. */ void aer_do_secondary_bus_reset(struct pci_dev *dev) { u16 p2p_ctrl; /* Assert Secondary Bus Reset */ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl); p2p_ctrl |= PCI_BRIDGE_CTL_BUS_RESET; pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl); /* * we should send hot reset message for 2ms to allow it time to * propagate to all downstream ports */ msleep(2); /* De-assert Secondary Bus Reset */ p2p_ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl); /* * System software must wait for at least 100ms from the end * of a reset of one or more device before it is permitted * to issue Configuration Requests to those devices. */ msleep(200); } /** * default_downstream_reset_link - default reset function for Downstream Port * @dev: pointer to downstream port's pci_dev data structure * * Invoked when performing link reset at Downstream Port w/ no aer driver. 
*/ static pci_ers_result_t default_downstream_reset_link(struct pci_dev *dev) { aer_do_secondary_bus_reset(dev); dev_printk(KERN_DEBUG, &dev->dev, "Downstream Port link has been reset\n"); return PCI_ERS_RESULT_RECOVERED; } static int find_aer_service_iter(struct device *device, void *data) { struct pcie_port_service_driver *service_driver, **drv; drv = (struct pcie_port_service_driver **) data; if (device->bus == &pcie_port_bus_type && device->driver) { service_driver = to_service_driver(device->driver); if (service_driver->service == PCIE_PORT_SERVICE_AER) { *drv = service_driver; return 1; } } return 0; } static struct pcie_port_service_driver *find_aer_service(struct pci_dev *dev) { struct pcie_port_service_driver *drv = NULL; device_for_each_child(&dev->dev, &drv, find_aer_service_iter); return drv; } static pci_ers_result_t reset_link(struct pcie_device *aerdev, struct pci_dev *dev) { struct pci_dev *udev; pci_ers_result_t status; struct pcie_port_service_driver *driver; if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) { /* Reset this port for all subordinates */ udev = dev; } else { /* Reset the upstream component (likely downstream port) */ udev = dev->bus->self; } /* Use the aer driver of the component firstly */ driver = find_aer_service(udev); if (driver && driver->reset_link) { status = driver->reset_link(udev); } else if (udev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) { status = default_downstream_reset_link(udev); } else { dev_printk(KERN_DEBUG, &dev->dev, "no link-reset support at upstream device %s\n", pci_name(udev)); return PCI_ERS_RESULT_DISCONNECT; } if (status != PCI_ERS_RESULT_RECOVERED) { dev_printk(KERN_DEBUG, &dev->dev, "link reset at upstream device %s failed\n", pci_name(udev)); return PCI_ERS_RESULT_DISCONNECT; } return status; } /** * do_recovery - handle nonfatal/fatal error recovery process * @aerdev: pointer to a pcie_device data structure of root port * @dev: pointer to a pci_dev data structure of agent detecting an error * @severity: error 
severity type * * Invoked when an error is nonfatal/fatal. Once being invoked, broadcast * error detected message to all downstream drivers within a hierarchy in * question and return the returned code. */ static void do_recovery(struct pcie_device *aerdev, struct pci_dev *dev, int severity) { pci_ers_result_t status, result = PCI_ERS_RESULT_RECOVERED; enum pci_channel_state state; if (severity == AER_FATAL) state = pci_channel_io_frozen; else state = pci_channel_io_normal; status = broadcast_error_message(dev, state, "error_detected", report_error_detected); if (severity == AER_FATAL) { result = reset_link(aerdev, dev); if (result != PCI_ERS_RESULT_RECOVERED) goto failed; } if (status == PCI_ERS_RESULT_CAN_RECOVER) status = broadcast_error_message(dev, state, "mmio_enabled", report_mmio_enabled); if (status == PCI_ERS_RESULT_NEED_RESET) { /* * TODO: Should call platform-specific * functions to reset slot before calling * drivers' slot_reset callbacks? */ status = broadcast_error_message(dev, state, "slot_reset", report_slot_reset); } if (status != PCI_ERS_RESULT_RECOVERED) goto failed; broadcast_error_message(dev, state, "resume", report_resume); dev_printk(KERN_DEBUG, &dev->dev, "AER driver successfully recovered\n"); return; failed: /* TODO: Should kernel panic here? */ dev_printk(KERN_DEBUG, &dev->dev, "AER driver didn't recover\n"); } /** * handle_error_source - handle logging error into an event log * @aerdev: pointer to pcie_device data structure of the root port * @dev: pointer to pci_dev data structure of error source device * @info: comprehensive error information * * Invoked when an error being detected by Root Port. */ static void handle_error_source(struct pcie_device *aerdev, struct pci_dev *dev, struct aer_err_info *info) { int pos; if (info->severity == AER_CORRECTABLE) { /* * Correctable error does not need software intevention. * No need to go through error recovery process. 
*/ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); if (pos) pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, info->status); } else do_recovery(aerdev, dev, info->severity); } /** * get_device_error_info - read error status from dev and store it to info * @dev: pointer to the device expected to have a error record * @info: pointer to structure to store the error record * * Return 1 on success, 0 on error. * * Note that @info is reused among all error devices. Clear fields properly. */ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info) { int pos, temp; /* Must reset in this function */ info->status = 0; info->tlp_header_valid = 0; pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); /* The device might not support AER */ if (!pos) return 1; if (info->severity == AER_CORRECTABLE) { pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &info->status); pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &info->mask); if (!(info->status & ~info->mask)) return 0; } else if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE || info->severity == AER_NONFATAL) { /* Link is still healthy for IO reads */ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &info->status); pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &info->mask); if (!(info->status & ~info->mask)) return 0; /* Get First Error Pointer */ pci_read_config_dword(dev, pos + PCI_ERR_CAP, &temp); info->first_error = PCI_ERR_CAP_FEP(temp); if (info->status & AER_LOG_TLP_MASKS) { info->tlp_header_valid = 1; pci_read_config_dword(dev, pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0); pci_read_config_dword(dev, pos + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1); pci_read_config_dword(dev, pos + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2); pci_read_config_dword(dev, pos + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3); } } return 1; } static inline void aer_process_err_devices(struct pcie_device *p_device, struct aer_err_info *e_info) { int i; /* Report all before handle them, not to lost records by 
reset etc. */ for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) { if (get_device_error_info(e_info->dev[i], e_info)) aer_print_error(e_info->dev[i], e_info); } for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) { if (get_device_error_info(e_info->dev[i], e_info)) handle_error_source(p_device, e_info->dev[i], e_info); } } /** * aer_isr_one_error - consume an error detected by root port * @p_device: pointer to error root port service device * @e_src: pointer to an error source */ static void aer_isr_one_error(struct pcie_device *p_device, struct aer_err_source *e_src) { struct aer_err_info *e_info; /* struct aer_err_info might be big, so we allocate it with slab */ e_info = kmalloc(sizeof(struct aer_err_info), GFP_KERNEL); if (!e_info) { dev_printk(KERN_DEBUG, &p_device->port->dev, "Can't allocate mem when processing AER errors\n"); return; } /* * There is a possibility that both correctable error and * uncorrectable error being logged. Report correctable error first. 
*/ if (e_src->status & PCI_ERR_ROOT_COR_RCV) { e_info->id = ERR_COR_ID(e_src->id); e_info->severity = AER_CORRECTABLE; if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV) e_info->multi_error_valid = 1; else e_info->multi_error_valid = 0; aer_print_port_info(p_device->port, e_info); if (find_source_device(p_device->port, e_info)) aer_process_err_devices(p_device, e_info); } if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) { e_info->id = ERR_UNCOR_ID(e_src->id); if (e_src->status & PCI_ERR_ROOT_FATAL_RCV) e_info->severity = AER_FATAL; else e_info->severity = AER_NONFATAL; if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV) e_info->multi_error_valid = 1; else e_info->multi_error_valid = 0; aer_print_port_info(p_device->port, e_info); if (find_source_device(p_device->port, e_info)) aer_process_err_devices(p_device, e_info); } kfree(e_info); } /** * get_e_source - retrieve an error source * @rpc: pointer to the root port which holds an error * @e_src: pointer to store retrieved error source * * Return 1 if an error source is retrieved, otherwise 0. * * Invoked by DPC handler to consume an error. 
*/ static int get_e_source(struct aer_rpc *rpc, struct aer_err_source *e_src) { unsigned long flags; int ret = 0; /* Lock access to Root error producer/consumer index */ spin_lock_irqsave(&rpc->e_lock, flags); if (rpc->prod_idx != rpc->cons_idx) { *e_src = rpc->e_sources[rpc->cons_idx]; rpc->cons_idx++; if (rpc->cons_idx == AER_ERROR_SOURCES_MAX) rpc->cons_idx = 0; ret = 1; } spin_unlock_irqrestore(&rpc->e_lock, flags); return ret; } /** * aer_isr - consume errors detected by root port * @work: definition of this work item * * Invoked, as DPC, when root port records new detected error */ void aer_isr(struct work_struct *work) { struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler); struct pcie_device *p_device = rpc->rpd; struct aer_err_source e_src; mutex_lock(&rpc->rpc_mutex); while (get_e_source(rpc, &e_src)) aer_isr_one_error(p_device, &e_src); mutex_unlock(&rpc->rpc_mutex); wake_up(&rpc->wait_release); } /** * aer_init - provide AER initialization * @dev: pointer to AER pcie device * * Invoked when AER service driver is loaded. */ int aer_init(struct pcie_device *dev) { if (pcie_aer_get_firmware_first(dev->port)) { dev_printk(KERN_DEBUG, &dev->device, "PCIe errors handled by platform firmware.\n"); goto out; } if (aer_osc_setup(dev)) goto out; return 0; out: if (forceload) { dev_printk(KERN_DEBUG, &dev->device, "aerdrv forceload requested.\n"); pcie_aer_force_firmware_first(dev->port, 0); return 0; } return -ENXIO; }
gpl-2.0
jstotero/Cucciolone-Rewrite
drivers/platform/x86/intel_scu_ipc.c
759
22583
/* * intel_scu_ipc.c: Driver for the Intel SCU IPC mechanism * * (C) Copyright 2008-2010 Intel Corporation * Author: Sreedhara DS (sreedhara.ds@intel.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 * of the License. * * SCU runing in ARC processor communicates with other entity running in IA * core through IPC mechanism which in turn messaging between IA core ad SCU. * SCU has two IPC mechanism IPC-1 and IPC-2. IPC-1 is used between IA32 and * SCU where IPC-2 is used between P-Unit and SCU. This driver delas with * IPC-1 Driver provides an API for power control unit registers (e.g. MSIC) * along with other APIs. */ #include <linux/delay.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/sysdev.h> #include <linux/pm.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <asm/setup.h> #include <asm/intel_scu_ipc.h> /* IPC defines the following message types */ #define IPCMSG_WATCHDOG_TIMER 0xF8 /* Set Kernel Watchdog Threshold */ #define IPCMSG_BATTERY 0xEF /* Coulomb Counter Accumulator */ #define IPCMSG_FW_UPDATE 0xFE /* Firmware update */ #define IPCMSG_PCNTRL 0xFF /* Power controller unit read/write */ #define IPCMSG_FW_REVISION 0xF4 /* Get firmware revision */ /* Command id associated with message IPCMSG_PCNTRL */ #define IPC_CMD_PCNTRL_W 0 /* Register write */ #define IPC_CMD_PCNTRL_R 1 /* Register read */ #define IPC_CMD_PCNTRL_M 2 /* Register read-modify-write */ /* Miscelaneous Command ids */ #define IPC_CMD_INDIRECT_RD 2 /* 32bit indirect read */ #define IPC_CMD_INDIRECT_WR 5 /* 32bit indirect write */ /* * IPC register summary * * IPC register blocks are memory mapped at fixed address of 0xFF11C000 * To read or write information to the SCU, driver writes to IPC-1 memory * mapped registers (base address 0xFF11C000). The following is the IPC * mechanism * * 1. 
IA core cDMI interface claims this transaction and converts it to a * Transaction Layer Packet (TLP) message which is sent across the cDMI. * * 2. South Complex cDMI block receives this message and writes it to * the IPC-1 register block, causing an interrupt to the SCU * * 3. SCU firmware decodes this interrupt and IPC message and the appropriate * message handler is called within firmware. */ #define IPC_BASE_ADDR 0xFF11C000 /* IPC1 base register address */ #define IPC_MAX_ADDR 0x100 /* Maximum IPC regisers */ #define IPC_WWBUF_SIZE 16 /* IPC Write buffer Size */ #define IPC_RWBUF_SIZE 16 /* IPC Read buffer Size */ #define IPC_I2C_BASE 0xFF12B000 /* I2C control register base address */ #define IPC_I2C_MAX_ADDR 0x10 /* Maximum I2C regisers */ static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id); static void ipc_remove(struct pci_dev *pdev); struct intel_scu_ipc_dev { struct pci_dev *pdev; void __iomem *ipc_base; void __iomem *i2c_base; }; static struct intel_scu_ipc_dev ipcdev; /* Only one for now */ static int platform = 1; module_param(platform, int, 0); MODULE_PARM_DESC(platform, "1 for moorestown platform"); /* * IPC Read Buffer (Read Only): * 16 byte buffer for receiving data from SCU, if IPC command * processing results in response data */ #define IPC_READ_BUFFER 0x90 #define IPC_I2C_CNTRL_ADDR 0 #define I2C_DATA_ADDR 0x04 static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */ /* * Command Register (Write Only): * A write to this register results in an interrupt to the SCU core processor * Format: * |rfu2(8) | size(8) | command id(4) | rfu1(3) | ioc(1) | command(8)| */ static inline void ipc_command(u32 cmd) /* Send ipc command */ { writel(cmd, ipcdev.ipc_base); } /* * IPC Write Buffer (Write Only): * 16-byte buffer for sending data associated with IPC command to * SCU. 
Size of the data is specified in the IPC_COMMAND_REG register */ static inline void ipc_data_writel(u32 data, u32 offset) /* Write ipc data */ { writel(data, ipcdev.ipc_base + 0x80 + offset); } /* * IPC destination Pointer (Write Only): * Use content as pointer for destination write */ static inline void ipc_write_dptr(u32 data) /* Write dptr data */ { writel(data, ipcdev.ipc_base + 0x0C); } /* * IPC Source Pointer (Write Only): * Use content as pointer for read location */ static inline void ipc_write_sptr(u32 data) /* Write dptr data */ { writel(data, ipcdev.ipc_base + 0x08); } /* * Status Register (Read Only): * Driver will read this register to get the ready/busy status of the IPC * block and error status of the IPC command that was just processed by SCU * Format: * |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)| */ static inline u8 ipc_read_status(void) { return __raw_readl(ipcdev.ipc_base + 0x04); } static inline u8 ipc_data_readb(u32 offset) /* Read ipc byte data */ { return readb(ipcdev.ipc_base + IPC_READ_BUFFER + offset); } static inline u8 ipc_data_readl(u32 offset) /* Read ipc u32 data */ { return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset); } static inline int busy_loop(void) /* Wait till scu status is busy */ { u32 status = 0; u32 loop_count = 0; status = ipc_read_status(); while (status & 1) { udelay(1); /* scu processing time is in few u secods */ status = ipc_read_status(); loop_count++; /* break if scu doesn't reset busy bit after huge retry */ if (loop_count > 100000) { dev_err(&ipcdev.pdev->dev, "IPC timed out"); return -ETIMEDOUT; } } return (status >> 1) & 1; } /* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id) { int nc; u32 offset = 0; u32 err = 0; u8 cbuf[IPC_WWBUF_SIZE] = { '\0' }; u32 *wbuf = (u32 *)&cbuf; mutex_lock(&ipclock); if (ipcdev.pdev == NULL) { mutex_unlock(&ipclock); return -ENODEV; } if (platform == 
1) { /* Entry is 4 bytes for read/write, 5 bytes for read modify */ for (nc = 0; nc < count; nc++) { cbuf[offset] = addr[nc]; cbuf[offset + 1] = addr[nc] >> 8; if (id != IPC_CMD_PCNTRL_R) cbuf[offset + 2] = data[nc]; if (id == IPC_CMD_PCNTRL_M) { cbuf[offset + 3] = data[nc + 1]; offset += 1; } offset += 3; } for (nc = 0, offset = 0; nc < count; nc++, offset += 4) ipc_data_writel(wbuf[nc], offset); /* Write wbuff */ } else { for (nc = 0, offset = 0; nc < count; nc++, offset += 2) ipc_data_writel(addr[nc], offset); /* Write addresses */ if (id != IPC_CMD_PCNTRL_R) { for (nc = 0; nc < count; nc++, offset++) ipc_data_writel(data[nc], offset); /* Write data */ if (id == IPC_CMD_PCNTRL_M) ipc_data_writel(data[nc + 1], offset); /* Mask value*/ } } if (id != IPC_CMD_PCNTRL_M) ipc_command((count * 3) << 16 | id << 12 | 0 << 8 | op); else ipc_command((count * 4) << 16 | id << 12 | 0 << 8 | op); err = busy_loop(); if (id == IPC_CMD_PCNTRL_R) { /* Read rbuf */ /* Workaround: values are read as 0 without memcpy_fromio */ memcpy_fromio(cbuf, ipcdev.ipc_base + IPC_READ_BUFFER, 16); if (platform == 1) { for (nc = 0, offset = 2; nc < count; nc++, offset += 3) data[nc] = ipc_data_readb(offset); } else { for (nc = 0; nc < count; nc++) data[nc] = ipc_data_readb(nc); } } mutex_unlock(&ipclock); return err; } /** * intel_scu_ipc_ioread8 - read a word via the SCU * @addr: register on SCU * @data: return pointer for read byte * * Read a single register. Returns 0 on success or an error code. All * locking between SCU accesses is handled for the caller. * * This function may sleep. */ int intel_scu_ipc_ioread8(u16 addr, u8 *data) { return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R); } EXPORT_SYMBOL(intel_scu_ipc_ioread8); /** * intel_scu_ipc_ioread16 - read a word via the SCU * @addr: register on SCU * @data: return pointer for read word * * Read a register pair. Returns 0 on success or an error code. All * locking between SCU accesses is handled for the caller. 
* * This function may sleep. */ int intel_scu_ipc_ioread16(u16 addr, u16 *data) { u16 x[2] = {addr, addr + 1 }; return pwr_reg_rdwr(x, (u8 *)data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R); } EXPORT_SYMBOL(intel_scu_ipc_ioread16); /** * intel_scu_ipc_ioread32 - read a dword via the SCU * @addr: register on SCU * @data: return pointer for read dword * * Read four registers. Returns 0 on success or an error code. All * locking between SCU accesses is handled for the caller. * * This function may sleep. */ int intel_scu_ipc_ioread32(u16 addr, u32 *data) { u16 x[4] = {addr, addr + 1, addr + 2, addr + 3}; return pwr_reg_rdwr(x, (u8 *)data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R); } EXPORT_SYMBOL(intel_scu_ipc_ioread32); /** * intel_scu_ipc_iowrite8 - write a byte via the SCU * @addr: register on SCU * @data: byte to write * * Write a single register. Returns 0 on success or an error code. All * locking between SCU accesses is handled for the caller. * * This function may sleep. */ int intel_scu_ipc_iowrite8(u16 addr, u8 data) { return pwr_reg_rdwr(&addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W); } EXPORT_SYMBOL(intel_scu_ipc_iowrite8); /** * intel_scu_ipc_iowrite16 - write a word via the SCU * @addr: register on SCU * @data: word to write * * Write two registers. Returns 0 on success or an error code. All * locking between SCU accesses is handled for the caller. * * This function may sleep. */ int intel_scu_ipc_iowrite16(u16 addr, u16 data) { u16 x[2] = {addr, addr + 1 }; return pwr_reg_rdwr(x, (u8 *)&data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W); } EXPORT_SYMBOL(intel_scu_ipc_iowrite16); /** * intel_scu_ipc_iowrite32 - write a dword via the SCU * @addr: register on SCU * @data: dword to write * * Write four registers. Returns 0 on success or an error code. All * locking between SCU accesses is handled for the caller. * * This function may sleep. 
*/ int intel_scu_ipc_iowrite32(u16 addr, u32 data) { u16 x[4] = {addr, addr + 1, addr + 2, addr + 3}; return pwr_reg_rdwr(x, (u8 *)&data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W); } EXPORT_SYMBOL(intel_scu_ipc_iowrite32); /** * intel_scu_ipc_readvv - read a set of registers * @addr: register list * @data: bytes to return * @len: length of array * * Read registers. Returns 0 on success or an error code. All * locking between SCU accesses is handled for the caller. * * The largest array length permitted by the hardware is 5 items. * * This function may sleep. */ int intel_scu_ipc_readv(u16 *addr, u8 *data, int len) { return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R); } EXPORT_SYMBOL(intel_scu_ipc_readv); /** * intel_scu_ipc_writev - write a set of registers * @addr: register list * @data: bytes to write * @len: length of array * * Write registers. Returns 0 on success or an error code. All * locking between SCU accesses is handled for the caller. * * The largest array length permitted by the hardware is 5 items. * * This function may sleep. * */ int intel_scu_ipc_writev(u16 *addr, u8 *data, int len) { return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W); } EXPORT_SYMBOL(intel_scu_ipc_writev); /** * intel_scu_ipc_update_register - r/m/w a register * @addr: register address * @bits: bits to update * @mask: mask of bits to update * * Read-modify-write power control unit register. The first data argument * must be register value and second is mask value * mask is a bitmap that indicates which bits to update. * 0 = masked. Don't modify this bit, 1 = modify this bit. * returns 0 on success or an error code. * * This function may sleep. Locking between SCU accesses is handled * for the caller. 
*/ int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask) { u8 data[2] = { bits, mask }; return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M); } EXPORT_SYMBOL(intel_scu_ipc_update_register); /** * intel_scu_ipc_register_read - 32bit indirect read * @addr: register address * @value: 32bit value return * * Performs IA 32 bit indirect read, returns 0 on success, or an * error code. * * Can be used when SCCB(System Controller Configuration Block) register * HRIM(Honor Restricted IPC Messages) is set (bit 23) * * This function may sleep. Locking for SCU accesses is handled for * the caller. */ int intel_scu_ipc_register_read(u32 addr, u32 *value) { u32 err = 0; mutex_lock(&ipclock); if (ipcdev.pdev == NULL) { mutex_unlock(&ipclock); return -ENODEV; } ipc_write_sptr(addr); ipc_command(4 << 16 | IPC_CMD_INDIRECT_RD); err = busy_loop(); *value = ipc_data_readl(0); mutex_unlock(&ipclock); return err; } EXPORT_SYMBOL(intel_scu_ipc_register_read); /** * intel_scu_ipc_register_write - 32bit indirect write * @addr: register address * @value: 32bit value to write * * Performs IA 32 bit indirect write, returns 0 on success, or an * error code. * * Can be used when SCCB(System Controller Configuration Block) register * HRIM(Honor Restricted IPC Messages) is set (bit 23) * * This function may sleep. Locking for SCU accesses is handled for * the caller. */ int intel_scu_ipc_register_write(u32 addr, u32 value) { u32 err = 0; mutex_lock(&ipclock); if (ipcdev.pdev == NULL) { mutex_unlock(&ipclock); return -ENODEV; } ipc_write_dptr(addr); ipc_data_writel(value, 0); ipc_command(4 << 16 | IPC_CMD_INDIRECT_WR); err = busy_loop(); mutex_unlock(&ipclock); return err; } EXPORT_SYMBOL(intel_scu_ipc_register_write); /** * intel_scu_ipc_simple_command - send a simple command * @cmd: command * @sub: sub type * * Issue a simple command to the SCU. 
Do not use this interface if * you must then access data as any data values may be overwritten * by another SCU access by the time this function returns. * * This function may sleep. Locking for SCU accesses is handled for * the caller. */ int intel_scu_ipc_simple_command(int cmd, int sub) { u32 err = 0; mutex_lock(&ipclock); if (ipcdev.pdev == NULL) { mutex_unlock(&ipclock); return -ENODEV; } ipc_command(sub << 12 | cmd); err = busy_loop(); mutex_unlock(&ipclock); return err; } EXPORT_SYMBOL(intel_scu_ipc_simple_command); /** * intel_scu_ipc_command - command with data * @cmd: command * @sub: sub type * @in: input data * @inlen: input length in dwords * @out: output data * @outlein: output length in dwords * * Issue a command to the SCU which involves data transfers. Do the * data copies under the lock but leave it for the caller to interpret */ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen, u32 *out, int outlen) { u32 err = 0; int i = 0; mutex_lock(&ipclock); if (ipcdev.pdev == NULL) { mutex_unlock(&ipclock); return -ENODEV; } for (i = 0; i < inlen; i++) ipc_data_writel(*in++, 4 * i); ipc_command((sub << 12) | cmd | (inlen << 18)); err = busy_loop(); for (i = 0; i < outlen; i++) *out++ = ipc_data_readl(4 * i); mutex_unlock(&ipclock); return err; } EXPORT_SYMBOL(intel_scu_ipc_command); /*I2C commands */ #define IPC_I2C_WRITE 1 /* I2C Write command */ #define IPC_I2C_READ 2 /* I2C Read command */ /** * intel_scu_ipc_i2c_cntrl - I2C read/write operations * @addr: I2C address + command bits * @data: data to read/write * * Perform an an I2C read/write operation via the SCU. All locking is * handled for the caller. This function may sleep. * * Returns an error code or 0 on success. * * This has to be in the IPC driver for the locking. 
*/ int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data) { u32 cmd = 0; mutex_lock(&ipclock); if (ipcdev.pdev == NULL) { mutex_unlock(&ipclock); return -ENODEV; } cmd = (addr >> 24) & 0xFF; if (cmd == IPC_I2C_READ) { writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR); /* Write not getting updated without delay */ mdelay(1); *data = readl(ipcdev.i2c_base + I2C_DATA_ADDR); } else if (cmd == IPC_I2C_WRITE) { writel(addr, ipcdev.i2c_base + I2C_DATA_ADDR); mdelay(1); writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR); } else { dev_err(&ipcdev.pdev->dev, "intel_scu_ipc: I2C INVALID_CMD = 0x%x\n", cmd); mutex_unlock(&ipclock); return -1; } mutex_unlock(&ipclock); return 0; } EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl); #define IPC_FW_LOAD_ADDR 0xFFFC0000 /* Storage location for FW image */ #define IPC_FW_UPDATE_MBOX_ADDR 0xFFFFDFF4 /* Mailbox between ipc and scu */ #define IPC_MAX_FW_SIZE 262144 /* 256K storage size for loading the FW image */ #define IPC_FW_MIP_HEADER_SIZE 2048 /* Firmware MIP header size */ /* IPC inform SCU to get ready for update process */ #define IPC_CMD_FW_UPDATE_READY 0x10FE /* IPC inform SCU to go for update process */ #define IPC_CMD_FW_UPDATE_GO 0x20FE /* Status code for fw update */ #define IPC_FW_UPDATE_SUCCESS 0x444f4e45 /* Status code 'DONE' */ #define IPC_FW_UPDATE_BADN 0x4241444E /* Status code 'BADN' */ #define IPC_FW_TXHIGH 0x54784849 /* Status code 'IPC_FW_TXHIGH' */ #define IPC_FW_TXLOW 0x54784c4f /* Status code 'IPC_FW_TXLOW' */ struct fw_update_mailbox { u32 status; u32 scu_flag; u32 driver_flag; }; /** * intel_scu_ipc_fw_update - Firmware update utility * @buffer: firmware buffer * @length: size of firmware buffer * * This function provides an interface to load the firmware into * the SCU. 
Returns 0 on success or -1 on failure */ int intel_scu_ipc_fw_update(u8 *buffer, u32 length) { void __iomem *fw_update_base; struct fw_update_mailbox __iomem *mailbox = NULL; int retry_cnt = 0; u32 status; mutex_lock(&ipclock); fw_update_base = ioremap_nocache(IPC_FW_LOAD_ADDR, (128*1024)); if (fw_update_base == NULL) { mutex_unlock(&ipclock); return -ENOMEM; } mailbox = ioremap_nocache(IPC_FW_UPDATE_MBOX_ADDR, sizeof(struct fw_update_mailbox)); if (mailbox == NULL) { iounmap(fw_update_base); mutex_unlock(&ipclock); return -ENOMEM; } ipc_command(IPC_CMD_FW_UPDATE_READY); /* Intitialize mailbox */ writel(0, &mailbox->status); writel(0, &mailbox->scu_flag); writel(0, &mailbox->driver_flag); /* Driver copies the 2KB MIP header to SRAM at 0xFFFC0000*/ memcpy_toio(fw_update_base, buffer, 0x800); /* Driver sends "FW Update" IPC command (CMD_ID 0xFE; MSG_ID 0x02). * Upon receiving this command, SCU will write the 2K MIP header * from 0xFFFC0000 into NAND. * SCU will write a status code into the Mailbox, and then set scu_flag. */ ipc_command(IPC_CMD_FW_UPDATE_GO); /*Driver stalls until scu_flag is set */ while (readl(&mailbox->scu_flag) != 1) { rmb(); mdelay(1); } /* Driver checks Mailbox status. * If the status is 'BADN', then abort (bad NAND). * If the status is 'IPC_FW_TXLOW', then continue. 
*/ while (readl(&mailbox->status) != IPC_FW_TXLOW) { rmb(); mdelay(10); } mdelay(10); update_retry: if (retry_cnt > 5) goto update_end; if (readl(&mailbox->status) != IPC_FW_TXLOW) goto update_end; buffer = buffer + 0x800; memcpy_toio(fw_update_base, buffer, 0x20000); writel(1, &mailbox->driver_flag); while (readl(&mailbox->scu_flag) == 1) { rmb(); mdelay(1); } /* check for 'BADN' */ if (readl(&mailbox->status) == IPC_FW_UPDATE_BADN) goto update_end; while (readl(&mailbox->status) != IPC_FW_TXHIGH) { rmb(); mdelay(10); } mdelay(10); if (readl(&mailbox->status) != IPC_FW_TXHIGH) goto update_end; buffer = buffer + 0x20000; memcpy_toio(fw_update_base, buffer, 0x20000); writel(0, &mailbox->driver_flag); while (mailbox->scu_flag == 0) { rmb(); mdelay(1); } /* check for 'BADN' */ if (readl(&mailbox->status) == IPC_FW_UPDATE_BADN) goto update_end; if (readl(&mailbox->status) == IPC_FW_TXLOW) { ++retry_cnt; goto update_retry; } update_end: status = readl(&mailbox->status); iounmap(fw_update_base); iounmap(mailbox); mutex_unlock(&ipclock); if (status == IPC_FW_UPDATE_SUCCESS) return 0; return -1; } EXPORT_SYMBOL(intel_scu_ipc_fw_update); /* * Interrupt handler gets called when ioc bit of IPC_COMMAND_REG set to 1 * When ioc bit is set to 1, caller api must wait for interrupt handler called * which in turn unlocks the caller api. Currently this is not used * * This is edge triggered so we need take no action to clear anything */ static irqreturn_t ioc(int irq, void *dev_id) { return IRQ_HANDLED; } /** * ipc_probe - probe an Intel SCU IPC * @dev: the PCI device matching * @id: entry in the match table * * Enable and install an intel SCU IPC. This appears in the PCI space * but uses some hard coded addresses as well. 
*/ static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id) { int err; resource_size_t pci_resource; if (ipcdev.pdev) /* We support only one SCU */ return -EBUSY; ipcdev.pdev = pci_dev_get(dev); err = pci_enable_device(dev); if (err) return err; err = pci_request_regions(dev, "intel_scu_ipc"); if (err) return err; pci_resource = pci_resource_start(dev, 0); if (!pci_resource) return -ENOMEM; if (request_irq(dev->irq, ioc, 0, "intel_scu_ipc", &ipcdev)) return -EBUSY; ipcdev.ipc_base = ioremap_nocache(IPC_BASE_ADDR, IPC_MAX_ADDR); if (!ipcdev.ipc_base) return -ENOMEM; ipcdev.i2c_base = ioremap_nocache(IPC_I2C_BASE, IPC_I2C_MAX_ADDR); if (!ipcdev.i2c_base) { iounmap(ipcdev.ipc_base); return -ENOMEM; } return 0; } /** * ipc_remove - remove a bound IPC device * @pdev: PCI device * * In practice the SCU is not removable but this function is also * called for each device on a module unload or cleanup which is the * path that will get used. * * Free up the mappings and release the PCI resources */ static void ipc_remove(struct pci_dev *pdev) { free_irq(pdev->irq, &ipcdev); pci_release_regions(pdev); pci_dev_put(ipcdev.pdev); iounmap(ipcdev.ipc_base); iounmap(ipcdev.i2c_base); ipcdev.pdev = NULL; } static const struct pci_device_id pci_ids[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080e)}, { 0,} }; MODULE_DEVICE_TABLE(pci, pci_ids); static struct pci_driver ipc_driver = { .name = "intel_scu_ipc", .id_table = pci_ids, .probe = ipc_probe, .remove = ipc_remove, }; static int __init intel_scu_ipc_init(void) { return pci_register_driver(&ipc_driver); } static void __exit intel_scu_ipc_exit(void) { pci_unregister_driver(&ipc_driver); } MODULE_AUTHOR("Sreedhara DS <sreedhara.ds@intel.com>"); MODULE_DESCRIPTION("Intel SCU IPC driver"); MODULE_LICENSE("GPL"); module_init(intel_scu_ipc_init); module_exit(intel_scu_ipc_exit);
gpl-2.0
farchanrifai/Foxy
fs/xfs/xfs_ioctl32.c
3831
18411
/* * Copyright (c) 2004-2005 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/compat.h> #include <linux/ioctl.h> #include <linux/mount.h> #include <linux/slab.h> #include <asm/uaccess.h> #include "xfs.h" #include "xfs_fs.h" #include "xfs_bit.h" #include "xfs_log.h" #include "xfs_inum.h" #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_mount.h" #include "xfs_bmap_btree.h" #include "xfs_vnode.h" #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_itable.h" #include "xfs_error.h" #include "xfs_dfrag.h" #include "xfs_vnodeops.h" #include "xfs_fsops.h" #include "xfs_alloc.h" #include "xfs_rtalloc.h" #include "xfs_attr.h" #include "xfs_ioctl.h" #include "xfs_ioctl32.h" #include "xfs_trace.h" #define _NATIVE_IOC(cmd, type) \ _IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type)) #ifdef BROKEN_X86_ALIGNMENT STATIC int xfs_compat_flock64_copyin( xfs_flock64_t *bf, compat_xfs_flock64_t __user *arg32) { if (get_user(bf->l_type, &arg32->l_type) || get_user(bf->l_whence, &arg32->l_whence) || get_user(bf->l_start, &arg32->l_start) || get_user(bf->l_len, &arg32->l_len) || get_user(bf->l_sysid, &arg32->l_sysid) || get_user(bf->l_pid, &arg32->l_pid) || copy_from_user(bf->l_pad, &arg32->l_pad, 4*sizeof(u32))) return -XFS_ERROR(EFAULT); return 0; } STATIC int xfs_compat_ioc_fsgeometry_v1( struct xfs_mount *mp, 
compat_xfs_fsop_geom_v1_t __user *arg32) { xfs_fsop_geom_t fsgeo; int error; error = xfs_fs_geometry(mp, &fsgeo, 3); if (error) return -error; /* The 32-bit variant simply has some padding at the end */ if (copy_to_user(arg32, &fsgeo, sizeof(struct compat_xfs_fsop_geom_v1))) return -XFS_ERROR(EFAULT); return 0; } STATIC int xfs_compat_growfs_data_copyin( struct xfs_growfs_data *in, compat_xfs_growfs_data_t __user *arg32) { if (get_user(in->newblocks, &arg32->newblocks) || get_user(in->imaxpct, &arg32->imaxpct)) return -XFS_ERROR(EFAULT); return 0; } STATIC int xfs_compat_growfs_rt_copyin( struct xfs_growfs_rt *in, compat_xfs_growfs_rt_t __user *arg32) { if (get_user(in->newblocks, &arg32->newblocks) || get_user(in->extsize, &arg32->extsize)) return -XFS_ERROR(EFAULT); return 0; } STATIC int xfs_inumbers_fmt_compat( void __user *ubuffer, const xfs_inogrp_t *buffer, long count, long *written) { compat_xfs_inogrp_t __user *p32 = ubuffer; long i; for (i = 0; i < count; i++) { if (put_user(buffer[i].xi_startino, &p32[i].xi_startino) || put_user(buffer[i].xi_alloccount, &p32[i].xi_alloccount) || put_user(buffer[i].xi_allocmask, &p32[i].xi_allocmask)) return -XFS_ERROR(EFAULT); } *written = count * sizeof(*p32); return 0; } #else #define xfs_inumbers_fmt_compat xfs_inumbers_fmt #endif /* BROKEN_X86_ALIGNMENT */ STATIC int xfs_ioctl32_bstime_copyin( xfs_bstime_t *bstime, compat_xfs_bstime_t __user *bstime32) { compat_time_t sec32; /* tv_sec differs on 64 vs. 
32 */ if (get_user(sec32, &bstime32->tv_sec) || get_user(bstime->tv_nsec, &bstime32->tv_nsec)) return -XFS_ERROR(EFAULT); bstime->tv_sec = sec32; return 0; } /* xfs_bstat_t has differing alignment on intel, & bstime_t sizes everywhere */ STATIC int xfs_ioctl32_bstat_copyin( xfs_bstat_t *bstat, compat_xfs_bstat_t __user *bstat32) { if (get_user(bstat->bs_ino, &bstat32->bs_ino) || get_user(bstat->bs_mode, &bstat32->bs_mode) || get_user(bstat->bs_nlink, &bstat32->bs_nlink) || get_user(bstat->bs_uid, &bstat32->bs_uid) || get_user(bstat->bs_gid, &bstat32->bs_gid) || get_user(bstat->bs_rdev, &bstat32->bs_rdev) || get_user(bstat->bs_blksize, &bstat32->bs_blksize) || get_user(bstat->bs_size, &bstat32->bs_size) || xfs_ioctl32_bstime_copyin(&bstat->bs_atime, &bstat32->bs_atime) || xfs_ioctl32_bstime_copyin(&bstat->bs_mtime, &bstat32->bs_mtime) || xfs_ioctl32_bstime_copyin(&bstat->bs_ctime, &bstat32->bs_ctime) || get_user(bstat->bs_blocks, &bstat32->bs_size) || get_user(bstat->bs_xflags, &bstat32->bs_size) || get_user(bstat->bs_extsize, &bstat32->bs_extsize) || get_user(bstat->bs_extents, &bstat32->bs_extents) || get_user(bstat->bs_gen, &bstat32->bs_gen) || get_user(bstat->bs_projid_lo, &bstat32->bs_projid_lo) || get_user(bstat->bs_projid_hi, &bstat32->bs_projid_hi) || get_user(bstat->bs_dmevmask, &bstat32->bs_dmevmask) || get_user(bstat->bs_dmstate, &bstat32->bs_dmstate) || get_user(bstat->bs_aextents, &bstat32->bs_aextents)) return -XFS_ERROR(EFAULT); return 0; } /* XFS_IOC_FSBULKSTAT and friends */ STATIC int xfs_bstime_store_compat( compat_xfs_bstime_t __user *p32, const xfs_bstime_t *p) { __s32 sec32; sec32 = p->tv_sec; if (put_user(sec32, &p32->tv_sec) || put_user(p->tv_nsec, &p32->tv_nsec)) return -XFS_ERROR(EFAULT); return 0; } /* Return 0 on success or positive error (to xfs_bulkstat()) */ STATIC int xfs_bulkstat_one_fmt_compat( void __user *ubuffer, int ubsize, int *ubused, const xfs_bstat_t *buffer) { compat_xfs_bstat_t __user *p32 = ubuffer; if (ubsize < 
sizeof(*p32)) return XFS_ERROR(ENOMEM); if (put_user(buffer->bs_ino, &p32->bs_ino) || put_user(buffer->bs_mode, &p32->bs_mode) || put_user(buffer->bs_nlink, &p32->bs_nlink) || put_user(buffer->bs_uid, &p32->bs_uid) || put_user(buffer->bs_gid, &p32->bs_gid) || put_user(buffer->bs_rdev, &p32->bs_rdev) || put_user(buffer->bs_blksize, &p32->bs_blksize) || put_user(buffer->bs_size, &p32->bs_size) || xfs_bstime_store_compat(&p32->bs_atime, &buffer->bs_atime) || xfs_bstime_store_compat(&p32->bs_mtime, &buffer->bs_mtime) || xfs_bstime_store_compat(&p32->bs_ctime, &buffer->bs_ctime) || put_user(buffer->bs_blocks, &p32->bs_blocks) || put_user(buffer->bs_xflags, &p32->bs_xflags) || put_user(buffer->bs_extsize, &p32->bs_extsize) || put_user(buffer->bs_extents, &p32->bs_extents) || put_user(buffer->bs_gen, &p32->bs_gen) || put_user(buffer->bs_projid, &p32->bs_projid) || put_user(buffer->bs_projid_hi, &p32->bs_projid_hi) || put_user(buffer->bs_dmevmask, &p32->bs_dmevmask) || put_user(buffer->bs_dmstate, &p32->bs_dmstate) || put_user(buffer->bs_aextents, &p32->bs_aextents)) return XFS_ERROR(EFAULT); if (ubused) *ubused = sizeof(*p32); return 0; } STATIC int xfs_bulkstat_one_compat( xfs_mount_t *mp, /* mount point for filesystem */ xfs_ino_t ino, /* inode number to get data for */ void __user *buffer, /* buffer to place output in */ int ubsize, /* size of buffer */ int *ubused, /* bytes used by me */ int *stat) /* BULKSTAT_RV_... 
*/ { return xfs_bulkstat_one_int(mp, ino, buffer, ubsize, xfs_bulkstat_one_fmt_compat, ubused, stat); } /* copied from xfs_ioctl.c */ STATIC int xfs_compat_ioc_bulkstat( xfs_mount_t *mp, unsigned int cmd, compat_xfs_fsop_bulkreq_t __user *p32) { u32 addr; xfs_fsop_bulkreq_t bulkreq; int count; /* # of records returned */ xfs_ino_t inlast; /* last inode number */ int done; int error; /* done = 1 if there are more stats to get and if bulkstat */ /* should be called again (unused here, but used in dmapi) */ if (!capable(CAP_SYS_ADMIN)) return -XFS_ERROR(EPERM); if (XFS_FORCED_SHUTDOWN(mp)) return -XFS_ERROR(EIO); if (get_user(addr, &p32->lastip)) return -XFS_ERROR(EFAULT); bulkreq.lastip = compat_ptr(addr); if (get_user(bulkreq.icount, &p32->icount) || get_user(addr, &p32->ubuffer)) return -XFS_ERROR(EFAULT); bulkreq.ubuffer = compat_ptr(addr); if (get_user(addr, &p32->ocount)) return -XFS_ERROR(EFAULT); bulkreq.ocount = compat_ptr(addr); if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64))) return -XFS_ERROR(EFAULT); if ((count = bulkreq.icount) <= 0) return -XFS_ERROR(EINVAL); if (bulkreq.ubuffer == NULL) return -XFS_ERROR(EINVAL); if (cmd == XFS_IOC_FSINUMBERS_32) { error = xfs_inumbers(mp, &inlast, &count, bulkreq.ubuffer, xfs_inumbers_fmt_compat); } else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE_32) { int res; error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer, sizeof(compat_xfs_bstat_t), NULL, &res); } else if (cmd == XFS_IOC_FSBULKSTAT_32) { error = xfs_bulkstat(mp, &inlast, &count, xfs_bulkstat_one_compat, sizeof(compat_xfs_bstat_t), bulkreq.ubuffer, &done); } else error = XFS_ERROR(EINVAL); if (error) return -error; if (bulkreq.ocount != NULL) { if (copy_to_user(bulkreq.lastip, &inlast, sizeof(xfs_ino_t))) return -XFS_ERROR(EFAULT); if (copy_to_user(bulkreq.ocount, &count, sizeof(count))) return -XFS_ERROR(EFAULT); } return 0; } STATIC int xfs_compat_handlereq_copyin( xfs_fsop_handlereq_t *hreq, compat_xfs_fsop_handlereq_t __user *arg32) { 
compat_xfs_fsop_handlereq_t hreq32; if (copy_from_user(&hreq32, arg32, sizeof(compat_xfs_fsop_handlereq_t))) return -XFS_ERROR(EFAULT); hreq->fd = hreq32.fd; hreq->path = compat_ptr(hreq32.path); hreq->oflags = hreq32.oflags; hreq->ihandle = compat_ptr(hreq32.ihandle); hreq->ihandlen = hreq32.ihandlen; hreq->ohandle = compat_ptr(hreq32.ohandle); hreq->ohandlen = compat_ptr(hreq32.ohandlen); return 0; } STATIC struct dentry * xfs_compat_handlereq_to_dentry( struct file *parfilp, compat_xfs_fsop_handlereq_t *hreq) { return xfs_handle_to_dentry(parfilp, compat_ptr(hreq->ihandle), hreq->ihandlen); } STATIC int xfs_compat_attrlist_by_handle( struct file *parfilp, void __user *arg) { int error; attrlist_cursor_kern_t *cursor; compat_xfs_fsop_attrlist_handlereq_t al_hreq; struct dentry *dentry; char *kbuf; if (!capable(CAP_SYS_ADMIN)) return -XFS_ERROR(EPERM); if (copy_from_user(&al_hreq, arg, sizeof(compat_xfs_fsop_attrlist_handlereq_t))) return -XFS_ERROR(EFAULT); if (al_hreq.buflen > XATTR_LIST_MAX) return -XFS_ERROR(EINVAL); /* * Reject flags, only allow namespaces. 
*/ if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE)) return -XFS_ERROR(EINVAL); dentry = xfs_compat_handlereq_to_dentry(parfilp, &al_hreq.hreq); if (IS_ERR(dentry)) return PTR_ERR(dentry); error = -ENOMEM; kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL); if (!kbuf) goto out_dput; cursor = (attrlist_cursor_kern_t *)&al_hreq.pos; error = -xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen, al_hreq.flags, cursor); if (error) goto out_kfree; if (copy_to_user(compat_ptr(al_hreq.buffer), kbuf, al_hreq.buflen)) error = -EFAULT; out_kfree: kfree(kbuf); out_dput: dput(dentry); return error; } STATIC int xfs_compat_attrmulti_by_handle( struct file *parfilp, void __user *arg) { int error; compat_xfs_attr_multiop_t *ops; compat_xfs_fsop_attrmulti_handlereq_t am_hreq; struct dentry *dentry; unsigned int i, size; unsigned char *attr_name; if (!capable(CAP_SYS_ADMIN)) return -XFS_ERROR(EPERM); if (copy_from_user(&am_hreq, arg, sizeof(compat_xfs_fsop_attrmulti_handlereq_t))) return -XFS_ERROR(EFAULT); /* overflow check */ if (am_hreq.opcount >= INT_MAX / sizeof(compat_xfs_attr_multiop_t)) return -E2BIG; dentry = xfs_compat_handlereq_to_dentry(parfilp, &am_hreq.hreq); if (IS_ERR(dentry)) return PTR_ERR(dentry); error = E2BIG; size = am_hreq.opcount * sizeof(compat_xfs_attr_multiop_t); if (!size || size > 16 * PAGE_SIZE) goto out_dput; ops = memdup_user(compat_ptr(am_hreq.ops), size); if (IS_ERR(ops)) { error = PTR_ERR(ops); goto out_dput; } attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL); if (!attr_name) goto out_kfree_ops; error = 0; for (i = 0; i < am_hreq.opcount; i++) { ops[i].am_error = strncpy_from_user((char *)attr_name, compat_ptr(ops[i].am_attrname), MAXNAMELEN); if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN) error = -ERANGE; if (ops[i].am_error < 0) break; switch (ops[i].am_opcode) { case ATTR_OP_GET: ops[i].am_error = xfs_attrmulti_attr_get( dentry->d_inode, attr_name, compat_ptr(ops[i].am_attrvalue), &ops[i].am_length, ops[i].am_flags); break; case ATTR_OP_SET: 
ops[i].am_error = mnt_want_write_file(parfilp); if (ops[i].am_error) break; ops[i].am_error = xfs_attrmulti_attr_set( dentry->d_inode, attr_name, compat_ptr(ops[i].am_attrvalue), ops[i].am_length, ops[i].am_flags); mnt_drop_write_file(parfilp); break; case ATTR_OP_REMOVE: ops[i].am_error = mnt_want_write_file(parfilp); if (ops[i].am_error) break; ops[i].am_error = xfs_attrmulti_attr_remove( dentry->d_inode, attr_name, ops[i].am_flags); mnt_drop_write_file(parfilp); break; default: ops[i].am_error = EINVAL; } } if (copy_to_user(compat_ptr(am_hreq.ops), ops, size)) error = XFS_ERROR(EFAULT); kfree(attr_name); out_kfree_ops: kfree(ops); out_dput: dput(dentry); return -error; } STATIC int xfs_compat_fssetdm_by_handle( struct file *parfilp, void __user *arg) { int error; struct fsdmidata fsd; compat_xfs_fsop_setdm_handlereq_t dmhreq; struct dentry *dentry; if (!capable(CAP_MKNOD)) return -XFS_ERROR(EPERM); if (copy_from_user(&dmhreq, arg, sizeof(compat_xfs_fsop_setdm_handlereq_t))) return -XFS_ERROR(EFAULT); dentry = xfs_compat_handlereq_to_dentry(parfilp, &dmhreq.hreq); if (IS_ERR(dentry)) return PTR_ERR(dentry); if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) { error = -XFS_ERROR(EPERM); goto out; } if (copy_from_user(&fsd, compat_ptr(dmhreq.data), sizeof(fsd))) { error = -XFS_ERROR(EFAULT); goto out; } error = -xfs_set_dmattrs(XFS_I(dentry->d_inode), fsd.fsd_dmevmask, fsd.fsd_dmstate); out: dput(dentry); return error; } long xfs_file_compat_ioctl( struct file *filp, unsigned cmd, unsigned long p) { struct inode *inode = filp->f_path.dentry->d_inode; struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; void __user *arg = (void __user *)p; int ioflags = 0; int error; if (filp->f_mode & FMODE_NOCMTIME) ioflags |= IO_INVIS; trace_xfs_file_compat_ioctl(ip); switch (cmd) { /* No size or alignment issues on any arch */ case XFS_IOC_DIOINFO: case XFS_IOC_FSGEOMETRY: case XFS_IOC_FSGETXATTR: case XFS_IOC_FSSETXATTR: case 
XFS_IOC_FSGETXATTRA: case XFS_IOC_FSSETDM: case XFS_IOC_GETBMAP: case XFS_IOC_GETBMAPA: case XFS_IOC_GETBMAPX: case XFS_IOC_FSCOUNTS: case XFS_IOC_SET_RESBLKS: case XFS_IOC_GET_RESBLKS: case XFS_IOC_FSGROWFSLOG: case XFS_IOC_GOINGDOWN: case XFS_IOC_ERROR_INJECTION: case XFS_IOC_ERROR_CLEARALL: return xfs_file_ioctl(filp, cmd, p); #ifndef BROKEN_X86_ALIGNMENT /* These are handled fine if no alignment issues */ case XFS_IOC_ALLOCSP: case XFS_IOC_FREESP: case XFS_IOC_RESVSP: case XFS_IOC_UNRESVSP: case XFS_IOC_ALLOCSP64: case XFS_IOC_FREESP64: case XFS_IOC_RESVSP64: case XFS_IOC_UNRESVSP64: case XFS_IOC_FSGEOMETRY_V1: case XFS_IOC_FSGROWFSDATA: case XFS_IOC_FSGROWFSRT: case XFS_IOC_ZERO_RANGE: return xfs_file_ioctl(filp, cmd, p); #else case XFS_IOC_ALLOCSP_32: case XFS_IOC_FREESP_32: case XFS_IOC_ALLOCSP64_32: case XFS_IOC_FREESP64_32: case XFS_IOC_RESVSP_32: case XFS_IOC_UNRESVSP_32: case XFS_IOC_RESVSP64_32: case XFS_IOC_UNRESVSP64_32: case XFS_IOC_ZERO_RANGE_32: { struct xfs_flock64 bf; if (xfs_compat_flock64_copyin(&bf, arg)) return -XFS_ERROR(EFAULT); cmd = _NATIVE_IOC(cmd, struct xfs_flock64); return xfs_ioc_space(ip, inode, filp, ioflags, cmd, &bf); } case XFS_IOC_FSGEOMETRY_V1_32: return xfs_compat_ioc_fsgeometry_v1(mp, arg); case XFS_IOC_FSGROWFSDATA_32: { struct xfs_growfs_data in; if (xfs_compat_growfs_data_copyin(&in, arg)) return -XFS_ERROR(EFAULT); error = xfs_growfs_data(mp, &in); return -error; } case XFS_IOC_FSGROWFSRT_32: { struct xfs_growfs_rt in; if (xfs_compat_growfs_rt_copyin(&in, arg)) return -XFS_ERROR(EFAULT); error = xfs_growfs_rt(mp, &in); return -error; } #endif /* long changes size, but xfs only copiese out 32 bits */ case XFS_IOC_GETXFLAGS_32: case XFS_IOC_SETXFLAGS_32: case XFS_IOC_GETVERSION_32: cmd = _NATIVE_IOC(cmd, long); return xfs_file_ioctl(filp, cmd, p); case XFS_IOC_SWAPEXT_32: { struct xfs_swapext sxp; struct compat_xfs_swapext __user *sxu = arg; /* Bulk copy in up to the sx_stat field, then copy bstat */ if 
(copy_from_user(&sxp, sxu, offsetof(struct xfs_swapext, sx_stat)) || xfs_ioctl32_bstat_copyin(&sxp.sx_stat, &sxu->sx_stat)) return -XFS_ERROR(EFAULT); error = xfs_swapext(&sxp); return -error; } case XFS_IOC_FSBULKSTAT_32: case XFS_IOC_FSBULKSTAT_SINGLE_32: case XFS_IOC_FSINUMBERS_32: return xfs_compat_ioc_bulkstat(mp, cmd, arg); case XFS_IOC_FD_TO_HANDLE_32: case XFS_IOC_PATH_TO_HANDLE_32: case XFS_IOC_PATH_TO_FSHANDLE_32: { struct xfs_fsop_handlereq hreq; if (xfs_compat_handlereq_copyin(&hreq, arg)) return -XFS_ERROR(EFAULT); cmd = _NATIVE_IOC(cmd, struct xfs_fsop_handlereq); return xfs_find_handle(cmd, &hreq); } case XFS_IOC_OPEN_BY_HANDLE_32: { struct xfs_fsop_handlereq hreq; if (xfs_compat_handlereq_copyin(&hreq, arg)) return -XFS_ERROR(EFAULT); return xfs_open_by_handle(filp, &hreq); } case XFS_IOC_READLINK_BY_HANDLE_32: { struct xfs_fsop_handlereq hreq; if (xfs_compat_handlereq_copyin(&hreq, arg)) return -XFS_ERROR(EFAULT); return xfs_readlink_by_handle(filp, &hreq); } case XFS_IOC_ATTRLIST_BY_HANDLE_32: return xfs_compat_attrlist_by_handle(filp, arg); case XFS_IOC_ATTRMULTI_BY_HANDLE_32: return xfs_compat_attrmulti_by_handle(filp, arg); case XFS_IOC_FSSETDM_BY_HANDLE_32: return xfs_compat_fssetdm_by_handle(filp, arg); default: return -XFS_ERROR(ENOIOCTLCMD); } }
gpl-2.0
pershoot/kernel-2634
arch/arm/mach-loki/addr-map.c
4087
2869
/* * arch/arm/mach-loki/addr-map.c * * Address map functions for Marvell Loki (88RC8480) SoCs * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/mbus.h> #include <linux/io.h> #include <mach/hardware.h> #include "common.h" /* * Generic Address Decode Windows bit settings */ #define TARGET_DDR 0 #define TARGET_DEV_BUS 1 #define TARGET_PCIE0 3 #define TARGET_PCIE1 4 #define ATTR_DEV_BOOT 0x0f #define ATTR_DEV_CS2 0x1b #define ATTR_DEV_CS1 0x1d #define ATTR_DEV_CS0 0x1e #define ATTR_PCIE_IO 0x51 #define ATTR_PCIE_MEM 0x59 /* * Helpers to get DDR bank info */ #define DDR_SIZE_CS(n) DDR_REG(0x1500 + ((n) << 3)) #define DDR_BASE_CS(n) DDR_REG(0x1504 + ((n) << 3)) /* * CPU Address Decode Windows registers */ #define BRIDGE_REG(x) (BRIDGE_VIRT_BASE | (x)) #define CPU_WIN_CTRL(n) BRIDGE_REG(0x000 | ((n) << 4)) #define CPU_WIN_BASE(n) BRIDGE_REG(0x004 | ((n) << 4)) #define CPU_WIN_REMAP_LO(n) BRIDGE_REG(0x008 | ((n) << 4)) #define CPU_WIN_REMAP_HI(n) BRIDGE_REG(0x00c | ((n) << 4)) struct mbus_dram_target_info loki_mbus_dram_info; static void __init setup_cpu_win(int win, u32 base, u32 size, u8 target, u8 attr, int remap) { u32 ctrl; base &= 0xffff0000; ctrl = ((size - 1) & 0xffff0000) | (attr << 8) | (1 << 5) | target; writel(base, CPU_WIN_BASE(win)); writel(ctrl, CPU_WIN_CTRL(win)); if (win < 2) { if (remap < 0) remap = base; writel(remap & 0xffff0000, CPU_WIN_REMAP_LO(win)); writel(0, CPU_WIN_REMAP_HI(win)); } } void __init loki_setup_cpu_mbus(void) { int i; int cs; /* * First, disable and clear windows. */ for (i = 0; i < 8; i++) { writel(0, CPU_WIN_BASE(i)); writel(0, CPU_WIN_CTRL(i)); if (i < 2) { writel(0, CPU_WIN_REMAP_LO(i)); writel(0, CPU_WIN_REMAP_HI(i)); } } /* * Setup windows for PCIe IO+MEM space. 
*/ setup_cpu_win(2, LOKI_PCIE0_MEM_PHYS_BASE, LOKI_PCIE0_MEM_SIZE, TARGET_PCIE0, ATTR_PCIE_MEM, -1); setup_cpu_win(3, LOKI_PCIE1_MEM_PHYS_BASE, LOKI_PCIE1_MEM_SIZE, TARGET_PCIE1, ATTR_PCIE_MEM, -1); /* * Setup MBUS dram target info. */ loki_mbus_dram_info.mbus_dram_target_id = TARGET_DDR; for (i = 0, cs = 0; i < 4; i++) { u32 base = readl(DDR_BASE_CS(i)); u32 size = readl(DDR_SIZE_CS(i)); /* * Chip select enabled? */ if (size & 1) { struct mbus_dram_window *w; w = &loki_mbus_dram_info.cs[cs++]; w->cs_index = i; w->mbus_attr = 0xf & ~(1 << i); w->base = base & 0xffff0000; w->size = (size | 0x0000ffff) + 1; } } loki_mbus_dram_info.num_cs = cs; } void __init loki_setup_dev_boot_win(u32 base, u32 size) { setup_cpu_win(4, base, size, TARGET_DEV_BUS, ATTR_DEV_BOOT, -1); }
gpl-2.0
xcstacy/flo-kernel
kernel/pid_namespace.c
4087
6016
/* * Pid namespaces * * Authors: * (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc. * (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM * Many thanks to Oleg Nesterov for comments and help * */ #include <linux/pid.h> #include <linux/pid_namespace.h> #include <linux/syscalls.h> #include <linux/err.h> #include <linux/acct.h> #include <linux/slab.h> #include <linux/proc_fs.h> #include <linux/reboot.h> #define BITS_PER_PAGE (PAGE_SIZE*8) struct pid_cache { int nr_ids; char name[16]; struct kmem_cache *cachep; struct list_head list; }; static LIST_HEAD(pid_caches_lh); static DEFINE_MUTEX(pid_caches_mutex); static struct kmem_cache *pid_ns_cachep; /* * creates the kmem cache to allocate pids from. * @nr_ids: the number of numerical ids this pid will have to carry */ static struct kmem_cache *create_pid_cachep(int nr_ids) { struct pid_cache *pcache; struct kmem_cache *cachep; mutex_lock(&pid_caches_mutex); list_for_each_entry(pcache, &pid_caches_lh, list) if (pcache->nr_ids == nr_ids) goto out; pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL); if (pcache == NULL) goto err_alloc; snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids); cachep = kmem_cache_create(pcache->name, sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid), 0, SLAB_HWCACHE_ALIGN, NULL); if (cachep == NULL) goto err_cachep; pcache->nr_ids = nr_ids; pcache->cachep = cachep; list_add(&pcache->list, &pid_caches_lh); out: mutex_unlock(&pid_caches_mutex); return pcache->cachep; err_cachep: kfree(pcache); err_alloc: mutex_unlock(&pid_caches_mutex); return NULL; } static struct pid_namespace *create_pid_namespace(struct pid_namespace *parent_pid_ns) { struct pid_namespace *ns; unsigned int level = parent_pid_ns->level + 1; int i, err = -ENOMEM; ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL); if (ns == NULL) goto out; ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!ns->pidmap[0].page) goto out_free; ns->pid_cachep = create_pid_cachep(level + 1); if 
(ns->pid_cachep == NULL) goto out_free_map; kref_init(&ns->kref); ns->level = level; ns->parent = get_pid_ns(parent_pid_ns); set_bit(0, ns->pidmap[0].page); atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1); for (i = 1; i < PIDMAP_ENTRIES; i++) atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE); err = pid_ns_prepare_proc(ns); if (err) goto out_put_parent_pid_ns; return ns; out_put_parent_pid_ns: put_pid_ns(parent_pid_ns); out_free_map: kfree(ns->pidmap[0].page); out_free: kmem_cache_free(pid_ns_cachep, ns); out: return ERR_PTR(err); } static void destroy_pid_namespace(struct pid_namespace *ns) { int i; for (i = 0; i < PIDMAP_ENTRIES; i++) kfree(ns->pidmap[i].page); kmem_cache_free(pid_ns_cachep, ns); } struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old_ns) { if (!(flags & CLONE_NEWPID)) return get_pid_ns(old_ns); if (flags & (CLONE_THREAD|CLONE_PARENT)) return ERR_PTR(-EINVAL); return create_pid_namespace(old_ns); } void free_pid_ns(struct kref *kref) { struct pid_namespace *ns, *parent; ns = container_of(kref, struct pid_namespace, kref); parent = ns->parent; destroy_pid_namespace(ns); if (parent != NULL) put_pid_ns(parent); } void zap_pid_ns_processes(struct pid_namespace *pid_ns) { int nr; int rc; struct task_struct *task; /* * The last thread in the cgroup-init thread group is terminating. * Find remaining pid_ts in the namespace, signal and wait for them * to exit. * * Note: This signals each threads in the namespace - even those that * belong to the same thread group, To avoid this, we would have * to walk the entire tasklist looking a processes in this * namespace, but that could be unnecessarily expensive if the * pid namespace has just a few processes. Or we need to * maintain a tasklist for each pid namespace. 
* */ read_lock(&tasklist_lock); nr = next_pidmap(pid_ns, 1); while (nr > 0) { rcu_read_lock(); task = pid_task(find_vpid(nr), PIDTYPE_PID); if (task && !__fatal_signal_pending(task)) send_sig_info(SIGKILL, SEND_SIG_FORCED, task); rcu_read_unlock(); nr = next_pidmap(pid_ns, nr); } read_unlock(&tasklist_lock); do { clear_thread_flag(TIF_SIGPENDING); rc = sys_wait4(-1, NULL, __WALL, NULL); } while (rc != -ECHILD); if (pid_ns->reboot) current->signal->group_exit_code = pid_ns->reboot; acct_exit_ns(pid_ns); return; } static int pid_ns_ctl_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table tmp = *table; if (write && !capable(CAP_SYS_ADMIN)) return -EPERM; /* * Writing directly to ns' last_pid field is OK, since this field * is volatile in a living namespace anyway and a code writing to * it should synchronize its usage with external means. */ tmp.data = &current->nsproxy->pid_ns->last_pid; return proc_dointvec(&tmp, write, buffer, lenp, ppos); } static struct ctl_table pid_ns_ctl_table[] = { { .procname = "ns_last_pid", .maxlen = sizeof(int), .mode = 0666, /* permissions are checked in the handler */ .proc_handler = pid_ns_ctl_handler, }, { } }; static struct ctl_path kern_path[] = { { .procname = "kernel", }, { } }; int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd) { if (pid_ns == &init_pid_ns) return 0; switch (cmd) { case LINUX_REBOOT_CMD_RESTART2: case LINUX_REBOOT_CMD_RESTART: pid_ns->reboot = SIGHUP; break; case LINUX_REBOOT_CMD_POWER_OFF: case LINUX_REBOOT_CMD_HALT: pid_ns->reboot = SIGINT; break; default: return -EINVAL; } read_lock(&tasklist_lock); force_sig(SIGKILL, pid_ns->child_reaper); read_unlock(&tasklist_lock); do_exit(0); /* Not reached */ return 0; } static __init int pid_namespaces_init(void) { pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC); register_sysctl_paths(kern_path, pid_ns_ctl_table); return 0; } __initcall(pid_namespaces_init);
gpl-2.0
xiaognol/android_kernel_zte_nx503a-4.2
sound/pci/ymfpci/ymfpci_main.c
4855
73287
/* * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Routines for control of YMF724/740/744/754 chips * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include <linux/firmware.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/mutex.h> #include <linux/module.h> #include <sound/core.h> #include <sound/control.h> #include <sound/info.h> #include <sound/tlv.h> #include <sound/ymfpci.h> #include <sound/asoundef.h> #include <sound/mpu401.h> #include <asm/io.h> #include <asm/byteorder.h> /* * common I/O routines */ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip); static inline u8 snd_ymfpci_readb(struct snd_ymfpci *chip, u32 offset) { return readb(chip->reg_area_virt + offset); } static inline void snd_ymfpci_writeb(struct snd_ymfpci *chip, u32 offset, u8 val) { writeb(val, chip->reg_area_virt + offset); } static inline u16 snd_ymfpci_readw(struct snd_ymfpci *chip, u32 offset) { return readw(chip->reg_area_virt + offset); } static inline void snd_ymfpci_writew(struct snd_ymfpci *chip, u32 offset, u16 val) { writew(val, chip->reg_area_virt + offset); } static inline u32 snd_ymfpci_readl(struct snd_ymfpci *chip, u32 offset) { return readl(chip->reg_area_virt + offset); } 
static inline void snd_ymfpci_writel(struct snd_ymfpci *chip, u32 offset, u32 val)
{
	writel(val, chip->reg_area_virt + offset);
}

/*
 * Poll until the addressed AC'97 codec clears its busy bit (bit 15 of the
 * status register), giving up after about 750 ms.
 * Returns 0 when ready, -EBUSY on timeout.
 */
static int snd_ymfpci_codec_ready(struct snd_ymfpci *chip, int secondary)
{
	unsigned long end_time;
	u32 reg = secondary ? YDSXGR_SECSTATUSADR : YDSXGR_PRISTATUSADR;

	end_time = jiffies + msecs_to_jiffies(750);
	do {
		if ((snd_ymfpci_readw(chip, reg) & 0x8000) == 0)
			return 0;
		schedule_timeout_uninterruptible(1);
	} while (time_before(jiffies, end_time));
	snd_printk(KERN_ERR "codec_ready: codec %i is not ready [0x%x]\n",
		   secondary, snd_ymfpci_readw(chip, reg));
	return -EBUSY;
}

/* Issue a write command to the primary AC'97 codec. */
static void snd_ymfpci_codec_write(struct snd_ac97 *ac97, u16 reg, u16 val)
{
	struct snd_ymfpci *chip = ac97->private_data;
	u32 cmd;

	snd_ymfpci_codec_ready(chip, 0);
	cmd = ((YDSXG_AC97WRITECMD | reg) << 16) | val;
	snd_ymfpci_writel(chip, YDSXGR_AC97CMDDATA, cmd);
}

/* Read a register of the primary AC'97 codec; returns ~0 on timeout. */
static u16 snd_ymfpci_codec_read(struct snd_ac97 *ac97, u16 reg)
{
	struct snd_ymfpci *chip = ac97->private_data;

	if (snd_ymfpci_codec_ready(chip, 0))
		return ~0;
	snd_ymfpci_writew(chip, YDSXGR_AC97CMDADR, YDSXG_AC97READCMD | reg);
	if (snd_ymfpci_codec_ready(chip, 0))
		return ~0;
	if (chip->device_id == PCI_DEVICE_ID_YAMAHA_744 && chip->rev < 2) {
		int i;
		/* NOTE(review): looks like an early-YMF744 revision
		 * workaround - burn time with dummy status reads before
		 * fetching the real data; confirm against the datasheet. */
		for (i = 0; i < 600; i++)
			snd_ymfpci_readw(chip, YDSXGR_PRISTATUSDATA);
	}
	return snd_ymfpci_readw(chip, YDSXGR_PRISTATUSDATA);
}

/*
 *  Misc routines
 */

/* Convert a sample rate to the chip's fixed-point pitch delta value;
 * common rates use precomputed constants, others are derived. */
static u32 snd_ymfpci_calc_delta(u32 rate)
{
	switch (rate) {
	case 8000:	return 0x02aaab00;
	case 11025:	return 0x03accd00;
	case 16000:	return 0x05555500;
	case 22050:	return 0x07599a00;
	case 32000:	return 0x0aaaab00;
	case 44100:	return 0x0eb33300;
	default:	return ((rate << 16) / 375) << 5;
	}
}

/* Rate thresholds indexing the lpfK/lpfQ coefficient tables below. */
static u32 def_rate[8] = {
	100, 2000, 8000, 11025, 16000, 22050, 32000, 48000
};

/* Low-pass filter K coefficient for the given rate (table lookup). */
static u32 snd_ymfpci_calc_lpfK(u32 rate)
{
	u32 i;
	static u32 val[8] = {
		0x00570000, 0x06AA0000, 0x18B20000, 0x20930000,
		0x2B9A0000, 0x35A10000, 0x3EAA0000, 0x40000000
	};

	if (rate == 44100)
		return 0x40000000;	/* FIXME: What's the right value? */
	for (i = 0; i < 8; i++)
		if (rate <= def_rate[i])
			return val[i];
	return val[0];
}

/* Low-pass filter Q coefficient for the given rate (table lookup). */
static u32 snd_ymfpci_calc_lpfQ(u32 rate)
{
	u32 i;
	static u32 val[8] = {
		0x35280000, 0x34A70000, 0x32020000, 0x31770000,
		0x31390000, 0x31C90000, 0x33D00000, 0x40000000
	};

	if (rate == 44100)
		return 0x370A0000;
	for (i = 0; i < 8; i++)
		if (rate <= def_rate[i])
			return val[i];
	return val[0];
}

/*
 *  Hardware start management
 */

/* Reference-counted DSP start: the first user sets the run bits in MODE
 * and latches the currently active bank. */
static void snd_ymfpci_hw_start(struct snd_ymfpci *chip)
{
	unsigned long flags;

	spin_lock_irqsave(&chip->reg_lock, flags);
	if (chip->start_count++ > 0)
		goto __end;
	snd_ymfpci_writel(chip, YDSXGR_MODE,
			  snd_ymfpci_readl(chip, YDSXGR_MODE) | 3);
	chip->active_bank = snd_ymfpci_readl(chip, YDSXGR_CTRLSELECT) & 1;
      __end:
	spin_unlock_irqrestore(&chip->reg_lock, flags);
}

/* Reference-counted DSP stop: the last user clears the run bits, waits
 * (bounded busy loop) for the busy status bit to drop, and wakes any
 * irq_wait() sleepers. */
static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
{
	unsigned long flags;
	long timeout = 1000;

	spin_lock_irqsave(&chip->reg_lock, flags);
	if (--chip->start_count > 0)
		goto __end;
	snd_ymfpci_writel(chip, YDSXGR_MODE,
			  snd_ymfpci_readl(chip, YDSXGR_MODE) & ~3);
	while (timeout-- > 0) {
		if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
			break;
	}
	if (atomic_read(&chip->interrupt_sleep_count)) {
		atomic_set(&chip->interrupt_sleep_count, 0);
		wake_up(&chip->interrupt_sleep);
	}
      __end:
	spin_unlock_irqrestore(&chip->reg_lock, flags);
}

/*
 *  Playback voice management
 */

/*
 * Scan the playback voice table for a free voice (or an adjacent pair when
 * 'pair' is set), mark it used for the given type and start the DSP once
 * per claimed voice.  Stores the first voice in *rvoice and returns 0, or
 * -ENOMEM when no voice is available.  Caller holds chip->voice_lock.
 */
static int voice_alloc(struct snd_ymfpci *chip,
		       enum snd_ymfpci_voice_type type, int pair,
		       struct snd_ymfpci_voice **rvoice)
{
	struct snd_ymfpci_voice *voice, *voice2;
	int idx;

	*rvoice = NULL;
	for (idx = 0; idx < YDSXG_PLAYBACK_VOICES; idx += pair ? 2 : 1) {
		voice = &chip->voices[idx];
		voice2 = pair ? &chip->voices[idx+1] : NULL;
		if (voice->use || (voice2 && voice2->use))
			continue;
		voice->use = 1;
		if (voice2)
			voice2->use = 1;
		switch (type) {
		case YMFPCI_PCM:
			voice->pcm = 1;
			if (voice2)
				voice2->pcm = 1;
			break;
		case YMFPCI_SYNTH:
			voice->synth = 1;
			break;
		case YMFPCI_MIDI:
			voice->midi = 1;
			break;
		}
		snd_ymfpci_hw_start(chip);
		if (voice2)
			snd_ymfpci_hw_start(chip);
		*rvoice = voice;
		return 0;
	}
	return -ENOMEM;
}

/* Locked wrapper around voice_alloc(); pairs are only valid for PCM. */
static int snd_ymfpci_voice_alloc(struct snd_ymfpci *chip,
				  enum snd_ymfpci_voice_type type, int pair,
				  struct snd_ymfpci_voice **rvoice)
{
	unsigned long flags;
	int result;

	if (snd_BUG_ON(!rvoice))
		return -EINVAL;
	if (snd_BUG_ON(pair && type != YMFPCI_PCM))
		return -EINVAL;

	spin_lock_irqsave(&chip->voice_lock, flags);
	for (;;) {
		result = voice_alloc(chip, type, pair, rvoice);
		if (result == 0 || type != YMFPCI_PCM)
			break;
		/* TODO: synth/midi voice deallocation */
		break;
	}
	spin_unlock_irqrestore(&chip->voice_lock, flags);
	return result;
}

/* Release a voice: drop the DSP refcount, release the 44.1kHz SRC slot if
 * this voice owned it, and clear all usage flags. */
static int snd_ymfpci_voice_free(struct snd_ymfpci *chip, struct snd_ymfpci_voice *pvoice)
{
	unsigned long flags;

	if (snd_BUG_ON(!pvoice))
		return -EINVAL;
	snd_ymfpci_hw_stop(chip);
	spin_lock_irqsave(&chip->voice_lock, flags);
	if (pvoice->number == chip->src441_used) {
		chip->src441_used = -1;
		pvoice->ypcm->use_441_slot = 0;
	}
	pvoice->use = pvoice->pcm = pvoice->synth = pvoice->midi = 0;
	pvoice->ypcm = NULL;
	pvoice->interrupt = NULL;
	spin_unlock_irqrestore(&chip->voice_lock, flags);
	return 0;
}

/*
 *  PCM part
 */

/*
 * Per-voice playback interrupt: advance the software position from the
 * active hardware bank, signal period completion, and apply any pending
 * per-substream volume update to the inactive bank.
 */
static void snd_ymfpci_pcm_interrupt(struct snd_ymfpci *chip, struct snd_ymfpci_voice *voice)
{
	struct snd_ymfpci_pcm *ypcm;
	u32 pos, delta;

	if ((ypcm = voice->ypcm) == NULL)
		return;
	if (ypcm->substream == NULL)
		return;
	spin_lock(&chip->reg_lock);
	if (ypcm->running) {
		pos = le32_to_cpu(voice->bank[chip->active_bank].start);
		/* handle position wrap-around within the ring buffer */
		if (pos < ypcm->last_pos)
			delta = pos + (ypcm->buffer_size - ypcm->last_pos);
		else
			delta = pos - ypcm->last_pos;
		ypcm->period_pos += delta;
		ypcm->last_pos = pos;
		if (ypcm->period_pos >= ypcm->period_size)
		{
			/*
			printk(KERN_DEBUG "done - active_bank = 0x%x, start = 0x%x\n",
			       chip->active_bank,
			       voice->bank[chip->active_bank].start);
			*/
			ypcm->period_pos %= ypcm->period_size;
			/* drop the lock around the midlevel callback */
			spin_unlock(&chip->reg_lock);
			snd_pcm_period_elapsed(ypcm->substream);
			spin_lock(&chip->reg_lock);
		}

		if (unlikely(ypcm->update_pcm_vol)) {
			unsigned int subs = ypcm->substream->number;
			unsigned int next_bank = 1 - chip->active_bank;
			struct snd_ymfpci_playback_bank *bank;
			u32 volume;

			bank = &voice->bank[next_bank];
			volume = cpu_to_le32(chip->pcm_mixer[subs].left << 15);
			bank->left_gain_end = volume;
			if (ypcm->output_rear)
				bank->eff2_gain_end = volume;
			/* right channel lives on the second voice for stereo */
			if (ypcm->voices[1])
				bank = &ypcm->voices[1]->bank[next_bank];
			volume = cpu_to_le32(chip->pcm_mixer[subs].right << 15);
			bank->right_gain_end = volume;
			if (ypcm->output_rear)
				bank->eff3_gain_end = volume;
			ypcm->update_pcm_vol--;
		}
	}
	spin_unlock(&chip->reg_lock);
}

/*
 * Capture interrupt: same position/period bookkeeping as playback, but
 * the hardware position lives in the capture bank and is converted to
 * frames via the per-stream shift.
 */
static void snd_ymfpci_pcm_capture_interrupt(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_ymfpci_pcm *ypcm = runtime->private_data;
	struct snd_ymfpci *chip = ypcm->chip;
	u32 pos, delta;

	spin_lock(&chip->reg_lock);
	if (ypcm->running) {
		pos = le32_to_cpu(chip->bank_capture[ypcm->capture_bank_number][chip->active_bank]->start) >> ypcm->shift;
		if (pos < ypcm->last_pos)
			delta = pos + (ypcm->buffer_size - ypcm->last_pos);
		else
			delta = pos - ypcm->last_pos;
		ypcm->period_pos += delta;
		ypcm->last_pos = pos;
		if (ypcm->period_pos >= ypcm->period_size) {
			ypcm->period_pos %= ypcm->period_size;
			/*
			printk(KERN_DEBUG "done - active_bank = 0x%x, start = 0x%x\n",
			       chip->active_bank,
			       voice->bank[chip->active_bank].start);
			*/
			spin_unlock(&chip->reg_lock);
			snd_pcm_period_elapsed(substream);
			spin_lock(&chip->reg_lock);
		}
	}
	spin_unlock(&chip->reg_lock);
}

/*
 * Playback trigger: (de)activate the voice's control-playback slot(s).
 * On STOP the per-substream volume control is marked inactive; the
 * notification is sent only after the register lock is dropped.
 */
static int snd_ymfpci_playback_trigger(struct snd_pcm_substream *substream,
				       int cmd)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_ymfpci_pcm *ypcm = substream->runtime->private_data;
	struct snd_kcontrol *kctl = NULL;
	int result = 0;

	spin_lock(&chip->reg_lock);
	if (ypcm->voices[0] == NULL) {
		result = -EINVAL;
		goto __unlock;
	}
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
	case SNDRV_PCM_TRIGGER_RESUME:
		chip->ctrl_playback[ypcm->voices[0]->number + 1] = cpu_to_le32(ypcm->voices[0]->bank_addr);
		if (ypcm->voices[1] != NULL && !ypcm->use_441_slot)
			chip->ctrl_playback[ypcm->voices[1]->number + 1] = cpu_to_le32(ypcm->voices[1]->bank_addr);
		ypcm->running = 1;
		break;
	case SNDRV_PCM_TRIGGER_STOP:
		if (substream->pcm == chip->pcm && !ypcm->use_441_slot) {
			kctl = chip->pcm_mixer[substream->number].ctl;
			kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
		}
		/* fall through */
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		chip->ctrl_playback[ypcm->voices[0]->number + 1] = 0;
		if (ypcm->voices[1] != NULL && !ypcm->use_441_slot)
			chip->ctrl_playback[ypcm->voices[1]->number + 1] = 0;
		ypcm->running = 0;
		break;
	default:
		result = -EINVAL;
		break;
	}
      __unlock:
	spin_unlock(&chip->reg_lock);
	if (kctl)
		snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_INFO, &kctl->id);
	return result;
}

/* Capture trigger: set/clear the capture bank's bit in MAPOFREC. */
static int snd_ymfpci_capture_trigger(struct snd_pcm_substream *substream,
				      int cmd)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_ymfpci_pcm *ypcm = substream->runtime->private_data;
	int result = 0;
	u32 tmp;

	spin_lock(&chip->reg_lock);
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
	case SNDRV_PCM_TRIGGER_RESUME:
		tmp = snd_ymfpci_readl(chip, YDSXGR_MAPOFREC) | (1 << ypcm->capture_bank_number);
		snd_ymfpci_writel(chip, YDSXGR_MAPOFREC, tmp);
		ypcm->running = 1;
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		tmp = snd_ymfpci_readl(chip, YDSXGR_MAPOFREC) & ~(1 << ypcm->capture_bank_number);
		snd_ymfpci_writel(chip, YDSXGR_MAPOFREC, tmp);
		ypcm->running = 0;
		break;
	default:
		result = -EINVAL;
		break;
	}
	spin_unlock(&chip->reg_lock);
	return result;
}

/*
 * (Re)allocate hardware voices for a PCM substream: one voice for mono,
 * an adjacent pair for stereo.  Mismatched previous allocations are freed
 * first so the allocation pattern always matches the channel count.
 */
static int snd_ymfpci_pcm_voice_alloc(struct snd_ymfpci_pcm *ypcm, int voices)
{
	int err;

	if (ypcm->voices[1] != NULL && voices < 2) {
		snd_ymfpci_voice_free(ypcm->chip, ypcm->voices[1]);
		ypcm->voices[1] = NULL;
	}
	if (voices == 1 && ypcm->voices[0] != NULL)
		return 0;		/* already allocated */
	if (voices == 2 && ypcm->voices[0] != NULL && ypcm->voices[1] != NULL)
		return 0;		/* already allocated */
	if (voices > 1) {
		if (ypcm->voices[0] != NULL && ypcm->voices[1] == NULL) {
			snd_ymfpci_voice_free(ypcm->chip, ypcm->voices[0]);
			ypcm->voices[0] = NULL;
		}
	}
	err = snd_ymfpci_voice_alloc(ypcm->chip, YMFPCI_PCM, voices > 1, &ypcm->voices[0]);
	if (err < 0)
		return err;
	ypcm->voices[0]->ypcm = ypcm;
	ypcm->voices[0]->interrupt = snd_ymfpci_pcm_interrupt;
	if (voices > 1) {
		/* pair allocation guarantees the adjacent voice is ours */
		ypcm->voices[1] = &ypcm->chip->voices[ypcm->voices[0]->number + 1];
		ypcm->voices[1]->ypcm = ypcm;
	}
	return 0;
}

/*
 * Program both hardware banks of one voice for the substream's rate,
 * format and channel routing (front/rear, optional rear-channel swap for
 * S/PDIF AC3 passthrough).  Volumes come from the per-substream mixer
 * when has_pcm_volume is set, otherwise unity gain.
 */
static void snd_ymfpci_pcm_init_voice(struct snd_ymfpci_pcm *ypcm, unsigned int voiceidx,
				      struct snd_pcm_runtime *runtime,
				      int has_pcm_volume)
{
	struct snd_ymfpci_voice *voice = ypcm->voices[voiceidx];
	u32 format;
	u32 delta = snd_ymfpci_calc_delta(runtime->rate);
	u32 lpfQ = snd_ymfpci_calc_lpfQ(runtime->rate);
	u32 lpfK = snd_ymfpci_calc_lpfK(runtime->rate);
	struct snd_ymfpci_playback_bank *bank;
	unsigned int nbank;
	u32 vol_left, vol_right;
	u8 use_left, use_right;
	unsigned long flags;

	if (snd_BUG_ON(!voice))
		return;
	if (runtime->channels == 1) {
		use_left = 1;
		use_right = 1;
	} else {
		/* stereo: even voice carries left, odd voice right */
		use_left = (voiceidx & 1) == 0;
		use_right = !use_left;
	}
	if (has_pcm_volume) {
		vol_left = cpu_to_le32(ypcm->chip->pcm_mixer
				       [ypcm->substream->number].left << 15);
		vol_right = cpu_to_le32(ypcm->chip->pcm_mixer
					[ypcm->substream->number].right << 15);
	} else {
		vol_left = cpu_to_le32(0x40000000);
		vol_right = cpu_to_le32(0x40000000);
	}
	spin_lock_irqsave(&ypcm->chip->voice_lock, flags);
	format = runtime->channels == 2 ?
		 0x00010000 : 0;
	if (snd_pcm_format_width(runtime->format) == 8)
		format |= 0x80000000;
	else if (ypcm->chip->device_id == PCI_DEVICE_ID_YAMAHA_754 &&
		 runtime->rate == 44100 && runtime->channels == 2 &&
		 voiceidx == 0 && (ypcm->chip->src441_used == -1 ||
				   ypcm->chip->src441_used == voice->number)) {
		/* claim the YMF754's single native-44.1kHz SRC slot */
		ypcm->chip->src441_used = voice->number;
		ypcm->use_441_slot = 1;
		format |= 0x10000000;
	}
	if (ypcm->chip->src441_used == voice->number &&
	    (format & 0x10000000) == 0) {
		/* no longer eligible - release the SRC slot */
		ypcm->chip->src441_used = -1;
		ypcm->use_441_slot = 0;
	}
	if (runtime->channels == 2 && (voiceidx & 1) != 0)
		format |= 1;
	spin_unlock_irqrestore(&ypcm->chip->voice_lock, flags);
	for (nbank = 0; nbank < 2; nbank++) {
		bank = &voice->bank[nbank];
		memset(bank, 0, sizeof(*bank));
		bank->format = cpu_to_le32(format);
		bank->base = cpu_to_le32(runtime->dma_addr);
		bank->loop_end = cpu_to_le32(ypcm->buffer_size);
		bank->lpfQ = cpu_to_le32(lpfQ);
		bank->delta =
		bank->delta_end = cpu_to_le32(delta);
		bank->lpfK =
		bank->lpfK_end = cpu_to_le32(lpfK);
		bank->eg_gain =
		bank->eg_gain_end = cpu_to_le32(0x40000000);

		if (ypcm->output_front) {
			if (use_left) {
				bank->left_gain =
				bank->left_gain_end = vol_left;
			}
			if (use_right) {
				bank->right_gain =
				bank->right_gain_end = vol_right;
			}
		}
		if (ypcm->output_rear) {
			if (!ypcm->swap_rear) {
				if (use_left) {
					bank->eff2_gain =
					bank->eff2_gain_end = vol_left;
				}
				if (use_right) {
					bank->eff3_gain =
					bank->eff3_gain_end = vol_right;
				}
			} else {
				/* The SPDIF out channels seem to be swapped, so we have
				 * to swap them here, too.  The rear analog out channels
				 * will be wrong, but otherwise AC3 would not work.
				 */
				if (use_left) {
					bank->eff3_gain =
					bank->eff3_gain_end = vol_left;
				}
				if (use_right) {
					bank->eff2_gain =
					bank->eff2_gain_end = vol_right;
				}
			}
		}
	}
}

/*
 * Allocate the scratch DMA page used for AC3 passthrough, point effect
 * banks 3 and 4 at it, and enable both slots in MAPOFEFFECT.
 */
static int __devinit snd_ymfpci_ac3_init(struct snd_ymfpci *chip)
{
	if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
				4096, &chip->ac3_tmp_base) < 0)
		return -ENOMEM;

	chip->bank_effect[3][0]->base =
	chip->bank_effect[3][1]->base = cpu_to_le32(chip->ac3_tmp_base.addr);
	chip->bank_effect[3][0]->loop_end =
	chip->bank_effect[3][1]->loop_end = cpu_to_le32(1024);
	chip->bank_effect[4][0]->base =
	chip->bank_effect[4][1]->base = cpu_to_le32(chip->ac3_tmp_base.addr + 2048);
	chip->bank_effect[4][0]->loop_end =
	chip->bank_effect[4][1]->loop_end = cpu_to_le32(1024);

	spin_lock_irq(&chip->reg_lock);
	snd_ymfpci_writel(chip, YDSXGR_MAPOFEFFECT,
			  snd_ymfpci_readl(chip, YDSXGR_MAPOFEFFECT) | 3 << 3);
	spin_unlock_irq(&chip->reg_lock);
	return 0;
}

/* Disable the AC3 effect slots and free the scratch buffer. */
static int snd_ymfpci_ac3_done(struct snd_ymfpci *chip)
{
	spin_lock_irq(&chip->reg_lock);
	snd_ymfpci_writel(chip, YDSXGR_MAPOFEFFECT,
			  snd_ymfpci_readl(chip, YDSXGR_MAPOFEFFECT) & ~(3 << 3));
	spin_unlock_irq(&chip->reg_lock);
	// snd_ymfpci_irq_wait(chip);
	if (chip->ac3_tmp_base.area) {
		snd_dma_free_pages(&chip->ac3_tmp_base);
		chip->ac3_tmp_base.area = NULL;
	}
	return 0;
}

/* Playback hw_params: size the DMA buffer and grab the needed voice(s). */
static int snd_ymfpci_playback_hw_params(struct snd_pcm_substream *substream,
					 struct snd_pcm_hw_params *hw_params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_ymfpci_pcm *ypcm = runtime->private_data;
	int err;

	if ((err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params))) < 0)
		return err;
	if ((err = snd_ymfpci_pcm_voice_alloc(ypcm, params_channels(hw_params))) < 0)
		return err;
	return 0;
}

/* Playback hw_free: drain DSP activity, then release buffer and voices. */
static int snd_ymfpci_playback_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_ymfpci_pcm *ypcm;

	if (runtime->private_data == NULL)
		return 0;
	ypcm = runtime->private_data;

	/* wait, until the PCI operations are not finished */
	snd_ymfpci_irq_wait(chip);
	snd_pcm_lib_free_pages(substream);
	if (ypcm->voices[1]) {
		snd_ymfpci_voice_free(chip, ypcm->voices[1]);
		ypcm->voices[1] = NULL;
	}
	if (ypcm->voices[0]) {
		snd_ymfpci_voice_free(chip, ypcm->voices[0]);
		ypcm->voices[0] = NULL;
	}
	return 0;
}

/*
 * Playback prepare: latch period/buffer geometry, program every channel's
 * voice, and (for the main PCM device outside the 44.1 SRC slot)
 * reactivate the per-substream volume control.
 */
static int snd_ymfpci_playback_prepare(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_ymfpci_pcm *ypcm = runtime->private_data;
	struct snd_kcontrol *kctl;
	unsigned int nvoice;

	ypcm->period_size = runtime->period_size;
	ypcm->buffer_size = runtime->buffer_size;
	ypcm->period_pos = 0;
	ypcm->last_pos = 0;
	for (nvoice = 0; nvoice < runtime->channels; nvoice++)
		snd_ymfpci_pcm_init_voice(ypcm, nvoice, runtime,
					  substream->pcm == chip->pcm);

	if (substream->pcm == chip->pcm && !ypcm->use_441_slot) {
		kctl = chip->pcm_mixer[substream->number].ctl;
		kctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
		snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_INFO, &kctl->id);
	}
	return 0;
}

static int snd_ymfpci_capture_hw_params(struct snd_pcm_substream *substream,
					struct snd_pcm_hw_params *hw_params)
{
	return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
}

static int snd_ymfpci_capture_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);

	/* wait, until the PCI operations are not finished */
	snd_ymfpci_irq_wait(chip);
	return snd_pcm_lib_free_pages(substream);
}

/*
 * Capture prepare: program rate/format registers for the selected capture
 * bank (0 = rec, 1 = ADC) and initialize both hardware bank descriptors.
 */
static int snd_ymfpci_capture_prepare(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_ymfpci_pcm *ypcm = runtime->private_data;
	struct snd_ymfpci_capture_bank * bank;
	int nbank;
	u32 rate, format;

	ypcm->period_size = runtime->period_size;
	ypcm->buffer_size = runtime->buffer_size;
	ypcm->period_pos = 0;
	ypcm->last_pos = 0;
	/* shift converts hardware byte positions to frames */
	ypcm->shift = 0;
	rate = ((48000 * 4096) / runtime->rate) - 1;
	format = 0;
	if (runtime->channels == 2) {
		format |= 2;
		ypcm->shift++;
	}
	if (snd_pcm_format_width(runtime->format) == 8)
		format |= 1;
	else
		ypcm->shift++;
	switch (ypcm->capture_bank_number) {
	case 0:
		snd_ymfpci_writel(chip, YDSXGR_RECFORMAT, format);
		snd_ymfpci_writel(chip, YDSXGR_RECSLOTSR, rate);
		break;
	case 1:
		snd_ymfpci_writel(chip, YDSXGR_ADCFORMAT, format);
		snd_ymfpci_writel(chip, YDSXGR_ADCSLOTSR, rate);
		break;
	}
	for (nbank = 0; nbank < 2; nbank++) {
		bank = chip->bank_capture[ypcm->capture_bank_number][nbank];
		bank->base = cpu_to_le32(runtime->dma_addr);
		bank->loop_end = cpu_to_le32(ypcm->buffer_size << ypcm->shift);
		bank->start = 0;
		bank->num_of_loops = 0;
	}
	return 0;
}

/* Current playback position in frames, read from the active bank. */
static snd_pcm_uframes_t snd_ymfpci_playback_pointer(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_ymfpci_pcm *ypcm = runtime->private_data;
	struct snd_ymfpci_voice *voice = ypcm->voices[0];

	if (!(ypcm->running && voice))
		return 0;
	return le32_to_cpu(voice->bank[chip->active_bank].start);
}

/* Current capture position in frames (hardware reports bytes). */
static snd_pcm_uframes_t snd_ymfpci_capture_pointer(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_ymfpci_pcm *ypcm = runtime->private_data;

	if (!ypcm->running)
		return 0;
	return le32_to_cpu(chip->bank_capture[ypcm->capture_bank_number][chip->active_bank]->start) >> ypcm->shift;
}

/*
 * Sleep (up to 4 x 50 ms) on the interrupt wait queue while the DSP run
 * bits are set; used to drain in-flight DSP/PCI work before buffers are
 * freed.  The interrupt handler wakes us early via interrupt_sleep.
 */
static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
{
	wait_queue_t wait;
	int loops = 4;

	while (loops-- > 0) {
		if ((snd_ymfpci_readl(chip, YDSXGR_MODE) & 3) == 0)
			continue;
		init_waitqueue_entry(&wait, current);
		add_wait_queue(&chip->interrupt_sleep, &wait);
		atomic_inc(&chip->interrupt_sleep_count);
		schedule_timeout_uninterruptible(msecs_to_jiffies(50));
		remove_wait_queue(&chip->interrupt_sleep, &wait);
	}
}

/*
 * Main interrupt handler: on a bank switch (STATUS bit 31) dispatch
 * per-voice playback interrupts and capture interrupts, acknowledge the
 * status bit, and wake irq_wait() sleepers; then handle the timer flag
 * and chain to the MPU401 UART handler.
 */
static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
{
	struct snd_ymfpci *chip = dev_id;
	u32 status, nvoice, mode;
	struct snd_ymfpci_voice *voice;

	status = snd_ymfpci_readl(chip, YDSXGR_STATUS);
	if (status & 0x80000000) {
		chip->active_bank = snd_ymfpci_readl(chip, YDSXGR_CTRLSELECT) & 1;
		spin_lock(&chip->voice_lock);
		for (nvoice = 0; nvoice < YDSXG_PLAYBACK_VOICES; nvoice++) {
			voice = &chip->voices[nvoice];
			if (voice->interrupt)
				voice->interrupt(chip, voice);
		}
		for (nvoice = 0; nvoice < YDSXG_CAPTURE_VOICES; nvoice++) {
			if (chip->capture_substream[nvoice])
				snd_ymfpci_pcm_capture_interrupt(chip->capture_substream[nvoice]);
		}
#if 0
		for (nvoice = 0; nvoice < YDSXG_EFFECT_VOICES; nvoice++) {
			if (chip->effect_substream[nvoice])
				snd_ymfpci_pcm_effect_interrupt(chip->effect_substream[nvoice]);
		}
#endif
		spin_unlock(&chip->voice_lock);
		spin_lock(&chip->reg_lock);
		snd_ymfpci_writel(chip, YDSXGR_STATUS, 0x80000000);
		mode = snd_ymfpci_readl(chip, YDSXGR_MODE) | 2;
		snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
		spin_unlock(&chip->reg_lock);

		if (atomic_read(&chip->interrupt_sleep_count)) {
			atomic_set(&chip->interrupt_sleep_count, 0);
			wake_up(&chip->interrupt_sleep);
		}
	}

	status = snd_ymfpci_readw(chip, YDSXGR_INTFLAG);
	if (status & 1) {
		if (chip->timer)
			snd_timer_interrupt(chip->timer, chip->timer_ticks);
	}
	snd_ymfpci_writew(chip, YDSXGR_INTFLAG, status);

	if (chip->rawmidi)
		snd_mpu401_uart_interrupt(irq, chip->rawmidi->private_data);
	return IRQ_HANDLED;
}

/* Capabilities advertised to the ALSA PCM midlevel (playback). */
static struct snd_pcm_hardware snd_ymfpci_playback =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
				 SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_RESUME),
	.formats =		SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
	.rate_min =		8000,
	.rate_max =		48000,
	.channels_min =		1,
	.channels_max =		2,
	.buffer_bytes_max =	256 * 1024, /* FIXME: enough? */
	.period_bytes_min =	64,
	.period_bytes_max =	256 * 1024, /* FIXME: enough? */
	.periods_min =		3,
	.periods_max =		1024,
	.fifo_size =		0,
};

/* Capabilities advertised to the ALSA PCM midlevel (capture). */
static struct snd_pcm_hardware snd_ymfpci_capture =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
				 SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_RESUME),
	.formats =		SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
	.rate_min =		8000,
	.rate_max =		48000,
	.channels_min =		1,
	.channels_max =		2,
	.buffer_bytes_max =	256 * 1024, /* FIXME: enough? */
	.period_bytes_min =	64,
	.period_bytes_max =	256 * 1024, /* FIXME: enough? */
	.periods_min =		3,
	.periods_max =		1024,
	.fifo_size =		0,
};

/* runtime->private_free callback: release the per-substream state. */
static void snd_ymfpci_pcm_free_substream(struct snd_pcm_runtime *runtime)
{
	kfree(runtime->private_data);
}

/*
 * Common playback open: install hw constraints and allocate the
 * per-substream snd_ymfpci_pcm state.
 */
static int snd_ymfpci_playback_open_1(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_ymfpci_pcm *ypcm;
	int err;

	runtime->hw = snd_ymfpci_playback;
	/* FIXME? True value is 256/48 = 5.33333 ms */
	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_TIME,
					   5334, UINT_MAX);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_noresample(runtime, 48000);
	if (err < 0)
		return err;

	ypcm = kzalloc(sizeof(*ypcm), GFP_KERNEL);
	if (ypcm == NULL)
		return -ENOMEM;
	ypcm->chip = chip;
	ypcm->type = PLAYBACK_VOICE;
	ypcm->substream = substream;
	runtime->private_data = ypcm;
	runtime->private_free = snd_ymfpci_pcm_free_substream;
	return 0;
}

/* call with spinlock held */
static void ymfpci_open_extension(struct snd_ymfpci *chip)
{
	if (! chip->rear_opened) {
		if (!
		    chip->spdif_opened)
			/* set AC3 */
			snd_ymfpci_writel(chip, YDSXGR_MODE,
					  snd_ymfpci_readl(chip, YDSXGR_MODE) | (1 << 30));
		/* enable second codec (4CHEN) */
		snd_ymfpci_writew(chip, YDSXGR_SECCONFIG,
				  (snd_ymfpci_readw(chip, YDSXGR_SECCONFIG) & ~0x0330) | 0x0010);
	}
}

/* call with spinlock held */
static void ymfpci_close_extension(struct snd_ymfpci *chip)
{
	if (! chip->rear_opened) {
		if (! chip->spdif_opened)
			snd_ymfpci_writel(chip, YDSXGR_MODE,
					  snd_ymfpci_readl(chip, YDSXGR_MODE) & ~(1 << 30));
		snd_ymfpci_writew(chip, YDSXGR_SECCONFIG,
				  (snd_ymfpci_readw(chip, YDSXGR_SECCONFIG) & ~0x0330) & ~0x0010);
	}
}

/* Front playback open; optionally duplicates to the rear channels. */
static int snd_ymfpci_playback_open(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_ymfpci_pcm *ypcm;
	int err;

	if ((err = snd_ymfpci_playback_open_1(substream)) < 0)
		return err;
	ypcm = runtime->private_data;
	ypcm->output_front = 1;
	ypcm->output_rear = chip->mode_dup4ch ? 1 : 0;
	ypcm->swap_rear = 0;
	spin_lock_irq(&chip->reg_lock);
	if (ypcm->output_rear) {
		ymfpci_open_extension(chip);
		chip->rear_opened++;
	}
	spin_unlock_irq(&chip->reg_lock);
	return 0;
}

/*
 * S/PDIF playback open: route to the (swapped) rear/effect channels,
 * enable the S/PDIF output and activate the per-stream status control.
 */
static int snd_ymfpci_playback_spdif_open(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_ymfpci_pcm *ypcm;
	int err;

	if ((err = snd_ymfpci_playback_open_1(substream)) < 0)
		return err;
	ypcm = runtime->private_data;
	ypcm->output_front = 0;
	ypcm->output_rear = 1;
	ypcm->swap_rear = 1;
	spin_lock_irq(&chip->reg_lock);
	snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTCTRL,
			  snd_ymfpci_readw(chip, YDSXGR_SPDIFOUTCTRL) | 2);
	ymfpci_open_extension(chip);
	chip->spdif_pcm_bits = chip->spdif_bits;
	snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTSTATUS, chip->spdif_pcm_bits);
	chip->spdif_opened++;
	spin_unlock_irq(&chip->reg_lock);

	chip->spdif_pcm_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
	snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE |
		       SNDRV_CTL_EVENT_MASK_INFO, &chip->spdif_pcm_ctl->id);
	return 0;
}

/* 4-channel (rear-only) playback open. */
static int snd_ymfpci_playback_4ch_open(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_ymfpci_pcm *ypcm;
	int err;

	if ((err = snd_ymfpci_playback_open_1(substream)) < 0)
		return err;
	ypcm = runtime->private_data;
	ypcm->output_front = 0;
	ypcm->output_rear = 1;
	ypcm->swap_rear = 0;
	spin_lock_irq(&chip->reg_lock);
	ymfpci_open_extension(chip);
	chip->rear_opened++;
	spin_unlock_irq(&chip->reg_lock);
	return 0;
}

/*
 * Common capture open for a given capture bank (0 = rec, 1 = AC'97 ADC):
 * install hw constraints, allocate the substream state and start the DSP.
 */
static int snd_ymfpci_capture_open(struct snd_pcm_substream *substream,
				   u32 capture_bank_number)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_ymfpci_pcm *ypcm;
	int err;

	runtime->hw = snd_ymfpci_capture;
	/* FIXME? True value is 256/48 = 5.33333 ms */
	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_TIME,
					   5334, UINT_MAX);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_noresample(runtime, 48000);
	if (err < 0)
		return err;

	ypcm = kzalloc(sizeof(*ypcm), GFP_KERNEL);
	if (ypcm == NULL)
		return -ENOMEM;
	ypcm->chip = chip;
	ypcm->type = capture_bank_number + CAPTURE_REC;
	ypcm->substream = substream;
	ypcm->capture_bank_number = capture_bank_number;
	chip->capture_substream[capture_bank_number] = substream;
	runtime->private_data = ypcm;
	runtime->private_free = snd_ymfpci_pcm_free_substream;
	snd_ymfpci_hw_start(chip);
	return 0;
}

static int snd_ymfpci_capture_rec_open(struct snd_pcm_substream *substream)
{
	return snd_ymfpci_capture_open(substream, 0);
}

static int snd_ymfpci_capture_ac97_open(struct snd_pcm_substream *substream)
{
	return snd_ymfpci_capture_open(substream, 1);
}

/* Shared tail of all playback close paths (currently a no-op). */
static int snd_ymfpci_playback_close_1(struct snd_pcm_substream *substream)
{
	return 0;
}

static int snd_ymfpci_playback_close(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_ymfpci_pcm *ypcm = substream->runtime->private_data;

	spin_lock_irq(&chip->reg_lock);
	if (ypcm->output_rear && chip->rear_opened > 0) {
		chip->rear_opened--;
		ymfpci_close_extension(chip);
	}
	spin_unlock_irq(&chip->reg_lock);
	return snd_ymfpci_playback_close_1(substream);
}

/* S/PDIF close: restore output status bits and deactivate the stream
 * status control. */
static int snd_ymfpci_playback_spdif_close(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);

	spin_lock_irq(&chip->reg_lock);
	chip->spdif_opened = 0;
	ymfpci_close_extension(chip);
	snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTCTRL,
			  snd_ymfpci_readw(chip, YDSXGR_SPDIFOUTCTRL) & ~2);
	snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTSTATUS, chip->spdif_bits);
	spin_unlock_irq(&chip->reg_lock);
	chip->spdif_pcm_ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
	snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE |
		       SNDRV_CTL_EVENT_MASK_INFO, &chip->spdif_pcm_ctl->id);
	return snd_ymfpci_playback_close_1(substream);
}

static int snd_ymfpci_playback_4ch_close(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);

	spin_lock_irq(&chip->reg_lock);
	if (chip->rear_opened > 0) {
		chip->rear_opened--;
		ymfpci_close_extension(chip);
	}
	spin_unlock_irq(&chip->reg_lock);
	return snd_ymfpci_playback_close_1(substream);
}

/* Capture close: detach the substream from its bank and stop the DSP. */
static int snd_ymfpci_capture_close(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_ymfpci_pcm *ypcm = runtime->private_data;

	if (ypcm != NULL) {
		chip->capture_substream[ypcm->capture_bank_number] = NULL;
		snd_ymfpci_hw_stop(chip);
	}
	return 0;
}

static struct snd_pcm_ops snd_ymfpci_playback_ops = {
	.open =			snd_ymfpci_playback_open,
	.close =		snd_ymfpci_playback_close,
	.ioctl =		snd_pcm_lib_ioctl,
	.hw_params =		snd_ymfpci_playback_hw_params,
	.hw_free =		snd_ymfpci_playback_hw_free,
	.prepare =		snd_ymfpci_playback_prepare,
	.trigger =		snd_ymfpci_playback_trigger,
	.pointer =
				snd_ymfpci_playback_pointer,
};

static struct snd_pcm_ops snd_ymfpci_capture_rec_ops = {
	.open =			snd_ymfpci_capture_rec_open,
	.close =		snd_ymfpci_capture_close,
	.ioctl =		snd_pcm_lib_ioctl,
	.hw_params =		snd_ymfpci_capture_hw_params,
	.hw_free =		snd_ymfpci_capture_hw_free,
	.prepare =		snd_ymfpci_capture_prepare,
	.trigger =		snd_ymfpci_capture_trigger,
	.pointer =		snd_ymfpci_capture_pointer,
};

/* Create the main playback + record PCM device (32 playback substreams). */
int __devinit snd_ymfpci_pcm(struct snd_ymfpci *chip, int device, struct snd_pcm ** rpcm)
{
	struct snd_pcm *pcm;
	int err;

	if (rpcm)
		*rpcm = NULL;
	if ((err = snd_pcm_new(chip->card, "YMFPCI", device, 32, 1, &pcm)) < 0)
		return err;
	pcm->private_data = chip;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_ymfpci_playback_ops);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_ymfpci_capture_rec_ops);

	/* global setup */
	pcm->info_flags = 0;
	strcpy(pcm->name, "YMFPCI");
	chip->pcm = pcm;

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(chip->pci), 64*1024, 256*1024);

	if (rpcm)
		*rpcm = pcm;
	return 0;
}

static struct snd_pcm_ops snd_ymfpci_capture_ac97_ops = {
	.open =			snd_ymfpci_capture_ac97_open,
	.close =		snd_ymfpci_capture_close,
	.ioctl =		snd_pcm_lib_ioctl,
	.hw_params =		snd_ymfpci_capture_hw_params,
	.hw_free =		snd_ymfpci_capture_hw_free,
	.prepare =		snd_ymfpci_capture_prepare,
	.trigger =		snd_ymfpci_capture_trigger,
	.pointer =		snd_ymfpci_capture_pointer,
};

/* Create the second, capture-only PCM device (AC'97 / direct recording). */
int __devinit snd_ymfpci_pcm2(struct snd_ymfpci *chip, int device, struct snd_pcm ** rpcm)
{
	struct snd_pcm *pcm;
	int err;

	if (rpcm)
		*rpcm = NULL;
	if ((err = snd_pcm_new(chip->card, "YMFPCI - PCM2", device, 0, 1, &pcm)) < 0)
		return err;
	pcm->private_data = chip;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_ymfpci_capture_ac97_ops);

	/* global setup */
	pcm->info_flags = 0;
	sprintf(pcm->name, "YMFPCI - %s",
		chip->device_id == PCI_DEVICE_ID_YAMAHA_754 ? "Direct Recording" : "AC'97");
	chip->pcm2 = pcm;

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(chip->pci), 64*1024, 256*1024);

	if (rpcm)
		*rpcm = pcm;
	return 0;
}

static struct snd_pcm_ops snd_ymfpci_playback_spdif_ops = {
	.open =			snd_ymfpci_playback_spdif_open,
	.close =		snd_ymfpci_playback_spdif_close,
	.ioctl =		snd_pcm_lib_ioctl,
	.hw_params =		snd_ymfpci_playback_hw_params,
	.hw_free =		snd_ymfpci_playback_hw_free,
	.prepare =		snd_ymfpci_playback_prepare,
	.trigger =		snd_ymfpci_playback_trigger,
	.pointer =		snd_ymfpci_playback_pointer,
};

/* Create the S/PDIF (IEC958) playback PCM device. */
int __devinit snd_ymfpci_pcm_spdif(struct snd_ymfpci *chip, int device, struct snd_pcm ** rpcm)
{
	struct snd_pcm *pcm;
	int err;

	if (rpcm)
		*rpcm = NULL;
	if ((err = snd_pcm_new(chip->card, "YMFPCI - IEC958", device, 1, 0, &pcm)) < 0)
		return err;
	pcm->private_data = chip;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_ymfpci_playback_spdif_ops);

	/* global setup */
	pcm->info_flags = 0;
	strcpy(pcm->name, "YMFPCI - IEC958");
	chip->pcm_spdif = pcm;

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(chip->pci), 64*1024, 256*1024);

	if (rpcm)
		*rpcm = pcm;
	return 0;
}

static struct snd_pcm_ops snd_ymfpci_playback_4ch_ops = {
	.open =			snd_ymfpci_playback_4ch_open,
	.close =		snd_ymfpci_playback_4ch_close,
	.ioctl =		snd_pcm_lib_ioctl,
	.hw_params =		snd_ymfpci_playback_hw_params,
	.hw_free =		snd_ymfpci_playback_hw_free,
	.prepare =		snd_ymfpci_playback_prepare,
	.trigger =		snd_ymfpci_playback_trigger,
	.pointer =		snd_ymfpci_playback_pointer,
};

/* Create the rear-channel (4ch) playback PCM device. */
int __devinit snd_ymfpci_pcm_4ch(struct snd_ymfpci *chip, int device, struct snd_pcm ** rpcm)
{
	struct snd_pcm *pcm;
	int err;

	if (rpcm)
		*rpcm = NULL;
	if ((err = snd_pcm_new(chip->card, "YMFPCI - Rear", device, 1, 0, &pcm)) < 0)
		return err;
	pcm->private_data = chip;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_ymfpci_playback_4ch_ops);

	/* global setup */
	pcm->info_flags = 0;
	strcpy(pcm->name, "YMFPCI - Rear PCM");
	chip->pcm_4ch = pcm;

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(chip->pci), 64*1024, 256*1024);

	if (rpcm)
		*rpcm = pcm;
	return 0;
}

static int snd_ymfpci_spdif_default_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}

/* Read the default IEC958 channel-status bits. */
static int snd_ymfpci_spdif_default_get(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);

	spin_lock_irq(&chip->reg_lock);
	ucontrol->value.iec958.status[0] = (chip->spdif_bits >> 0) & 0xff;
	ucontrol->value.iec958.status[1] = (chip->spdif_bits >> 8) & 0xff;
	ucontrol->value.iec958.status[3] = IEC958_AES3_CON_FS_48000;
	spin_unlock_irq(&chip->reg_lock);
	return 0;
}

/* Update the default status bits; written through to the hardware only
 * when the output is on and no S/PDIF PCM device exists. */
static int snd_ymfpci_spdif_default_put(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	unsigned int val;
	int change;

	val = ((ucontrol->value.iec958.status[0] & 0x3e) << 0) |
	      (ucontrol->value.iec958.status[1] << 8);
	spin_lock_irq(&chip->reg_lock);
	change = chip->spdif_bits != val;
	chip->spdif_bits = val;
	if ((snd_ymfpci_readw(chip, YDSXGR_SPDIFOUTCTRL) & 1) && chip->pcm_spdif == NULL)
		snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTSTATUS, chip->spdif_bits);
	spin_unlock_irq(&chip->reg_lock);
	return change;
}

static struct snd_kcontrol_new snd_ymfpci_spdif_default __devinitdata =
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =         SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
	.info =		snd_ymfpci_spdif_default_info,
	.get =		snd_ymfpci_spdif_default_get,
	.put =		snd_ymfpci_spdif_default_put
};

static int snd_ymfpci_spdif_mask_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}

/* Report which channel-status bits are writable. */
static int snd_ymfpci_spdif_mask_get(struct snd_kcontrol *kcontrol,
				     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);

	spin_lock_irq(&chip->reg_lock);
	ucontrol->value.iec958.status[0] = 0x3e;
	ucontrol->value.iec958.status[1] = 0xff;
	spin_unlock_irq(&chip->reg_lock);
	return 0;
}

static struct snd_kcontrol_new snd_ymfpci_spdif_mask __devinitdata =
{
	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =         SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK),
	.info =		snd_ymfpci_spdif_mask_info,
	.get =		snd_ymfpci_spdif_mask_get,
};

static int snd_ymfpci_spdif_stream_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}

/* Read the per-stream IEC958 status bits. */
static int snd_ymfpci_spdif_stream_get(struct snd_kcontrol *kcontrol,
				       struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);

	spin_lock_irq(&chip->reg_lock);
	ucontrol->value.iec958.status[0] = (chip->spdif_pcm_bits >> 0) & 0xff;
	ucontrol->value.iec958.status[1] = (chip->spdif_pcm_bits >> 8) & 0xff;
	ucontrol->value.iec958.status[3] = IEC958_AES3_CON_FS_48000;
	spin_unlock_irq(&chip->reg_lock);
	return 0;
}

/* Update the per-stream status bits; written through while the S/PDIF PCM
 * output is active (SPDIFOUTCTRL bit 1). */
static int snd_ymfpci_spdif_stream_put(struct snd_kcontrol *kcontrol,
				       struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	unsigned int val;
	int change;

	val = ((ucontrol->value.iec958.status[0] & 0x3e) << 0) |
	      (ucontrol->value.iec958.status[1] << 8);
	spin_lock_irq(&chip->reg_lock);
	change = chip->spdif_pcm_bits != val;
	chip->spdif_pcm_bits = val;
	if ((snd_ymfpci_readw(chip, YDSXGR_SPDIFOUTCTRL) & 2))
		snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTSTATUS, chip->spdif_pcm_bits);
	spin_unlock_irq(&chip->reg_lock);
	return change;
}

static struct snd_kcontrol_new snd_ymfpci_spdif_stream __devinitdata =
{
	.access =	SNDRV_CTL_ELEM_ACCESS_READWRITE |
			SNDRV_CTL_ELEM_ACCESS_INACTIVE,
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =         SNDRV_CTL_NAME_IEC958("",PLAYBACK,PCM_STREAM),
	.info =		snd_ymfpci_spdif_stream_info,
	.get =		snd_ymfpci_spdif_stream_get,
	.put =		snd_ymfpci_spdif_stream_put
};

static int snd_ymfpci_drec_source_info(struct snd_kcontrol
*kcontrol, struct snd_ctl_elem_info *info) { static const char *const texts[3] = {"AC'97", "IEC958", "ZV Port"}; return snd_ctl_enum_info(info, 1, 3, texts); } static int snd_ymfpci_drec_source_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); u16 reg; spin_lock_irq(&chip->reg_lock); reg = snd_ymfpci_readw(chip, YDSXGR_GLOBALCTRL); spin_unlock_irq(&chip->reg_lock); if (!(reg & 0x100)) value->value.enumerated.item[0] = 0; else value->value.enumerated.item[0] = 1 + ((reg & 0x200) != 0); return 0; } static int snd_ymfpci_drec_source_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); u16 reg, old_reg; spin_lock_irq(&chip->reg_lock); old_reg = snd_ymfpci_readw(chip, YDSXGR_GLOBALCTRL); if (value->value.enumerated.item[0] == 0) reg = old_reg & ~0x100; else reg = (old_reg & ~0x300) | 0x100 | ((value->value.enumerated.item[0] == 2) << 9); snd_ymfpci_writew(chip, YDSXGR_GLOBALCTRL, reg); spin_unlock_irq(&chip->reg_lock); return reg != old_reg; } static struct snd_kcontrol_new snd_ymfpci_drec_source __devinitdata = { .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Direct Recording Source", .info = snd_ymfpci_drec_source_info, .get = snd_ymfpci_drec_source_get, .put = snd_ymfpci_drec_source_put }; /* * Mixer controls */ #define YMFPCI_SINGLE(xname, xindex, reg, shift) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \ .info = snd_ymfpci_info_single, \ .get = snd_ymfpci_get_single, .put = snd_ymfpci_put_single, \ .private_value = ((reg) | ((shift) << 16)) } #define snd_ymfpci_info_single snd_ctl_boolean_mono_info static int snd_ymfpci_get_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); int reg = kcontrol->private_value & 0xffff; unsigned int shift = (kcontrol->private_value >> 16) & 
0xff; unsigned int mask = 1; switch (reg) { case YDSXGR_SPDIFOUTCTRL: break; case YDSXGR_SPDIFINCTRL: break; default: return -EINVAL; } ucontrol->value.integer.value[0] = (snd_ymfpci_readl(chip, reg) >> shift) & mask; return 0; } static int snd_ymfpci_put_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); int reg = kcontrol->private_value & 0xffff; unsigned int shift = (kcontrol->private_value >> 16) & 0xff; unsigned int mask = 1; int change; unsigned int val, oval; switch (reg) { case YDSXGR_SPDIFOUTCTRL: break; case YDSXGR_SPDIFINCTRL: break; default: return -EINVAL; } val = (ucontrol->value.integer.value[0] & mask); val <<= shift; spin_lock_irq(&chip->reg_lock); oval = snd_ymfpci_readl(chip, reg); val = (oval & ~(mask << shift)) | val; change = val != oval; snd_ymfpci_writel(chip, reg, val); spin_unlock_irq(&chip->reg_lock); return change; } static const DECLARE_TLV_DB_LINEAR(db_scale_native, TLV_DB_GAIN_MUTE, 0); #define YMFPCI_DOUBLE(xname, xindex, reg) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, \ .info = snd_ymfpci_info_double, \ .get = snd_ymfpci_get_double, .put = snd_ymfpci_put_double, \ .private_value = reg, \ .tlv = { .p = db_scale_native } } static int snd_ymfpci_info_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { unsigned int reg = kcontrol->private_value; if (reg < 0x80 || reg >= 0xc0) return -EINVAL; uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = 16383; return 0; } static int snd_ymfpci_get_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); unsigned int reg = kcontrol->private_value; unsigned int shift_left = 0, shift_right = 16, mask = 16383; unsigned int val; if (reg < 0x80 || reg >= 
0xc0) return -EINVAL; spin_lock_irq(&chip->reg_lock); val = snd_ymfpci_readl(chip, reg); spin_unlock_irq(&chip->reg_lock); ucontrol->value.integer.value[0] = (val >> shift_left) & mask; ucontrol->value.integer.value[1] = (val >> shift_right) & mask; return 0; } static int snd_ymfpci_put_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); unsigned int reg = kcontrol->private_value; unsigned int shift_left = 0, shift_right = 16, mask = 16383; int change; unsigned int val1, val2, oval; if (reg < 0x80 || reg >= 0xc0) return -EINVAL; val1 = ucontrol->value.integer.value[0] & mask; val2 = ucontrol->value.integer.value[1] & mask; val1 <<= shift_left; val2 <<= shift_right; spin_lock_irq(&chip->reg_lock); oval = snd_ymfpci_readl(chip, reg); val1 = (oval & ~((mask << shift_left) | (mask << shift_right))) | val1 | val2; change = val1 != oval; snd_ymfpci_writel(chip, reg, val1); spin_unlock_irq(&chip->reg_lock); return change; } static int snd_ymfpci_put_nativedacvol(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); unsigned int reg = YDSXGR_NATIVEDACOUTVOL; unsigned int reg2 = YDSXGR_BUF441OUTVOL; int change; unsigned int value, oval; value = ucontrol->value.integer.value[0] & 0x3fff; value |= (ucontrol->value.integer.value[1] & 0x3fff) << 16; spin_lock_irq(&chip->reg_lock); oval = snd_ymfpci_readl(chip, reg); change = value != oval; snd_ymfpci_writel(chip, reg, value); snd_ymfpci_writel(chip, reg2, value); spin_unlock_irq(&chip->reg_lock); return change; } /* * 4ch duplication */ #define snd_ymfpci_info_dup4ch snd_ctl_boolean_mono_info static int snd_ymfpci_get_dup4ch(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); ucontrol->value.integer.value[0] = chip->mode_dup4ch; return 0; } static int snd_ymfpci_put_dup4ch(struct snd_kcontrol *kcontrol, 
struct snd_ctl_elem_value *ucontrol) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); int change; change = (ucontrol->value.integer.value[0] != chip->mode_dup4ch); if (change) chip->mode_dup4ch = !!ucontrol->value.integer.value[0]; return change; } static struct snd_kcontrol_new snd_ymfpci_dup4ch __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "4ch Duplication", .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .info = snd_ymfpci_info_dup4ch, .get = snd_ymfpci_get_dup4ch, .put = snd_ymfpci_put_dup4ch, }; static struct snd_kcontrol_new snd_ymfpci_controls[] __devinitdata = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Wave Playback Volume", .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, .info = snd_ymfpci_info_double, .get = snd_ymfpci_get_double, .put = snd_ymfpci_put_nativedacvol, .private_value = YDSXGR_NATIVEDACOUTVOL, .tlv = { .p = db_scale_native }, }, YMFPCI_DOUBLE("Wave Capture Volume", 0, YDSXGR_NATIVEDACLOOPVOL), YMFPCI_DOUBLE("Digital Capture Volume", 0, YDSXGR_NATIVEDACINVOL), YMFPCI_DOUBLE("Digital Capture Volume", 1, YDSXGR_NATIVEADCINVOL), YMFPCI_DOUBLE("ADC Playback Volume", 0, YDSXGR_PRIADCOUTVOL), YMFPCI_DOUBLE("ADC Capture Volume", 0, YDSXGR_PRIADCLOOPVOL), YMFPCI_DOUBLE("ADC Playback Volume", 1, YDSXGR_SECADCOUTVOL), YMFPCI_DOUBLE("ADC Capture Volume", 1, YDSXGR_SECADCLOOPVOL), YMFPCI_DOUBLE("FM Legacy Playback Volume", 0, YDSXGR_LEGACYOUTVOL), YMFPCI_DOUBLE(SNDRV_CTL_NAME_IEC958("AC97 ", PLAYBACK,VOLUME), 0, YDSXGR_ZVOUTVOL), YMFPCI_DOUBLE(SNDRV_CTL_NAME_IEC958("", CAPTURE,VOLUME), 0, YDSXGR_ZVLOOPVOL), YMFPCI_DOUBLE(SNDRV_CTL_NAME_IEC958("AC97 ",PLAYBACK,VOLUME), 1, YDSXGR_SPDIFOUTVOL), YMFPCI_DOUBLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,VOLUME), 1, YDSXGR_SPDIFLOOPVOL), YMFPCI_SINGLE(SNDRV_CTL_NAME_IEC958("",PLAYBACK,SWITCH), 0, YDSXGR_SPDIFOUTCTRL, 0), YMFPCI_SINGLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,SWITCH), 0, YDSXGR_SPDIFINCTRL, 0), YMFPCI_SINGLE(SNDRV_CTL_NAME_IEC958("Loop",NONE,NONE), 0, 
YDSXGR_SPDIFINCTRL, 4), }; /* * GPIO */ static int snd_ymfpci_get_gpio_out(struct snd_ymfpci *chip, int pin) { u16 reg, mode; unsigned long flags; spin_lock_irqsave(&chip->reg_lock, flags); reg = snd_ymfpci_readw(chip, YDSXGR_GPIOFUNCENABLE); reg &= ~(1 << (pin + 8)); reg |= (1 << pin); snd_ymfpci_writew(chip, YDSXGR_GPIOFUNCENABLE, reg); /* set the level mode for input line */ mode = snd_ymfpci_readw(chip, YDSXGR_GPIOTYPECONFIG); mode &= ~(3 << (pin * 2)); snd_ymfpci_writew(chip, YDSXGR_GPIOTYPECONFIG, mode); snd_ymfpci_writew(chip, YDSXGR_GPIOFUNCENABLE, reg | (1 << (pin + 8))); mode = snd_ymfpci_readw(chip, YDSXGR_GPIOINSTATUS); spin_unlock_irqrestore(&chip->reg_lock, flags); return (mode >> pin) & 1; } static int snd_ymfpci_set_gpio_out(struct snd_ymfpci *chip, int pin, int enable) { u16 reg; unsigned long flags; spin_lock_irqsave(&chip->reg_lock, flags); reg = snd_ymfpci_readw(chip, YDSXGR_GPIOFUNCENABLE); reg &= ~(1 << pin); reg &= ~(1 << (pin + 8)); snd_ymfpci_writew(chip, YDSXGR_GPIOFUNCENABLE, reg); snd_ymfpci_writew(chip, YDSXGR_GPIOOUTCTRL, enable << pin); snd_ymfpci_writew(chip, YDSXGR_GPIOFUNCENABLE, reg | (1 << (pin + 8))); spin_unlock_irqrestore(&chip->reg_lock, flags); return 0; } #define snd_ymfpci_gpio_sw_info snd_ctl_boolean_mono_info static int snd_ymfpci_gpio_sw_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); int pin = (int)kcontrol->private_value; ucontrol->value.integer.value[0] = snd_ymfpci_get_gpio_out(chip, pin); return 0; } static int snd_ymfpci_gpio_sw_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); int pin = (int)kcontrol->private_value; if (snd_ymfpci_get_gpio_out(chip, pin) != ucontrol->value.integer.value[0]) { snd_ymfpci_set_gpio_out(chip, pin, !!ucontrol->value.integer.value[0]); ucontrol->value.integer.value[0] = snd_ymfpci_get_gpio_out(chip, pin); return 1; } return 0; } 
/* GPIO pin 2 switches rear-out / line-in sharing on some boards. */
static struct snd_kcontrol_new snd_ymfpci_rear_shared __devinitdata = {
	.name =		"Shared Rear/Line-In Switch",
	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
	.info =		snd_ymfpci_gpio_sw_info,
	.get =		snd_ymfpci_gpio_sw_get,
	.put =		snd_ymfpci_gpio_sw_put,
	.private_value = 2,
};

/*
 * PCM voice volume
 */

/* Per-substream volume: stereo, 0..0x8000 (0x8000 = unity). */
static int snd_ymfpci_pcm_vol_info(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 0x8000;
	return 0;
}

/* Read the cached per-substream volume; subdevice index selects the slot. */
static int snd_ymfpci_pcm_vol_get(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	unsigned int subs = kcontrol->id.subdevice;

	ucontrol->value.integer.value[0] = chip->pcm_mixer[subs].left;
	ucontrol->value.integer.value[1] = chip->pcm_mixer[subs].right;
	return 0;
}

/*
 * Store the new per-substream volume (clamped to 0x8000) and, if the
 * substream is running and not using the 44.1 kHz slot, flag its voice
 * banks for a volume update (picked up by the interrupt handler).
 * The substream pointer was stashed in private_value by snd_ymfpci_mixer().
 */
static int snd_ymfpci_pcm_vol_put(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	unsigned int subs = kcontrol->id.subdevice;
	struct snd_pcm_substream *substream;
	unsigned long flags;

	if (ucontrol->value.integer.value[0] != chip->pcm_mixer[subs].left ||
	    ucontrol->value.integer.value[1] != chip->pcm_mixer[subs].right) {
		chip->pcm_mixer[subs].left = ucontrol->value.integer.value[0];
		chip->pcm_mixer[subs].right = ucontrol->value.integer.value[1];
		if (chip->pcm_mixer[subs].left > 0x8000)
			chip->pcm_mixer[subs].left = 0x8000;
		if (chip->pcm_mixer[subs].right > 0x8000)
			chip->pcm_mixer[subs].right = 0x8000;

		substream = (struct snd_pcm_substream *)kcontrol->private_value;
		spin_lock_irqsave(&chip->voice_lock, flags);
		if (substream->runtime && substream->runtime->private_data) {
			struct snd_ymfpci_pcm *ypcm = substream->runtime->private_data;
			if (!ypcm->use_441_slot)
				ypcm->update_pcm_vol = 2;	/* both banks need refresh */
		}
		spin_unlock_irqrestore(&chip->voice_lock, flags);
		return 1;
	}
	return 0;
}

static struct snd_kcontrol_new snd_ymfpci_pcm_volume __devinitdata = {
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		"PCM Playback Volume",
	.access =	SNDRV_CTL_ELEM_ACCESS_READWRITE |
			SNDRV_CTL_ELEM_ACCESS_INACTIVE,
	.info =		snd_ymfpci_pcm_vol_info,
	.get =		snd_ymfpci_pcm_vol_get,
	.put =		snd_ymfpci_pcm_vol_put,
};


/*
 *  Mixer routines
 */

/* Bus destructor callback: drop the back-pointer when the bus goes away. */
static void snd_ymfpci_mixer_free_ac97_bus(struct snd_ac97_bus *bus)
{
	struct snd_ymfpci *chip = bus->private_data;
	chip->ac97_bus = NULL;
}

/* Codec destructor callback: drop the back-pointer when the codec goes away. */
static void snd_ymfpci_mixer_free_ac97(struct snd_ac97 *ac97)
{
	struct snd_ymfpci *chip = ac97->private_data;
	chip->ac97 = NULL;
}

/*
 * Build the whole mixer: AC'97 codec, the static control table, the
 * optional 4ch-duplication and direct-recording controls, the S/PDIF
 * controls (attached to the S/PDIF PCM device), the optional shared
 * rear/line-in switch, and one volume control per playback substream.
 * Must be called after the PCM devices have been created.
 */
int __devinit snd_ymfpci_mixer(struct snd_ymfpci *chip, int rear_switch)
{
	struct snd_ac97_template ac97;
	struct snd_kcontrol *kctl;
	struct snd_pcm_substream *substream;
	unsigned int idx;
	int err;
	static struct snd_ac97_bus_ops ops = {
		.write = snd_ymfpci_codec_write,
		.read = snd_ymfpci_codec_read,
	};

	if ((err = snd_ac97_bus(chip->card, 0, &ops, chip, &chip->ac97_bus)) < 0)
		return err;
	chip->ac97_bus->private_free = snd_ymfpci_mixer_free_ac97_bus;
	chip->ac97_bus->no_vra = 1; /* YMFPCI doesn't need VRA */

	memset(&ac97, 0, sizeof(ac97));
	ac97.private_data = chip;
	ac97.private_free = snd_ymfpci_mixer_free_ac97;
	if ((err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97)) < 0)
		return err;

	/* to be sure */
	snd_ac97_update_bits(chip->ac97, AC97_EXTENDED_STATUS,
			     AC97_EA_VRA|AC97_EA_VRM, 0);

	for (idx = 0; idx < ARRAY_SIZE(snd_ymfpci_controls); idx++) {
		if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_ymfpci_controls[idx], chip))) < 0)
			return err;
	}
	/* 4ch duplication only makes sense with a surround-capable codec */
	if (chip->ac97->ext_id & AC97_EI_SDAC) {
		kctl = snd_ctl_new1(&snd_ymfpci_dup4ch, chip);
		err = snd_ctl_add(chip->card, kctl);
		if (err < 0)
			return err;
	}

	/* add S/PDIF control */
	if (snd_BUG_ON(!chip->pcm_spdif))
		return -ENXIO;
	if ((err = snd_ctl_add(chip->card, kctl = snd_ctl_new1(&snd_ymfpci_spdif_default, chip))) < 0)
		return err;
	kctl->id.device = chip->pcm_spdif->device;
	if ((err = snd_ctl_add(chip->card, kctl = snd_ctl_new1(&snd_ymfpci_spdif_mask, chip))) < 0)
		return err;
	kctl->id.device = chip->pcm_spdif->device;
	if ((err = snd_ctl_add(chip->card, kctl = snd_ctl_new1(&snd_ymfpci_spdif_stream, chip))) < 0)
		return err;
	kctl->id.device = chip->pcm_spdif->device;
	chip->spdif_pcm_ctl = kctl;

	/* direct recording source (YMF754 only) */
	if (chip->device_id == PCI_DEVICE_ID_YAMAHA_754 &&
	    (err = snd_ctl_add(chip->card, kctl = snd_ctl_new1(&snd_ymfpci_drec_source, chip))) < 0)
		return err;

	/*
	 * shared rear/line-in
	 */
	if (rear_switch) {
		if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_ymfpci_rear_shared, chip))) < 0)
			return err;
	}

	/* per-voice volume: one control per playback substream, linked to
	 * the substream through kctl->private_value */
	substream = chip->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
	for (idx = 0; idx < 32; ++idx) {
		kctl = snd_ctl_new1(&snd_ymfpci_pcm_volume, chip);
		if (!kctl)
			return -ENOMEM;
		kctl->id.device = chip->pcm->device;
		kctl->id.subdevice = idx;
		kctl->private_value = (unsigned long)substream;
		if ((err = snd_ctl_add(chip->card, kctl)) < 0)
			return err;
		chip->pcm_mixer[idx].left = 0x8000;
		chip->pcm_mixer[idx].right = 0x8000;
		chip->pcm_mixer[idx].ctl = kctl;
		substream = substream->next;
	}

	return 0;
}


/*
 * timer
 */

/*
 * Start the on-chip timer.  The hardware divides a 96 kHz clock; a
 * divisor of 1 is not usable, so one-tick requests are emulated with
 * divisor 2 and chip->timer_ticks = 2 (the IRQ handler accounts two
 * ticks per interrupt).
 */
static int snd_ymfpci_timer_start(struct snd_timer *timer)
{
	struct snd_ymfpci *chip;
	unsigned long flags;
	unsigned int count;

	chip = snd_timer_chip(timer);
	spin_lock_irqsave(&chip->reg_lock, flags);
	if (timer->sticks > 1) {
		chip->timer_ticks = timer->sticks;
		count = timer->sticks - 1;
	} else {
		/*
		 * Divisor 1 is not allowed; fake it by using divisor 2 and
		 * counting two ticks for each interrupt.
		 */
		chip->timer_ticks = 2;
		count = 2 - 1;
	}
	snd_ymfpci_writew(chip, YDSXGR_TIMERCOUNT, count);
	snd_ymfpci_writeb(chip, YDSXGR_TIMERCTRL, 0x03);	/* run + IRQ enable */
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	return 0;
}

/* Stop the on-chip timer by clearing the control register. */
static int snd_ymfpci_timer_stop(struct snd_timer *timer)
{
	struct snd_ymfpci *chip;
	unsigned long flags;

	chip = snd_timer_chip(timer);
	spin_lock_irqsave(&chip->reg_lock, flags);
	snd_ymfpci_writeb(chip, YDSXGR_TIMERCTRL, 0x00);
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	return 0;
}

/* Exact tick period as a fraction: 1/96000 s. */
static int snd_ymfpci_timer_precise_resolution(struct snd_timer *timer,
					       unsigned long *num, unsigned long *den)
{
	*num = 1;
	*den = 96000;
	return 0;
}

static struct snd_timer_hardware snd_ymfpci_timer_hw = {
	.flags = SNDRV_TIMER_HW_AUTO,
	.resolution = 10417, /* 1 / 96 kHz = 10.41666...us */
	.ticks = 0x10000,
	.start = snd_ymfpci_timer_start,
	.stop = snd_ymfpci_timer_stop,
	.precise_resolution = snd_ymfpci_timer_precise_resolution,
};

/* Register the chip's hardware timer with the ALSA timer core. */
int __devinit snd_ymfpci_timer(struct snd_ymfpci *chip, int device)
{
	struct snd_timer *timer = NULL;
	struct snd_timer_id tid;
	int err;

	tid.dev_class = SNDRV_TIMER_CLASS_CARD;
	tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE;
	tid.card = chip->card->number;
	tid.device = device;
	tid.subdevice = 0;
	if ((err = snd_timer_new(chip->card, "YMFPCI", &tid, &timer)) >= 0) {
		strcpy(timer->name, "YMFPCI timer");
		timer->private_data = chip;
		timer->hw = snd_ymfpci_timer_hw;
	}
	chip->timer = timer;
	return err;
}


/*
 *  proc interface
 */

/* Dump all registers up to YDSXGR_WORKBASE in 32-bit steps. */
static void snd_ymfpci_proc_read(struct snd_info_entry *entry, 
				 struct snd_info_buffer *buffer)
{
	struct snd_ymfpci *chip = entry->private_data;
	int i;
	
	snd_iprintf(buffer, "YMFPCI\n\n");
	for (i = 0; i <= YDSXGR_WORKBASE; i += 4)
		snd_iprintf(buffer, "%04x: %04x\n", i, snd_ymfpci_readl(chip, i));
}

/* Create the /proc/asound/.../ymfpci register-dump entry (best-effort). */
static int __devinit snd_ymfpci_proc_init(struct snd_card *card, struct snd_ymfpci *chip)
{
	struct snd_info_entry *entry;
	
	if (! snd_card_proc_new(card, "ymfpci", &entry))
		snd_info_set_text_ops(entry, chip, snd_ymfpci_proc_read);
	return 0;
}


/*
 *  initialization routines
 */

/*
 * Pulse the AC-link reset bits in PCI config space and clear the power
 * control registers.  The #if 0 blocks used to skip the reset when the
 * link was already up; the reset is now forced unconditionally.
 */
static void snd_ymfpci_aclink_reset(struct pci_dev * pci)
{
	u8 cmd;

	pci_read_config_byte(pci, PCIR_DSXG_CTRL, &cmd);
#if 0 // force to reset
	if (cmd & 0x03) {
#endif
		pci_write_config_byte(pci, PCIR_DSXG_CTRL, cmd & 0xfc);
		pci_write_config_byte(pci, PCIR_DSXG_CTRL, cmd | 0x03);
		pci_write_config_byte(pci, PCIR_DSXG_CTRL, cmd & 0xfc);
		pci_write_config_word(pci, PCIR_DSXG_PWRCTRL1, 0);
		pci_write_config_word(pci, PCIR_DSXG_PWRCTRL2, 0);
#if 0
	}
#endif
}

/* Set the DSP run bit in the CONFIG register. */
static void snd_ymfpci_enable_dsp(struct snd_ymfpci *chip)
{
	snd_ymfpci_writel(chip, YDSXGR_CONFIG, 0x00000001);
}

/*
 * Clear the DSP run bit and busy-wait (bounded at 1000 polls) for the
 * busy flag (STATUS bit 1) to drop.  A timeout is silently ignored.
 */
static void snd_ymfpci_disable_dsp(struct snd_ymfpci *chip)
{
	u32 val;
	int timeout = 1000;

	val = snd_ymfpci_readl(chip, YDSXGR_CONFIG);
	if (val)
		snd_ymfpci_writel(chip, YDSXGR_CONFIG, 0x00000000);
	while (timeout-- > 0) {
		val = snd_ymfpci_readl(chip, YDSXGR_STATUS);
		if ((val & 0x00000002) == 0)
			break;
	}
}

/*
 * Fetch the DSP and controller microcode via the firmware loader and
 * validate their sizes.  The controller image depends on the chip
 * generation (DS-1E variants: 724F/740C/744/754).
 */
static int snd_ymfpci_request_firmware(struct snd_ymfpci *chip)
{
	int err, is_1e;
	const char *name;

	err = request_firmware(&chip->dsp_microcode, "yamaha/ds1_dsp.fw",
			       &chip->pci->dev);
	if (err >= 0) {
		if (chip->dsp_microcode->size != YDSXG_DSPLENGTH) {
			snd_printk(KERN_ERR "DSP microcode has wrong size\n");
			err = -EINVAL;
		}
	}
	if (err < 0)
		return err;
	is_1e = chip->device_id == PCI_DEVICE_ID_YAMAHA_724F ||
		chip->device_id == PCI_DEVICE_ID_YAMAHA_740C ||
		chip->device_id == PCI_DEVICE_ID_YAMAHA_744 ||
		chip->device_id == PCI_DEVICE_ID_YAMAHA_754;
	name = is_1e ? "yamaha/ds1e_ctrl.fw" : "yamaha/ds1_ctrl.fw";
	err = request_firmware(&chip->controller_microcode, name,
			       &chip->pci->dev);
	if (err >= 0) {
		if (chip->controller_microcode->size != YDSXG_CTRLLENGTH) {
			snd_printk(KERN_ERR "controller microcode"
				   " has wrong size\n");
			err = -EINVAL;
		}
	}
	if (err < 0)
		return err;
	return 0;
}

MODULE_FIRMWARE("yamaha/ds1_dsp.fw");
MODULE_FIRMWARE("yamaha/ds1_ctrl.fw");
MODULE_FIRMWARE("yamaha/ds1e_ctrl.fw");

/*
 * Reset the chip and load both microcode images word by word, then
 * start the DSP.  Must be called with valid firmware already fetched
 * by snd_ymfpci_request_firmware().
 */
static void snd_ymfpci_download_image(struct snd_ymfpci *chip)
{
	int i;
	u16 ctrl;
	const __le32 *inst;

	snd_ymfpci_writel(chip, YDSXGR_NATIVEDACOUTVOL, 0x00000000);
	snd_ymfpci_disable_dsp(chip);
	snd_ymfpci_writel(chip, YDSXGR_MODE, 0x00010000);	/* reset pulse */
	snd_ymfpci_writel(chip, YDSXGR_MODE, 0x00000000);
	snd_ymfpci_writel(chip, YDSXGR_MAPOFREC, 0x00000000);
	snd_ymfpci_writel(chip, YDSXGR_MAPOFEFFECT, 0x00000000);
	snd_ymfpci_writel(chip, YDSXGR_PLAYCTRLBASE, 0x00000000);
	snd_ymfpci_writel(chip, YDSXGR_RECCTRLBASE, 0x00000000);
	snd_ymfpci_writel(chip, YDSXGR_EFFCTRLBASE, 0x00000000);
	ctrl = snd_ymfpci_readw(chip, YDSXGR_GLOBALCTRL);
	snd_ymfpci_writew(chip, YDSXGR_GLOBALCTRL, ctrl & ~0x0007);

	/* setup DSP instruction code */
	inst = (const __le32 *)chip->dsp_microcode->data;
	for (i = 0; i < YDSXG_DSPLENGTH / 4; i++)
		snd_ymfpci_writel(chip, YDSXGR_DSPINSTRAM + (i << 2),
				  le32_to_cpu(inst[i]));

	/* setup control instruction code */
	inst = (const __le32 *)chip->controller_microcode->data;
	for (i = 0; i < YDSXG_CTRLLENGTH / 4; i++)
		snd_ymfpci_writel(chip, YDSXGR_CTRLINSTRAM + (i << 2),
				  le32_to_cpu(inst[i]));

	snd_ymfpci_enable_dsp(chip);
}

/*
 * Allocate one contiguous DMA area and carve it into the playback/
 * capture/effect bank tables plus the DSP work area (each section
 * 256-byte aligned), then program the base registers and give all
 * digital-mixer volumes sane defaults.
 */
static int __devinit snd_ymfpci_memalloc(struct snd_ymfpci *chip)
{
	long size, playback_ctrl_size;
	int voice, bank, reg;
	u8 *ptr;
	dma_addr_t ptr_addr;

	playback_ctrl_size = 4 + 4 * YDSXG_PLAYBACK_VOICES;
	chip->bank_size_playback = snd_ymfpci_readl(chip, YDSXGR_PLAYCTRLSIZE) << 2;
	chip->bank_size_capture = snd_ymfpci_readl(chip, YDSXGR_RECCTRLSIZE) << 2;
	chip->bank_size_effect = snd_ymfpci_readl(chip, YDSXGR_EFFCTRLSIZE) << 2;
	chip->work_size = YDSXG_DEFAULT_WORK_SIZE;

	size = ALIGN(playback_ctrl_size, 0x100) +
	       ALIGN(chip->bank_size_playback * 2 * YDSXG_PLAYBACK_VOICES, 0x100) +
	       ALIGN(chip->bank_size_capture * 2 * YDSXG_CAPTURE_VOICES, 0x100) +
	       ALIGN(chip->bank_size_effect * 2 * YDSXG_EFFECT_VOICES, 0x100) +
	       chip->work_size;
	/* work_ptr must be aligned to 256 bytes, but it's already
	   covered with the kernel page allocation mechanism */
	if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
				size, &chip->work_ptr) < 0)
		return -ENOMEM;
	ptr = chip->work_ptr.area;
	ptr_addr = chip->work_ptr.addr;
	memset(ptr, 0, size);	/* for sure */

	chip->bank_base_playback = ptr;
	chip->bank_base_playback_addr = ptr_addr;
	chip->ctrl_playback = (u32 *)ptr;
	/* first word of the control area holds the voice count
	 * (hardware expects little-endian) */
	chip->ctrl_playback[0] = cpu_to_le32(YDSXG_PLAYBACK_VOICES);
	ptr += ALIGN(playback_ctrl_size, 0x100);
	ptr_addr += ALIGN(playback_ctrl_size, 0x100);
	for (voice = 0; voice < YDSXG_PLAYBACK_VOICES; voice++) {
		chip->voices[voice].number = voice;
		chip->voices[voice].bank = (struct snd_ymfpci_playback_bank *)ptr;
		chip->voices[voice].bank_addr = ptr_addr;
		for (bank = 0; bank < 2; bank++) {	/* double-buffered banks */
			chip->bank_playback[voice][bank] = (struct snd_ymfpci_playback_bank *)ptr;
			ptr += chip->bank_size_playback;
			ptr_addr += chip->bank_size_playback;
		}
	}
	ptr = (char *)ALIGN((unsigned long)ptr, 0x100);
	ptr_addr = ALIGN(ptr_addr, 0x100);
	chip->bank_base_capture = ptr;
	chip->bank_base_capture_addr = ptr_addr;
	for (voice = 0; voice < YDSXG_CAPTURE_VOICES; voice++)
		for (bank = 0; bank < 2; bank++) {
			chip->bank_capture[voice][bank] = (struct snd_ymfpci_capture_bank *)ptr;
			ptr += chip->bank_size_capture;
			ptr_addr += chip->bank_size_capture;
		}
	ptr = (char *)ALIGN((unsigned long)ptr, 0x100);
	ptr_addr = ALIGN(ptr_addr, 0x100);
	chip->bank_base_effect = ptr;
	chip->bank_base_effect_addr = ptr_addr;
	for (voice = 0; voice < YDSXG_EFFECT_VOICES; voice++)
		for (bank = 0; bank < 2; bank++) {
			chip->bank_effect[voice][bank] = (struct snd_ymfpci_effect_bank *)ptr;
			ptr += chip->bank_size_effect;
			ptr_addr += chip->bank_size_effect;
		}
	ptr = (char *)ALIGN((unsigned long)ptr, 0x100);
	ptr_addr = ALIGN(ptr_addr, 0x100);
	chip->work_base = ptr;
	chip->work_base_addr = ptr_addr;

	/* sanity: the carve-up must exactly consume the allocation */
	snd_BUG_ON(ptr + chip->work_size !=
		   chip->work_ptr.area + chip->work_ptr.bytes);

	snd_ymfpci_writel(chip, YDSXGR_PLAYCTRLBASE, chip->bank_base_playback_addr);
	snd_ymfpci_writel(chip, YDSXGR_RECCTRLBASE, chip->bank_base_capture_addr);
	snd_ymfpci_writel(chip, YDSXGR_EFFCTRLBASE, chip->bank_base_effect_addr);
	snd_ymfpci_writel(chip, YDSXGR_WORKBASE, chip->work_base_addr);
	snd_ymfpci_writel(chip, YDSXGR_WORKSIZE, chip->work_size >> 2);

	/* S/PDIF output initialization */
	chip->spdif_bits = chip->spdif_pcm_bits = SNDRV_PCM_DEFAULT_CON_SPDIF & 0xffff;
	snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTCTRL, 0);
	snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTSTATUS, chip->spdif_bits);

	/* S/PDIF input initialization */
	snd_ymfpci_writew(chip, YDSXGR_SPDIFINCTRL, 0);

	/* digital mixer setup */
	for (reg = 0x80; reg < 0xc0; reg += 4)
		snd_ymfpci_writel(chip, reg, 0);
	snd_ymfpci_writel(chip, YDSXGR_NATIVEDACOUTVOL, 0x3fff3fff);
	snd_ymfpci_writel(chip, YDSXGR_BUF441OUTVOL, 0x3fff3fff);
	snd_ymfpci_writel(chip, YDSXGR_ZVOUTVOL, 0x3fff3fff);
	snd_ymfpci_writel(chip, YDSXGR_SPDIFOUTVOL, 0x3fff3fff);
	snd_ymfpci_writel(chip, YDSXGR_NATIVEADCINVOL, 0x3fff3fff);
	snd_ymfpci_writel(chip, YDSXGR_NATIVEDACINVOL, 0x3fff3fff);
	snd_ymfpci_writel(chip, YDSXGR_PRIADCLOOPVOL, 0x3fff3fff);
	snd_ymfpci_writel(chip, YDSXGR_LEGACYOUTVOL, 0x3fff3fff);

	return 0;
}

/*
 * Full teardown: quiesce the hardware (only if we still own the register
 * region), then release IRQ, resources, mappings, DMA memory, firmware
 * and finally the chip structure itself.  Safe to call from failed
 * partial initialization, hence the NULL/ownership checks.
 */
static int snd_ymfpci_free(struct snd_ymfpci *chip)
{
	u16 ctrl;

	if (snd_BUG_ON(!chip))
		return -EINVAL;

	if (chip->res_reg_area) {	/* don't touch busy hardware */
		snd_ymfpci_writel(chip, YDSXGR_NATIVEDACOUTVOL, 0);
		snd_ymfpci_writel(chip, YDSXGR_BUF441OUTVOL, 0);
		snd_ymfpci_writel(chip, YDSXGR_LEGACYOUTVOL, 0);
		snd_ymfpci_writel(chip, YDSXGR_STATUS, ~0);
		snd_ymfpci_disable_dsp(chip);
		snd_ymfpci_writel(chip, YDSXGR_PLAYCTRLBASE, 0);
		snd_ymfpci_writel(chip, YDSXGR_RECCTRLBASE, 0);
		snd_ymfpci_writel(chip, YDSXGR_EFFCTRLBASE, 0);
		snd_ymfpci_writel(chip, YDSXGR_WORKBASE, 0);
		snd_ymfpci_writel(chip, YDSXGR_WORKSIZE, 0);
		ctrl = snd_ymfpci_readw(chip, YDSXGR_GLOBALCTRL);
		snd_ymfpci_writew(chip, YDSXGR_GLOBALCTRL, ctrl & ~0x0007);
	}

	snd_ymfpci_ac3_done(chip);

	/* Set PCI device to D3 state */
#if 0
	/* FIXME: temporarily disabled, otherwise we cannot fire up
	 * the chip again unless reboot.  ACPI bug?
	 */
	pci_set_power_state(chip->pci, 3);
#endif

#ifdef CONFIG_PM
	vfree(chip->saved_regs);
#endif
	if (chip->irq >= 0)
		free_irq(chip->irq, chip);
	release_and_free_resource(chip->mpu_res);
	release_and_free_resource(chip->fm_res);
	snd_ymfpci_free_gameport(chip);
	if (chip->reg_area_virt)
		iounmap(chip->reg_area_virt);
	if (chip->work_ptr.area)
		snd_dma_free_pages(&chip->work_ptr);

	release_and_free_resource(chip->res_reg_area);

	/* restore the legacy control word saved at probe time */
	pci_write_config_word(chip->pci, 0x40, chip->old_legacy_ctrl);

	pci_disable_device(chip->pci);
	release_firmware(chip->dsp_microcode);
	release_firmware(chip->controller_microcode);
	kfree(chip);
	return 0;
}

/* snd_device destructor hook: delegates to snd_ymfpci_free(). */
static int snd_ymfpci_dev_free(struct snd_device *device)
{
	struct snd_ymfpci *chip = device->device_data;
	return snd_ymfpci_free(chip);
}

#ifdef CONFIG_PM
/* Registers saved across suspend/resume, restored in this order. */
static int saved_regs_index[] = {
	/* spdif */
	YDSXGR_SPDIFOUTCTRL,
	YDSXGR_SPDIFOUTSTATUS,
	YDSXGR_SPDIFINCTRL,
	/* volumes */
	YDSXGR_PRIADCLOOPVOL,
	YDSXGR_NATIVEDACINVOL,
	YDSXGR_NATIVEDACOUTVOL,
	YDSXGR_BUF441OUTVOL,
	YDSXGR_NATIVEADCINVOL,
	YDSXGR_SPDIFLOOPVOL,
	YDSXGR_SPDIFOUTVOL,
	YDSXGR_ZVOUTVOL,
	YDSXGR_LEGACYOUTVOL,
	/* address bases */
	YDSXGR_PLAYCTRLBASE,
	YDSXGR_RECCTRLBASE,
	YDSXGR_EFFCTRLBASE,
	YDSXGR_WORKBASE,
	/* capture set up */
	YDSXGR_MAPOFREC,
	YDSXGR_RECFORMAT,
	YDSXGR_RECSLOTSR,
	YDSXGR_ADCFORMAT,
	YDSXGR_ADCSLOTSR,
};
#define YDSXGR_NUM_SAVED_REGS	ARRAY_SIZE(saved_regs_index)

/*
 * Suspend: stop all PCM streams and the AC'97 codec, snapshot the
 * register set and PCI legacy config words, silence outputs, stop the
 * DSP and put the PCI device to the requested low-power state.
 */
int snd_ymfpci_suspend(struct pci_dev *pci, pm_message_t state)
{
	struct snd_card *card = pci_get_drvdata(pci);
	struct snd_ymfpci *chip = card->private_data;
	unsigned int i;

	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
	snd_pcm_suspend_all(chip->pcm);
	snd_pcm_suspend_all(chip->pcm2);
	snd_pcm_suspend_all(chip->pcm_spdif);
	snd_pcm_suspend_all(chip->pcm_4ch);
	snd_ac97_suspend(chip->ac97);
	for (i = 0; i < YDSXGR_NUM_SAVED_REGS; i++)
		chip->saved_regs[i] = snd_ymfpci_readl(chip, saved_regs_index[i]);
	chip->saved_ydsxgr_mode = snd_ymfpci_readl(chip, YDSXGR_MODE);
	pci_read_config_word(chip->pci, PCIR_DSXG_LEGACY,
			     &chip->saved_dsxg_legacy);
	pci_read_config_word(chip->pci, PCIR_DSXG_ELEGACY,
			     &chip->saved_dsxg_elegacy);
	snd_ymfpci_writel(chip, YDSXGR_NATIVEDACOUTVOL, 0);
	snd_ymfpci_writel(chip, YDSXGR_BUF441OUTVOL, 0);
	snd_ymfpci_disable_dsp(chip);
	pci_disable_device(pci);
	pci_save_state(pci);
	pci_set_power_state(pci, pci_choose_state(pci, state));
	return 0;
}

/*
 * Resume: re-power and re-enable the PCI device, redo the AC-link reset
 * and firmware download, restore the saved registers and codec state,
 * and restart the engine if it was running at suspend time.
 */
int snd_ymfpci_resume(struct pci_dev *pci)
{
	struct snd_card *card = pci_get_drvdata(pci);
	struct snd_ymfpci *chip = card->private_data;
	unsigned int i;

	pci_set_power_state(pci, PCI_D0);
	pci_restore_state(pci);
	if (pci_enable_device(pci) < 0) {
		printk(KERN_ERR "ymfpci: pci_enable_device failed, "
		       "disabling device\n");
		snd_card_disconnect(card);
		return -EIO;
	}
	pci_set_master(pci);
	snd_ymfpci_aclink_reset(pci);
	snd_ymfpci_codec_ready(chip, 0);
	snd_ymfpci_download_image(chip);
	udelay(100);

	for (i = 0; i < YDSXGR_NUM_SAVED_REGS; i++)
		snd_ymfpci_writel(chip, saved_regs_index[i], chip->saved_regs[i]);

	snd_ac97_resume(chip->ac97);

	pci_write_config_word(chip->pci, PCIR_DSXG_LEGACY,
			      chip->saved_dsxg_legacy);
	pci_write_config_word(chip->pci, PCIR_DSXG_ELEGACY,
			      chip->saved_dsxg_elegacy);

	/* start hw again */
	if (chip->start_count > 0) {
		spin_lock_irq(&chip->reg_lock);
		snd_ymfpci_writel(chip, YDSXGR_MODE, chip->saved_ydsxgr_mode);
		chip->active_bank = snd_ymfpci_readl(chip, YDSXGR_CTRLSELECT);
		spin_unlock_irq(&chip->reg_lock);
	}
	snd_power_change_state(card, SNDRV_CTL_POWER_D0);
	return 0;
}
#endif /* CONFIG_PM */

/*
 * Probe-time constructor: enable the PCI device, map registers, grab
 * the MMIO region and IRQ, reset the AC-link, load firmware, allocate
 * the DMA work area and register the low-level snd_device.  On any
 * failure everything acquired so far is released via snd_ymfpci_free().
 * @old_legacy_ctrl is the pre-probe PCI legacy word, restored on free.
 */
int __devinit snd_ymfpci_create(struct snd_card *card,
				struct pci_dev * pci,
				unsigned short old_legacy_ctrl,
				struct snd_ymfpci ** rchip)
{
	struct snd_ymfpci *chip;
	int err;
	static struct snd_device_ops ops = {
		.dev_free =	snd_ymfpci_dev_free,
	};

	*rchip = NULL;

	/* enable PCI device */
	if ((err = pci_enable_device(pci)) < 0)
		return err;

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (chip == NULL) {
		pci_disable_device(pci);
		return -ENOMEM;
	}
	chip->old_legacy_ctrl = old_legacy_ctrl;
	spin_lock_init(&chip->reg_lock);
	spin_lock_init(&chip->voice_lock);
	init_waitqueue_head(&chip->interrupt_sleep);
	atomic_set(&chip->interrupt_sleep_count, 0);
	chip->card = card;
	chip->pci = pci;
	chip->irq = -1;
	chip->device_id = pci->device;
	chip->rev = pci->revision;
	chip->reg_area_phys = pci_resource_start(pci, 0);
	chip->reg_area_virt = ioremap_nocache(chip->reg_area_phys, 0x8000);
	pci_set_master(pci);
	chip->src441_used = -1;		/* 44.1 kHz SRC slot currently unowned */

	if ((chip->res_reg_area = request_mem_region(chip->reg_area_phys, 0x8000, "YMFPCI")) == NULL) {
		snd_printk(KERN_ERR "unable to grab memory region 0x%lx-0x%lx\n",
			   chip->reg_area_phys, chip->reg_area_phys + 0x8000 - 1);
		snd_ymfpci_free(chip);
		return -EBUSY;
	}
	if (request_irq(pci->irq, snd_ymfpci_interrupt, IRQF_SHARED,
			KBUILD_MODNAME, chip)) {
		snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq);
		snd_ymfpci_free(chip);
		return -EBUSY;
	}
	chip->irq = pci->irq;

	snd_ymfpci_aclink_reset(pci);
	if (snd_ymfpci_codec_ready(chip, 0) < 0) {
		snd_ymfpci_free(chip);
		return -EIO;
	}

	err = snd_ymfpci_request_firmware(chip);
	if (err < 0) {
		snd_printk(KERN_ERR "firmware request failed: %d\n", err);
		snd_ymfpci_free(chip);
		return err;
	}
	snd_ymfpci_download_image(chip);

	udelay(100); /* seems we need a delay after downloading image.. */

	if (snd_ymfpci_memalloc(chip) < 0) {
		snd_ymfpci_free(chip);
		return -EIO;
	}

	if ((err = snd_ymfpci_ac3_init(chip)) < 0) {
		snd_ymfpci_free(chip);
		return err;
	}

#ifdef CONFIG_PM
	chip->saved_regs = vmalloc(YDSXGR_NUM_SAVED_REGS * sizeof(u32));
	if (chip->saved_regs == NULL) {
		snd_ymfpci_free(chip);
		return -ENOMEM;
	}
#endif

	if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
		snd_ymfpci_free(chip);
		return err;
	}

	snd_ymfpci_proc_init(card, chip);

	snd_card_set_dev(card, &pci->dev);
	*rchip = chip;
	return 0;
}
gpl-2.0
alma-siwon/ALMA-Kernel-AOSP
sound/pci/ymfpci/ymfpci_main.c
4855
73287
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Routines for control of YMF724/740/744/754 chips
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/info.h>
#include <sound/tlv.h>
#include <sound/ymfpci.h>
#include <sound/asoundef.h>
#include <sound/mpu401.h>
#include <asm/io.h>
#include <asm/byteorder.h>

/*
 *  common I/O routines
 *
 *  Thin wrappers around the generic MMIO accessors, all operating on
 *  the chip's ioremapped register window (chip->reg_area_virt).
 */

static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip);

/* Read an 8-bit register at byte offset @off. */
static inline u8 snd_ymfpci_readb(struct snd_ymfpci *chip, u32 off)
{
	return readb(chip->reg_area_virt + off);
}

/* Write the 8-bit @value to the register at byte offset @off. */
static inline void snd_ymfpci_writeb(struct snd_ymfpci *chip, u32 off, u8 value)
{
	writeb(value, chip->reg_area_virt + off);
}

/* Read a 16-bit register at byte offset @off. */
static inline u16 snd_ymfpci_readw(struct snd_ymfpci *chip, u32 off)
{
	return readw(chip->reg_area_virt + off);
}

/* Write the 16-bit @value to the register at byte offset @off. */
static inline void snd_ymfpci_writew(struct snd_ymfpci *chip, u32 off, u16 value)
{
	writew(value, chip->reg_area_virt + off);
}

/* Read a 32-bit register at byte offset @off. */
static inline u32 snd_ymfpci_readl(struct snd_ymfpci *chip, u32 off)
{
	return readl(chip->reg_area_virt + off);
}
static inline void snd_ymfpci_writel(struct snd_ymfpci *chip, u32 offset, u32 val) { writel(val, chip->reg_area_virt + offset); } static int snd_ymfpci_codec_ready(struct snd_ymfpci *chip, int secondary) { unsigned long end_time; u32 reg = secondary ? YDSXGR_SECSTATUSADR : YDSXGR_PRISTATUSADR; end_time = jiffies + msecs_to_jiffies(750); do { if ((snd_ymfpci_readw(chip, reg) & 0x8000) == 0) return 0; schedule_timeout_uninterruptible(1); } while (time_before(jiffies, end_time)); snd_printk(KERN_ERR "codec_ready: codec %i is not ready [0x%x]\n", secondary, snd_ymfpci_readw(chip, reg)); return -EBUSY; } static void snd_ymfpci_codec_write(struct snd_ac97 *ac97, u16 reg, u16 val) { struct snd_ymfpci *chip = ac97->private_data; u32 cmd; snd_ymfpci_codec_ready(chip, 0); cmd = ((YDSXG_AC97WRITECMD | reg) << 16) | val; snd_ymfpci_writel(chip, YDSXGR_AC97CMDDATA, cmd); } static u16 snd_ymfpci_codec_read(struct snd_ac97 *ac97, u16 reg) { struct snd_ymfpci *chip = ac97->private_data; if (snd_ymfpci_codec_ready(chip, 0)) return ~0; snd_ymfpci_writew(chip, YDSXGR_AC97CMDADR, YDSXG_AC97READCMD | reg); if (snd_ymfpci_codec_ready(chip, 0)) return ~0; if (chip->device_id == PCI_DEVICE_ID_YAMAHA_744 && chip->rev < 2) { int i; for (i = 0; i < 600; i++) snd_ymfpci_readw(chip, YDSXGR_PRISTATUSDATA); } return snd_ymfpci_readw(chip, YDSXGR_PRISTATUSDATA); } /* * Misc routines */ static u32 snd_ymfpci_calc_delta(u32 rate) { switch (rate) { case 8000: return 0x02aaab00; case 11025: return 0x03accd00; case 16000: return 0x05555500; case 22050: return 0x07599a00; case 32000: return 0x0aaaab00; case 44100: return 0x0eb33300; default: return ((rate << 16) / 375) << 5; } } static u32 def_rate[8] = { 100, 2000, 8000, 11025, 16000, 22050, 32000, 48000 }; static u32 snd_ymfpci_calc_lpfK(u32 rate) { u32 i; static u32 val[8] = { 0x00570000, 0x06AA0000, 0x18B20000, 0x20930000, 0x2B9A0000, 0x35A10000, 0x3EAA0000, 0x40000000 }; if (rate == 44100) return 0x40000000; /* FIXME: What's the right value? 
*/ for (i = 0; i < 8; i++) if (rate <= def_rate[i]) return val[i]; return val[0]; } static u32 snd_ymfpci_calc_lpfQ(u32 rate) { u32 i; static u32 val[8] = { 0x35280000, 0x34A70000, 0x32020000, 0x31770000, 0x31390000, 0x31C90000, 0x33D00000, 0x40000000 }; if (rate == 44100) return 0x370A0000; for (i = 0; i < 8; i++) if (rate <= def_rate[i]) return val[i]; return val[0]; } /* * Hardware start management */ static void snd_ymfpci_hw_start(struct snd_ymfpci *chip) { unsigned long flags; spin_lock_irqsave(&chip->reg_lock, flags); if (chip->start_count++ > 0) goto __end; snd_ymfpci_writel(chip, YDSXGR_MODE, snd_ymfpci_readl(chip, YDSXGR_MODE) | 3); chip->active_bank = snd_ymfpci_readl(chip, YDSXGR_CTRLSELECT) & 1; __end: spin_unlock_irqrestore(&chip->reg_lock, flags); } static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip) { unsigned long flags; long timeout = 1000; spin_lock_irqsave(&chip->reg_lock, flags); if (--chip->start_count > 0) goto __end; snd_ymfpci_writel(chip, YDSXGR_MODE, snd_ymfpci_readl(chip, YDSXGR_MODE) & ~3); while (timeout-- > 0) { if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0) break; } if (atomic_read(&chip->interrupt_sleep_count)) { atomic_set(&chip->interrupt_sleep_count, 0); wake_up(&chip->interrupt_sleep); } __end: spin_unlock_irqrestore(&chip->reg_lock, flags); } /* * Playback voice management */ static int voice_alloc(struct snd_ymfpci *chip, enum snd_ymfpci_voice_type type, int pair, struct snd_ymfpci_voice **rvoice) { struct snd_ymfpci_voice *voice, *voice2; int idx; *rvoice = NULL; for (idx = 0; idx < YDSXG_PLAYBACK_VOICES; idx += pair ? 2 : 1) { voice = &chip->voices[idx]; voice2 = pair ? 
&chip->voices[idx+1] : NULL; if (voice->use || (voice2 && voice2->use)) continue; voice->use = 1; if (voice2) voice2->use = 1; switch (type) { case YMFPCI_PCM: voice->pcm = 1; if (voice2) voice2->pcm = 1; break; case YMFPCI_SYNTH: voice->synth = 1; break; case YMFPCI_MIDI: voice->midi = 1; break; } snd_ymfpci_hw_start(chip); if (voice2) snd_ymfpci_hw_start(chip); *rvoice = voice; return 0; } return -ENOMEM; } static int snd_ymfpci_voice_alloc(struct snd_ymfpci *chip, enum snd_ymfpci_voice_type type, int pair, struct snd_ymfpci_voice **rvoice) { unsigned long flags; int result; if (snd_BUG_ON(!rvoice)) return -EINVAL; if (snd_BUG_ON(pair && type != YMFPCI_PCM)) return -EINVAL; spin_lock_irqsave(&chip->voice_lock, flags); for (;;) { result = voice_alloc(chip, type, pair, rvoice); if (result == 0 || type != YMFPCI_PCM) break; /* TODO: synth/midi voice deallocation */ break; } spin_unlock_irqrestore(&chip->voice_lock, flags); return result; } static int snd_ymfpci_voice_free(struct snd_ymfpci *chip, struct snd_ymfpci_voice *pvoice) { unsigned long flags; if (snd_BUG_ON(!pvoice)) return -EINVAL; snd_ymfpci_hw_stop(chip); spin_lock_irqsave(&chip->voice_lock, flags); if (pvoice->number == chip->src441_used) { chip->src441_used = -1; pvoice->ypcm->use_441_slot = 0; } pvoice->use = pvoice->pcm = pvoice->synth = pvoice->midi = 0; pvoice->ypcm = NULL; pvoice->interrupt = NULL; spin_unlock_irqrestore(&chip->voice_lock, flags); return 0; } /* * PCM part */ static void snd_ymfpci_pcm_interrupt(struct snd_ymfpci *chip, struct snd_ymfpci_voice *voice) { struct snd_ymfpci_pcm *ypcm; u32 pos, delta; if ((ypcm = voice->ypcm) == NULL) return; if (ypcm->substream == NULL) return; spin_lock(&chip->reg_lock); if (ypcm->running) { pos = le32_to_cpu(voice->bank[chip->active_bank].start); if (pos < ypcm->last_pos) delta = pos + (ypcm->buffer_size - ypcm->last_pos); else delta = pos - ypcm->last_pos; ypcm->period_pos += delta; ypcm->last_pos = pos; if (ypcm->period_pos >= ypcm->period_size) 
{ /* printk(KERN_DEBUG "done - active_bank = 0x%x, start = 0x%x\n", chip->active_bank, voice->bank[chip->active_bank].start); */ ypcm->period_pos %= ypcm->period_size; spin_unlock(&chip->reg_lock); snd_pcm_period_elapsed(ypcm->substream); spin_lock(&chip->reg_lock); } if (unlikely(ypcm->update_pcm_vol)) { unsigned int subs = ypcm->substream->number; unsigned int next_bank = 1 - chip->active_bank; struct snd_ymfpci_playback_bank *bank; u32 volume; bank = &voice->bank[next_bank]; volume = cpu_to_le32(chip->pcm_mixer[subs].left << 15); bank->left_gain_end = volume; if (ypcm->output_rear) bank->eff2_gain_end = volume; if (ypcm->voices[1]) bank = &ypcm->voices[1]->bank[next_bank]; volume = cpu_to_le32(chip->pcm_mixer[subs].right << 15); bank->right_gain_end = volume; if (ypcm->output_rear) bank->eff3_gain_end = volume; ypcm->update_pcm_vol--; } } spin_unlock(&chip->reg_lock); } static void snd_ymfpci_pcm_capture_interrupt(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ymfpci_pcm *ypcm = runtime->private_data; struct snd_ymfpci *chip = ypcm->chip; u32 pos, delta; spin_lock(&chip->reg_lock); if (ypcm->running) { pos = le32_to_cpu(chip->bank_capture[ypcm->capture_bank_number][chip->active_bank]->start) >> ypcm->shift; if (pos < ypcm->last_pos) delta = pos + (ypcm->buffer_size - ypcm->last_pos); else delta = pos - ypcm->last_pos; ypcm->period_pos += delta; ypcm->last_pos = pos; if (ypcm->period_pos >= ypcm->period_size) { ypcm->period_pos %= ypcm->period_size; /* printk(KERN_DEBUG "done - active_bank = 0x%x, start = 0x%x\n", chip->active_bank, voice->bank[chip->active_bank].start); */ spin_unlock(&chip->reg_lock); snd_pcm_period_elapsed(substream); spin_lock(&chip->reg_lock); } } spin_unlock(&chip->reg_lock); } static int snd_ymfpci_playback_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); struct snd_ymfpci_pcm *ypcm = 
substream->runtime->private_data; struct snd_kcontrol *kctl = NULL; int result = 0; spin_lock(&chip->reg_lock); if (ypcm->voices[0] == NULL) { result = -EINVAL; goto __unlock; } switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: case SNDRV_PCM_TRIGGER_RESUME: chip->ctrl_playback[ypcm->voices[0]->number + 1] = cpu_to_le32(ypcm->voices[0]->bank_addr); if (ypcm->voices[1] != NULL && !ypcm->use_441_slot) chip->ctrl_playback[ypcm->voices[1]->number + 1] = cpu_to_le32(ypcm->voices[1]->bank_addr); ypcm->running = 1; break; case SNDRV_PCM_TRIGGER_STOP: if (substream->pcm == chip->pcm && !ypcm->use_441_slot) { kctl = chip->pcm_mixer[substream->number].ctl; kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE; } /* fall through */ case SNDRV_PCM_TRIGGER_PAUSE_PUSH: case SNDRV_PCM_TRIGGER_SUSPEND: chip->ctrl_playback[ypcm->voices[0]->number + 1] = 0; if (ypcm->voices[1] != NULL && !ypcm->use_441_slot) chip->ctrl_playback[ypcm->voices[1]->number + 1] = 0; ypcm->running = 0; break; default: result = -EINVAL; break; } __unlock: spin_unlock(&chip->reg_lock); if (kctl) snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_INFO, &kctl->id); return result; } static int snd_ymfpci_capture_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); struct snd_ymfpci_pcm *ypcm = substream->runtime->private_data; int result = 0; u32 tmp; spin_lock(&chip->reg_lock); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: case SNDRV_PCM_TRIGGER_RESUME: tmp = snd_ymfpci_readl(chip, YDSXGR_MAPOFREC) | (1 << ypcm->capture_bank_number); snd_ymfpci_writel(chip, YDSXGR_MAPOFREC, tmp); ypcm->running = 1; break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: case SNDRV_PCM_TRIGGER_SUSPEND: tmp = snd_ymfpci_readl(chip, YDSXGR_MAPOFREC) & ~(1 << ypcm->capture_bank_number); snd_ymfpci_writel(chip, YDSXGR_MAPOFREC, tmp); ypcm->running = 0; break; default: result = -EINVAL; 
break; } spin_unlock(&chip->reg_lock); return result; } static int snd_ymfpci_pcm_voice_alloc(struct snd_ymfpci_pcm *ypcm, int voices) { int err; if (ypcm->voices[1] != NULL && voices < 2) { snd_ymfpci_voice_free(ypcm->chip, ypcm->voices[1]); ypcm->voices[1] = NULL; } if (voices == 1 && ypcm->voices[0] != NULL) return 0; /* already allocated */ if (voices == 2 && ypcm->voices[0] != NULL && ypcm->voices[1] != NULL) return 0; /* already allocated */ if (voices > 1) { if (ypcm->voices[0] != NULL && ypcm->voices[1] == NULL) { snd_ymfpci_voice_free(ypcm->chip, ypcm->voices[0]); ypcm->voices[0] = NULL; } } err = snd_ymfpci_voice_alloc(ypcm->chip, YMFPCI_PCM, voices > 1, &ypcm->voices[0]); if (err < 0) return err; ypcm->voices[0]->ypcm = ypcm; ypcm->voices[0]->interrupt = snd_ymfpci_pcm_interrupt; if (voices > 1) { ypcm->voices[1] = &ypcm->chip->voices[ypcm->voices[0]->number + 1]; ypcm->voices[1]->ypcm = ypcm; } return 0; } static void snd_ymfpci_pcm_init_voice(struct snd_ymfpci_pcm *ypcm, unsigned int voiceidx, struct snd_pcm_runtime *runtime, int has_pcm_volume) { struct snd_ymfpci_voice *voice = ypcm->voices[voiceidx]; u32 format; u32 delta = snd_ymfpci_calc_delta(runtime->rate); u32 lpfQ = snd_ymfpci_calc_lpfQ(runtime->rate); u32 lpfK = snd_ymfpci_calc_lpfK(runtime->rate); struct snd_ymfpci_playback_bank *bank; unsigned int nbank; u32 vol_left, vol_right; u8 use_left, use_right; unsigned long flags; if (snd_BUG_ON(!voice)) return; if (runtime->channels == 1) { use_left = 1; use_right = 1; } else { use_left = (voiceidx & 1) == 0; use_right = !use_left; } if (has_pcm_volume) { vol_left = cpu_to_le32(ypcm->chip->pcm_mixer [ypcm->substream->number].left << 15); vol_right = cpu_to_le32(ypcm->chip->pcm_mixer [ypcm->substream->number].right << 15); } else { vol_left = cpu_to_le32(0x40000000); vol_right = cpu_to_le32(0x40000000); } spin_lock_irqsave(&ypcm->chip->voice_lock, flags); format = runtime->channels == 2 ? 
0x00010000 : 0; if (snd_pcm_format_width(runtime->format) == 8) format |= 0x80000000; else if (ypcm->chip->device_id == PCI_DEVICE_ID_YAMAHA_754 && runtime->rate == 44100 && runtime->channels == 2 && voiceidx == 0 && (ypcm->chip->src441_used == -1 || ypcm->chip->src441_used == voice->number)) { ypcm->chip->src441_used = voice->number; ypcm->use_441_slot = 1; format |= 0x10000000; } if (ypcm->chip->src441_used == voice->number && (format & 0x10000000) == 0) { ypcm->chip->src441_used = -1; ypcm->use_441_slot = 0; } if (runtime->channels == 2 && (voiceidx & 1) != 0) format |= 1; spin_unlock_irqrestore(&ypcm->chip->voice_lock, flags); for (nbank = 0; nbank < 2; nbank++) { bank = &voice->bank[nbank]; memset(bank, 0, sizeof(*bank)); bank->format = cpu_to_le32(format); bank->base = cpu_to_le32(runtime->dma_addr); bank->loop_end = cpu_to_le32(ypcm->buffer_size); bank->lpfQ = cpu_to_le32(lpfQ); bank->delta = bank->delta_end = cpu_to_le32(delta); bank->lpfK = bank->lpfK_end = cpu_to_le32(lpfK); bank->eg_gain = bank->eg_gain_end = cpu_to_le32(0x40000000); if (ypcm->output_front) { if (use_left) { bank->left_gain = bank->left_gain_end = vol_left; } if (use_right) { bank->right_gain = bank->right_gain_end = vol_right; } } if (ypcm->output_rear) { if (!ypcm->swap_rear) { if (use_left) { bank->eff2_gain = bank->eff2_gain_end = vol_left; } if (use_right) { bank->eff3_gain = bank->eff3_gain_end = vol_right; } } else { /* The SPDIF out channels seem to be swapped, so we have * to swap them here, too. The rear analog out channels * will be wrong, but otherwise AC3 would not work. 
*/ if (use_left) { bank->eff3_gain = bank->eff3_gain_end = vol_left; } if (use_right) { bank->eff2_gain = bank->eff2_gain_end = vol_right; } } } } } static int __devinit snd_ymfpci_ac3_init(struct snd_ymfpci *chip) { if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), 4096, &chip->ac3_tmp_base) < 0) return -ENOMEM; chip->bank_effect[3][0]->base = chip->bank_effect[3][1]->base = cpu_to_le32(chip->ac3_tmp_base.addr); chip->bank_effect[3][0]->loop_end = chip->bank_effect[3][1]->loop_end = cpu_to_le32(1024); chip->bank_effect[4][0]->base = chip->bank_effect[4][1]->base = cpu_to_le32(chip->ac3_tmp_base.addr + 2048); chip->bank_effect[4][0]->loop_end = chip->bank_effect[4][1]->loop_end = cpu_to_le32(1024); spin_lock_irq(&chip->reg_lock); snd_ymfpci_writel(chip, YDSXGR_MAPOFEFFECT, snd_ymfpci_readl(chip, YDSXGR_MAPOFEFFECT) | 3 << 3); spin_unlock_irq(&chip->reg_lock); return 0; } static int snd_ymfpci_ac3_done(struct snd_ymfpci *chip) { spin_lock_irq(&chip->reg_lock); snd_ymfpci_writel(chip, YDSXGR_MAPOFEFFECT, snd_ymfpci_readl(chip, YDSXGR_MAPOFEFFECT) & ~(3 << 3)); spin_unlock_irq(&chip->reg_lock); // snd_ymfpci_irq_wait(chip); if (chip->ac3_tmp_base.area) { snd_dma_free_pages(&chip->ac3_tmp_base); chip->ac3_tmp_base.area = NULL; } return 0; } static int snd_ymfpci_playback_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ymfpci_pcm *ypcm = runtime->private_data; int err; if ((err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params))) < 0) return err; if ((err = snd_ymfpci_pcm_voice_alloc(ypcm, params_channels(hw_params))) < 0) return err; return 0; } static int snd_ymfpci_playback_hw_free(struct snd_pcm_substream *substream) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ymfpci_pcm *ypcm; if (runtime->private_data == NULL) return 0; ypcm = 
runtime->private_data; /* wait, until the PCI operations are not finished */ snd_ymfpci_irq_wait(chip); snd_pcm_lib_free_pages(substream); if (ypcm->voices[1]) { snd_ymfpci_voice_free(chip, ypcm->voices[1]); ypcm->voices[1] = NULL; } if (ypcm->voices[0]) { snd_ymfpci_voice_free(chip, ypcm->voices[0]); ypcm->voices[0] = NULL; } return 0; } static int snd_ymfpci_playback_prepare(struct snd_pcm_substream *substream) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ymfpci_pcm *ypcm = runtime->private_data; struct snd_kcontrol *kctl; unsigned int nvoice; ypcm->period_size = runtime->period_size; ypcm->buffer_size = runtime->buffer_size; ypcm->period_pos = 0; ypcm->last_pos = 0; for (nvoice = 0; nvoice < runtime->channels; nvoice++) snd_ymfpci_pcm_init_voice(ypcm, nvoice, runtime, substream->pcm == chip->pcm); if (substream->pcm == chip->pcm && !ypcm->use_441_slot) { kctl = chip->pcm_mixer[substream->number].ctl; kctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE; snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_INFO, &kctl->id); } return 0; } static int snd_ymfpci_capture_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); } static int snd_ymfpci_capture_hw_free(struct snd_pcm_substream *substream) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); /* wait, until the PCI operations are not finished */ snd_ymfpci_irq_wait(chip); return snd_pcm_lib_free_pages(substream); } static int snd_ymfpci_capture_prepare(struct snd_pcm_substream *substream) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ymfpci_pcm *ypcm = runtime->private_data; struct snd_ymfpci_capture_bank * bank; int nbank; u32 rate, format; ypcm->period_size = runtime->period_size; ypcm->buffer_size = runtime->buffer_size; 
ypcm->period_pos = 0; ypcm->last_pos = 0; ypcm->shift = 0; rate = ((48000 * 4096) / runtime->rate) - 1; format = 0; if (runtime->channels == 2) { format |= 2; ypcm->shift++; } if (snd_pcm_format_width(runtime->format) == 8) format |= 1; else ypcm->shift++; switch (ypcm->capture_bank_number) { case 0: snd_ymfpci_writel(chip, YDSXGR_RECFORMAT, format); snd_ymfpci_writel(chip, YDSXGR_RECSLOTSR, rate); break; case 1: snd_ymfpci_writel(chip, YDSXGR_ADCFORMAT, format); snd_ymfpci_writel(chip, YDSXGR_ADCSLOTSR, rate); break; } for (nbank = 0; nbank < 2; nbank++) { bank = chip->bank_capture[ypcm->capture_bank_number][nbank]; bank->base = cpu_to_le32(runtime->dma_addr); bank->loop_end = cpu_to_le32(ypcm->buffer_size << ypcm->shift); bank->start = 0; bank->num_of_loops = 0; } return 0; } static snd_pcm_uframes_t snd_ymfpci_playback_pointer(struct snd_pcm_substream *substream) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ymfpci_pcm *ypcm = runtime->private_data; struct snd_ymfpci_voice *voice = ypcm->voices[0]; if (!(ypcm->running && voice)) return 0; return le32_to_cpu(voice->bank[chip->active_bank].start); } static snd_pcm_uframes_t snd_ymfpci_capture_pointer(struct snd_pcm_substream *substream) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ymfpci_pcm *ypcm = runtime->private_data; if (!ypcm->running) return 0; return le32_to_cpu(chip->bank_capture[ypcm->capture_bank_number][chip->active_bank]->start) >> ypcm->shift; } static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip) { wait_queue_t wait; int loops = 4; while (loops-- > 0) { if ((snd_ymfpci_readl(chip, YDSXGR_MODE) & 3) == 0) continue; init_waitqueue_entry(&wait, current); add_wait_queue(&chip->interrupt_sleep, &wait); atomic_inc(&chip->interrupt_sleep_count); schedule_timeout_uninterruptible(msecs_to_jiffies(50)); 
remove_wait_queue(&chip->interrupt_sleep, &wait); } } static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id) { struct snd_ymfpci *chip = dev_id; u32 status, nvoice, mode; struct snd_ymfpci_voice *voice; status = snd_ymfpci_readl(chip, YDSXGR_STATUS); if (status & 0x80000000) { chip->active_bank = snd_ymfpci_readl(chip, YDSXGR_CTRLSELECT) & 1; spin_lock(&chip->voice_lock); for (nvoice = 0; nvoice < YDSXG_PLAYBACK_VOICES; nvoice++) { voice = &chip->voices[nvoice]; if (voice->interrupt) voice->interrupt(chip, voice); } for (nvoice = 0; nvoice < YDSXG_CAPTURE_VOICES; nvoice++) { if (chip->capture_substream[nvoice]) snd_ymfpci_pcm_capture_interrupt(chip->capture_substream[nvoice]); } #if 0 for (nvoice = 0; nvoice < YDSXG_EFFECT_VOICES; nvoice++) { if (chip->effect_substream[nvoice]) snd_ymfpci_pcm_effect_interrupt(chip->effect_substream[nvoice]); } #endif spin_unlock(&chip->voice_lock); spin_lock(&chip->reg_lock); snd_ymfpci_writel(chip, YDSXGR_STATUS, 0x80000000); mode = snd_ymfpci_readl(chip, YDSXGR_MODE) | 2; snd_ymfpci_writel(chip, YDSXGR_MODE, mode); spin_unlock(&chip->reg_lock); if (atomic_read(&chip->interrupt_sleep_count)) { atomic_set(&chip->interrupt_sleep_count, 0); wake_up(&chip->interrupt_sleep); } } status = snd_ymfpci_readw(chip, YDSXGR_INTFLAG); if (status & 1) { if (chip->timer) snd_timer_interrupt(chip->timer, chip->timer_ticks); } snd_ymfpci_writew(chip, YDSXGR_INTFLAG, status); if (chip->rawmidi) snd_mpu401_uart_interrupt(irq, chip->rawmidi->private_data); return IRQ_HANDLED; } static struct snd_pcm_hardware snd_ymfpci_playback = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME), .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 8000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = 256 * 1024, /* FIXME: enough? 
*/ .period_bytes_min = 64, .period_bytes_max = 256 * 1024, /* FIXME: enough? */ .periods_min = 3, .periods_max = 1024, .fifo_size = 0, }; static struct snd_pcm_hardware snd_ymfpci_capture = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME), .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 8000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = 256 * 1024, /* FIXME: enough? */ .period_bytes_min = 64, .period_bytes_max = 256 * 1024, /* FIXME: enough? */ .periods_min = 3, .periods_max = 1024, .fifo_size = 0, }; static void snd_ymfpci_pcm_free_substream(struct snd_pcm_runtime *runtime) { kfree(runtime->private_data); } static int snd_ymfpci_playback_open_1(struct snd_pcm_substream *substream) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ymfpci_pcm *ypcm; int err; runtime->hw = snd_ymfpci_playback; /* FIXME? True value is 256/48 = 5.33333 ms */ err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_TIME, 5334, UINT_MAX); if (err < 0) return err; err = snd_pcm_hw_rule_noresample(runtime, 48000); if (err < 0) return err; ypcm = kzalloc(sizeof(*ypcm), GFP_KERNEL); if (ypcm == NULL) return -ENOMEM; ypcm->chip = chip; ypcm->type = PLAYBACK_VOICE; ypcm->substream = substream; runtime->private_data = ypcm; runtime->private_free = snd_ymfpci_pcm_free_substream; return 0; } /* call with spinlock held */ static void ymfpci_open_extension(struct snd_ymfpci *chip) { if (! chip->rear_opened) { if (! 
chip->spdif_opened) /* set AC3 */ snd_ymfpci_writel(chip, YDSXGR_MODE, snd_ymfpci_readl(chip, YDSXGR_MODE) | (1 << 30)); /* enable second codec (4CHEN) */ snd_ymfpci_writew(chip, YDSXGR_SECCONFIG, (snd_ymfpci_readw(chip, YDSXGR_SECCONFIG) & ~0x0330) | 0x0010); } } /* call with spinlock held */ static void ymfpci_close_extension(struct snd_ymfpci *chip) { if (! chip->rear_opened) { if (! chip->spdif_opened) snd_ymfpci_writel(chip, YDSXGR_MODE, snd_ymfpci_readl(chip, YDSXGR_MODE) & ~(1 << 30)); snd_ymfpci_writew(chip, YDSXGR_SECCONFIG, (snd_ymfpci_readw(chip, YDSXGR_SECCONFIG) & ~0x0330) & ~0x0010); } } static int snd_ymfpci_playback_open(struct snd_pcm_substream *substream) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ymfpci_pcm *ypcm; int err; if ((err = snd_ymfpci_playback_open_1(substream)) < 0) return err; ypcm = runtime->private_data; ypcm->output_front = 1; ypcm->output_rear = chip->mode_dup4ch ? 1 : 0; ypcm->swap_rear = 0; spin_lock_irq(&chip->reg_lock); if (ypcm->output_rear) { ymfpci_open_extension(chip); chip->rear_opened++; } spin_unlock_irq(&chip->reg_lock); return 0; } static int snd_ymfpci_playback_spdif_open(struct snd_pcm_substream *substream) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ymfpci_pcm *ypcm; int err; if ((err = snd_ymfpci_playback_open_1(substream)) < 0) return err; ypcm = runtime->private_data; ypcm->output_front = 0; ypcm->output_rear = 1; ypcm->swap_rear = 1; spin_lock_irq(&chip->reg_lock); snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTCTRL, snd_ymfpci_readw(chip, YDSXGR_SPDIFOUTCTRL) | 2); ymfpci_open_extension(chip); chip->spdif_pcm_bits = chip->spdif_bits; snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTSTATUS, chip->spdif_pcm_bits); chip->spdif_opened++; spin_unlock_irq(&chip->reg_lock); chip->spdif_pcm_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE; snd_ctl_notify(chip->card, 
/* tail of snd_ymfpci_playback_spdif_open() — the function's head is above this chunk */
		       SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
		       &chip->spdif_pcm_ctl->id);
	return 0;
}

/*
 * Open the 4-channel (rear) playback device: route the voice to the rear
 * channel pair only, and bump the rear-extension reference count.
 */
static int snd_ymfpci_playback_4ch_open(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_ymfpci_pcm *ypcm;
	int err;

	if ((err = snd_ymfpci_playback_open_1(substream)) < 0)
		return err;
	ypcm = runtime->private_data;
	ypcm->output_front = 0;
	ypcm->output_rear = 1;
	ypcm->swap_rear = 0;
	spin_lock_irq(&chip->reg_lock);
	ymfpci_open_extension(chip);
	chip->rear_opened++;	/* dropped again in the matching close callback */
	spin_unlock_irq(&chip->reg_lock);
	return 0;
}

/*
 * Common open path for both capture devices (REC and AC'97 banks):
 * allocate the per-substream state, wire it into the chip's capture
 * slot table and start the hardware.
 */
static int snd_ymfpci_capture_open(struct snd_pcm_substream *substream,
				   u32 capture_bank_number)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_ymfpci_pcm *ypcm;
	int err;

	runtime->hw = snd_ymfpci_capture;
	/* FIXME? True value is 256/48 = 5.33333 ms */
	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_TIME,
					   5334, UINT_MAX);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_noresample(runtime, 48000);
	if (err < 0)
		return err;
	ypcm = kzalloc(sizeof(*ypcm), GFP_KERNEL);
	if (ypcm == NULL)
		return -ENOMEM;
	ypcm->chip = chip;
	ypcm->type = capture_bank_number + CAPTURE_REC;
	ypcm->substream = substream;
	ypcm->capture_bank_number = capture_bank_number;
	chip->capture_substream[capture_bank_number] = substream;
	runtime->private_data = ypcm;
	/* freed by the PCM core when the runtime goes away */
	runtime->private_free = snd_ymfpci_pcm_free_substream;
	snd_ymfpci_hw_start(chip);
	return 0;
}

/* Open callback for the "rec" capture bank (bank 0). */
static int snd_ymfpci_capture_rec_open(struct snd_pcm_substream *substream)
{
	return snd_ymfpci_capture_open(substream, 0);
}

/* Open callback for the AC'97 capture bank (bank 1). */
static int snd_ymfpci_capture_ac97_open(struct snd_pcm_substream *substream)
{
	return snd_ymfpci_capture_open(substream, 1);
}

/* Common close tail shared by all playback close callbacks (currently a no-op). */
static int snd_ymfpci_playback_close_1(struct snd_pcm_substream *substream)
{
	return 0;
}

/* Close the main playback device; release the rear extension if this voice used it. */
static int snd_ymfpci_playback_close(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_ymfpci_pcm *ypcm = substream->runtime->private_data;

	spin_lock_irq(&chip->reg_lock);
	if (ypcm->output_rear && chip->rear_opened > 0) {
		chip->rear_opened--;
		ymfpci_close_extension(chip);
	}
	spin_unlock_irq(&chip->reg_lock);
	return snd_ymfpci_playback_close_1(substream);
}

/*
 * Close the S/PDIF playback device: disable the S/PDIF PCM output bit,
 * restore the non-PCM channel-status word, and deactivate + notify the
 * "PCM Stream" S/PDIF control so user space sees it go inactive.
 */
static int snd_ymfpci_playback_spdif_close(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);

	spin_lock_irq(&chip->reg_lock);
	chip->spdif_opened = 0;
	ymfpci_close_extension(chip);
	snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTCTRL,
			  snd_ymfpci_readw(chip, YDSXGR_SPDIFOUTCTRL) & ~2);
	snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTSTATUS, chip->spdif_bits);
	spin_unlock_irq(&chip->reg_lock);
	chip->spdif_pcm_ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
	snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE |
		       SNDRV_CTL_EVENT_MASK_INFO, &chip->spdif_pcm_ctl->id);
	return snd_ymfpci_playback_close_1(substream);
}

/* Close the 4-channel playback device; drop the rear-extension reference. */
static int snd_ymfpci_playback_4ch_close(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);

	spin_lock_irq(&chip->reg_lock);
	if (chip->rear_opened > 0) {
		chip->rear_opened--;
		ymfpci_close_extension(chip);
	}
	spin_unlock_irq(&chip->reg_lock);
	return snd_ymfpci_playback_close_1(substream);
}

/* Close either capture device: unhook the substream and stop the hardware. */
static int snd_ymfpci_capture_close(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_ymfpci_pcm *ypcm = runtime->private_data;

	if (ypcm != NULL) {
		chip->capture_substream[ypcm->capture_bank_number] = NULL;
		snd_ymfpci_hw_stop(chip);
	}
	return 0;
}

/* PCM callback tables for the four device flavours below. */
static struct snd_pcm_ops snd_ymfpci_playback_ops = {
	.open =			snd_ymfpci_playback_open,
	.close =		snd_ymfpci_playback_close,
	.ioctl =		snd_pcm_lib_ioctl,
	.hw_params =		snd_ymfpci_playback_hw_params,
	.hw_free =		snd_ymfpci_playback_hw_free,
	.prepare =		snd_ymfpci_playback_prepare,
	.trigger =		snd_ymfpci_playback_trigger,
	.pointer =		snd_ymfpci_playback_pointer,
};

static struct snd_pcm_ops snd_ymfpci_capture_rec_ops = {
	.open =			snd_ymfpci_capture_rec_open,
	.close =		snd_ymfpci_capture_close,
	.ioctl =		snd_pcm_lib_ioctl,
	.hw_params =		snd_ymfpci_capture_hw_params,
	.hw_free =		snd_ymfpci_capture_hw_free,
	.prepare =		snd_ymfpci_capture_prepare,
	.trigger =		snd_ymfpci_capture_trigger,
	.pointer =		snd_ymfpci_capture_pointer,
};

/*
 * Create the main PCM device: 32 playback substreams (one per voice pair)
 * and 1 capture substream, with DMA buffer preallocation.
 */
int __devinit snd_ymfpci_pcm(struct snd_ymfpci *chip, int device, struct snd_pcm ** rpcm)
{
	struct snd_pcm *pcm;
	int err;

	if (rpcm)
		*rpcm = NULL;
	if ((err = snd_pcm_new(chip->card, "YMFPCI", device, 32, 1, &pcm)) < 0)
		return err;
	pcm->private_data = chip;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_ymfpci_playback_ops);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_ymfpci_capture_rec_ops);

	/* global setup */
	pcm->info_flags = 0;
	strcpy(pcm->name, "YMFPCI");
	chip->pcm = pcm;

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(chip->pci),
					      64*1024, 256*1024);

	if (rpcm)
		*rpcm = pcm;
	return 0;
}

static struct snd_pcm_ops snd_ymfpci_capture_ac97_ops = {
	.open =			snd_ymfpci_capture_ac97_open,
	.close =		snd_ymfpci_capture_close,
	.ioctl =		snd_pcm_lib_ioctl,
	.hw_params =		snd_ymfpci_capture_hw_params,
	.hw_free =		snd_ymfpci_capture_hw_free,
	.prepare =		snd_ymfpci_capture_prepare,
	.trigger =		snd_ymfpci_capture_trigger,
	.pointer =		snd_ymfpci_capture_pointer,
};

/*
 * Create the second PCM device (capture only): AC'97 capture, or
 * "Direct Recording" on the DS-1E (754) chip.
 */
int __devinit snd_ymfpci_pcm2(struct snd_ymfpci *chip, int device, struct snd_pcm ** rpcm)
{
	struct snd_pcm *pcm;
	int err;

	if (rpcm)
		*rpcm = NULL;
	if ((err = snd_pcm_new(chip->card, "YMFPCI - PCM2", device, 0, 1, &pcm)) < 0)
		return err;
	pcm->private_data = chip;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_ymfpci_capture_ac97_ops);

	/* global setup */
	pcm->info_flags = 0;
	sprintf(pcm->name, "YMFPCI - %s",
		chip->device_id == PCI_DEVICE_ID_YAMAHA_754 ?
		"Direct Recording" : "AC'97");
	chip->pcm2 = pcm;

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(chip->pci),
					      64*1024, 256*1024);

	if (rpcm)
		*rpcm = pcm;
	return 0;
}

static struct snd_pcm_ops snd_ymfpci_playback_spdif_ops = {
	.open =			snd_ymfpci_playback_spdif_open,
	.close =		snd_ymfpci_playback_spdif_close,
	.ioctl =		snd_pcm_lib_ioctl,
	.hw_params =		snd_ymfpci_playback_hw_params,
	.hw_free =		snd_ymfpci_playback_hw_free,
	.prepare =		snd_ymfpci_playback_prepare,
	.trigger =		snd_ymfpci_playback_trigger,
	.pointer =		snd_ymfpci_playback_pointer,
};

/* Create the IEC958 (S/PDIF) playback-only PCM device. */
int __devinit snd_ymfpci_pcm_spdif(struct snd_ymfpci *chip, int device, struct snd_pcm ** rpcm)
{
	struct snd_pcm *pcm;
	int err;

	if (rpcm)
		*rpcm = NULL;
	if ((err = snd_pcm_new(chip->card, "YMFPCI - IEC958", device, 1, 0, &pcm)) < 0)
		return err;
	pcm->private_data = chip;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_ymfpci_playback_spdif_ops);

	/* global setup */
	pcm->info_flags = 0;
	strcpy(pcm->name, "YMFPCI - IEC958");
	chip->pcm_spdif = pcm;

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(chip->pci),
					      64*1024, 256*1024);

	if (rpcm)
		*rpcm = pcm;
	return 0;
}

static struct snd_pcm_ops snd_ymfpci_playback_4ch_ops = {
	.open =			snd_ymfpci_playback_4ch_open,
	.close =		snd_ymfpci_playback_4ch_close,
	.ioctl =		snd_pcm_lib_ioctl,
	.hw_params =		snd_ymfpci_playback_hw_params,
	.hw_free =		snd_ymfpci_playback_hw_free,
	.prepare =		snd_ymfpci_playback_prepare,
	.trigger =		snd_ymfpci_playback_trigger,
	.pointer =		snd_ymfpci_playback_pointer,
};

/* Create the rear (4-channel) playback-only PCM device. */
int __devinit snd_ymfpci_pcm_4ch(struct snd_ymfpci *chip, int device, struct snd_pcm ** rpcm)
{
	struct snd_pcm *pcm;
	int err;

	if (rpcm)
		*rpcm = NULL;
	if ((err = snd_pcm_new(chip->card, "YMFPCI - Rear", device, 1, 0, &pcm)) < 0)
		return err;
	pcm->private_data = chip;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_ymfpci_playback_4ch_ops);

	/* global setup */
	pcm->info_flags = 0;
	strcpy(pcm->name, "YMFPCI - Rear PCM");
	chip->pcm_4ch = pcm;

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(chip->pci),
					      64*1024, 256*1024);

	if (rpcm)
		*rpcm = pcm;
	return 0;
}

/*
 * IEC958 "Playback Default" control: the channel-status word used when
 * no PCM stream drives the S/PDIF output.
 */
static int snd_ymfpci_spdif_default_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}

static int snd_ymfpci_spdif_default_get(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);

	spin_lock_irq(&chip->reg_lock);
	ucontrol->value.iec958.status[0] = (chip->spdif_bits >> 0) & 0xff;
	ucontrol->value.iec958.status[1] = (chip->spdif_bits >> 8) & 0xff;
	/* sample-rate status byte is fixed at 48 kHz */
	ucontrol->value.iec958.status[3] = IEC958_AES3_CON_FS_48000;
	spin_unlock_irq(&chip->reg_lock);
	return 0;
}

static int snd_ymfpci_spdif_default_put(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	unsigned int val;
	int change;

	/* only the bits allowed by the mask control (0x3e in byte 0) are taken */
	val = ((ucontrol->value.iec958.status[0] & 0x3e) << 0) |
	      (ucontrol->value.iec958.status[1] << 8);
	spin_lock_irq(&chip->reg_lock);
	change = chip->spdif_bits != val;
	chip->spdif_bits = val;
	/* write through only when the output is on and no S/PDIF PCM device exists */
	if ((snd_ymfpci_readw(chip, YDSXGR_SPDIFOUTCTRL) & 1) && chip->pcm_spdif == NULL)
		snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTSTATUS, chip->spdif_bits);
	spin_unlock_irq(&chip->reg_lock);
	return change;
}

static struct snd_kcontrol_new snd_ymfpci_spdif_default __devinitdata =
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =         SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
	.info =		snd_ymfpci_spdif_default_info,
	.get =		snd_ymfpci_spdif_default_get,
	.put =		snd_ymfpci_spdif_default_put
};

/* IEC958 "Consumer Mask" control: which channel-status bits are writable. */
static int snd_ymfpci_spdif_mask_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}

static int snd_ymfpci_spdif_mask_get(struct snd_kcontrol *kcontrol,
				     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);

	spin_lock_irq(&chip->reg_lock);
	ucontrol->value.iec958.status[0] = 0x3e;
	ucontrol->value.iec958.status[1] = 0xff;
	spin_unlock_irq(&chip->reg_lock);
	return 0;
}

static struct snd_kcontrol_new snd_ymfpci_spdif_mask __devinitdata =
{
	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =         SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK),
	.info =		snd_ymfpci_spdif_mask_info,
	.get =		snd_ymfpci_spdif_mask_get,
};

/*
 * IEC958 "PCM Stream" control: the channel-status word used while the
 * S/PDIF PCM device is streaming. Inactive until the device is opened.
 */
static int snd_ymfpci_spdif_stream_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}

static int snd_ymfpci_spdif_stream_get(struct snd_kcontrol *kcontrol,
				       struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);

	spin_lock_irq(&chip->reg_lock);
	ucontrol->value.iec958.status[0] = (chip->spdif_pcm_bits >> 0) & 0xff;
	ucontrol->value.iec958.status[1] = (chip->spdif_pcm_bits >> 8) & 0xff;
	ucontrol->value.iec958.status[3] = IEC958_AES3_CON_FS_48000;
	spin_unlock_irq(&chip->reg_lock);
	return 0;
}

static int snd_ymfpci_spdif_stream_put(struct snd_kcontrol *kcontrol,
				       struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	unsigned int val;
	int change;

	val = ((ucontrol->value.iec958.status[0] & 0x3e) << 0) |
	      (ucontrol->value.iec958.status[1] << 8);
	spin_lock_irq(&chip->reg_lock);
	change = chip->spdif_pcm_bits != val;
	chip->spdif_pcm_bits = val;
	/* bit 1 of SPDIFOUTCTRL = PCM stream currently routed to S/PDIF */
	if ((snd_ymfpci_readw(chip, YDSXGR_SPDIFOUTCTRL) & 2))
		snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTSTATUS, chip->spdif_pcm_bits);
	spin_unlock_irq(&chip->reg_lock);
	return change;
}

static struct snd_kcontrol_new snd_ymfpci_spdif_stream __devinitdata =
{
	.access =	SNDRV_CTL_ELEM_ACCESS_READWRITE |
			SNDRV_CTL_ELEM_ACCESS_INACTIVE,
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =         SNDRV_CTL_NAME_IEC958("",PLAYBACK,PCM_STREAM),
	.info =		snd_ymfpci_spdif_stream_info,
	.get =		snd_ymfpci_spdif_stream_get,
	.put =		snd_ymfpci_spdif_stream_put
};

/* "Direct Recording Source" enum control (DS-1E / 754 only, see mixer setup). */
static int snd_ymfpci_drec_source_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *info)
{
	static const char *const texts[3] = {"AC'97", "IEC958", "ZV Port"};

	return snd_ctl_enum_info(info, 1, 3, texts);
}

static int snd_ymfpci_drec_source_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	u16 reg;

	spin_lock_irq(&chip->reg_lock);
	reg = snd_ymfpci_readw(chip, YDSXGR_GLOBALCTRL);
	spin_unlock_irq(&chip->reg_lock);
	/* bit 8 selects direct recording; bit 9 selects ZV Port over IEC958 */
	if (!(reg & 0x100))
		value->value.enumerated.item[0] = 0;
	else
		value->value.enumerated.item[0] = 1 + ((reg & 0x200) != 0);
	return 0;
}

static int snd_ymfpci_drec_source_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	u16 reg, old_reg;

	spin_lock_irq(&chip->reg_lock);
	old_reg = snd_ymfpci_readw(chip, YDSXGR_GLOBALCTRL);
	if (value->value.enumerated.item[0] == 0)
		reg = old_reg & ~0x100;
	else
		reg = (old_reg & ~0x300) | 0x100 |
		      ((value->value.enumerated.item[0] == 2) << 9);
	snd_ymfpci_writew(chip, YDSXGR_GLOBALCTRL, reg);
	spin_unlock_irq(&chip->reg_lock);
	return reg != old_reg;
}

static struct snd_kcontrol_new snd_ymfpci_drec_source __devinitdata = {
	.access =	SNDRV_CTL_ELEM_ACCESS_READWRITE,
	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
	.name =		"Direct Recording Source",
	.info =		snd_ymfpci_drec_source_info,
	.get =		snd_ymfpci_drec_source_get,
	.put =		snd_ymfpci_drec_source_put
};

/*
 *  Mixer controls
 */

/* Mono boolean switch backed by a single bit of a chip register. */
#define YMFPCI_SINGLE(xname, xindex, reg, shift) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .info = snd_ymfpci_info_single, \
  .get = snd_ymfpci_get_single, .put = snd_ymfpci_put_single, \
  .private_value = ((reg) | ((shift) << 16)) }

#define snd_ymfpci_info_single		snd_ctl_boolean_mono_info

static int snd_ymfpci_get_single(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	int reg = kcontrol->private_value & 0xffff;
	unsigned int shift = (kcontrol->private_value >> 16) & 0xff;
	unsigned int mask = 1;

	/* only the two S/PDIF control registers are valid targets */
	switch (reg) {
	case YDSXGR_SPDIFOUTCTRL: break;
	case YDSXGR_SPDIFINCTRL: break;
	default: return -EINVAL;
	}
	ucontrol->value.integer.value[0] =
		(snd_ymfpci_readl(chip, reg) >> shift) & mask;
	return 0;
}

static int snd_ymfpci_put_single(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	int reg = kcontrol->private_value & 0xffff;
	unsigned int shift = (kcontrol->private_value >> 16) & 0xff;
 	unsigned int mask = 1;
	int change;
	unsigned int val, oval;

	switch (reg) {
	case YDSXGR_SPDIFOUTCTRL: break;
	case YDSXGR_SPDIFINCTRL: break;
	default: return -EINVAL;
	}
	val = (ucontrol->value.integer.value[0] & mask);
	val <<= shift;
	spin_lock_irq(&chip->reg_lock);
	oval = snd_ymfpci_readl(chip, reg);
	val = (oval & ~(mask << shift)) | val;	/* read-modify-write one bit */
	change = val != oval;
	snd_ymfpci_writel(chip, reg, val);
	spin_unlock_irq(&chip->reg_lock);
	return change;
}

static const DECLARE_TLV_DB_LINEAR(db_scale_native, TLV_DB_GAIN_MUTE, 0);

/* Stereo volume control backed by one 32-bit register (L low half, R high half). */
#define YMFPCI_DOUBLE(xname, xindex, reg) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, \
  .info = snd_ymfpci_info_double, \
  .get = snd_ymfpci_get_double, .put = snd_ymfpci_put_double, \
  .private_value = reg, \
  .tlv = { .p = db_scale_native } }

static int snd_ymfpci_info_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	unsigned int reg = kcontrol->private_value;

	/* volume registers live in the 0x80..0xbf digital-mixer window */
	if (reg < 0x80 || reg >= 0xc0)
		return -EINVAL;
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 16383;
	return 0;
}

static int snd_ymfpci_get_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	unsigned int reg = kcontrol->private_value;
	unsigned int shift_left = 0, shift_right = 16, mask = 16383;
	unsigned int val;

	if (reg < 0x80 || reg >= 0xc0)
		return -EINVAL;
	spin_lock_irq(&chip->reg_lock);
	val = snd_ymfpci_readl(chip, reg);
	spin_unlock_irq(&chip->reg_lock);
	ucontrol->value.integer.value[0] = (val >> shift_left) & mask;
	ucontrol->value.integer.value[1] = (val >> shift_right) & mask;
	return 0;
}

static int snd_ymfpci_put_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	unsigned int reg = kcontrol->private_value;
	unsigned int shift_left = 0, shift_right = 16, mask = 16383;
	int change;
	unsigned int val1, val2, oval;

	if (reg < 0x80 || reg >= 0xc0)
		return -EINVAL;
	val1 = ucontrol->value.integer.value[0] & mask;
	val2 = ucontrol->value.integer.value[1] & mask;
	val1 <<= shift_left;
	val2 <<= shift_right;
	spin_lock_irq(&chip->reg_lock);
	oval = snd_ymfpci_readl(chip, reg);
	val1 = (oval & ~((mask << shift_left) | (mask << shift_right))) | val1 | val2;
	change = val1 != oval;
	snd_ymfpci_writel(chip, reg, val1);
	spin_unlock_irq(&chip->reg_lock);
	return change;
}

/*
 * "Wave Playback Volume" put: mirrors the value into both the native DAC
 * output volume and the 44.1 kHz buffer output volume registers.
 */
static int snd_ymfpci_put_nativedacvol(struct snd_kcontrol *kcontrol,
				       struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	unsigned int reg = YDSXGR_NATIVEDACOUTVOL;
	unsigned int reg2 = YDSXGR_BUF441OUTVOL;
	int change;
	unsigned int value, oval;

	value = ucontrol->value.integer.value[0] & 0x3fff;
	value |= (ucontrol->value.integer.value[1] & 0x3fff) << 16;
	spin_lock_irq(&chip->reg_lock);
	oval = snd_ymfpci_readl(chip, reg);
	change = value != oval;
	snd_ymfpci_writel(chip, reg, value);
	snd_ymfpci_writel(chip, reg2, value);
	spin_unlock_irq(&chip->reg_lock);
	return change;
}

/*
 * 4ch duplication
 */
#define snd_ymfpci_info_dup4ch		snd_ctl_boolean_mono_info

static int snd_ymfpci_get_dup4ch(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);

	ucontrol->value.integer.value[0] = chip->mode_dup4ch;
	return 0;
}

static int snd_ymfpci_put_dup4ch(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	int change;

	change = (ucontrol->value.integer.value[0] != chip->mode_dup4ch);
	if (change)
		chip->mode_dup4ch = !!ucontrol->value.integer.value[0];
	return change;
}

static struct snd_kcontrol_new snd_ymfpci_dup4ch __devinitdata = {
	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
	.name =		"4ch Duplication",
	.access =	SNDRV_CTL_ELEM_ACCESS_READWRITE,
	.info =		snd_ymfpci_info_dup4ch,
	.get =		snd_ymfpci_get_dup4ch,
	.put =		snd_ymfpci_put_dup4ch,
};

/* All static mixer controls registered by snd_ymfpci_mixer(). */
static struct snd_kcontrol_new snd_ymfpci_controls[] __devinitdata = {
{
	/* "Wave Playback Volume" uses the shared double get/info but a
	 * custom put that also updates the 44.1 kHz buffer volume. */
	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
	.name =		"Wave Playback Volume",
	.access =	SNDRV_CTL_ELEM_ACCESS_READWRITE |
			SNDRV_CTL_ELEM_ACCESS_TLV_READ,
	.info =		snd_ymfpci_info_double,
	.get =		snd_ymfpci_get_double,
	.put =		snd_ymfpci_put_nativedacvol,
	.private_value = YDSXGR_NATIVEDACOUTVOL,
	.tlv = { .p = db_scale_native },
},
YMFPCI_DOUBLE("Wave Capture Volume", 0, YDSXGR_NATIVEDACLOOPVOL),
YMFPCI_DOUBLE("Digital Capture Volume", 0, YDSXGR_NATIVEDACINVOL),
YMFPCI_DOUBLE("Digital Capture Volume", 1, YDSXGR_NATIVEADCINVOL),
YMFPCI_DOUBLE("ADC Playback Volume", 0, YDSXGR_PRIADCOUTVOL),
YMFPCI_DOUBLE("ADC Capture Volume", 0, YDSXGR_PRIADCLOOPVOL),
YMFPCI_DOUBLE("ADC Playback Volume", 1, YDSXGR_SECADCOUTVOL),
YMFPCI_DOUBLE("ADC Capture Volume", 1, YDSXGR_SECADCLOOPVOL),
YMFPCI_DOUBLE("FM Legacy Playback Volume", 0, YDSXGR_LEGACYOUTVOL),
YMFPCI_DOUBLE(SNDRV_CTL_NAME_IEC958("AC97 ", PLAYBACK,VOLUME), 0, YDSXGR_ZVOUTVOL),
YMFPCI_DOUBLE(SNDRV_CTL_NAME_IEC958("", CAPTURE,VOLUME), 0, YDSXGR_ZVLOOPVOL),
YMFPCI_DOUBLE(SNDRV_CTL_NAME_IEC958("AC97 ",PLAYBACK,VOLUME), 1, YDSXGR_SPDIFOUTVOL),
YMFPCI_DOUBLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,VOLUME), 1, YDSXGR_SPDIFLOOPVOL),
YMFPCI_SINGLE(SNDRV_CTL_NAME_IEC958("",PLAYBACK,SWITCH), 0, YDSXGR_SPDIFOUTCTRL, 0),
YMFPCI_SINGLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,SWITCH), 0, YDSXGR_SPDIFINCTRL, 0),
YMFPCI_SINGLE(SNDRV_CTL_NAME_IEC958("Loop",NONE,NONE), 0, YDSXGR_SPDIFINCTRL, 4),
};

/*
 * GPIO
 */

/*
 * Read the level of a GPIO pin. Temporarily switches the pin to input,
 * forces level mode, then samples the input-status register.
 */
static int snd_ymfpci_get_gpio_out(struct snd_ymfpci *chip, int pin)
{
	u16 reg, mode;
	unsigned long flags;

	spin_lock_irqsave(&chip->reg_lock, flags);
	reg = snd_ymfpci_readw(chip, YDSXGR_GPIOFUNCENABLE);
	reg &= ~(1 << (pin + 8));
	reg |= (1 << pin);
	snd_ymfpci_writew(chip, YDSXGR_GPIOFUNCENABLE, reg);
	/* set the level mode for input line */
	mode = snd_ymfpci_readw(chip, YDSXGR_GPIOTYPECONFIG);
	mode &= ~(3 << (pin * 2));
	snd_ymfpci_writew(chip, YDSXGR_GPIOTYPECONFIG, mode);
	snd_ymfpci_writew(chip, YDSXGR_GPIOFUNCENABLE, reg | (1 << (pin + 8)));
	mode = snd_ymfpci_readw(chip, YDSXGR_GPIOINSTATUS);
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	return (mode >> pin) & 1;
}

/* Drive a GPIO pin to the given level (pin switched to output mode). */
static int snd_ymfpci_set_gpio_out(struct snd_ymfpci *chip, int pin, int enable)
{
	u16 reg;
	unsigned long flags;

	spin_lock_irqsave(&chip->reg_lock, flags);
	reg = snd_ymfpci_readw(chip, YDSXGR_GPIOFUNCENABLE);
	reg &= ~(1 << pin);
	reg &= ~(1 << (pin + 8));
	snd_ymfpci_writew(chip, YDSXGR_GPIOFUNCENABLE, reg);
	snd_ymfpci_writew(chip, YDSXGR_GPIOOUTCTRL, enable << pin);
	snd_ymfpci_writew(chip, YDSXGR_GPIOFUNCENABLE, reg | (1 << (pin + 8)));
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	return 0;
}

#define snd_ymfpci_gpio_sw_info		snd_ctl_boolean_mono_info

static int snd_ymfpci_gpio_sw_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	int pin = (int)kcontrol->private_value;

	ucontrol->value.integer.value[0] = snd_ymfpci_get_gpio_out(chip, pin);
	return 0;
}

static int snd_ymfpci_gpio_sw_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	int pin = (int)kcontrol->private_value;

	if (snd_ymfpci_get_gpio_out(chip, pin) != ucontrol->value.integer.value[0]) {
		snd_ymfpci_set_gpio_out(chip, pin, !!ucontrol->value.integer.value[0]);
		/* re-read: report the level the hardware actually took */
		ucontrol->value.integer.value[0] = snd_ymfpci_get_gpio_out(chip, pin);
		return 1;
	}
	return 0;
}
/* GPIO pin 2 toggles the shared rear/line-in jack on boards that have it. */
static struct snd_kcontrol_new snd_ymfpci_rear_shared __devinitdata = {
	.name = "Shared Rear/Line-In Switch",
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.info = snd_ymfpci_gpio_sw_info,
	.get = snd_ymfpci_gpio_sw_get,
	.put = snd_ymfpci_gpio_sw_put,
	.private_value = 2,
};

/*
 * PCM voice volume
 */

static int snd_ymfpci_pcm_vol_info(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 0x8000;
	return 0;
}

/* Per-substream volume: the subdevice index selects the pcm_mixer slot. */
static int snd_ymfpci_pcm_vol_get(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	unsigned int subs = kcontrol->id.subdevice;

	ucontrol->value.integer.value[0] = chip->pcm_mixer[subs].left;
	ucontrol->value.integer.value[1] = chip->pcm_mixer[subs].right;
	return 0;
}

static int snd_ymfpci_pcm_vol_put(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	unsigned int subs = kcontrol->id.subdevice;
	struct snd_pcm_substream *substream;
	unsigned long flags;

	if (ucontrol->value.integer.value[0] != chip->pcm_mixer[subs].left ||
	    ucontrol->value.integer.value[1] != chip->pcm_mixer[subs].right) {
		chip->pcm_mixer[subs].left = ucontrol->value.integer.value[0];
		chip->pcm_mixer[subs].right = ucontrol->value.integer.value[1];
		/* clamp to the hardware maximum after storing */
		if (chip->pcm_mixer[subs].left > 0x8000)
			chip->pcm_mixer[subs].left = 0x8000;
		if (chip->pcm_mixer[subs].right > 0x8000)
			chip->pcm_mixer[subs].right = 0x8000;
		/* private_value holds the substream pointer set up in snd_ymfpci_mixer() */
		substream = (struct snd_pcm_substream *)kcontrol->private_value;
		spin_lock_irqsave(&chip->voice_lock, flags);
		if (substream->runtime && substream->runtime->private_data) {
			struct snd_ymfpci_pcm *ypcm = substream->runtime->private_data;
			if (!ypcm->use_441_slot)
				ypcm->update_pcm_vol = 2;	/* both banks need a refresh */
		}
		spin_unlock_irqrestore(&chip->voice_lock, flags);
		return 1;
	}
	return 0;
}

static struct snd_kcontrol_new snd_ymfpci_pcm_volume __devinitdata = {
	.iface = SNDRV_CTL_ELEM_IFACE_PCM,
	.name = "PCM Playback Volume",
	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
		  SNDRV_CTL_ELEM_ACCESS_INACTIVE,
	.info = snd_ymfpci_pcm_vol_info,
	.get = snd_ymfpci_pcm_vol_get,
	.put = snd_ymfpci_pcm_vol_put,
};


/*
 *  Mixer routines
 */

static void snd_ymfpci_mixer_free_ac97_bus(struct snd_ac97_bus *bus)
{
	struct snd_ymfpci *chip = bus->private_data;
	chip->ac97_bus = NULL;
}

static void snd_ymfpci_mixer_free_ac97(struct snd_ac97 *ac97)
{
	struct snd_ymfpci *chip = ac97->private_data;
	chip->ac97 = NULL;
}

/*
 * Build the whole mixer: AC'97 codec, static chip controls, S/PDIF
 * controls (attached to the S/PDIF PCM device), optional direct-recording
 * source and rear-switch controls, and one volume control per playback
 * substream.
 */
int __devinit snd_ymfpci_mixer(struct snd_ymfpci *chip, int rear_switch)
{
	struct snd_ac97_template ac97;
	struct snd_kcontrol *kctl;
	struct snd_pcm_substream *substream;
	unsigned int idx;
	int err;
	static struct snd_ac97_bus_ops ops = {
		.write = snd_ymfpci_codec_write,
		.read = snd_ymfpci_codec_read,
	};

	if ((err = snd_ac97_bus(chip->card, 0, &ops, chip, &chip->ac97_bus)) < 0)
		return err;
	chip->ac97_bus->private_free = snd_ymfpci_mixer_free_ac97_bus;
	chip->ac97_bus->no_vra = 1; /* YMFPCI doesn't need VRA */

	memset(&ac97, 0, sizeof(ac97));
	ac97.private_data = chip;
	ac97.private_free = snd_ymfpci_mixer_free_ac97;
	if ((err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97)) < 0)
		return err;

	/* to be sure */
	snd_ac97_update_bits(chip->ac97, AC97_EXTENDED_STATUS,
			     AC97_EA_VRA|AC97_EA_VRM, 0);

	for (idx = 0; idx < ARRAY_SIZE(snd_ymfpci_controls); idx++) {
		if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_ymfpci_controls[idx], chip))) < 0)
			return err;
	}
	/* 4ch duplication is only offered when the codec has surround DACs */
	if (chip->ac97->ext_id & AC97_EI_SDAC) {
		kctl = snd_ctl_new1(&snd_ymfpci_dup4ch, chip);
		err = snd_ctl_add(chip->card, kctl);
		if (err < 0)
			return err;
	}

	/* add S/PDIF control */
	if (snd_BUG_ON(!chip->pcm_spdif))
		return -ENXIO;
	if ((err = snd_ctl_add(chip->card, kctl = snd_ctl_new1(&snd_ymfpci_spdif_default, chip))) < 0)
		return err;
	kctl->id.device = chip->pcm_spdif->device;
	if ((err = snd_ctl_add(chip->card, kctl = snd_ctl_new1(&snd_ymfpci_spdif_mask, chip))) < 0)
		return err;
	kctl->id.device = chip->pcm_spdif->device;
	if ((err = snd_ctl_add(chip->card, kctl = snd_ctl_new1(&snd_ymfpci_spdif_stream, chip))) < 0)
		return err;
	kctl->id.device = chip->pcm_spdif->device;
	chip->spdif_pcm_ctl = kctl;	/* toggled active/inactive by spdif open/close */

	/* direct recording source */
	if (chip->device_id == PCI_DEVICE_ID_YAMAHA_754 &&
	    (err = snd_ctl_add(chip->card, kctl = snd_ctl_new1(&snd_ymfpci_drec_source, chip))) < 0)
		return err;

	/*
	 * shared rear/line-in
	 */
	if (rear_switch) {
		if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_ymfpci_rear_shared, chip))) < 0)
			return err;
	}

	/* per-voice volume: one control for each of the 32 playback substreams */
	substream = chip->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
	for (idx = 0; idx < 32; ++idx) {
		kctl = snd_ctl_new1(&snd_ymfpci_pcm_volume, chip);
		if (!kctl)
			return -ENOMEM;
		kctl->id.device = chip->pcm->device;
		kctl->id.subdevice = idx;
		kctl->private_value = (unsigned long)substream;
		if ((err = snd_ctl_add(chip->card, kctl)) < 0)
			return err;
		chip->pcm_mixer[idx].left = 0x8000;
		chip->pcm_mixer[idx].right = 0x8000;
		chip->pcm_mixer[idx].ctl = kctl;
		substream = substream->next;
	}

	return 0;
}


/*
 * timer
 */

static int snd_ymfpci_timer_start(struct snd_timer *timer)
{
	struct snd_ymfpci *chip;
	unsigned long flags;
	unsigned int count;

	chip = snd_timer_chip(timer);
	spin_lock_irqsave(&chip->reg_lock, flags);
	if (timer->sticks > 1) {
		chip->timer_ticks = timer->sticks;
		count = timer->sticks - 1;
	} else {
		/*
		 * Divisor 1 is not allowed; fake it by using divisor 2 and
		 * counting two ticks for each interrupt.
		 */
		chip->timer_ticks = 2;
		count = 2 - 1;
	}
	snd_ymfpci_writew(chip, YDSXGR_TIMERCOUNT, count);
	snd_ymfpci_writeb(chip, YDSXGR_TIMERCTRL, 0x03);	/* enable + irq */
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	return 0;
}

static int snd_ymfpci_timer_stop(struct snd_timer *timer)
{
	struct snd_ymfpci *chip;
	unsigned long flags;

	chip = snd_timer_chip(timer);
	spin_lock_irqsave(&chip->reg_lock, flags);
	snd_ymfpci_writeb(chip, YDSXGR_TIMERCTRL, 0x00);
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	return 0;
}

static int snd_ymfpci_timer_precise_resolution(struct snd_timer *timer,
					       unsigned long *num, unsigned long *den)
{
	*num = 1;
	*den = 96000;
	return 0;
}

static struct snd_timer_hardware snd_ymfpci_timer_hw = {
	.flags = SNDRV_TIMER_HW_AUTO,
	.resolution = 10417, /* 1 / 96 kHz = 10.41666...us */
	.ticks = 0x10000,
	.start = snd_ymfpci_timer_start,
	.stop = snd_ymfpci_timer_stop,
	.precise_resolution = snd_ymfpci_timer_precise_resolution,
};

/* Register the chip's 96 kHz hardware timer with the ALSA timer core. */
int __devinit snd_ymfpci_timer(struct snd_ymfpci *chip, int device)
{
	struct snd_timer *timer = NULL;
	struct snd_timer_id tid;
	int err;

	tid.dev_class = SNDRV_TIMER_CLASS_CARD;
	tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE;
	tid.card = chip->card->number;
	tid.device = device;
	tid.subdevice = 0;
	if ((err = snd_timer_new(chip->card, "YMFPCI", &tid, &timer)) >= 0) {
		strcpy(timer->name, "YMFPCI timer");
		timer->private_data = chip;
		timer->hw = snd_ymfpci_timer_hw;
	}
	chip->timer = timer;
	return err;
}


/*
 *  proc interface
 */

/* Dump all chip registers up to the work base as "offset: value" lines. */
static void snd_ymfpci_proc_read(struct snd_info_entry *entry, 
				 struct snd_info_buffer *buffer)
{
	struct snd_ymfpci *chip = entry->private_data;
	int i;
	
	snd_iprintf(buffer, "YMFPCI\n\n");
	for (i = 0; i <= YDSXGR_WORKBASE; i += 4)
		snd_iprintf(buffer, "%04x: %04x\n", i, snd_ymfpci_readl(chip, i));
}

static int __devinit snd_ymfpci_proc_init(struct snd_card *card, struct snd_ymfpci *chip)
{
	struct snd_info_entry *entry;
	
	if (! snd_card_proc_new(card, "ymfpci", &entry))
		snd_info_set_text_ops(entry, chip, snd_ymfpci_proc_read);
	return 0;
}

/*
 *  initialization routines
 */

/* Pulse the AC-link reset bits in PCI config space and clear power control. */
static void snd_ymfpci_aclink_reset(struct pci_dev * pci)
{
	u8 cmd;

	pci_read_config_byte(pci, PCIR_DSXG_CTRL, &cmd);
#if 0 // force to reset
	if (cmd & 0x03) {
#endif
		pci_write_config_byte(pci, PCIR_DSXG_CTRL, cmd & 0xfc);
		pci_write_config_byte(pci, PCIR_DSXG_CTRL, cmd | 0x03);
		pci_write_config_byte(pci, PCIR_DSXG_CTRL, cmd & 0xfc);
		pci_write_config_word(pci, PCIR_DSXG_PWRCTRL1, 0);
		pci_write_config_word(pci, PCIR_DSXG_PWRCTRL2, 0);
#if 0
	}
#endif
}

static void snd_ymfpci_enable_dsp(struct snd_ymfpci *chip)
{
	snd_ymfpci_writel(chip, YDSXGR_CONFIG, 0x00000001);
}

/* Stop the DSP and busy-wait (bounded) for the chip to report idle. */
static void snd_ymfpci_disable_dsp(struct snd_ymfpci *chip)
{
	u32 val;
	int timeout = 1000;

	val = snd_ymfpci_readl(chip, YDSXGR_CONFIG);
	if (val)
		snd_ymfpci_writel(chip, YDSXGR_CONFIG, 0x00000000);
	while (timeout-- > 0) {
		val = snd_ymfpci_readl(chip, YDSXGR_STATUS);
		if ((val & 0x00000002) == 0)
			break;
	}
}

/*
 * Fetch the DSP and controller microcode images, validating their sizes.
 * The DS-1E variants (724F/740C/744/754) use a different controller image.
 */
static int snd_ymfpci_request_firmware(struct snd_ymfpci *chip)
{
	int err, is_1e;
	const char *name;

	err = request_firmware(&chip->dsp_microcode, "yamaha/ds1_dsp.fw",
			       &chip->pci->dev);
	if (err >= 0) {
		if (chip->dsp_microcode->size != YDSXG_DSPLENGTH) {
			snd_printk(KERN_ERR "DSP microcode has wrong size\n");
			err = -EINVAL;
		}
	}
	if (err < 0)
		return err;
	is_1e = chip->device_id == PCI_DEVICE_ID_YAMAHA_724F ||
		chip->device_id == PCI_DEVICE_ID_YAMAHA_740C ||
		chip->device_id == PCI_DEVICE_ID_YAMAHA_744 ||
		chip->device_id == PCI_DEVICE_ID_YAMAHA_754;
	name = is_1e ? "yamaha/ds1e_ctrl.fw" : "yamaha/ds1_ctrl.fw";
	err = request_firmware(&chip->controller_microcode, name,
			       &chip->pci->dev);
	if (err >= 0) {
		if (chip->controller_microcode->size != YDSXG_CTRLLENGTH) {
			snd_printk(KERN_ERR "controller microcode"
				   " has wrong size\n");
			err = -EINVAL;
		}
	}
	if (err < 0)
		return err;
	return 0;
}

MODULE_FIRMWARE("yamaha/ds1_dsp.fw");
MODULE_FIRMWARE("yamaha/ds1_ctrl.fw");
MODULE_FIRMWARE("yamaha/ds1e_ctrl.fw");

/* Reset the chip state and load both microcode images into instruction RAM. */
static void snd_ymfpci_download_image(struct snd_ymfpci *chip)
{
	int i;
	u16 ctrl;
	const __le32 *inst;

	snd_ymfpci_writel(chip, YDSXGR_NATIVEDACOUTVOL, 0x00000000);
	snd_ymfpci_disable_dsp(chip);
	snd_ymfpci_writel(chip, YDSXGR_MODE, 0x00010000);
	snd_ymfpci_writel(chip, YDSXGR_MODE, 0x00000000);
	snd_ymfpci_writel(chip, YDSXGR_MAPOFREC, 0x00000000);
	snd_ymfpci_writel(chip, YDSXGR_MAPOFEFFECT, 0x00000000);
	snd_ymfpci_writel(chip, YDSXGR_PLAYCTRLBASE, 0x00000000);
	snd_ymfpci_writel(chip, YDSXGR_RECCTRLBASE, 0x00000000);
	snd_ymfpci_writel(chip, YDSXGR_EFFCTRLBASE, 0x00000000);
	ctrl = snd_ymfpci_readw(chip, YDSXGR_GLOBALCTRL);
	snd_ymfpci_writew(chip, YDSXGR_GLOBALCTRL, ctrl & ~0x0007);

	/* setup DSP instruction code */
	inst = (const __le32 *)chip->dsp_microcode->data;
	for (i = 0; i < YDSXG_DSPLENGTH / 4; i++)
		snd_ymfpci_writel(chip, YDSXGR_DSPINSTRAM + (i << 2),
				  le32_to_cpu(inst[i]));

	/* setup control instruction code */
	inst = (const __le32 *)chip->controller_microcode->data;
	for (i = 0; i < YDSXG_CTRLLENGTH / 4; i++)
		snd_ymfpci_writel(chip, YDSXGR_CTRLINSTRAM + (i << 2),
				  le32_to_cpu(inst[i]));

	snd_ymfpci_enable_dsp(chip);
}

/*
 * Allocate one contiguous DMA area and carve it into the playback control
 * table, per-voice playback banks, capture banks, effect banks and the DSP
 * work area (each region aligned to 0x100), then point the chip at them
 * and program initial S/PDIF and digital-mixer volumes.
 */
static int __devinit snd_ymfpci_memalloc(struct snd_ymfpci *chip)
{
	long size, playback_ctrl_size;
	int voice, bank, reg;
	u8 *ptr;
	dma_addr_t ptr_addr;

	playback_ctrl_size = 4 + 4 * YDSXG_PLAYBACK_VOICES;
	chip->bank_size_playback = snd_ymfpci_readl(chip, YDSXGR_PLAYCTRLSIZE) << 2;
	chip->bank_size_capture = snd_ymfpci_readl(chip, YDSXGR_RECCTRLSIZE) << 2;
	chip->bank_size_effect = snd_ymfpci_readl(chip, YDSXGR_EFFCTRLSIZE) << 2;
	chip->work_size = YDSXG_DEFAULT_WORK_SIZE;

	size = ALIGN(playback_ctrl_size, 0x100) +
	       ALIGN(chip->bank_size_playback * 2 * YDSXG_PLAYBACK_VOICES, 0x100) +
	       ALIGN(chip->bank_size_capture * 2 * YDSXG_CAPTURE_VOICES, 0x100) +
	       ALIGN(chip->bank_size_effect * 2 * YDSXG_EFFECT_VOICES, 0x100) +
	       chip->work_size;
	/* work_ptr must be aligned to 256 bytes, but it's already
	   covered with the kernel page allocation mechanism */
	if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
				size, &chip->work_ptr) < 0) 
		return -ENOMEM;
	ptr = chip->work_ptr.area;
	ptr_addr = chip->work_ptr.addr;
	memset(ptr, 0, size);	/* for sure */

	chip->bank_base_playback = ptr;
	chip->bank_base_playback_addr = ptr_addr;
	chip->ctrl_playback = (u32 *)ptr;
	chip->ctrl_playback[0] = cpu_to_le32(YDSXG_PLAYBACK_VOICES);
	ptr += ALIGN(playback_ctrl_size, 0x100);
	ptr_addr += ALIGN(playback_ctrl_size, 0x100);
	for (voice = 0; voice < YDSXG_PLAYBACK_VOICES; voice++) {
		chip->voices[voice].number = voice;
		chip->voices[voice].bank = (struct snd_ymfpci_playback_bank *)ptr;
		chip->voices[voice].bank_addr = ptr_addr;
		for (bank = 0; bank < 2; bank++) {
			chip->bank_playback[voice][bank] = (struct snd_ymfpci_playback_bank *)ptr;
			ptr += chip->bank_size_playback;
			ptr_addr += chip->bank_size_playback;
		}
	}
	ptr = (char *)ALIGN((unsigned long)ptr, 0x100);
	ptr_addr = ALIGN(ptr_addr, 0x100);
	chip->bank_base_capture = ptr;
	chip->bank_base_capture_addr = ptr_addr;
	for (voice = 0; voice < YDSXG_CAPTURE_VOICES; voice++)
		for (bank = 0; bank < 2; bank++) {
			chip->bank_capture[voice][bank] = (struct snd_ymfpci_capture_bank *)ptr;
			ptr += chip->bank_size_capture;
			ptr_addr += chip->bank_size_capture;
		}
	ptr = (char *)ALIGN((unsigned long)ptr, 0x100);
	ptr_addr = ALIGN(ptr_addr, 0x100);
	chip->bank_base_effect = ptr;
	chip->bank_base_effect_addr = ptr_addr;
	for (voice = 0; voice < YDSXG_EFFECT_VOICES; voice++)
		for (bank = 0; bank < 2; bank++) {
			chip->bank_effect[voice][bank] = (struct snd_ymfpci_effect_bank *)ptr;
			ptr += chip->bank_size_effect;
			ptr_addr += chip->bank_size_effect;
		}
	ptr = (char *)ALIGN((unsigned long)ptr, 0x100);
	ptr_addr = ALIGN(ptr_addr, 0x100);
	chip->work_base = ptr;
	chip->work_base_addr = ptr_addr;
	
	snd_BUG_ON(ptr + chip->work_size !=
		   chip->work_ptr.area + chip->work_ptr.bytes);

	snd_ymfpci_writel(chip, YDSXGR_PLAYCTRLBASE, chip->bank_base_playback_addr);
	snd_ymfpci_writel(chip, YDSXGR_RECCTRLBASE, chip->bank_base_capture_addr);
	snd_ymfpci_writel(chip, YDSXGR_EFFCTRLBASE, chip->bank_base_effect_addr);
	snd_ymfpci_writel(chip, YDSXGR_WORKBASE, chip->work_base_addr);
	snd_ymfpci_writel(chip, YDSXGR_WORKSIZE, chip->work_size >> 2);

	/* S/PDIF output initialization */
	chip->spdif_bits = chip->spdif_pcm_bits = SNDRV_PCM_DEFAULT_CON_SPDIF & 0xffff;
	snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTCTRL, 0);
	snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTSTATUS, chip->spdif_bits);

	/* S/PDIF input initialization */
	snd_ymfpci_writew(chip, YDSXGR_SPDIFINCTRL, 0);

	/* digital mixer setup */
	for (reg = 0x80; reg < 0xc0; reg += 4)
		snd_ymfpci_writel(chip, reg, 0);
	snd_ymfpci_writel(chip, YDSXGR_NATIVEDACOUTVOL, 0x3fff3fff);
	snd_ymfpci_writel(chip, YDSXGR_BUF441OUTVOL, 0x3fff3fff);
	snd_ymfpci_writel(chip, YDSXGR_ZVOUTVOL, 0x3fff3fff);
	snd_ymfpci_writel(chip, YDSXGR_SPDIFOUTVOL, 0x3fff3fff);
	snd_ymfpci_writel(chip, YDSXGR_NATIVEADCINVOL, 0x3fff3fff);
	snd_ymfpci_writel(chip, YDSXGR_NATIVEDACINVOL, 0x3fff3fff);
	snd_ymfpci_writel(chip, YDSXGR_PRIADCLOOPVOL, 0x3fff3fff);
	snd_ymfpci_writel(chip, YDSXGR_LEGACYOUTVOL, 0x3fff3fff);
	
	return 0;
}

/*
 * Full teardown: silence and reset the hardware (only if the register
 * area was claimed), then release IRQ, resources, DMA memory, firmware
 * and the chip structure itself.
 */
static int snd_ymfpci_free(struct snd_ymfpci *chip)
{
	u16 ctrl;

	if (snd_BUG_ON(!chip))
		return -EINVAL;

	if (chip->res_reg_area) {	/* don't touch busy hardware */
		snd_ymfpci_writel(chip, YDSXGR_NATIVEDACOUTVOL, 0);
		snd_ymfpci_writel(chip, YDSXGR_BUF441OUTVOL, 0);
		snd_ymfpci_writel(chip, YDSXGR_LEGACYOUTVOL, 0);
		snd_ymfpci_writel(chip, YDSXGR_STATUS, ~0);
		snd_ymfpci_disable_dsp(chip);
		snd_ymfpci_writel(chip, YDSXGR_PLAYCTRLBASE, 0);
		snd_ymfpci_writel(chip, YDSXGR_RECCTRLBASE, 0);
		snd_ymfpci_writel(chip, YDSXGR_EFFCTRLBASE, 0);
		snd_ymfpci_writel(chip, YDSXGR_WORKBASE, 0);
		snd_ymfpci_writel(chip, YDSXGR_WORKSIZE, 0);
		ctrl = snd_ymfpci_readw(chip, YDSXGR_GLOBALCTRL);
		snd_ymfpci_writew(chip, YDSXGR_GLOBALCTRL, ctrl & ~0x0007);
	}

	snd_ymfpci_ac3_done(chip);

	/* Set PCI device to D3 state */
#if 0
	/* FIXME: temporarily disabled, otherwise we cannot fire up
	 * the chip again unless reboot.  ACPI bug?
	 */
	pci_set_power_state(chip->pci, 3);
#endif

#ifdef CONFIG_PM
	vfree(chip->saved_regs);
#endif
	if (chip->irq >= 0)
		free_irq(chip->irq, chip);
	release_and_free_resource(chip->mpu_res);
	release_and_free_resource(chip->fm_res);
	snd_ymfpci_free_gameport(chip);
	if (chip->reg_area_virt)
		iounmap(chip->reg_area_virt);
	if (chip->work_ptr.area)
		snd_dma_free_pages(&chip->work_ptr);
	
	release_and_free_resource(chip->res_reg_area);

	/* restore the legacy I/O config saved at probe time */
	pci_write_config_word(chip->pci, 0x40, chip->old_legacy_ctrl);
	
	pci_disable_device(chip->pci);
	release_firmware(chip->dsp_microcode);
	release_firmware(chip->controller_microcode);
	kfree(chip);
	return 0;
}

static int snd_ymfpci_dev_free(struct snd_device *device)
{
	struct snd_ymfpci *chip = device->device_data;
	return snd_ymfpci_free(chip);
}

#ifdef CONFIG_PM
/* Registers preserved across suspend/resume. */
static int saved_regs_index[] = {
	/* spdif */
	YDSXGR_SPDIFOUTCTRL,
	YDSXGR_SPDIFOUTSTATUS,
	YDSXGR_SPDIFINCTRL,
	/* volumes */
	YDSXGR_PRIADCLOOPVOL,
	YDSXGR_NATIVEDACINVOL,
	YDSXGR_NATIVEDACOUTVOL,
	YDSXGR_BUF441OUTVOL,
	YDSXGR_NATIVEADCINVOL,
	YDSXGR_SPDIFLOOPVOL,
	YDSXGR_SPDIFOUTVOL,
	YDSXGR_ZVOUTVOL,
	YDSXGR_LEGACYOUTVOL,
	/* address bases */
	YDSXGR_PLAYCTRLBASE,
	YDSXGR_RECCTRLBASE,
	YDSXGR_EFFCTRLBASE,
	YDSXGR_WORKBASE,
	/* capture set up */
	YDSXGR_MAPOFREC,
	YDSXGR_RECFORMAT,
	YDSXGR_RECSLOTSR,
	YDSXGR_ADCFORMAT,
	YDSXGR_ADCSLOTSR,
};
#define YDSXGR_NUM_SAVED_REGS	ARRAY_SIZE(saved_regs_index)

/*
 * Suspend: stop PCM/AC'97 activity, snapshot the register set and the
 * chip mode, silence the outputs and put the PCI device to sleep.
 */
int snd_ymfpci_suspend(struct pci_dev *pci, pm_message_t state)
{
	struct snd_card *card = pci_get_drvdata(pci);
	struct snd_ymfpci *chip = card->private_data;
	unsigned int i;
	
	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
	snd_pcm_suspend_all(chip->pcm);
	snd_pcm_suspend_all(chip->pcm2);
	snd_pcm_suspend_all(chip->pcm_spdif);
	snd_pcm_suspend_all(chip->pcm_4ch);
	snd_ac97_suspend(chip->ac97);
	for (i = 0; i < YDSXGR_NUM_SAVED_REGS; i++)
		chip->saved_regs[i] = snd_ymfpci_readl(chip, saved_regs_index[i]);
	chip->saved_ydsxgr_mode = snd_ymfpci_readl(chip, YDSXGR_MODE);
	pci_read_config_word(chip->pci, PCIR_DSXG_LEGACY,
			     &chip->saved_dsxg_legacy);
	pci_read_config_word(chip->pci, PCIR_DSXG_ELEGACY,
			     &chip->saved_dsxg_elegacy);
	snd_ymfpci_writel(chip, YDSXGR_NATIVEDACOUTVOL, 0);
	snd_ymfpci_writel(chip, YDSXGR_BUF441OUTVOL, 0);
	snd_ymfpci_disable_dsp(chip);
	pci_disable_device(pci);
	pci_save_state(pci);
	pci_set_power_state(pci, pci_choose_state(pci, state));
	return 0;
}

/*
 * Resume: re-enable the PCI device, reset the AC-link, reload microcode,
 * restore the saved registers and legacy config, and restart the hardware
 * if it was running at suspend time.
 */
int snd_ymfpci_resume(struct pci_dev *pci)
{
	struct snd_card *card = pci_get_drvdata(pci);
	struct snd_ymfpci *chip = card->private_data;
	unsigned int i;

	pci_set_power_state(pci, PCI_D0);
	pci_restore_state(pci);
	if (pci_enable_device(pci) < 0) {
		printk(KERN_ERR "ymfpci: pci_enable_device failed, "
		       "disabling device\n");
		snd_card_disconnect(card);
		return -EIO;
	}
	pci_set_master(pci);
	snd_ymfpci_aclink_reset(pci);
	snd_ymfpci_codec_ready(chip, 0);
	snd_ymfpci_download_image(chip);
	udelay(100);

	for (i = 0; i < YDSXGR_NUM_SAVED_REGS; i++)
		snd_ymfpci_writel(chip, saved_regs_index[i], chip->saved_regs[i]);

	snd_ac97_resume(chip->ac97);

	pci_write_config_word(chip->pci, PCIR_DSXG_LEGACY,
			      chip->saved_dsxg_legacy);
	pci_write_config_word(chip->pci, PCIR_DSXG_ELEGACY,
			      chip->saved_dsxg_elegacy);

	/* start hw again */
	if (chip->start_count > 0) {
		spin_lock_irq(&chip->reg_lock);
		snd_ymfpci_writel(chip, YDSXGR_MODE, chip->saved_ydsxgr_mode);
		chip->active_bank = snd_ymfpci_readl(chip, YDSXGR_CTRLSELECT);
		spin_unlock_irq(&chip->reg_lock);
	}
	snd_power_change_state(card, SNDRV_CTL_POWER_D0);
	return 0;
}
#endif /* CONFIG_PM */

/* head of snd_ymfpci_create() — the function continues beyond this chunk */
int __devinit snd_ymfpci_create(struct snd_card *card,
				struct pci_dev * pci,
unsigned short old_legacy_ctrl, struct snd_ymfpci ** rchip) { struct snd_ymfpci *chip; int err; static struct snd_device_ops ops = { .dev_free = snd_ymfpci_dev_free, }; *rchip = NULL; /* enable PCI device */ if ((err = pci_enable_device(pci)) < 0) return err; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (chip == NULL) { pci_disable_device(pci); return -ENOMEM; } chip->old_legacy_ctrl = old_legacy_ctrl; spin_lock_init(&chip->reg_lock); spin_lock_init(&chip->voice_lock); init_waitqueue_head(&chip->interrupt_sleep); atomic_set(&chip->interrupt_sleep_count, 0); chip->card = card; chip->pci = pci; chip->irq = -1; chip->device_id = pci->device; chip->rev = pci->revision; chip->reg_area_phys = pci_resource_start(pci, 0); chip->reg_area_virt = ioremap_nocache(chip->reg_area_phys, 0x8000); pci_set_master(pci); chip->src441_used = -1; if ((chip->res_reg_area = request_mem_region(chip->reg_area_phys, 0x8000, "YMFPCI")) == NULL) { snd_printk(KERN_ERR "unable to grab memory region 0x%lx-0x%lx\n", chip->reg_area_phys, chip->reg_area_phys + 0x8000 - 1); snd_ymfpci_free(chip); return -EBUSY; } if (request_irq(pci->irq, snd_ymfpci_interrupt, IRQF_SHARED, KBUILD_MODNAME, chip)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); snd_ymfpci_free(chip); return -EBUSY; } chip->irq = pci->irq; snd_ymfpci_aclink_reset(pci); if (snd_ymfpci_codec_ready(chip, 0) < 0) { snd_ymfpci_free(chip); return -EIO; } err = snd_ymfpci_request_firmware(chip); if (err < 0) { snd_printk(KERN_ERR "firmware request failed: %d\n", err); snd_ymfpci_free(chip); return err; } snd_ymfpci_download_image(chip); udelay(100); /* seems we need a delay after downloading image.. 
*/ if (snd_ymfpci_memalloc(chip) < 0) { snd_ymfpci_free(chip); return -EIO; } if ((err = snd_ymfpci_ac3_init(chip)) < 0) { snd_ymfpci_free(chip); return err; } #ifdef CONFIG_PM chip->saved_regs = vmalloc(YDSXGR_NUM_SAVED_REGS * sizeof(u32)); if (chip->saved_regs == NULL) { snd_ymfpci_free(chip); return -ENOMEM; } #endif if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_ymfpci_free(chip); return err; } snd_ymfpci_proc_init(card, chip); snd_card_set_dev(card, &pci->dev); *rchip = chip; return 0; }
gpl-2.0
brymaster5000/m7-501
fs/udf/truncate.c
5111
8072
/*
 * truncate.c
 *
 * PURPOSE
 *	Truncate handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *	This file is distributed under the terms of the GNU General Public
 *	License (GPL). Copies of the GPL can be obtained from:
 *		ftp://prep.ai.mit.edu/pub/gnu/GPL
 *	Each contributing author retains all rights to their own work.
 *
 *  (C) 1999-2004 Ben Fennema
 *  (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 *
 */

#include "udfdecl.h"
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>

#include "udf_i.h"
#include "udf_sb.h"

/*
 * Shorten the extent at *epos from 'elen' bytes to 'nelen' bytes, freeing
 * any whole blocks that fall outside the new length.
 *
 * 'etype' is the extent type (the 2 top bits of the on-disk length field,
 * shifted down by 30).  When the new length is non-zero, the type is folded
 * back into 'nelen' before it is written out via udf_write_aext().
 */
static void extent_trunc(struct inode *inode, struct extent_position *epos,
			 struct kernel_lb_addr *eloc, int8_t etype, uint32_t elen,
			 uint32_t nelen)
{
	struct kernel_lb_addr neloc = {};
	/* Block counts are rounded up: a partial tail block still occupies
	 * a whole block on disk. */
	int last_block = (elen + inode->i_sb->s_blocksize - 1) >>
		inode->i_sb->s_blocksize_bits;
	int first_block = (nelen + inode->i_sb->s_blocksize - 1) >>
		inode->i_sb->s_blocksize_bits;

	if (nelen) {
		if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
			/* Allocated-but-unrecorded extent: release all of its
			 * blocks and rewrite it as a not-allocated extent. */
			udf_free_blocks(inode->i_sb, inode, eloc, 0,
					last_block);
			etype = (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30);
		} else
			neloc = *eloc;
		/* Re-encode type bits into the length word. */
		nelen = (etype << 30) | nelen;
	}

	if (elen != nelen) {
		udf_write_aext(inode, epos, &neloc, nelen, 0);
		if (last_block - first_block > 0) {
			if (etype == (EXT_RECORDED_ALLOCATED >> 30))
				mark_inode_dirty(inode);

			/* Free the blocks cut off the end, unless the extent
			 * never had blocks allocated to begin with. */
			if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
				udf_free_blocks(inode->i_sb, inode, eloc,
						first_block,
						last_block - first_block);
		}
	}
}

/*
 * Truncate the last extent to match i_size. This function assumes
 * that preallocation extent is already truncated.
 */
void udf_truncate_tail_extent(struct inode *inode)
{
	struct extent_position epos = {};
	struct kernel_lb_addr eloc;
	uint32_t elen, nelen;
	uint64_t lbcount = 0;		/* running byte total of extents seen */
	int8_t etype = -1, netype;
	int adsize;			/* size of one allocation descriptor */
	struct udf_inode_info *iinfo = UDF_I(inode);

	/* Nothing to do for in-ICB data or when extents already match size. */
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB ||
	    inode->i_size == iinfo->i_lenExtents)
		return;
	/* Are we going to delete the file anyway? */
	if (inode->i_nlink == 0)
		return;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		BUG();

	/* Find the last extent in the file */
	while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
		etype = netype;
		lbcount += elen;
		if (lbcount > inode->i_size) {
			/* An overhang of a full block or more means the
			 * preallocation was not discarded first - warn. */
			if (lbcount - inode->i_size >= inode->i_sb->s_blocksize)
				udf_warn(inode->i_sb,
					 "Too long extent after EOF in inode %u: i_size: %lld lbcount: %lld extent %u+%u\n",
					 (unsigned)inode->i_ino,
					 (long long)inode->i_size,
					 (long long)lbcount,
					 (unsigned)eloc.logicalBlockNum,
					 (unsigned)elen);
			nelen = elen - (lbcount - inode->i_size);
			/* Step back over the descriptor we just read so
			 * extent_trunc() rewrites it in place. */
			epos.offset -= adsize;
			extent_trunc(inode, &epos, &eloc, etype, elen, nelen);
			epos.offset += adsize;
			/* There must be no further extents past i_size. */
			if (udf_next_aext(inode, &epos, &eloc, &elen, 1) != -1)
				udf_err(inode->i_sb,
					"Extent after EOF in inode %u\n",
					(unsigned)inode->i_ino);
			break;
		}
	}
	/* This inode entry is in-memory only and thus we don't have to mark
	 * the inode dirty */
	iinfo->i_lenExtents = inode->i_size;
	brelse(epos.bh);
}

/*
 * Drop a trailing preallocated (allocated-but-unrecorded) extent, if any,
 * and update i_lenAlloc / the allocation extent descriptor accordingly.
 */
void udf_discard_prealloc(struct inode *inode)
{
	struct extent_position epos = { NULL, 0, {0, 0} };
	struct kernel_lb_addr eloc;
	uint32_t elen;
	uint64_t lbcount = 0;
	int8_t etype = -1, netype;
	int adsize;
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB ||
	    inode->i_size == iinfo->i_lenExtents)
		return;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		adsize = 0;

	epos.block = iinfo->i_location;

	/* Find the last extent in the file */
	while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
		etype = netype;
		lbcount += elen;
	}
	if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
		/* Last extent is preallocation: remove it entirely. */
		epos.offset -= adsize;
		lbcount -= elen;
		extent_trunc(inode, &epos, &eloc, etype, elen, 0);
		if (!epos.bh) {
			/* Descriptors live in the inode's ICB. */
			iinfo->i_lenAlloc =
				epos.offset -
				udf_file_entry_alloc_offset(inode);
			mark_inode_dirty(inode);
		} else {
			/* Descriptors live in an allocation extent block;
			 * fix up its header and tag by hand. */
			struct allocExtDesc *aed =
				(struct allocExtDesc *)(epos.bh->b_data);
			aed->lengthAllocDescs =
				cpu_to_le32(epos.offset -
					    sizeof(struct allocExtDesc));
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
				udf_update_tag(epos.bh->b_data, epos.offset);
			else
				udf_update_tag(epos.bh->b_data,
					       sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(epos.bh, inode);
		}
	}
	/* This inode entry is in-memory only and thus we don't have to mark
	 * the inode dirty */
	iinfo->i_lenExtents = lbcount;
	brelse(epos.bh);
}

/*
 * Rewrite the header of the allocation extent descriptor at *epos to record
 * 'lenalloc' bytes of descriptors, and recompute its checksum tag.
 */
static void udf_update_alloc_ext_desc(struct inode *inode,
				      struct extent_position *epos,
				      u32 lenalloc)
{
	struct super_block *sb = inode->i_sb;
	struct udf_sb_info *sbi = UDF_SB(sb);

	struct allocExtDesc *aed = (struct allocExtDesc *) (epos->bh->b_data);
	int len = sizeof(struct allocExtDesc);

	aed->lengthAllocDescs = cpu_to_le32(lenalloc);
	/* Newer UDF revisions checksum the descriptors too, not just the
	 * header; strict pre-2.01 mode checksums only the header. */
	if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) || sbi->s_udfrev >= 0x0201)
		len += lenalloc;

	udf_update_tag(epos->bh->b_data, len);
	mark_buffer_dirty_inode(epos->bh, inode);
}

/*
 * Truncate extents of inode to inode->i_size. This function can be used only
 * for making file shorter. For making file longer, udf_extend_file() has to
 * be used.
 */
void udf_truncate_extents(struct inode *inode)
{
	struct extent_position epos;
	struct kernel_lb_addr eloc, neloc = {};
	uint32_t elen, nelen = 0, indirect_ext_len = 0, lenalloc;
	int8_t etype;
	struct super_block *sb = inode->i_sb;
	sector_t first_block = inode->i_size >> sb->s_blocksize_bits, offset;
	loff_t byte_offset;
	int adsize;
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		BUG();

	/* Locate the extent containing the new EOF. */
	etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
	byte_offset = (offset << sb->s_blocksize_bits) +
		(inode->i_size & (sb->s_blocksize - 1));
	if (etype == -1) {
		/* We should extend the file? */
		WARN_ON(byte_offset);
		return;
	}
	/* Shorten the EOF extent to end exactly at i_size. */
	epos.offset -= adsize;
	extent_trunc(inode, &epos, &eloc, etype, elen, byte_offset);
	epos.offset += adsize;
	if (byte_offset)
		lenalloc = epos.offset;
	else
		lenalloc = epos.offset - adsize;

	if (!epos.bh)
		lenalloc -= udf_file_entry_alloc_offset(inode);
	else
		lenalloc -= sizeof(struct allocExtDesc);

	/* Walk and free everything after the new EOF, following indirect
	 * (next-allocation-extent) descriptors as we go. */
	while ((etype = udf_current_aext(inode, &epos, &eloc,
					&elen, 0)) != -1) {
		if (etype == (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
			udf_write_aext(inode, &epos, &neloc, nelen, 0);
			if (indirect_ext_len) {
				/* We managed to free all extents in the
				 * indirect extent - free it too */
				BUG_ON(!epos.bh);
				udf_free_blocks(sb, inode, &epos.block,
						0, indirect_ext_len);
			} else if (!epos.bh) {
				iinfo->i_lenAlloc = lenalloc;
				mark_inode_dirty(inode);
			} else
				udf_update_alloc_ext_desc(inode,
						&epos, lenalloc);
			/* Descend into the next allocation extent block. */
			brelse(epos.bh);
			epos.offset = sizeof(struct allocExtDesc);
			epos.block = eloc;
			epos.bh = udf_tread(sb,
					udf_get_lb_pblock(sb, &eloc, 0));
			if (elen)
				indirect_ext_len =
					(elen + sb->s_blocksize - 1) >>
					sb->s_blocksize_bits;
			else
				indirect_ext_len = 1;
		} else {
			extent_trunc(inode, &epos, &eloc, etype, elen, 0);
			epos.offset += adsize;
		}
	}

	if (indirect_ext_len) {
		BUG_ON(!epos.bh);
		udf_free_blocks(sb, inode, &epos.block, 0, indirect_ext_len);
	} else if (!epos.bh) {
		iinfo->i_lenAlloc = lenalloc;
		mark_inode_dirty(inode);
	} else
		udf_update_alloc_ext_desc(inode, &epos, lenalloc);
	iinfo->i_lenExtents = inode->i_size;

	brelse(epos.bh);
}
gpl-2.0
timduru/tf101-kernel-tegra
arch/um/kernel/exitcode.c
8439
1688
/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/types.h>
#include <asm/uaccess.h>

/*
 * If read and write race, the read will still atomically read a valid
 * value.
 */
int uml_exitcode = 0;

/* Show the current exit code via /proc/exitcode. */
static int exitcode_proc_show(struct seq_file *m, void *v)
{
	int val;

	/*
	 * Save uml_exitcode in a local so that we don't need to guarantee
	 * that sprintf accesses it atomically.
	 */
	val = uml_exitcode;
	seq_printf(m, "%d\n", val);
	return 0;
}

static int exitcode_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, exitcode_proc_show, NULL);
}

/*
 * Parse an integer written to /proc/exitcode and store it in uml_exitcode.
 * Returns 'count' on success, -EFAULT on a bad user pointer, -EINVAL on
 * non-numeric input.
 */
static ssize_t exitcode_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	char *end, buf[sizeof("nnnnn\0")];
	size_t size;
	int tmp;

	/*
	 * Clamp the user-supplied length: the original code copied 'count'
	 * bytes blindly into the 7-byte stack buffer, letting userspace
	 * overflow it.  Reserve one byte for the NUL terminator so that
	 * simple_strtol() below never scans past the end of 'buf'.
	 */
	size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, buffer, size))
		return -EFAULT;
	buf[size] = '\0';

	tmp = simple_strtol(buf, &end, 0);
	/* Accept trailing whitespace (e.g. the newline from "echo"). */
	if ((*end != '\0') && !isspace(*end))
		return -EINVAL;

	uml_exitcode = tmp;
	return count;
}

static const struct file_operations exitcode_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= exitcode_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= exitcode_proc_write,
};

/* Register /proc/exitcode; failure is logged but not fatal. */
static int make_proc_exitcode(void)
{
	struct proc_dir_entry *ent;

	ent = proc_create("exitcode", 0600, NULL, &exitcode_proc_fops);
	if (ent == NULL) {
		printk(KERN_WARNING "make_proc_exitcode : Failed to register "
		       "/proc/exitcode\n");
		return 0;
	}
	return 0;
}

__initcall(make_proc_exitcode);
gpl-2.0
SM-G920P/G92XP-R4_COI9
drivers/usb/host/whci/init.c
9719
5014
/* * Wireless Host Controller (WHC) initialization. * * Copyright (C) 2007 Cambridge Silicon Radio Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/dma-mapping.h> #include <linux/uwb/umc.h> #include "../../wusbcore/wusbhc.h" #include "whcd.h" /* * Reset the host controller. */ static void whc_hw_reset(struct whc *whc) { le_writel(WUSBCMD_WHCRESET, whc->base + WUSBCMD); whci_wait_for(&whc->umc->dev, whc->base + WUSBCMD, WUSBCMD_WHCRESET, 0, 100, "reset"); } static void whc_hw_init_di_buf(struct whc *whc) { int d; /* Disable all entries in the Device Information buffer. */ for (d = 0; d < whc->n_devices; d++) whc->di_buf[d].addr_sec_info = WHC_DI_DISABLE; le_writeq(whc->di_buf_dma, whc->base + WUSBDEVICEINFOADDR); } static void whc_hw_init_dn_buf(struct whc *whc) { /* Clear the Device Notification buffer to ensure the V (valid) * bits are clear. 
*/ memset(whc->dn_buf, 0, 4096); le_writeq(whc->dn_buf_dma, whc->base + WUSBDNTSBUFADDR); } int whc_init(struct whc *whc) { u32 whcsparams; int ret, i; resource_size_t start, len; spin_lock_init(&whc->lock); mutex_init(&whc->mutex); init_waitqueue_head(&whc->cmd_wq); init_waitqueue_head(&whc->async_list_wq); init_waitqueue_head(&whc->periodic_list_wq); whc->workqueue = create_singlethread_workqueue(dev_name(&whc->umc->dev)); if (whc->workqueue == NULL) { ret = -ENOMEM; goto error; } INIT_WORK(&whc->dn_work, whc_dn_work); INIT_WORK(&whc->async_work, scan_async_work); INIT_LIST_HEAD(&whc->async_list); INIT_LIST_HEAD(&whc->async_removed_list); INIT_WORK(&whc->periodic_work, scan_periodic_work); for (i = 0; i < 5; i++) INIT_LIST_HEAD(&whc->periodic_list[i]); INIT_LIST_HEAD(&whc->periodic_removed_list); /* Map HC registers. */ start = whc->umc->resource.start; len = whc->umc->resource.end - start + 1; if (!request_mem_region(start, len, "whci-hc")) { dev_err(&whc->umc->dev, "can't request HC region\n"); ret = -EBUSY; goto error; } whc->base_phys = start; whc->base = ioremap(start, len); if (!whc->base) { dev_err(&whc->umc->dev, "ioremap\n"); ret = -ENOMEM; goto error; } whc_hw_reset(whc); /* Read maximum number of devices, keys and MMC IEs. */ whcsparams = le_readl(whc->base + WHCSPARAMS); whc->n_devices = WHCSPARAMS_TO_N_DEVICES(whcsparams); whc->n_keys = WHCSPARAMS_TO_N_KEYS(whcsparams); whc->n_mmc_ies = WHCSPARAMS_TO_N_MMC_IES(whcsparams); dev_dbg(&whc->umc->dev, "N_DEVICES = %d, N_KEYS = %d, N_MMC_IES = %d\n", whc->n_devices, whc->n_keys, whc->n_mmc_ies); whc->qset_pool = dma_pool_create("qset", &whc->umc->dev, sizeof(struct whc_qset), 64, 0); if (whc->qset_pool == NULL) { ret = -ENOMEM; goto error; } ret = asl_init(whc); if (ret < 0) goto error; ret = pzl_init(whc); if (ret < 0) goto error; /* Allocate and initialize a buffer for generic commands, the Device Information buffer, and the Device Notification buffer. 
*/ whc->gen_cmd_buf = dma_alloc_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN, &whc->gen_cmd_buf_dma, GFP_KERNEL); if (whc->gen_cmd_buf == NULL) { ret = -ENOMEM; goto error; } whc->dn_buf = dma_alloc_coherent(&whc->umc->dev, sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES, &whc->dn_buf_dma, GFP_KERNEL); if (!whc->dn_buf) { ret = -ENOMEM; goto error; } whc_hw_init_dn_buf(whc); whc->di_buf = dma_alloc_coherent(&whc->umc->dev, sizeof(struct di_buf_entry) * whc->n_devices, &whc->di_buf_dma, GFP_KERNEL); if (!whc->di_buf) { ret = -ENOMEM; goto error; } whc_hw_init_di_buf(whc); return 0; error: whc_clean_up(whc); return ret; } void whc_clean_up(struct whc *whc) { resource_size_t len; if (whc->di_buf) dma_free_coherent(&whc->umc->dev, sizeof(struct di_buf_entry) * whc->n_devices, whc->di_buf, whc->di_buf_dma); if (whc->dn_buf) dma_free_coherent(&whc->umc->dev, sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES, whc->dn_buf, whc->dn_buf_dma); if (whc->gen_cmd_buf) dma_free_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN, whc->gen_cmd_buf, whc->gen_cmd_buf_dma); pzl_clean_up(whc); asl_clean_up(whc); if (whc->qset_pool) dma_pool_destroy(whc->qset_pool); len = resource_size(&whc->umc->resource); if (whc->base) iounmap(whc->base); if (whc->base_phys) release_mem_region(whc->base_phys, len); if (whc->workqueue) destroy_workqueue(whc->workqueue); }
gpl-2.0
softirq/linux-2.6.32.60
drivers/char/hw_random/mxc-rnga.c
9719
5804
/*
 * RNG driver for Freescale RNGA
 *
 * Copyright 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
 * Author: Alan Carvalho de Assis <acassis@gmail.com>
 */

/*
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 *
 * This driver is based on other RNG drivers.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/hw_random.h>
#include <linux/io.h>

/* RNGA Registers */
#define RNGA_CONTROL			0x00
#define RNGA_STATUS			0x04
#define RNGA_ENTROPY			0x08
#define RNGA_OUTPUT_FIFO		0x0c
#define RNGA_MODE			0x10
#define RNGA_VERIFICATION_CONTROL	0x14
#define RNGA_OSC_CONTROL_COUNTER	0x18
#define RNGA_OSC1_COUNTER		0x1c
#define RNGA_OSC2_COUNTER		0x20
#define RNGA_OSC_COUNTER_STATUS		0x24

/* RNGA Registers Range */
#define RNG_ADDR_RANGE			0x28

/* RNGA Control Register */
#define RNGA_CONTROL_SLEEP		0x00000010
#define RNGA_CONTROL_CLEAR_INT		0x00000008
#define RNGA_CONTROL_MASK_INTS		0x00000004
#define RNGA_CONTROL_HIGH_ASSURANCE	0x00000002
#define RNGA_CONTROL_GO			0x00000001

#define RNGA_STATUS_LEVEL_MASK		0x0000ff00

/* RNGA Status Register */
#define RNGA_STATUS_OSC_DEAD		0x80000000
#define RNGA_STATUS_SLEEP		0x00000010
#define RNGA_STATUS_ERROR_INT		0x00000008
#define RNGA_STATUS_FIFO_UNDERFLOW	0x00000004
#define RNGA_STATUS_LAST_READ_STATUS	0x00000002
#define RNGA_STATUS_SECURITY_VIOLATION	0x00000001

/* The single registered platform device; also guards against a second
 * probe (see mxc_rnga_probe). */
static struct platform_device *rng_dev;

/*
 * hwrng .data_present callback: report whether the output FIFO holds at
 * least one random word.  Returns 1 if data is available, 0 otherwise.
 */
static int mxc_rnga_data_present(struct hwrng *rng)
{
	int level;
	void __iomem *rng_base = (void __iomem *)rng->priv;

	/* how many random numbers is in FIFO? [0-16] */
	level = ((__raw_readl(rng_base + RNGA_STATUS) &
			RNGA_STATUS_LEVEL_MASK) >> 8);

	return level > 0 ? 1 : 0;
}

/*
 * hwrng .data_read callback: pop one 32-bit word from the FIFO into *data.
 * Returns 4 (bytes read) on success, 0 if the hardware flagged an error.
 */
static int mxc_rnga_data_read(struct hwrng *rng, u32 * data)
{
	int err;
	u32 ctrl;
	void __iomem *rng_base = (void __iomem *)rng->priv;

	/* retrieve a random number from FIFO */
	*data = __raw_readl(rng_base + RNGA_OUTPUT_FIFO);

	/* some error while reading this random number? */
	err = __raw_readl(rng_base + RNGA_STATUS) & RNGA_STATUS_ERROR_INT;

	/* if error: clear error interrupt, but doesn't return random number */
	if (err) {
		dev_dbg(&rng_dev->dev, "Error while reading random number!\n");
		ctrl = __raw_readl(rng_base + RNGA_CONTROL);
		__raw_writel(ctrl | RNGA_CONTROL_CLEAR_INT,
					rng_base + RNGA_CONTROL);
		return 0;
	} else
		return 4;
}

/*
 * hwrng .init callback: wake the block, verify the oscillator is alive,
 * and start random-number generation.  Returns 0 or -ENODEV.
 */
static int mxc_rnga_init(struct hwrng *rng)
{
	u32 ctrl, osc;
	void __iomem *rng_base = (void __iomem *)rng->priv;

	/* wake up */
	ctrl = __raw_readl(rng_base + RNGA_CONTROL);
	__raw_writel(ctrl & ~RNGA_CONTROL_SLEEP, rng_base + RNGA_CONTROL);

	/* verify if oscillator is working */
	osc = __raw_readl(rng_base + RNGA_STATUS);
	if (osc & RNGA_STATUS_OSC_DEAD) {
		dev_err(&rng_dev->dev, "RNGA Oscillator is dead!\n");
		return -ENODEV;
	}

	/* go running */
	ctrl = __raw_readl(rng_base + RNGA_CONTROL);
	__raw_writel(ctrl | RNGA_CONTROL_GO, rng_base + RNGA_CONTROL);

	return 0;
}

/* hwrng .cleanup callback: stop random-number generation. */
static void mxc_rnga_cleanup(struct hwrng *rng)
{
	u32 ctrl;
	void __iomem *rng_base = (void __iomem *)rng->priv;

	ctrl = __raw_readl(rng_base + RNGA_CONTROL);

	/* stop rnga */
	__raw_writel(ctrl & ~RNGA_CONTROL_GO, rng_base + RNGA_CONTROL);
}

static struct hwrng mxc_rnga = {
	.name = "mxc-rnga",
	.init = mxc_rnga_init,
	.cleanup = mxc_rnga_cleanup,
	.data_present = mxc_rnga_data_present,
	.data_read = mxc_rnga_data_read
};

/*
 * Probe: claim the clock, MMIO region and mapping, then register with the
 * hw_random core.  Error paths unwind in reverse acquisition order.
 */
static int __init mxc_rnga_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	struct clk *clk;
	struct resource *res, *mem;
	void __iomem *rng_base = NULL;

	/* Only a single RNGA instance is supported. */
	if (rng_dev)
		return -EBUSY;

	clk = clk_get(&pdev->dev, "rng");
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "Could not get rng_clk!\n");
		err = PTR_ERR(clk);
		goto out;
	}

	clk_enable(clk);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENOENT;
		goto err_region;
	}

	mem = request_mem_region(res->start, resource_size(res), pdev->name);
	if (mem == NULL) {
		err = -EBUSY;
		goto err_region;
	}

	rng_base = ioremap(res->start, resource_size(res));
	if (!rng_base) {
		err = -ENOMEM;
		goto err_ioremap;
	}

	/* hwrng callbacks recover the register base from rng->priv. */
	mxc_rnga.priv = (unsigned long)rng_base;

	err = hwrng_register(&mxc_rnga);
	if (err) {
		dev_err(&pdev->dev, "MXC RNGA registering failed (%d)\n", err);
		goto err_register;
	}

	rng_dev = pdev;

	dev_info(&pdev->dev, "MXC RNGA Registered.\n");

	return 0;

err_register:
	iounmap(rng_base);
	rng_base = NULL;

err_ioremap:
	release_mem_region(res->start, resource_size(res));

err_region:
	clk_disable(clk);
	clk_put(clk);

out:
	return err;
}

/* Remove: unregister from hw_random and release all probe resources. */
static int __exit mxc_rnga_remove(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	void __iomem *rng_base = (void __iomem *)mxc_rnga.priv;
	struct clk *clk = clk_get(&pdev->dev, "rng");

	hwrng_unregister(&mxc_rnga);

	iounmap(rng_base);

	release_mem_region(res->start, resource_size(res));

	clk_disable(clk);
	clk_put(clk);

	return 0;
}

static struct platform_driver mxc_rnga_driver = {
	.driver = {
		   .name = "mxc_rnga",
		   .owner = THIS_MODULE,
		   },
	.remove = __exit_p(mxc_rnga_remove),
};

static int __init mod_init(void)
{
	/* probe() is __init, so use platform_driver_probe() rather than
	 * platform_driver_register(). */
	return platform_driver_probe(&mxc_rnga_driver, mxc_rnga_probe);
}

static void __exit mod_exit(void)
{
	platform_driver_unregister(&mxc_rnga_driver);
}

module_init(mod_init);
module_exit(mod_exit);

MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("H/W RNGA driver for i.MX");
MODULE_LICENSE("GPL");
gpl-2.0
leonardoafa/android_kernel_motorola_msm8974
sound/pci/vx222/vx222_ops.c
12535
35542
/* * Driver for Digigram VX222 V2/Mic soundcards * * VX222-specific low-level routines * * Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/delay.h> #include <linux/device.h> #include <linux/firmware.h> #include <linux/mutex.h> #include <sound/core.h> #include <sound/control.h> #include <sound/tlv.h> #include <asm/io.h> #include "vx222.h" static int vx2_reg_offset[VX_REG_MAX] = { [VX_ICR] = 0x00, [VX_CVR] = 0x04, [VX_ISR] = 0x08, [VX_IVR] = 0x0c, [VX_RXH] = 0x14, [VX_RXM] = 0x18, [VX_RXL] = 0x1c, [VX_DMA] = 0x10, [VX_CDSP] = 0x20, [VX_CFG] = 0x24, [VX_RUER] = 0x28, [VX_DATA] = 0x2c, [VX_STATUS] = 0x30, [VX_LOFREQ] = 0x34, [VX_HIFREQ] = 0x38, [VX_CSUER] = 0x3c, [VX_SELMIC] = 0x40, [VX_COMPOT] = 0x44, // Write: POTENTIOMETER ; Read: COMPRESSION LEVEL activate [VX_SCOMPR] = 0x48, // Read: COMPRESSION THRESHOLD activate [VX_GLIMIT] = 0x4c, // Read: LEVEL LIMITATION activate [VX_INTCSR] = 0x4c, // VX_INTCSR_REGISTER_OFFSET [VX_CNTRL] = 0x50, // VX_CNTRL_REGISTER_OFFSET [VX_GPIOC] = 0x54, // VX_GPIOC (new with PLX9030) }; static int vx2_reg_index[VX_REG_MAX] = { [VX_ICR] = 1, [VX_CVR] = 1, [VX_ISR] = 1, [VX_IVR] = 1, [VX_RXH] = 1, [VX_RXM] = 1, [VX_RXL] = 1, [VX_DMA] = 1, [VX_CDSP] = 1, [VX_CFG] = 1, [VX_RUER] = 1, [VX_DATA] = 1, [VX_STATUS] = 1, [VX_LOFREQ] = 1, 
[VX_HIFREQ] = 1, [VX_CSUER] = 1, [VX_SELMIC] = 1, [VX_COMPOT] = 1, [VX_SCOMPR] = 1, [VX_GLIMIT] = 1, [VX_INTCSR] = 0, /* on the PLX */ [VX_CNTRL] = 0, /* on the PLX */ [VX_GPIOC] = 0, /* on the PLX */ }; static inline unsigned long vx2_reg_addr(struct vx_core *_chip, int reg) { struct snd_vx222 *chip = (struct snd_vx222 *)_chip; return chip->port[vx2_reg_index[reg]] + vx2_reg_offset[reg]; } /** * snd_vx_inb - read a byte from the register * @offset: register enum */ static unsigned char vx2_inb(struct vx_core *chip, int offset) { return inb(vx2_reg_addr(chip, offset)); } /** * snd_vx_outb - write a byte on the register * @offset: the register offset * @val: the value to write */ static void vx2_outb(struct vx_core *chip, int offset, unsigned char val) { outb(val, vx2_reg_addr(chip, offset)); /* printk(KERN_DEBUG "outb: %x -> %x\n", val, vx2_reg_addr(chip, offset)); */ } /** * snd_vx_inl - read a 32bit word from the register * @offset: register enum */ static unsigned int vx2_inl(struct vx_core *chip, int offset) { return inl(vx2_reg_addr(chip, offset)); } /** * snd_vx_outl - write a 32bit word on the register * @offset: the register enum * @val: the value to write */ static void vx2_outl(struct vx_core *chip, int offset, unsigned int val) { /* printk(KERN_DEBUG "outl: %x -> %x\n", val, vx2_reg_addr(chip, offset)); */ outl(val, vx2_reg_addr(chip, offset)); } /* * redefine macros to call directly */ #undef vx_inb #define vx_inb(chip,reg) vx2_inb((struct vx_core*)(chip), VX_##reg) #undef vx_outb #define vx_outb(chip,reg,val) vx2_outb((struct vx_core*)(chip), VX_##reg, val) #undef vx_inl #define vx_inl(chip,reg) vx2_inl((struct vx_core*)(chip), VX_##reg) #undef vx_outl #define vx_outl(chip,reg,val) vx2_outl((struct vx_core*)(chip), VX_##reg, val) /* * vx_reset_dsp - reset the DSP */ #define XX_DSP_RESET_WAIT_TIME 2 /* ms */ static void vx2_reset_dsp(struct vx_core *_chip) { struct snd_vx222 *chip = (struct snd_vx222 *)_chip; /* set the reset dsp bit to 0 */ 
vx_outl(chip, CDSP, chip->regCDSP & ~VX_CDSP_DSP_RESET_MASK); mdelay(XX_DSP_RESET_WAIT_TIME); chip->regCDSP |= VX_CDSP_DSP_RESET_MASK; /* set the reset dsp bit to 1 */ vx_outl(chip, CDSP, chip->regCDSP); } static int vx2_test_xilinx(struct vx_core *_chip) { struct snd_vx222 *chip = (struct snd_vx222 *)_chip; unsigned int data; snd_printdd("testing xilinx...\n"); /* This test uses several write/read sequences on TEST0 and TEST1 bits * to figure out whever or not the xilinx was correctly loaded */ /* We write 1 on CDSP.TEST0. We should get 0 on STATUS.TEST0. */ vx_outl(chip, CDSP, chip->regCDSP | VX_CDSP_TEST0_MASK); vx_inl(chip, ISR); data = vx_inl(chip, STATUS); if ((data & VX_STATUS_VAL_TEST0_MASK) == VX_STATUS_VAL_TEST0_MASK) { snd_printdd("bad!\n"); return -ENODEV; } /* We write 0 on CDSP.TEST0. We should get 1 on STATUS.TEST0. */ vx_outl(chip, CDSP, chip->regCDSP & ~VX_CDSP_TEST0_MASK); vx_inl(chip, ISR); data = vx_inl(chip, STATUS); if (! (data & VX_STATUS_VAL_TEST0_MASK)) { snd_printdd("bad! #2\n"); return -ENODEV; } if (_chip->type == VX_TYPE_BOARD) { /* not implemented on VX_2_BOARDS */ /* We write 1 on CDSP.TEST1. We should get 0 on STATUS.TEST1. */ vx_outl(chip, CDSP, chip->regCDSP | VX_CDSP_TEST1_MASK); vx_inl(chip, ISR); data = vx_inl(chip, STATUS); if ((data & VX_STATUS_VAL_TEST1_MASK) == VX_STATUS_VAL_TEST1_MASK) { snd_printdd("bad! #3\n"); return -ENODEV; } /* We write 0 on CDSP.TEST1. We should get 1 on STATUS.TEST1. */ vx_outl(chip, CDSP, chip->regCDSP & ~VX_CDSP_TEST1_MASK); vx_inl(chip, ISR); data = vx_inl(chip, STATUS); if (! (data & VX_STATUS_VAL_TEST1_MASK)) { snd_printdd("bad! #4\n"); return -ENODEV; } } snd_printdd("ok, xilinx fine.\n"); return 0; } /** * vx_setup_pseudo_dma - set up the pseudo dma read/write mode. 
* @do_write: 0 = read, 1 = set up for DMA write */ static void vx2_setup_pseudo_dma(struct vx_core *chip, int do_write) { /* Interrupt mode and HREQ pin enabled for host transmit data transfers * (in case of the use of the pseudo-dma facility). */ vx_outl(chip, ICR, do_write ? ICR_TREQ : ICR_RREQ); /* Reset the pseudo-dma register (in case of the use of the * pseudo-dma facility). */ vx_outl(chip, RESET_DMA, 0); } /* * vx_release_pseudo_dma - disable the pseudo-DMA mode */ static inline void vx2_release_pseudo_dma(struct vx_core *chip) { /* HREQ pin disabled. */ vx_outl(chip, ICR, 0); } /* pseudo-dma write */ static void vx2_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime, struct vx_pipe *pipe, int count) { unsigned long port = vx2_reg_addr(chip, VX_DMA); int offset = pipe->hw_ptr; u32 *addr = (u32 *)(runtime->dma_area + offset); if (snd_BUG_ON(count % 4)) return; vx2_setup_pseudo_dma(chip, 1); /* Transfer using pseudo-dma. */ if (offset + count > pipe->buffer_bytes) { int length = pipe->buffer_bytes - offset; count -= length; length >>= 2; /* in 32bit words */ /* Transfer using pseudo-dma. */ while (length-- > 0) { outl(cpu_to_le32(*addr), port); addr++; } addr = (u32 *)runtime->dma_area; pipe->hw_ptr = 0; } pipe->hw_ptr += count; count >>= 2; /* in 32bit words */ /* Transfer using pseudo-dma. */ while (count-- > 0) { outl(cpu_to_le32(*addr), port); addr++; } vx2_release_pseudo_dma(chip); } /* pseudo dma read */ static void vx2_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime, struct vx_pipe *pipe, int count) { int offset = pipe->hw_ptr; u32 *addr = (u32 *)(runtime->dma_area + offset); unsigned long port = vx2_reg_addr(chip, VX_DMA); if (snd_BUG_ON(count % 4)) return; vx2_setup_pseudo_dma(chip, 0); /* Transfer using pseudo-dma. */ if (offset + count > pipe->buffer_bytes) { int length = pipe->buffer_bytes - offset; count -= length; length >>= 2; /* in 32bit words */ /* Transfer using pseudo-dma. 
*/ while (length-- > 0) *addr++ = le32_to_cpu(inl(port)); addr = (u32 *)runtime->dma_area; pipe->hw_ptr = 0; } pipe->hw_ptr += count; count >>= 2; /* in 32bit words */ /* Transfer using pseudo-dma. */ while (count-- > 0) *addr++ = le32_to_cpu(inl(port)); vx2_release_pseudo_dma(chip); } #define VX_XILINX_RESET_MASK 0x40000000 #define VX_USERBIT0_MASK 0x00000004 #define VX_USERBIT1_MASK 0x00000020 #define VX_CNTRL_REGISTER_VALUE 0x00172012 /* * transfer counts bits to PLX */ static int put_xilinx_data(struct vx_core *chip, unsigned int port, unsigned int counts, unsigned char data) { unsigned int i; for (i = 0; i < counts; i++) { unsigned int val; /* set the clock bit to 0. */ val = VX_CNTRL_REGISTER_VALUE & ~VX_USERBIT0_MASK; vx2_outl(chip, port, val); vx2_inl(chip, port); udelay(1); if (data & (1 << i)) val |= VX_USERBIT1_MASK; else val &= ~VX_USERBIT1_MASK; vx2_outl(chip, port, val); vx2_inl(chip, port); /* set the clock bit to 1. */ val |= VX_USERBIT0_MASK; vx2_outl(chip, port, val); vx2_inl(chip, port); udelay(1); } return 0; } /* * load the xilinx image */ static int vx2_load_xilinx_binary(struct vx_core *chip, const struct firmware *xilinx) { unsigned int i; unsigned int port; const unsigned char *image; /* XILINX reset (wait at least 1 millisecond between reset on and off). */ vx_outl(chip, CNTRL, VX_CNTRL_REGISTER_VALUE | VX_XILINX_RESET_MASK); vx_inl(chip, CNTRL); msleep(10); vx_outl(chip, CNTRL, VX_CNTRL_REGISTER_VALUE); vx_inl(chip, CNTRL); msleep(10); if (chip->type == VX_TYPE_BOARD) port = VX_CNTRL; else port = VX_GPIOC; /* VX222 V2 and VX222_MIC_BOARD with new PLX9030 use this register */ image = xilinx->data; for (i = 0; i < xilinx->size; i++, image++) { if (put_xilinx_data(chip, port, 8, *image) < 0) return -EINVAL; /* don't take too much time in this loop... 
*/ cond_resched(); } put_xilinx_data(chip, port, 4, 0xff); /* end signature */ msleep(200); /* test after loading (is buggy with VX222) */ if (chip->type != VX_TYPE_BOARD) { /* Test if load successful: test bit 8 of register GPIOC (VX222: use CNTRL) ! */ i = vx_inl(chip, GPIOC); if (i & 0x0100) return 0; snd_printk(KERN_ERR "vx222: xilinx test failed after load, GPIOC=0x%x\n", i); return -EINVAL; } return 0; } /* * load the boot/dsp images */ static int vx2_load_dsp(struct vx_core *vx, int index, const struct firmware *dsp) { int err; switch (index) { case 1: /* xilinx image */ if ((err = vx2_load_xilinx_binary(vx, dsp)) < 0) return err; if ((err = vx2_test_xilinx(vx)) < 0) return err; return 0; case 2: /* DSP boot */ return snd_vx_dsp_boot(vx, dsp); case 3: /* DSP image */ return snd_vx_dsp_load(vx, dsp); default: snd_BUG(); return -EINVAL; } } /* * vx_test_and_ack - test and acknowledge interrupt * * called from irq hander, too * * spinlock held! */ static int vx2_test_and_ack(struct vx_core *chip) { /* not booted yet? */ if (! (chip->chip_status & VX_STAT_XILINX_LOADED)) return -ENXIO; if (! 
(vx_inl(chip, STATUS) & VX_STATUS_MEMIRQ_MASK)) return -EIO; /* ok, interrupts generated, now ack it */ /* set ACQUIT bit up and down */ vx_outl(chip, STATUS, 0); /* useless read just to spend some time and maintain * the ACQUIT signal up for a while ( a bus cycle ) */ vx_inl(chip, STATUS); /* ack */ vx_outl(chip, STATUS, VX_STATUS_MEMIRQ_MASK); /* useless read just to spend some time and maintain * the ACQUIT signal up for a while ( a bus cycle ) */ vx_inl(chip, STATUS); /* clear */ vx_outl(chip, STATUS, 0); return 0; } /* * vx_validate_irq - enable/disable IRQ */ static void vx2_validate_irq(struct vx_core *_chip, int enable) { struct snd_vx222 *chip = (struct snd_vx222 *)_chip; /* Set the interrupt enable bit to 1 in CDSP register */ if (enable) { /* Set the PCI interrupt enable bit to 1.*/ vx_outl(chip, INTCSR, VX_INTCSR_VALUE|VX_PCI_INTERRUPT_MASK); chip->regCDSP |= VX_CDSP_VALID_IRQ_MASK; } else { /* Set the PCI interrupt enable bit to 0. */ vx_outl(chip, INTCSR, VX_INTCSR_VALUE&~VX_PCI_INTERRUPT_MASK); chip->regCDSP &= ~VX_CDSP_VALID_IRQ_MASK; } vx_outl(chip, CDSP, chip->regCDSP); } /* * write an AKM codec data (24bit) */ static void vx2_write_codec_reg(struct vx_core *chip, unsigned int data) { unsigned int i; vx_inl(chip, HIFREQ); /* We have to send 24 bits (3 x 8 bits). Start with most signif. Bit */ for (i = 0; i < 24; i++, data <<= 1) vx_outl(chip, DATA, ((data & 0x800000) ? 
VX_DATA_CODEC_MASK : 0)); /* Terminate access to codec registers */ vx_inl(chip, RUER); } #define AKM_CODEC_POWER_CONTROL_CMD 0xA007 #define AKM_CODEC_RESET_ON_CMD 0xA100 #define AKM_CODEC_RESET_OFF_CMD 0xA103 #define AKM_CODEC_CLOCK_FORMAT_CMD 0xA240 #define AKM_CODEC_MUTE_CMD 0xA38D #define AKM_CODEC_UNMUTE_CMD 0xA30D #define AKM_CODEC_LEFT_LEVEL_CMD 0xA400 #define AKM_CODEC_RIGHT_LEVEL_CMD 0xA500 static const u8 vx2_akm_gains_lut[VX2_AKM_LEVEL_MAX+1] = { 0x7f, // [000] = +0.000 dB -> AKM(0x7f) = +0.000 dB error(+0.000 dB) 0x7d, // [001] = -0.500 dB -> AKM(0x7d) = -0.572 dB error(-0.072 dB) 0x7c, // [002] = -1.000 dB -> AKM(0x7c) = -0.873 dB error(+0.127 dB) 0x7a, // [003] = -1.500 dB -> AKM(0x7a) = -1.508 dB error(-0.008 dB) 0x79, // [004] = -2.000 dB -> AKM(0x79) = -1.844 dB error(+0.156 dB) 0x77, // [005] = -2.500 dB -> AKM(0x77) = -2.557 dB error(-0.057 dB) 0x76, // [006] = -3.000 dB -> AKM(0x76) = -2.937 dB error(+0.063 dB) 0x75, // [007] = -3.500 dB -> AKM(0x75) = -3.334 dB error(+0.166 dB) 0x73, // [008] = -4.000 dB -> AKM(0x73) = -4.188 dB error(-0.188 dB) 0x72, // [009] = -4.500 dB -> AKM(0x72) = -4.648 dB error(-0.148 dB) 0x71, // [010] = -5.000 dB -> AKM(0x71) = -5.134 dB error(-0.134 dB) 0x70, // [011] = -5.500 dB -> AKM(0x70) = -5.649 dB error(-0.149 dB) 0x6f, // [012] = -6.000 dB -> AKM(0x6f) = -6.056 dB error(-0.056 dB) 0x6d, // [013] = -6.500 dB -> AKM(0x6d) = -6.631 dB error(-0.131 dB) 0x6c, // [014] = -7.000 dB -> AKM(0x6c) = -6.933 dB error(+0.067 dB) 0x6a, // [015] = -7.500 dB -> AKM(0x6a) = -7.571 dB error(-0.071 dB) 0x69, // [016] = -8.000 dB -> AKM(0x69) = -7.909 dB error(+0.091 dB) 0x67, // [017] = -8.500 dB -> AKM(0x67) = -8.626 dB error(-0.126 dB) 0x66, // [018] = -9.000 dB -> AKM(0x66) = -9.008 dB error(-0.008 dB) 0x65, // [019] = -9.500 dB -> AKM(0x65) = -9.407 dB error(+0.093 dB) 0x64, // [020] = -10.000 dB -> AKM(0x64) = -9.826 dB error(+0.174 dB) 0x62, // [021] = -10.500 dB -> AKM(0x62) = -10.730 dB error(-0.230 dB) 0x61, // [022] = 
-11.000 dB -> AKM(0x61) = -11.219 dB error(-0.219 dB) 0x60, // [023] = -11.500 dB -> AKM(0x60) = -11.738 dB error(-0.238 dB) 0x5f, // [024] = -12.000 dB -> AKM(0x5f) = -12.149 dB error(-0.149 dB) 0x5e, // [025] = -12.500 dB -> AKM(0x5e) = -12.434 dB error(+0.066 dB) 0x5c, // [026] = -13.000 dB -> AKM(0x5c) = -13.033 dB error(-0.033 dB) 0x5b, // [027] = -13.500 dB -> AKM(0x5b) = -13.350 dB error(+0.150 dB) 0x59, // [028] = -14.000 dB -> AKM(0x59) = -14.018 dB error(-0.018 dB) 0x58, // [029] = -14.500 dB -> AKM(0x58) = -14.373 dB error(+0.127 dB) 0x56, // [030] = -15.000 dB -> AKM(0x56) = -15.130 dB error(-0.130 dB) 0x55, // [031] = -15.500 dB -> AKM(0x55) = -15.534 dB error(-0.034 dB) 0x54, // [032] = -16.000 dB -> AKM(0x54) = -15.958 dB error(+0.042 dB) 0x53, // [033] = -16.500 dB -> AKM(0x53) = -16.404 dB error(+0.096 dB) 0x52, // [034] = -17.000 dB -> AKM(0x52) = -16.874 dB error(+0.126 dB) 0x51, // [035] = -17.500 dB -> AKM(0x51) = -17.371 dB error(+0.129 dB) 0x50, // [036] = -18.000 dB -> AKM(0x50) = -17.898 dB error(+0.102 dB) 0x4e, // [037] = -18.500 dB -> AKM(0x4e) = -18.605 dB error(-0.105 dB) 0x4d, // [038] = -19.000 dB -> AKM(0x4d) = -18.905 dB error(+0.095 dB) 0x4b, // [039] = -19.500 dB -> AKM(0x4b) = -19.538 dB error(-0.038 dB) 0x4a, // [040] = -20.000 dB -> AKM(0x4a) = -19.872 dB error(+0.128 dB) 0x48, // [041] = -20.500 dB -> AKM(0x48) = -20.583 dB error(-0.083 dB) 0x47, // [042] = -21.000 dB -> AKM(0x47) = -20.961 dB error(+0.039 dB) 0x46, // [043] = -21.500 dB -> AKM(0x46) = -21.356 dB error(+0.144 dB) 0x44, // [044] = -22.000 dB -> AKM(0x44) = -22.206 dB error(-0.206 dB) 0x43, // [045] = -22.500 dB -> AKM(0x43) = -22.664 dB error(-0.164 dB) 0x42, // [046] = -23.000 dB -> AKM(0x42) = -23.147 dB error(-0.147 dB) 0x41, // [047] = -23.500 dB -> AKM(0x41) = -23.659 dB error(-0.159 dB) 0x40, // [048] = -24.000 dB -> AKM(0x40) = -24.203 dB error(-0.203 dB) 0x3f, // [049] = -24.500 dB -> AKM(0x3f) = -24.635 dB error(-0.135 dB) 0x3e, // [050] = -25.000 dB 
-> AKM(0x3e) = -24.935 dB error(+0.065 dB) 0x3c, // [051] = -25.500 dB -> AKM(0x3c) = -25.569 dB error(-0.069 dB) 0x3b, // [052] = -26.000 dB -> AKM(0x3b) = -25.904 dB error(+0.096 dB) 0x39, // [053] = -26.500 dB -> AKM(0x39) = -26.615 dB error(-0.115 dB) 0x38, // [054] = -27.000 dB -> AKM(0x38) = -26.994 dB error(+0.006 dB) 0x37, // [055] = -27.500 dB -> AKM(0x37) = -27.390 dB error(+0.110 dB) 0x36, // [056] = -28.000 dB -> AKM(0x36) = -27.804 dB error(+0.196 dB) 0x34, // [057] = -28.500 dB -> AKM(0x34) = -28.699 dB error(-0.199 dB) 0x33, // [058] = -29.000 dB -> AKM(0x33) = -29.183 dB error(-0.183 dB) 0x32, // [059] = -29.500 dB -> AKM(0x32) = -29.696 dB error(-0.196 dB) 0x31, // [060] = -30.000 dB -> AKM(0x31) = -30.241 dB error(-0.241 dB) 0x31, // [061] = -30.500 dB -> AKM(0x31) = -30.241 dB error(+0.259 dB) 0x30, // [062] = -31.000 dB -> AKM(0x30) = -30.823 dB error(+0.177 dB) 0x2e, // [063] = -31.500 dB -> AKM(0x2e) = -31.610 dB error(-0.110 dB) 0x2d, // [064] = -32.000 dB -> AKM(0x2d) = -31.945 dB error(+0.055 dB) 0x2b, // [065] = -32.500 dB -> AKM(0x2b) = -32.659 dB error(-0.159 dB) 0x2a, // [066] = -33.000 dB -> AKM(0x2a) = -33.038 dB error(-0.038 dB) 0x29, // [067] = -33.500 dB -> AKM(0x29) = -33.435 dB error(+0.065 dB) 0x28, // [068] = -34.000 dB -> AKM(0x28) = -33.852 dB error(+0.148 dB) 0x27, // [069] = -34.500 dB -> AKM(0x27) = -34.289 dB error(+0.211 dB) 0x25, // [070] = -35.000 dB -> AKM(0x25) = -35.235 dB error(-0.235 dB) 0x24, // [071] = -35.500 dB -> AKM(0x24) = -35.750 dB error(-0.250 dB) 0x24, // [072] = -36.000 dB -> AKM(0x24) = -35.750 dB error(+0.250 dB) 0x23, // [073] = -36.500 dB -> AKM(0x23) = -36.297 dB error(+0.203 dB) 0x22, // [074] = -37.000 dB -> AKM(0x22) = -36.881 dB error(+0.119 dB) 0x21, // [075] = -37.500 dB -> AKM(0x21) = -37.508 dB error(-0.008 dB) 0x20, // [076] = -38.000 dB -> AKM(0x20) = -38.183 dB error(-0.183 dB) 0x1f, // [077] = -38.500 dB -> AKM(0x1f) = -38.726 dB error(-0.226 dB) 0x1e, // [078] = -39.000 dB -> 
AKM(0x1e) = -39.108 dB error(-0.108 dB) 0x1d, // [079] = -39.500 dB -> AKM(0x1d) = -39.507 dB error(-0.007 dB) 0x1c, // [080] = -40.000 dB -> AKM(0x1c) = -39.926 dB error(+0.074 dB) 0x1b, // [081] = -40.500 dB -> AKM(0x1b) = -40.366 dB error(+0.134 dB) 0x1a, // [082] = -41.000 dB -> AKM(0x1a) = -40.829 dB error(+0.171 dB) 0x19, // [083] = -41.500 dB -> AKM(0x19) = -41.318 dB error(+0.182 dB) 0x18, // [084] = -42.000 dB -> AKM(0x18) = -41.837 dB error(+0.163 dB) 0x17, // [085] = -42.500 dB -> AKM(0x17) = -42.389 dB error(+0.111 dB) 0x16, // [086] = -43.000 dB -> AKM(0x16) = -42.978 dB error(+0.022 dB) 0x15, // [087] = -43.500 dB -> AKM(0x15) = -43.610 dB error(-0.110 dB) 0x14, // [088] = -44.000 dB -> AKM(0x14) = -44.291 dB error(-0.291 dB) 0x14, // [089] = -44.500 dB -> AKM(0x14) = -44.291 dB error(+0.209 dB) 0x13, // [090] = -45.000 dB -> AKM(0x13) = -45.031 dB error(-0.031 dB) 0x12, // [091] = -45.500 dB -> AKM(0x12) = -45.840 dB error(-0.340 dB) 0x12, // [092] = -46.000 dB -> AKM(0x12) = -45.840 dB error(+0.160 dB) 0x11, // [093] = -46.500 dB -> AKM(0x11) = -46.731 dB error(-0.231 dB) 0x11, // [094] = -47.000 dB -> AKM(0x11) = -46.731 dB error(+0.269 dB) 0x10, // [095] = -47.500 dB -> AKM(0x10) = -47.725 dB error(-0.225 dB) 0x10, // [096] = -48.000 dB -> AKM(0x10) = -47.725 dB error(+0.275 dB) 0x0f, // [097] = -48.500 dB -> AKM(0x0f) = -48.553 dB error(-0.053 dB) 0x0e, // [098] = -49.000 dB -> AKM(0x0e) = -49.152 dB error(-0.152 dB) 0x0d, // [099] = -49.500 dB -> AKM(0x0d) = -49.796 dB error(-0.296 dB) 0x0d, // [100] = -50.000 dB -> AKM(0x0d) = -49.796 dB error(+0.204 dB) 0x0c, // [101] = -50.500 dB -> AKM(0x0c) = -50.491 dB error(+0.009 dB) 0x0b, // [102] = -51.000 dB -> AKM(0x0b) = -51.247 dB error(-0.247 dB) 0x0b, // [103] = -51.500 dB -> AKM(0x0b) = -51.247 dB error(+0.253 dB) 0x0a, // [104] = -52.000 dB -> AKM(0x0a) = -52.075 dB error(-0.075 dB) 0x0a, // [105] = -52.500 dB -> AKM(0x0a) = -52.075 dB error(+0.425 dB) 0x09, // [106] = -53.000 dB -> AKM(0x09) = 
-52.990 dB error(+0.010 dB) 0x09, // [107] = -53.500 dB -> AKM(0x09) = -52.990 dB error(+0.510 dB) 0x08, // [108] = -54.000 dB -> AKM(0x08) = -54.013 dB error(-0.013 dB) 0x08, // [109] = -54.500 dB -> AKM(0x08) = -54.013 dB error(+0.487 dB) 0x07, // [110] = -55.000 dB -> AKM(0x07) = -55.173 dB error(-0.173 dB) 0x07, // [111] = -55.500 dB -> AKM(0x07) = -55.173 dB error(+0.327 dB) 0x06, // [112] = -56.000 dB -> AKM(0x06) = -56.512 dB error(-0.512 dB) 0x06, // [113] = -56.500 dB -> AKM(0x06) = -56.512 dB error(-0.012 dB) 0x06, // [114] = -57.000 dB -> AKM(0x06) = -56.512 dB error(+0.488 dB) 0x05, // [115] = -57.500 dB -> AKM(0x05) = -58.095 dB error(-0.595 dB) 0x05, // [116] = -58.000 dB -> AKM(0x05) = -58.095 dB error(-0.095 dB) 0x05, // [117] = -58.500 dB -> AKM(0x05) = -58.095 dB error(+0.405 dB) 0x05, // [118] = -59.000 dB -> AKM(0x05) = -58.095 dB error(+0.905 dB) 0x04, // [119] = -59.500 dB -> AKM(0x04) = -60.034 dB error(-0.534 dB) 0x04, // [120] = -60.000 dB -> AKM(0x04) = -60.034 dB error(-0.034 dB) 0x04, // [121] = -60.500 dB -> AKM(0x04) = -60.034 dB error(+0.466 dB) 0x04, // [122] = -61.000 dB -> AKM(0x04) = -60.034 dB error(+0.966 dB) 0x03, // [123] = -61.500 dB -> AKM(0x03) = -62.532 dB error(-1.032 dB) 0x03, // [124] = -62.000 dB -> AKM(0x03) = -62.532 dB error(-0.532 dB) 0x03, // [125] = -62.500 dB -> AKM(0x03) = -62.532 dB error(-0.032 dB) 0x03, // [126] = -63.000 dB -> AKM(0x03) = -62.532 dB error(+0.468 dB) 0x03, // [127] = -63.500 dB -> AKM(0x03) = -62.532 dB error(+0.968 dB) 0x03, // [128] = -64.000 dB -> AKM(0x03) = -62.532 dB error(+1.468 dB) 0x02, // [129] = -64.500 dB -> AKM(0x02) = -66.054 dB error(-1.554 dB) 0x02, // [130] = -65.000 dB -> AKM(0x02) = -66.054 dB error(-1.054 dB) 0x02, // [131] = -65.500 dB -> AKM(0x02) = -66.054 dB error(-0.554 dB) 0x02, // [132] = -66.000 dB -> AKM(0x02) = -66.054 dB error(-0.054 dB) 0x02, // [133] = -66.500 dB -> AKM(0x02) = -66.054 dB error(+0.446 dB) 0x02, // [134] = -67.000 dB -> AKM(0x02) = -66.054 dB 
error(+0.946 dB) 0x02, // [135] = -67.500 dB -> AKM(0x02) = -66.054 dB error(+1.446 dB) 0x02, // [136] = -68.000 dB -> AKM(0x02) = -66.054 dB error(+1.946 dB) 0x02, // [137] = -68.500 dB -> AKM(0x02) = -66.054 dB error(+2.446 dB) 0x02, // [138] = -69.000 dB -> AKM(0x02) = -66.054 dB error(+2.946 dB) 0x01, // [139] = -69.500 dB -> AKM(0x01) = -72.075 dB error(-2.575 dB) 0x01, // [140] = -70.000 dB -> AKM(0x01) = -72.075 dB error(-2.075 dB) 0x01, // [141] = -70.500 dB -> AKM(0x01) = -72.075 dB error(-1.575 dB) 0x01, // [142] = -71.000 dB -> AKM(0x01) = -72.075 dB error(-1.075 dB) 0x01, // [143] = -71.500 dB -> AKM(0x01) = -72.075 dB error(-0.575 dB) 0x01, // [144] = -72.000 dB -> AKM(0x01) = -72.075 dB error(-0.075 dB) 0x01, // [145] = -72.500 dB -> AKM(0x01) = -72.075 dB error(+0.425 dB) 0x01, // [146] = -73.000 dB -> AKM(0x01) = -72.075 dB error(+0.925 dB) 0x00}; // [147] = -73.500 dB -> AKM(0x00) = mute error(+infini) /* * pseudo-codec write entry */ static void vx2_write_akm(struct vx_core *chip, int reg, unsigned int data) { unsigned int val; if (reg == XX_CODEC_DAC_CONTROL_REGISTER) { vx2_write_codec_reg(chip, data ? 
AKM_CODEC_MUTE_CMD : AKM_CODEC_UNMUTE_CMD); return; } /* `data' is a value between 0x0 and VX2_AKM_LEVEL_MAX = 0x093, in the case of the AKM codecs, we need a look up table, as there is no linear matching between the driver codec values and the real dBu value */ if (snd_BUG_ON(data >= sizeof(vx2_akm_gains_lut))) return; switch (reg) { case XX_CODEC_LEVEL_LEFT_REGISTER: val = AKM_CODEC_LEFT_LEVEL_CMD; break; case XX_CODEC_LEVEL_RIGHT_REGISTER: val = AKM_CODEC_RIGHT_LEVEL_CMD; break; default: snd_BUG(); return; } val |= vx2_akm_gains_lut[data]; vx2_write_codec_reg(chip, val); } /* * write codec bit for old VX222 board */ static void vx2_old_write_codec_bit(struct vx_core *chip, int codec, unsigned int data) { int i; /* activate access to codec registers */ vx_inl(chip, HIFREQ); for (i = 0; i < 24; i++, data <<= 1) vx_outl(chip, DATA, ((data & 0x800000) ? VX_DATA_CODEC_MASK : 0)); /* Terminate access to codec registers */ vx_inl(chip, RUER); } /* * reset codec bit */ static void vx2_reset_codec(struct vx_core *_chip) { struct snd_vx222 *chip = (struct snd_vx222 *)_chip; /* Set the reset CODEC bit to 0. */ vx_outl(chip, CDSP, chip->regCDSP &~ VX_CDSP_CODEC_RESET_MASK); vx_inl(chip, CDSP); msleep(10); /* Set the reset CODEC bit to 1. 
*/ chip->regCDSP |= VX_CDSP_CODEC_RESET_MASK; vx_outl(chip, CDSP, chip->regCDSP); vx_inl(chip, CDSP); if (_chip->type == VX_TYPE_BOARD) { msleep(1); return; } msleep(5); /* additionnel wait time for AKM's */ vx2_write_codec_reg(_chip, AKM_CODEC_POWER_CONTROL_CMD); /* DAC power up, ADC power up, Vref power down */ vx2_write_codec_reg(_chip, AKM_CODEC_CLOCK_FORMAT_CMD); /* default */ vx2_write_codec_reg(_chip, AKM_CODEC_MUTE_CMD); /* Mute = ON ,Deemphasis = OFF */ vx2_write_codec_reg(_chip, AKM_CODEC_RESET_OFF_CMD); /* DAC and ADC normal operation */ if (_chip->type == VX_TYPE_MIC) { /* set up the micro input selector */ chip->regSELMIC = MICRO_SELECT_INPUT_NORM | MICRO_SELECT_PREAMPLI_G_0 | MICRO_SELECT_NOISE_T_52DB; /* reset phantom power supply */ chip->regSELMIC &= ~MICRO_SELECT_PHANTOM_ALIM; vx_outl(_chip, SELMIC, chip->regSELMIC); } } /* * change the audio source */ static void vx2_change_audio_source(struct vx_core *_chip, int src) { struct snd_vx222 *chip = (struct snd_vx222 *)_chip; switch (src) { case VX_AUDIO_SRC_DIGITAL: chip->regCFG |= VX_CFG_DATAIN_SEL_MASK; break; default: chip->regCFG &= ~VX_CFG_DATAIN_SEL_MASK; break; } vx_outl(chip, CFG, chip->regCFG); } /* * set the clock source */ static void vx2_set_clock_source(struct vx_core *_chip, int source) { struct snd_vx222 *chip = (struct snd_vx222 *)_chip; if (source == INTERNAL_QUARTZ) chip->regCFG &= ~VX_CFG_CLOCKIN_SEL_MASK; else chip->regCFG |= VX_CFG_CLOCKIN_SEL_MASK; vx_outl(chip, CFG, chip->regCFG); } /* * reset the board */ static void vx2_reset_board(struct vx_core *_chip, int cold_reset) { struct snd_vx222 *chip = (struct snd_vx222 *)_chip; /* initialize the register values */ chip->regCDSP = VX_CDSP_CODEC_RESET_MASK | VX_CDSP_DSP_RESET_MASK ; chip->regCFG = 0; } /* * input level controls for VX222 Mic */ /* Micro level is specified to be adjustable from -96dB to 63 dB (board coded 0x00 ... 
318), * 318 = 210 + 36 + 36 + 36 (210 = +9dB variable) (3 * 36 = 3 steps of 18dB pre ampli) * as we will mute if less than -110dB, so let's simply use line input coded levels and add constant offset ! */ #define V2_MICRO_LEVEL_RANGE (318 - 255) static void vx2_set_input_level(struct snd_vx222 *chip) { int i, miclevel, preamp; unsigned int data; miclevel = chip->mic_level; miclevel += V2_MICRO_LEVEL_RANGE; /* add 318 - 0xff */ preamp = 0; while (miclevel > 210) { /* limitation to +9dB of 3310 real gain */ preamp++; /* raise pre ampli + 18dB */ miclevel -= (18 * 2); /* lower level 18 dB (*2 because of 0.5 dB steps !) */ } if (snd_BUG_ON(preamp >= 4)) return; /* set pre-amp level */ chip->regSELMIC &= ~MICRO_SELECT_PREAMPLI_MASK; chip->regSELMIC |= (preamp << MICRO_SELECT_PREAMPLI_OFFSET) & MICRO_SELECT_PREAMPLI_MASK; vx_outl(chip, SELMIC, chip->regSELMIC); data = (unsigned int)miclevel << 16 | (unsigned int)chip->input_level[1] << 8 | (unsigned int)chip->input_level[0]; vx_inl(chip, DATA); /* Activate input level programming */ /* We have to send 32 bits (4 x 8 bits) */ for (i = 0; i < 32; i++, data <<= 1) vx_outl(chip, DATA, ((data & 0x80000000) ? 
VX_DATA_CODEC_MASK : 0)); vx_inl(chip, RUER); /* Terminate input level programming */ } #define MIC_LEVEL_MAX 0xff static const DECLARE_TLV_DB_SCALE(db_scale_mic, -6450, 50, 0); /* * controls API for input levels */ /* input levels */ static int vx_input_level_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = MIC_LEVEL_MAX; return 0; } static int vx_input_level_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct vx_core *_chip = snd_kcontrol_chip(kcontrol); struct snd_vx222 *chip = (struct snd_vx222 *)_chip; mutex_lock(&_chip->mixer_mutex); ucontrol->value.integer.value[0] = chip->input_level[0]; ucontrol->value.integer.value[1] = chip->input_level[1]; mutex_unlock(&_chip->mixer_mutex); return 0; } static int vx_input_level_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct vx_core *_chip = snd_kcontrol_chip(kcontrol); struct snd_vx222 *chip = (struct snd_vx222 *)_chip; if (ucontrol->value.integer.value[0] < 0 || ucontrol->value.integer.value[0] > MIC_LEVEL_MAX) return -EINVAL; if (ucontrol->value.integer.value[1] < 0 || ucontrol->value.integer.value[1] > MIC_LEVEL_MAX) return -EINVAL; mutex_lock(&_chip->mixer_mutex); if (chip->input_level[0] != ucontrol->value.integer.value[0] || chip->input_level[1] != ucontrol->value.integer.value[1]) { chip->input_level[0] = ucontrol->value.integer.value[0]; chip->input_level[1] = ucontrol->value.integer.value[1]; vx2_set_input_level(chip); mutex_unlock(&_chip->mixer_mutex); return 1; } mutex_unlock(&_chip->mixer_mutex); return 0; } /* mic level */ static int vx_mic_level_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = MIC_LEVEL_MAX; return 0; } static int vx_mic_level_get(struct snd_kcontrol 
*kcontrol, struct snd_ctl_elem_value *ucontrol) { struct vx_core *_chip = snd_kcontrol_chip(kcontrol); struct snd_vx222 *chip = (struct snd_vx222 *)_chip; ucontrol->value.integer.value[0] = chip->mic_level; return 0; } static int vx_mic_level_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct vx_core *_chip = snd_kcontrol_chip(kcontrol); struct snd_vx222 *chip = (struct snd_vx222 *)_chip; if (ucontrol->value.integer.value[0] < 0 || ucontrol->value.integer.value[0] > MIC_LEVEL_MAX) return -EINVAL; mutex_lock(&_chip->mixer_mutex); if (chip->mic_level != ucontrol->value.integer.value[0]) { chip->mic_level = ucontrol->value.integer.value[0]; vx2_set_input_level(chip); mutex_unlock(&_chip->mixer_mutex); return 1; } mutex_unlock(&_chip->mixer_mutex); return 0; } static struct snd_kcontrol_new vx_control_input_level = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .name = "Capture Volume", .info = vx_input_level_info, .get = vx_input_level_get, .put = vx_input_level_put, .tlv = { .p = db_scale_mic }, }; static struct snd_kcontrol_new vx_control_mic_level = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .name = "Mic Capture Volume", .info = vx_mic_level_info, .get = vx_mic_level_get, .put = vx_mic_level_put, .tlv = { .p = db_scale_mic }, }; /* * FIXME: compressor/limiter implementation is missing yet... 
*/ static int vx2_add_mic_controls(struct vx_core *_chip) { struct snd_vx222 *chip = (struct snd_vx222 *)_chip; int err; if (_chip->type != VX_TYPE_MIC) return 0; /* mute input levels */ chip->input_level[0] = chip->input_level[1] = 0; chip->mic_level = 0; vx2_set_input_level(chip); /* controls */ if ((err = snd_ctl_add(_chip->card, snd_ctl_new1(&vx_control_input_level, chip))) < 0) return err; if ((err = snd_ctl_add(_chip->card, snd_ctl_new1(&vx_control_mic_level, chip))) < 0) return err; return 0; } /* * callbacks */ struct snd_vx_ops vx222_ops = { .in8 = vx2_inb, .in32 = vx2_inl, .out8 = vx2_outb, .out32 = vx2_outl, .test_and_ack = vx2_test_and_ack, .validate_irq = vx2_validate_irq, .akm_write = vx2_write_akm, .reset_codec = vx2_reset_codec, .change_audio_source = vx2_change_audio_source, .set_clock_source = vx2_set_clock_source, .load_dsp = vx2_load_dsp, .reset_dsp = vx2_reset_dsp, .reset_board = vx2_reset_board, .dma_write = vx2_dma_write, .dma_read = vx2_dma_read, .add_controls = vx2_add_mic_controls, }; /* for old VX222 board */ struct snd_vx_ops vx222_old_ops = { .in8 = vx2_inb, .in32 = vx2_inl, .out8 = vx2_outb, .out32 = vx2_outl, .test_and_ack = vx2_test_and_ack, .validate_irq = vx2_validate_irq, .write_codec = vx2_old_write_codec_bit, .reset_codec = vx2_reset_codec, .change_audio_source = vx2_change_audio_source, .set_clock_source = vx2_set_clock_source, .load_dsp = vx2_load_dsp, .reset_dsp = vx2_reset_dsp, .reset_board = vx2_reset_board, .dma_write = vx2_dma_write, .dma_read = vx2_dma_read, };
gpl-2.0
leitick/linux
drivers/net/ethernet/calxeda/xgmac.c
248
57621
/* * Copyright 2010-2011 Calxeda, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/circ_buf.h> #include <linux/interrupt.h> #include <linux/etherdevice.h> #include <linux/platform_device.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <linux/if.h> #include <linux/crc32.h> #include <linux/dma-mapping.h> #include <linux/slab.h> /* XGMAC Register definitions */ #define XGMAC_CONTROL 0x00000000 /* MAC Configuration */ #define XGMAC_FRAME_FILTER 0x00000004 /* MAC Frame Filter */ #define XGMAC_FLOW_CTRL 0x00000018 /* MAC Flow Control */ #define XGMAC_VLAN_TAG 0x0000001C /* VLAN Tags */ #define XGMAC_VERSION 0x00000020 /* Version */ #define XGMAC_VLAN_INCL 0x00000024 /* VLAN tag for tx frames */ #define XGMAC_LPI_CTRL 0x00000028 /* LPI Control and Status */ #define XGMAC_LPI_TIMER 0x0000002C /* LPI Timers Control */ #define XGMAC_TX_PACE 0x00000030 /* Transmit Pace and Stretch */ #define XGMAC_VLAN_HASH 0x00000034 /* VLAN Hash Table */ #define XGMAC_DEBUG 0x00000038 /* Debug */ #define XGMAC_INT_STAT 0x0000003C /* Interrupt and Control */ #define XGMAC_ADDR_HIGH(reg) (0x00000040 + ((reg) * 8)) #define XGMAC_ADDR_LOW(reg) (0x00000044 + ((reg) * 8)) #define XGMAC_HASH(n) (0x00000300 + (n) * 4) /* HASH table regs */ #define XGMAC_NUM_HASH 16 #define XGMAC_OMR 0x00000400 #define XGMAC_REMOTE_WAKE 0x00000700 /* Remote Wake-Up Frm Filter */ #define XGMAC_PMT 
0x00000704 /* PMT Control and Status */ #define XGMAC_MMC_CTRL 0x00000800 /* XGMAC MMC Control */ #define XGMAC_MMC_INTR_RX 0x00000804 /* Recieve Interrupt */ #define XGMAC_MMC_INTR_TX 0x00000808 /* Transmit Interrupt */ #define XGMAC_MMC_INTR_MASK_RX 0x0000080c /* Recieve Interrupt Mask */ #define XGMAC_MMC_INTR_MASK_TX 0x00000810 /* Transmit Interrupt Mask */ /* Hardware TX Statistics Counters */ #define XGMAC_MMC_TXOCTET_GB_LO 0x00000814 #define XGMAC_MMC_TXOCTET_GB_HI 0x00000818 #define XGMAC_MMC_TXFRAME_GB_LO 0x0000081C #define XGMAC_MMC_TXFRAME_GB_HI 0x00000820 #define XGMAC_MMC_TXBCFRAME_G 0x00000824 #define XGMAC_MMC_TXMCFRAME_G 0x0000082C #define XGMAC_MMC_TXUCFRAME_GB 0x00000864 #define XGMAC_MMC_TXMCFRAME_GB 0x0000086C #define XGMAC_MMC_TXBCFRAME_GB 0x00000874 #define XGMAC_MMC_TXUNDERFLOW 0x0000087C #define XGMAC_MMC_TXOCTET_G_LO 0x00000884 #define XGMAC_MMC_TXOCTET_G_HI 0x00000888 #define XGMAC_MMC_TXFRAME_G_LO 0x0000088C #define XGMAC_MMC_TXFRAME_G_HI 0x00000890 #define XGMAC_MMC_TXPAUSEFRAME 0x00000894 #define XGMAC_MMC_TXVLANFRAME 0x0000089C /* Hardware RX Statistics Counters */ #define XGMAC_MMC_RXFRAME_GB_LO 0x00000900 #define XGMAC_MMC_RXFRAME_GB_HI 0x00000904 #define XGMAC_MMC_RXOCTET_GB_LO 0x00000908 #define XGMAC_MMC_RXOCTET_GB_HI 0x0000090C #define XGMAC_MMC_RXOCTET_G_LO 0x00000910 #define XGMAC_MMC_RXOCTET_G_HI 0x00000914 #define XGMAC_MMC_RXBCFRAME_G 0x00000918 #define XGMAC_MMC_RXMCFRAME_G 0x00000920 #define XGMAC_MMC_RXCRCERR 0x00000928 #define XGMAC_MMC_RXRUNT 0x00000930 #define XGMAC_MMC_RXJABBER 0x00000934 #define XGMAC_MMC_RXUCFRAME_G 0x00000970 #define XGMAC_MMC_RXLENGTHERR 0x00000978 #define XGMAC_MMC_RXPAUSEFRAME 0x00000988 #define XGMAC_MMC_RXOVERFLOW 0x00000990 #define XGMAC_MMC_RXVLANFRAME 0x00000998 #define XGMAC_MMC_RXWATCHDOG 0x000009a0 /* DMA Control and Status Registers */ #define XGMAC_DMA_BUS_MODE 0x00000f00 /* Bus Mode */ #define XGMAC_DMA_TX_POLL 0x00000f04 /* Transmit Poll Demand */ #define XGMAC_DMA_RX_POLL 0x00000f08 
/* Received Poll Demand */ #define XGMAC_DMA_RX_BASE_ADDR 0x00000f0c /* Receive List Base */ #define XGMAC_DMA_TX_BASE_ADDR 0x00000f10 /* Transmit List Base */ #define XGMAC_DMA_STATUS 0x00000f14 /* Status Register */ #define XGMAC_DMA_CONTROL 0x00000f18 /* Ctrl (Operational Mode) */ #define XGMAC_DMA_INTR_ENA 0x00000f1c /* Interrupt Enable */ #define XGMAC_DMA_MISS_FRAME_CTR 0x00000f20 /* Missed Frame Counter */ #define XGMAC_DMA_RI_WDOG_TIMER 0x00000f24 /* RX Intr Watchdog Timer */ #define XGMAC_DMA_AXI_BUS 0x00000f28 /* AXI Bus Mode */ #define XGMAC_DMA_AXI_STATUS 0x00000f2C /* AXI Status */ #define XGMAC_DMA_HW_FEATURE 0x00000f58 /* Enabled Hardware Features */ #define XGMAC_ADDR_AE 0x80000000 /* PMT Control and Status */ #define XGMAC_PMT_POINTER_RESET 0x80000000 #define XGMAC_PMT_GLBL_UNICAST 0x00000200 #define XGMAC_PMT_WAKEUP_RX_FRM 0x00000040 #define XGMAC_PMT_MAGIC_PKT 0x00000020 #define XGMAC_PMT_WAKEUP_FRM_EN 0x00000004 #define XGMAC_PMT_MAGIC_PKT_EN 0x00000002 #define XGMAC_PMT_POWERDOWN 0x00000001 #define XGMAC_CONTROL_SPD 0x40000000 /* Speed control */ #define XGMAC_CONTROL_SPD_MASK 0x60000000 #define XGMAC_CONTROL_SPD_1G 0x60000000 #define XGMAC_CONTROL_SPD_2_5G 0x40000000 #define XGMAC_CONTROL_SPD_10G 0x00000000 #define XGMAC_CONTROL_SARC 0x10000000 /* Source Addr Insert/Replace */ #define XGMAC_CONTROL_SARK_MASK 0x18000000 #define XGMAC_CONTROL_CAR 0x04000000 /* CRC Addition/Replacement */ #define XGMAC_CONTROL_CAR_MASK 0x06000000 #define XGMAC_CONTROL_DP 0x01000000 /* Disable Padding */ #define XGMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on rx */ #define XGMAC_CONTROL_JD 0x00400000 /* Jabber disable */ #define XGMAC_CONTROL_JE 0x00100000 /* Jumbo frame */ #define XGMAC_CONTROL_LM 0x00001000 /* Loop-back mode */ #define XGMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */ #define XGMAC_CONTROL_ACS 0x00000080 /* Automatic Pad/FCS Strip */ #define XGMAC_CONTROL_DDIC 0x00000010 /* Disable Deficit Idle Count */ #define XGMAC_CONTROL_TE 0x00000008 
/* Transmitter Enable */ #define XGMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ /* XGMAC Frame Filter defines */ #define XGMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ #define XGMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */ #define XGMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */ #define XGMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */ #define XGMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */ #define XGMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */ #define XGMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */ #define XGMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */ #define XGMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */ #define XGMAC_FRAME_FILTER_VHF 0x00000800 /* VLAN Hash Filter */ #define XGMAC_FRAME_FILTER_VPF 0x00001000 /* VLAN Perfect Filter */ #define XGMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */ /* XGMAC FLOW CTRL defines */ #define XGMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ #define XGMAC_FLOW_CTRL_PT_SHIFT 16 #define XGMAC_FLOW_CTRL_DZQP 0x00000080 /* Disable Zero-Quanta Phase */ #define XGMAC_FLOW_CTRL_PLT 0x00000020 /* Pause Low Threshhold */ #define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030 /* PLT MASK */ #define XGMAC_FLOW_CTRL_UP 0x00000008 /* Unicast Pause Frame Detect */ #define XGMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */ #define XGMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */ #define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... 
*/

/* XGMAC_INT_STAT reg */
#define XGMAC_INT_STAT_PMTIM	0x00800000	/* PMT Interrupt Mask */
#define XGMAC_INT_STAT_PMT	0x0080		/* PMT Interrupt Status */
#define XGMAC_INT_STAT_LPI	0x0040		/* LPI Interrupt Status */

/* DMA Bus Mode register defines */
#define DMA_BUS_MODE_SFT_RESET	0x00000001	/* Software Reset */
#define DMA_BUS_MODE_DSL_MASK	0x0000007c	/* Descriptor Skip Length */
#define DMA_BUS_MODE_DSL_SHIFT	2		/* (in DWORDS) */
#define DMA_BUS_MODE_ATDS	0x00000080	/* Alternate Descriptor Size */
/* Programmable burst length */
#define DMA_BUS_MODE_PBL_MASK	0x00003f00	/* Programmable Burst Len */
#define DMA_BUS_MODE_PBL_SHIFT	8
#define DMA_BUS_MODE_FB		0x00010000	/* Fixed burst */
#define DMA_BUS_MODE_RPBL_MASK	0x003e0000	/* Rx-Programmable Burst Len */
#define DMA_BUS_MODE_RPBL_SHIFT	17
#define DMA_BUS_MODE_USP	0x00800000
#define DMA_BUS_MODE_8PBL	0x01000000
#define DMA_BUS_MODE_AAL	0x02000000

/* DMA bus mode: Rx/Tx arbitration fields */
#define DMA_BUS_PR_RATIO_MASK	0x0000c000	/* Rx/Tx priority ratio */
#define DMA_BUS_PR_RATIO_SHIFT	14
#define DMA_BUS_FB		0x00010000	/* Fixed Burst */

/* DMA Control register defines */
#define DMA_CONTROL_ST		0x00002000	/* Start/Stop Transmission */
#define DMA_CONTROL_SR		0x00000002	/* Start/Stop Receive */
#define DMA_CONTROL_DFF		0x01000000	/* Disable flush of rx frames */
#define DMA_CONTROL_OSF		0x00000004	/* Operate on 2nd tx frame */

/* DMA Normal interrupt */
#define DMA_INTR_ENA_NIE	0x00010000	/* Normal Summary */
#define DMA_INTR_ENA_AIE	0x00008000	/* Abnormal Summary */
#define DMA_INTR_ENA_ERE	0x00004000	/* Early Receive */
#define DMA_INTR_ENA_FBE	0x00002000	/* Fatal Bus Error */
#define DMA_INTR_ENA_ETE	0x00000400	/* Early Transmit */
#define DMA_INTR_ENA_RWE	0x00000200	/* Receive Watchdog */
#define DMA_INTR_ENA_RSE	0x00000100	/* Receive Stopped */
#define DMA_INTR_ENA_RUE	0x00000080	/* Receive Buffer Unavailable */
#define DMA_INTR_ENA_RIE	0x00000040	/* Receive Interrupt */
#define DMA_INTR_ENA_UNE	0x00000020	/* Tx Underflow */
#define DMA_INTR_ENA_OVE	0x00000010	/* Receive Overflow */
#define DMA_INTR_ENA_TJE	0x00000008	/* Transmit Jabber */
#define DMA_INTR_ENA_TUE	0x00000004	/* Transmit Buffer Unavail */
#define DMA_INTR_ENA_TSE	0x00000002	/* Transmit Stopped */
#define DMA_INTR_ENA_TIE	0x00000001	/* Transmit Interrupt */

#define DMA_INTR_NORMAL	(DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
			 DMA_INTR_ENA_TUE | DMA_INTR_ENA_TIE)

#define DMA_INTR_ABNORMAL	(DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
				 DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
				 DMA_INTR_ENA_RUE | DMA_INTR_ENA_UNE | \
				 DMA_INTR_ENA_OVE | DMA_INTR_ENA_TJE | \
				 DMA_INTR_ENA_TSE)

/* DMA default interrupt mask */
#define DMA_INTR_DEFAULT_MASK	(DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)

/* DMA Status register defines */
#define DMA_STATUS_GMI		0x08000000	/* MMC interrupt */
#define DMA_STATUS_GLI		0x04000000	/* GMAC Line interface int */
#define DMA_STATUS_EB_MASK	0x00380000	/* Error Bits Mask */
#define DMA_STATUS_EB_TX_ABORT	0x00080000	/* Error Bits - TX Abort */
#define DMA_STATUS_EB_RX_ABORT	0x00100000	/* Error Bits - RX Abort */
#define DMA_STATUS_TS_MASK	0x00700000	/* Transmit Process State */
#define DMA_STATUS_TS_SHIFT	20
#define DMA_STATUS_RS_MASK	0x000e0000	/* Receive Process State */
#define DMA_STATUS_RS_SHIFT	17
#define DMA_STATUS_NIS		0x00010000	/* Normal Interrupt Summary */
#define DMA_STATUS_AIS		0x00008000	/* Abnormal Interrupt Summary */
#define DMA_STATUS_ERI		0x00004000	/* Early Receive Interrupt */
#define DMA_STATUS_FBI		0x00002000	/* Fatal Bus Error Interrupt */
#define DMA_STATUS_ETI		0x00000400	/* Early Transmit Interrupt */
#define DMA_STATUS_RWT		0x00000200	/* Receive Watchdog Timeout */
#define DMA_STATUS_RPS		0x00000100	/* Receive Process Stopped */
#define DMA_STATUS_RU		0x00000080	/* Receive Buffer Unavailable */
#define DMA_STATUS_RI		0x00000040	/* Receive Interrupt */
#define DMA_STATUS_UNF		0x00000020	/* Transmit Underflow */
#define DMA_STATUS_OVF		0x00000010	/* Receive Overflow */
#define DMA_STATUS_TJT		0x00000008	/* Transmit Jabber Timeout */
#define DMA_STATUS_TU		0x00000004	/* Transmit Buffer Unavail */
#define DMA_STATUS_TPS		0x00000002	/* Transmit Process Stopped */
#define DMA_STATUS_TI		0x00000001	/* Transmit Interrupt */

/* Common MAC defines */
#define MAC_ENABLE_TX		0x00000008	/* Transmitter Enable */
#define MAC_ENABLE_RX		0x00000004	/* Receiver Enable */

/* XGMAC Operation Mode Register */
#define XGMAC_OMR_TSF		0x00200000	/* TX FIFO Store and Forward */
#define XGMAC_OMR_FTF		0x00100000	/* Flush Transmit FIFO */
#define XGMAC_OMR_TTC		0x00020000	/* Transmit Threshold Ctrl */
#define XGMAC_OMR_TTC_MASK	0x00030000
#define XGMAC_OMR_RFD		0x00006000	/* FC Deactivation Threshold */
#define XGMAC_OMR_RFD_MASK	0x00007000	/* FC Deact Threshold MASK */
#define XGMAC_OMR_RFA		0x00000600	/* FC Activation Threshold */
#define XGMAC_OMR_RFA_MASK	0x00000E00	/* FC Act Threshold MASK */
#define XGMAC_OMR_EFC		0x00000100	/* Enable Hardware FC */
#define XGMAC_OMR_FEF		0x00000080	/* Forward Error Frames */
#define XGMAC_OMR_DT		0x00000040	/* Drop TCP/IP csum Errors */
#define XGMAC_OMR_RSF		0x00000020	/* RX FIFO Store and Forward */
#define XGMAC_OMR_RTC_256	0x00000018	/* RX Threshold Ctrl */
#define XGMAC_OMR_RTC_MASK	0x00000018	/* RX Threshold Ctrl MASK */

/* XGMAC HW Features Register */
#define DMA_HW_FEAT_TXCOESEL	0x00010000	/* TX Checksum offload */

#define XGMAC_MMC_CTRL_CNT_FRZ	0x00000008

/* XGMAC Descriptor Defines */
#define MAX_DESC_BUF_SZ		(0x2000 - 8)

#define RXDESC_EXT_STATUS	0x00000001
#define RXDESC_CRC_ERR		0x00000002
#define RXDESC_RX_ERR		0x00000008
#define RXDESC_RX_WDOG		0x00000010
#define RXDESC_FRAME_TYPE	0x00000020
#define RXDESC_GIANT_FRAME	0x00000080
#define RXDESC_LAST_SEG		0x00000100
#define RXDESC_FIRST_SEG	0x00000200
#define RXDESC_VLAN_FRAME	0x00000400
#define RXDESC_OVERFLOW_ERR	0x00000800
#define RXDESC_LENGTH_ERR	0x00001000
#define RXDESC_SA_FILTER_FAIL	0x00002000
#define RXDESC_DESCRIPTOR_ERR	0x00004000
#define RXDESC_ERROR_SUMMARY	0x00008000
#define RXDESC_FRAME_LEN_OFFSET	16
#define RXDESC_FRAME_LEN_MASK	0x3fff0000
#define RXDESC_DA_FILTER_FAIL	0x40000000

#define RXDESC1_END_RING	0x00008000

#define RXDESC_IP_PAYLOAD_MASK	0x00000003
#define RXDESC_IP_PAYLOAD_UDP	0x00000001
#define RXDESC_IP_PAYLOAD_TCP	0x00000002
#define RXDESC_IP_PAYLOAD_ICMP	0x00000003
#define RXDESC_IP_HEADER_ERR	0x00000008
#define RXDESC_IP_PAYLOAD_ERR	0x00000010
#define RXDESC_IPV4_PACKET	0x00000040
#define RXDESC_IPV6_PACKET	0x00000080

#define TXDESC_UNDERFLOW_ERR	0x00000001
#define TXDESC_JABBER_TIMEOUT	0x00000002
#define TXDESC_LOCAL_FAULT	0x00000004
#define TXDESC_REMOTE_FAULT	0x00000008
#define TXDESC_VLAN_FRAME	0x00000010
#define TXDESC_FRAME_FLUSHED	0x00000020
#define TXDESC_IP_HEADER_ERR	0x00000040
#define TXDESC_PAYLOAD_CSUM_ERR	0x00000080
#define TXDESC_ERROR_SUMMARY	0x00008000
#define TXDESC_SA_CTRL_INSERT	0x00040000
#define TXDESC_SA_CTRL_REPLACE	0x00080000
#define TXDESC_2ND_ADDR_CHAINED	0x00100000
#define TXDESC_END_RING		0x00200000
#define TXDESC_CSUM_IP		0x00400000
#define TXDESC_CSUM_IP_PAYLD	0x00800000
#define TXDESC_CSUM_ALL		0x00C00000
#define TXDESC_CRC_EN_REPLACE	0x01000000
#define TXDESC_CRC_EN_APPEND	0x02000000
#define TXDESC_DISABLE_PAD	0x04000000
#define TXDESC_FIRST_SEG	0x10000000
#define TXDESC_LAST_SEG		0x20000000
#define TXDESC_INTERRUPT	0x40000000

#define DESC_OWN		0x80000000
#define DESC_BUFFER1_SZ_MASK	0x00001fff
#define DESC_BUFFER2_SZ_MASK	0x1fff0000
#define DESC_BUFFER2_SZ_OFFSET	16

/* Hardware DMA descriptor layout (alternate/extended size, see
 * DMA_BUS_MODE_ATDS); all fields are little-endian as seen by the device. */
struct xgmac_dma_desc {
	__le32 flags;
	__le32 buf_size;
	__le32 buf1_addr;		/* Buffer 1 Address Pointer */
	__le32 buf2_addr;		/* Buffer 2 Address Pointer */
	__le32 ext_status;
	__le32 res[3];
};

/* Driver-maintained error counters, filled from descriptor status bits */
struct xgmac_extra_stats {
	/* Transmit errors */
	unsigned long tx_jabber;
	unsigned long tx_frame_flushed;
	unsigned long tx_payload_error;
	unsigned long tx_ip_header_error;
	unsigned long tx_local_fault;
	unsigned long tx_remote_fault;
	/* Receive errors */
	unsigned long rx_watchdog;
	unsigned long rx_da_filter_fail;
	unsigned long rx_payload_error;
unsigned long rx_ip_header_error;

	/* Tx/Rx IRQ errors */
	unsigned long tx_process_stopped;
	unsigned long rx_buf_unav;
	unsigned long rx_process_stopped;
	unsigned long tx_early;
	unsigned long fatal_bus_error;
};

/* Per-device driver state; obtained from the net_device via netdev_priv() */
struct xgmac_priv {
	struct xgmac_dma_desc *dma_rx;	/* RX descriptor ring (coherent) */
	struct sk_buff **rx_skbuff;	/* skb per RX descriptor */
	unsigned int rx_tail;
	unsigned int rx_head;

	struct xgmac_dma_desc *dma_tx;	/* TX descriptor ring (coherent) */
	struct sk_buff **tx_skbuff;	/* skb per TX descriptor */
	unsigned int tx_head;
	unsigned int tx_tail;
	int tx_irq_cnt;			/* counts frames between TX irqs */

	void __iomem *base;		/* mapped register base */
	unsigned int dma_buf_sz;	/* RX buffer size programmed in descs */
	dma_addr_t dma_rx_phy;
	dma_addr_t dma_tx_phy;

	struct net_device *dev;
	struct device *device;
	struct napi_struct napi;

	int max_macs;			/* number of perfect-filter MAC slots */
	struct xgmac_extra_stats xstats;

	spinlock_t stats_lock;
	int pmt_irq;
	char rx_pause;
	char tx_pause;
	int wolopts;
	struct work_struct tx_timeout_work;
};

/* XGMAC Configuration Settings */
#define MAX_MTU			9000
#define PAUSE_TIME		0x400

/* Ring sizes must be powers of two: dma_ring_incr() masks with (size - 1) */
#define DMA_RX_RING_SZ		256
#define DMA_TX_RING_SZ		128

/* minimum number of free TX descriptors required to wake up TX process */
#define TX_THRESH		(DMA_TX_RING_SZ/4)

/* DMA descriptor ring helpers */
#define dma_ring_incr(n, s)	(((n) + 1) & ((s) - 1))
#define dma_ring_space(h, t, s)	CIRC_SPACE(h, t, s)
#define dma_ring_cnt(h, t, s)	CIRC_CNT(h, t, s)

#define tx_dma_ring_space(p) \
	dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ)

/* XGMAC Descriptor Access Helpers */

/* Program the descriptor buffer length; lengths above MAX_DESC_BUF_SZ are
 * split between the buffer-1 and buffer-2 size fields. */
static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
{
	if (buf_sz > MAX_DESC_BUF_SZ)
		p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ |
			(buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET);
	else
		p->buf_size = cpu_to_le32(buf_sz);
}

/* Total mapped length: buffer-1 size plus buffer-2 size */
static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
{
	u32 len = le32_to_cpu(p->buf_size);
	return (len & DESC_BUFFER1_SZ_MASK) +
		((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
}

/* Zero the RX ring, set each buffer length, and mark the last descriptor
 * as end-of-ring so the DMA wraps. */
static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size,
				     int buf_sz)
{
	struct xgmac_dma_desc *end = p + ring_size - 1;

	memset(p, 0, sizeof(*p) * ring_size);

	for (; p <= end; p++)
		desc_set_buf_len(p, buf_sz);

	end->buf_size |= cpu_to_le32(RXDESC1_END_RING);
}

/* Zero the TX ring and mark the last descriptor as end-of-ring */
static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size)
{
	memset(p, 0, sizeof(*p) * ring_size);
	p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING);
}

/* Non-zero while the descriptor is still owned by the DMA engine */
static inline int desc_get_owner(struct xgmac_dma_desc *p)
{
	return le32_to_cpu(p->flags) & DESC_OWN;
}

static inline void desc_set_rx_owner(struct xgmac_dma_desc *p)
{
	/* Clear all fields and set the owner */
	p->flags = cpu_to_le32(DESC_OWN);
}

/* Hand a TX descriptor to the DMA, preserving only the end-of-ring bit */
static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
{
	u32 tmpflags = le32_to_cpu(p->flags);
	tmpflags &= TXDESC_END_RING;
	tmpflags |= flags | DESC_OWN;
	p->flags = cpu_to_le32(tmpflags);
}

/* Reclaim a TX descriptor from the DMA, preserving only end-of-ring */
static inline void desc_clear_tx_owner(struct xgmac_dma_desc *p)
{
	u32 tmpflags = le32_to_cpu(p->flags);
	tmpflags &= TXDESC_END_RING;
	p->flags = cpu_to_le32(tmpflags);
}

/* Non-zero if this descriptor carries the last segment of a frame */
static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
{
	return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
}

/* Non-zero if this descriptor carries the first segment of a frame */
static inline int desc_get_tx_fs(struct xgmac_dma_desc *p)
{
	return le32_to_cpu(p->flags) & TXDESC_FIRST_SEG;
}

static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
{
	return le32_to_cpu(p->buf1_addr);
}

/* Program the buffer address; a mapping longer than MAX_DESC_BUF_SZ also
 * fills buffer-2 with the address of the remainder (contiguous mapping). */
static inline void desc_set_buf_addr(struct xgmac_dma_desc *p,
				     u32 paddr, int len)
{
	p->buf1_addr = cpu_to_le32(paddr);
	if (len > MAX_DESC_BUF_SZ)
		p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ);
}

static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p,
					      u32 paddr, int len)
{
	desc_set_buf_len(p, len);
	desc_set_buf_addr(p, paddr, len);
}

/* Frame length reported by the device; for typed (ethertype) frames the
 * hardware-stripped FCS is subtracted. */
static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p)
{
	u32 data = le32_to_cpu(p->flags);
	u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET;
	if (data & RXDESC_FRAME_TYPE)
		len -= ETH_FCS_LEN;

	return len;
}

/* Request a TX FIFO flush and poll (bounded) until the bit self-clears */
static void xgmac_dma_flush_tx_fifo(void __iomem *ioaddr)
{
	int timeout = 1000;
	u32 reg = readl(ioaddr + XGMAC_OMR);
	writel(reg | XGMAC_OMR_FTF, ioaddr + XGMAC_OMR);

	while ((timeout-- > 0) && readl(ioaddr
+ XGMAC_OMR) & XGMAC_OMR_FTF)
		udelay(1);
}

/* Account TX error statistics for a completed last-segment descriptor.
 * Returns 0 when no error summary is set, -1 otherwise.  An underflow
 * additionally triggers a TX FIFO flush. */
static int desc_get_tx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
{
	struct xgmac_extra_stats *x = &priv->xstats;
	u32 status = le32_to_cpu(p->flags);

	if (!(status & TXDESC_ERROR_SUMMARY))
		return 0;

	netdev_dbg(priv->dev, "tx desc error = 0x%08x\n", status);
	if (status & TXDESC_JABBER_TIMEOUT)
		x->tx_jabber++;
	if (status & TXDESC_FRAME_FLUSHED)
		x->tx_frame_flushed++;
	if (status & TXDESC_UNDERFLOW_ERR)
		xgmac_dma_flush_tx_fifo(priv->base);
	if (status & TXDESC_IP_HEADER_ERR)
		x->tx_ip_header_error++;
	if (status & TXDESC_LOCAL_FAULT)
		x->tx_local_fault++;
	if (status & TXDESC_REMOTE_FAULT)
		x->tx_remote_fault++;
	if (status & TXDESC_PAYLOAD_CSUM_ERR)
		x->tx_payload_error++;

	return -1;
}

/* Evaluate a received descriptor: returns -1 to drop the frame, otherwise
 * the checksum state (CHECKSUM_UNNECESSARY or CHECKSUM_NONE) to place in
 * skb->ip_summed. */
static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
{
	struct xgmac_extra_stats *x = &priv->xstats;
	int ret = CHECKSUM_UNNECESSARY;
	u32 status = le32_to_cpu(p->flags);
	u32 ext_status = le32_to_cpu(p->ext_status);

	if (status & RXDESC_DA_FILTER_FAIL) {
		netdev_dbg(priv->dev, "XGMAC RX : Dest Address filter fail\n");
		x->rx_da_filter_fail++;
		return -1;
	}

	/* All frames should fit into a single buffer */
	if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG))
		return -1;

	/* Check if packet has checksum already */
	if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
		!(ext_status & RXDESC_IP_PAYLOAD_MASK))
		ret = CHECKSUM_NONE;

	netdev_dbg(priv->dev, "rx status - frame type=%d, csum = %d, ext stat %08x\n",
		   (status & RXDESC_FRAME_TYPE) ? 1 : 0, ret, ext_status);

	if (!(status & RXDESC_ERROR_SUMMARY))
		return ret;

	/* Handle any errors */
	if (status & (RXDESC_DESCRIPTOR_ERR | RXDESC_OVERFLOW_ERR |
		RXDESC_GIANT_FRAME | RXDESC_LENGTH_ERR | RXDESC_CRC_ERR))
		return -1;

	if (status & RXDESC_EXT_STATUS) {
		if (ext_status & RXDESC_IP_HEADER_ERR)
			x->rx_ip_header_error++;
		if (ext_status & RXDESC_IP_PAYLOAD_ERR)
			x->rx_payload_error++;
		netdev_dbg(priv->dev, "IP checksum error - stat %08x\n",
			   ext_status);
		return CHECKSUM_NONE;
	}

	return ret;
}

/* Enable the MAC first, then start the DMA, so the engine has a live MAC
 * when it begins moving descriptors. */
static inline void xgmac_mac_enable(void __iomem *ioaddr)
{
	u32 value = readl(ioaddr + XGMAC_CONTROL);
	value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
	writel(value, ioaddr + XGMAC_CONTROL);

	value = readl(ioaddr + XGMAC_DMA_CONTROL);
	value |= DMA_CONTROL_ST | DMA_CONTROL_SR;
	writel(value, ioaddr + XGMAC_DMA_CONTROL);
}

/* Mirror of xgmac_mac_enable: stop the DMA first, then the MAC */
static inline void xgmac_mac_disable(void __iomem *ioaddr)
{
	u32 value = readl(ioaddr + XGMAC_DMA_CONTROL);
	value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
	writel(value, ioaddr + XGMAC_DMA_CONTROL);

	value = readl(ioaddr + XGMAC_CONTROL);
	value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
	writel(value, ioaddr + XGMAC_CONTROL);
}

/* Program perfect-filter slot @num with @addr; a NULL @addr clears the
 * slot.  Slots other than 0 are tagged with XGMAC_ADDR_AE (address enable). */
static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
			       int num)
{
	u32 data;

	if (addr) {
		data = (addr[5] << 8) | addr[4] | (num ?
XGMAC_ADDR_AE : 0);
		writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
		data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
		writel(data, ioaddr + XGMAC_ADDR_LOW(num));
	} else {
		writel(0, ioaddr + XGMAC_ADDR_HIGH(num));
		writel(0, ioaddr + XGMAC_ADDR_LOW(num));
	}
}

/* Read back the MAC address stored in perfect-filter slot @num */
static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
			       int num)
{
	u32 hi_addr, lo_addr;

	/* Read the MAC address from the hardware */
	hi_addr = readl(ioaddr + XGMAC_ADDR_HIGH(num));
	lo_addr = readl(ioaddr + XGMAC_ADDR_LOW(num));

	/* Extract the MAC address from the high and low words */
	addr[0] = lo_addr & 0xff;
	addr[1] = (lo_addr >> 8) & 0xff;
	addr[2] = (lo_addr >> 16) & 0xff;
	addr[3] = (lo_addr >> 24) & 0xff;
	addr[4] = hi_addr & 0xff;
	addr[5] = (hi_addr >> 8) & 0xff;
}

/* Program pause-frame flow control: enables RX/TX pause as requested and
 * toggles hardware flow control (XGMAC_OMR_EFC) accordingly.  Also caches
 * the settings in priv->rx_pause/tx_pause.  Always returns 0. */
static int xgmac_set_flow_ctrl(struct xgmac_priv *priv, int rx, int tx)
{
	u32 reg;
	unsigned int flow = 0;

	priv->rx_pause = rx;
	priv->tx_pause = tx;

	if (rx || tx) {
		if (rx)
			flow |= XGMAC_FLOW_CTRL_RFE;
		if (tx)
			flow |= XGMAC_FLOW_CTRL_TFE;

		flow |= XGMAC_FLOW_CTRL_PLT | XGMAC_FLOW_CTRL_UP;
		flow |= (PAUSE_TIME << XGMAC_FLOW_CTRL_PT_SHIFT);

		writel(flow, priv->base + XGMAC_FLOW_CTRL);

		reg = readl(priv->base + XGMAC_OMR);
		reg |= XGMAC_OMR_EFC;
		writel(reg, priv->base + XGMAC_OMR);
	} else {
		writel(0, priv->base + XGMAC_FLOW_CTRL);

		reg = readl(priv->base + XGMAC_OMR);
		reg &= ~XGMAC_OMR_EFC;
		writel(reg, priv->base + XGMAC_OMR);
	}

	return 0;
}

/* Replenish the RX ring: allocate and DMA-map an skb for every empty slot
 * (keeping one slot free, hence "> 1"), then hand the descriptors back to
 * the DMA engine.
 *
 * NOTE(review): skbs are allocated with @bufsz (mtu + ETH_HLEN + ETH_FCS_LEN)
 * but mapped and described with priv->dma_buf_sz, which is the 8-byte-aligned
 * value computed in xgmac_dma_desc_rings_init() and can exceed @bufsz —
 * verify the two sizes cannot diverge enough for the device to overrun
 * the skb data area. */
static void xgmac_rx_refill(struct xgmac_priv *priv)
{
	struct xgmac_dma_desc *p;
	dma_addr_t paddr;
	int bufsz = priv->dev->mtu + ETH_HLEN + ETH_FCS_LEN;

	while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) {
		int entry = priv->rx_head;
		struct sk_buff *skb;

		p = priv->dma_rx + entry;

		if (priv->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb_ip_align(priv->dev, bufsz);
			if (unlikely(skb == NULL))
				break;

			paddr = dma_map_single(priv->device, skb->data,
					       priv->dma_buf_sz - NET_IP_ALIGN,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(priv->device, paddr)) {
				dev_kfree_skb_any(skb);
				break;
			}
			priv->rx_skbuff[entry] = skb;
			desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
		}

		netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n",
			priv->rx_head, priv->rx_tail);

		priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ);
		desc_set_rx_owner(p);
	}
}

/**
 * init_xgmac_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers.
 */
static int xgmac_dma_desc_rings_init(struct net_device *dev)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	unsigned int bfsize;

	/* Set the Buffer size according to the MTU;
	 * The total buffer size including any IP offset must be a multiple
	 * of 8 bytes.
	 */
	bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);

	netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);

	priv->rx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_RX_RING_SZ,
				  GFP_KERNEL);
	if (!priv->rx_skbuff)
		return -ENOMEM;

	priv->dma_rx = dma_alloc_coherent(priv->device,
					  DMA_RX_RING_SZ *
					  sizeof(struct xgmac_dma_desc),
					  &priv->dma_rx_phy,
					  GFP_KERNEL);
	if (!priv->dma_rx)
		goto err_dma_rx;

	priv->tx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_TX_RING_SZ,
				  GFP_KERNEL);
	if (!priv->tx_skbuff)
		goto err_tx_skb;

	priv->dma_tx = dma_alloc_coherent(priv->device,
					  DMA_TX_RING_SZ *
					  sizeof(struct xgmac_dma_desc),
					  &priv->dma_tx_phy,
					  GFP_KERNEL);
	if (!priv->dma_tx)
		goto err_dma_tx;

	netdev_dbg(priv->dev, "DMA desc rings: virt addr (Rx %p, "
	    "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
	    priv->dma_rx, priv->dma_tx,
	    (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);

	priv->rx_tail = 0;
	priv->rx_head = 0;
	priv->dma_buf_sz = bfsize;
	desc_init_rx_desc(priv->dma_rx, DMA_RX_RING_SZ, priv->dma_buf_sz);
	xgmac_rx_refill(priv);

	priv->tx_tail = 0;
	priv->tx_head = 0;
	desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);

	/* Point the DMA engine at both rings */
	writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
	writel(priv->dma_rx_phy, priv->base + XGMAC_DMA_RX_BASE_ADDR);

	return 0;

err_dma_tx:
	kfree(priv->tx_skbuff);
err_tx_skb:
	dma_free_coherent(priv->device,
			  DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
			  priv->dma_rx, priv->dma_rx_phy);
err_dma_rx:
	kfree(priv->rx_skbuff);
	return -ENOMEM;
}

/* Unmap and free every skb still held in the RX ring */
static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
{
	int i;
	struct xgmac_dma_desc *p;

	if (!priv->rx_skbuff)
		return;

	for (i = 0; i < DMA_RX_RING_SZ; i++) {
		struct sk_buff *skb = priv->rx_skbuff[i];
		if (skb == NULL)
			continue;

		p = priv->dma_rx + i;
		dma_unmap_single(priv->device, desc_get_buf_addr(p),
				 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		priv->rx_skbuff[i] = NULL;
	}
}

/* Unmap every in-flight TX segment; the skb itself is freed only once,
 * on the descriptor carrying its last segment. */
static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
{
	int i;
	struct xgmac_dma_desc *p;

	if (!priv->tx_skbuff)
		return;

	for (i = 0; i < DMA_TX_RING_SZ; i++) {
		if (priv->tx_skbuff[i] == NULL)
			continue;

		p = priv->dma_tx + i;
		/* First segments were mapped with dma_map_single(),
		 * fragments with skb_frag_dma_map() (page mapping) */
		if (desc_get_tx_fs(p))
			dma_unmap_single(priv->device, desc_get_buf_addr(p),
					 desc_get_buf_len(p), DMA_TO_DEVICE);
		else
			dma_unmap_page(priv->device, desc_get_buf_addr(p),
				       desc_get_buf_len(p), DMA_TO_DEVICE);

		if (desc_get_tx_ls(p))
			dev_kfree_skb_any(priv->tx_skbuff[i]);
		priv->tx_skbuff[i] = NULL;
	}
}

/* Tear down both descriptor rings and their skb shadow arrays */
static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
{
	/* Release the DMA TX/RX socket buffers */
	xgmac_free_rx_skbufs(priv);
	xgmac_free_tx_skbufs(priv);

	/* Free the consistent memory allocated for descriptor rings */
	if (priv->dma_tx) {
		dma_free_coherent(priv->device,
				  DMA_TX_RING_SZ * sizeof(struct xgmac_dma_desc),
				  priv->dma_tx, priv->dma_tx_phy);
		priv->dma_tx = NULL;
	}
	if (priv->dma_rx) {
		dma_free_coherent(priv->device,
				  DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
				  priv->dma_rx, priv->dma_rx_phy);
		priv->dma_rx = NULL;
	}
	kfree(priv->rx_skbuff);
	priv->rx_skbuff = NULL;
	kfree(priv->tx_skbuff);
	priv->tx_skbuff = NULL;
}

/**
 * xgmac_tx:
 * @priv: private driver structure
 * Description: it reclaims resources after transmission completes.
 */
static void xgmac_tx_complete(struct xgmac_priv *priv)
{
	while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
		unsigned int entry = priv->tx_tail;
		struct sk_buff *skb = priv->tx_skbuff[entry];
		struct xgmac_dma_desc *p = priv->dma_tx + entry;

		/* Check if the descriptor is owned by the DMA. */
		if (desc_get_owner(p))
			break;

		netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
			priv->tx_head, priv->tx_tail);

		/* Unmap matches the mapping style used in xgmac_xmit():
		 * single-mapping for the head, page-mapping for fragments */
		if (desc_get_tx_fs(p))
			dma_unmap_single(priv->device, desc_get_buf_addr(p),
					 desc_get_buf_len(p), DMA_TO_DEVICE);
		else
			dma_unmap_page(priv->device, desc_get_buf_addr(p),
				       desc_get_buf_len(p), DMA_TO_DEVICE);

		/* Check tx error on the last segment */
		if (desc_get_tx_ls(p)) {
			desc_get_tx_status(priv, p);
			/* skb is freed once, on its last segment only */
			dev_kfree_skb(skb);
		}

		priv->tx_skbuff[entry] = NULL;
		priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
	}

	/* Ensure tx_tail is visible to xgmac_xmit */
	smp_mb();
	if (unlikely(netif_queue_stopped(priv->dev) &&
	    (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)))
		netif_wake_queue(priv->dev);
}

/* Deferred TX-timeout recovery (scheduled from xgmac_tx_timeout()):
 * quiesce NAPI and interrupts, stop the TX DMA, drop everything queued,
 * re-init the TX ring, then restart the engine and re-enable interrupts. */
static void xgmac_tx_timeout_work(struct work_struct *work)
{
	u32 reg, value;
	struct xgmac_priv *priv =
		container_of(work, struct xgmac_priv, tx_timeout_work);

	napi_disable(&priv->napi);

	writel(0, priv->base + XGMAC_DMA_INTR_ENA);

	netif_tx_lock(priv->dev);

	reg = readl(priv->base + XGMAC_DMA_CONTROL);
	writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
	/* Wait for the transmit process state (0x700000 == DMA_STATUS_TS_MASK)
	 * to report stopped (0) or suspended (0x600000) */
	do {
		value = readl(priv->base + XGMAC_DMA_STATUS) & 0x700000;
	} while (value && (value != 0x600000));

	xgmac_free_tx_skbufs(priv);
	desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
	priv->tx_tail = 0;
	priv->tx_head = 0;
	writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
	writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);

	/* Ack any stale TX status before restarting */
	writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
		priv->base + XGMAC_DMA_STATUS);

	netif_tx_unlock(priv->dev);
	netif_wake_queue(priv->dev);

	napi_enable(&priv->napi);

	/* Enable interrupts */
	writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_STATUS);
	writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
}

/* Soft-reset the core and program baseline DMA/MAC configuration.
 * The speed-select bits of XGMAC_CONTROL (set by firmware/boot code) are
 * preserved across the reset.  Returns 0, or -EBUSY if the reset bit never
 * self-clears. */
static int xgmac_hw_init(struct net_device *dev)
{
	u32 value, ctrl;
	int limit;
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;

	/* Save the ctrl register value */
	ctrl = readl(ioaddr + XGMAC_CONTROL) & XGMAC_CONTROL_SPD_MASK;

	/* SW reset */
	value = DMA_BUS_MODE_SFT_RESET;
	writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
	limit = 15000;
	while (limit-- &&
		(readl(ioaddr + XGMAC_DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
		cpu_relax();
	if (limit < 0)
		return -EBUSY;

	value = (0x10 << DMA_BUS_MODE_PBL_SHIFT) |
		(0x10 << DMA_BUS_MODE_RPBL_SHIFT) |
		DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
	writel(value, ioaddr + XGMAC_DMA_BUS_MODE);

	writel(0, ioaddr + XGMAC_DMA_INTR_ENA);

	/* Mask power mgt interrupt */
	writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);

	/* XGMAC requires AXI bus init. This is a 'magic number' for now */
	writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);

	ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS |
		XGMAC_CONTROL_CAR;
	if (dev->features & NETIF_F_RXCSUM)
		ctrl |= XGMAC_CONTROL_IPC;
	writel(ctrl, ioaddr + XGMAC_CONTROL);

	writel(DMA_CONTROL_OSF, ioaddr + XGMAC_DMA_CONTROL);

	/* Set the HW DMA mode and the COE */
	writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA |
		XGMAC_OMR_RTC_256,
		ioaddr + XGMAC_OMR);

	/* Reset the MMC counters */
	writel(1, ioaddr + XGMAC_MMC_CTRL);
	return 0;
}

/**
 * xgmac_open - open entry point of the driver
 * @dev : pointer to the device structure.
 * Description:
 * This function is the open entry point of the driver.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
static int xgmac_open(struct net_device *dev)
{
	int ret;
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;

	/* Check that the MAC address is valid. If its not, refuse
	 * to bring the device up.
The user must specify an
	 * address using the following linux command:
	 * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
	 */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		eth_hw_addr_random(dev);
		netdev_dbg(priv->dev, "generated random MAC address %pM\n",
			dev->dev_addr);
	}

	memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));

	/* Initialize the XGMAC and descriptors */
	xgmac_hw_init(dev);
	xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
	xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause);

	ret = xgmac_dma_desc_rings_init(dev);
	if (ret < 0)
		return ret;

	/* Enable the MAC Rx/Tx */
	xgmac_mac_enable(ioaddr);

	napi_enable(&priv->napi);
	netif_start_queue(dev);

	/* Enable interrupts */
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);

	return 0;
}

/**
 * xgmac_release - close entry point of the driver
 * @dev : device pointer.
 * Description:
 * This is the stop entry point of the driver.
 */
static int xgmac_stop(struct net_device *dev)
{
	struct xgmac_priv *priv = netdev_priv(dev);

	/* Only disable NAPI if interrupts were still enabled, i.e. NAPI is
	 * not already held off by the timeout-recovery worker */
	if (readl(priv->base + XGMAC_DMA_INTR_ENA))
		napi_disable(&priv->napi);

	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
	netif_tx_disable(dev);

	/* Disable the MAC core */
	xgmac_mac_disable(priv->base);

	/* Release and free the Rx/Tx resources */
	xgmac_free_dma_desc_rings(priv);

	return 0;
}

/**
 * xgmac_xmit:
 * @skb : the socket buffer
 * @dev : device pointer
 * Description : Tx entry point of the driver.
 */
static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	unsigned int entry;
	int i;
	u32 irq_flag;
	int nfrags = skb_shinfo(skb)->nr_frags;
	struct xgmac_dma_desc *desc, *first;
	unsigned int desc_flags;
	unsigned int len;
	dma_addr_t paddr;

	/* Request a completion interrupt only every DMA_TX_RING_SZ/4 frames;
	 * completions are otherwise reaped from the NAPI poll loop */
	priv->tx_irq_cnt = (priv->tx_irq_cnt + 1) & (DMA_TX_RING_SZ/4 - 1);
	irq_flag = priv->tx_irq_cnt ? 0 : TXDESC_INTERRUPT;

	desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
		     TXDESC_CSUM_ALL : 0;
	entry = priv->tx_head;
	desc = priv->dma_tx + entry;
	first = desc;

	/* Map the linear head of the skb */
	len = skb_headlen(skb);
	paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, paddr)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	priv->tx_skbuff[entry] = skb;
	desc_set_buf_addr_and_size(desc, paddr, len);

	/* One descriptor per page fragment */
	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		len = frag->size;

		paddr = skb_frag_dma_map(priv->device, frag, 0, len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, paddr))
			goto dma_err;

		entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
		desc = priv->dma_tx + entry;
		priv->tx_skbuff[entry] = skb;

		desc_set_buf_addr_and_size(desc, paddr, len);
		if (i < (nfrags - 1))
			desc_set_tx_owner(desc, desc_flags);
	}

	/* Interrupt on completion only for the latest segment */
	if (desc != first)
		desc_set_tx_owner(desc, desc_flags |
			TXDESC_LAST_SEG | irq_flag);
	else
		desc_flags |= TXDESC_LAST_SEG | irq_flag;

	/* Set owner on first desc last to avoid race condition */
	wmb();
	desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);

	/* Kick the DMA to re-scan the TX ring */
	writel(1, priv->base + XGMAC_DMA_TX_POLL);

	priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);

	/* Ensure tx_head update is visible to tx completion */
	smp_mb();
	if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		/* Ensure netif_stop_queue is visible to tx completion */
		smp_mb();
		/* Re-check: completion may have freed space meanwhile */
		if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)
			netif_start_queue(dev);
	}
	return NETDEV_TX_OK;

dma_err:
	/* Unwind the i fragments already mapped, then the head mapping */
	entry = priv->tx_head;
	for ( ; i > 0; i--) {
		entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
		desc = priv->dma_tx + entry;
		priv->tx_skbuff[entry] = NULL;
		dma_unmap_page(priv->device, desc_get_buf_addr(desc),
			       desc_get_buf_len(desc), DMA_TO_DEVICE);
		desc_clear_tx_owner(desc);
	}
	desc = first;
	dma_unmap_single(priv->device, desc_get_buf_addr(desc),
			 desc_get_buf_len(desc), DMA_TO_DEVICE);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* NAPI receive: process up to @limit completed RX descriptors, hand the
 * frames to the stack, then refill the ring.  Returns the number of
 * frames processed. */
static int xgmac_rx(struct xgmac_priv *priv, int limit)
{
unsigned int entry; unsigned int count = 0; struct xgmac_dma_desc *p; while (count < limit) { int ip_checksum; struct sk_buff *skb; int frame_len; if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ)) break; entry = priv->rx_tail; p = priv->dma_rx + entry; if (desc_get_owner(p)) break; count++; priv->rx_tail = dma_ring_incr(priv->rx_tail, DMA_RX_RING_SZ); /* read the status of the incoming frame */ ip_checksum = desc_get_rx_status(priv, p); if (ip_checksum < 0) continue; skb = priv->rx_skbuff[entry]; if (unlikely(!skb)) { netdev_err(priv->dev, "Inconsistent Rx descriptor chain\n"); break; } priv->rx_skbuff[entry] = NULL; frame_len = desc_get_rx_frame_len(p); netdev_dbg(priv->dev, "RX frame size %d, COE status: %d\n", frame_len, ip_checksum); skb_put(skb, frame_len); dma_unmap_single(priv->device, desc_get_buf_addr(p), priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE); skb->protocol = eth_type_trans(skb, priv->dev); skb->ip_summed = ip_checksum; if (ip_checksum == CHECKSUM_NONE) netif_receive_skb(skb); else napi_gro_receive(&priv->napi, skb); } xgmac_rx_refill(priv); return count; } /** * xgmac_poll - xgmac poll method (NAPI) * @napi : pointer to the napi structure. * @budget : maximum number of packets that the current CPU can receive from * all interfaces. * Description : * This function implements the the reception process. * Also it runs the TX completion thread */ static int xgmac_poll(struct napi_struct *napi, int budget) { struct xgmac_priv *priv = container_of(napi, struct xgmac_priv, napi); int work_done = 0; xgmac_tx_complete(priv); work_done = xgmac_rx(priv, budget); if (work_done < budget) { napi_complete(napi); __raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA); } return work_done; } /** * xgmac_tx_timeout * @dev : Pointer to net device structure * Description: this function is called when a packet transmission fails to * complete within a reasonable tmrate. 
The driver will mark the error in the * netdev structure and arrange for the device to be reset to a sane state * in order to transmit a new packet. */ static void xgmac_tx_timeout(struct net_device *dev) { struct xgmac_priv *priv = netdev_priv(dev); schedule_work(&priv->tx_timeout_work); } /** * xgmac_set_rx_mode - entry point for multicast addressing * @dev : pointer to the device structure * Description: * This function is a driver entry point which gets called by the kernel * whenever multicast addresses must be enabled/disabled. * Return value: * void. */ static void xgmac_set_rx_mode(struct net_device *dev) { int i; struct xgmac_priv *priv = netdev_priv(dev); void __iomem *ioaddr = priv->base; unsigned int value = 0; u32 hash_filter[XGMAC_NUM_HASH]; int reg = 1; struct netdev_hw_addr *ha; bool use_hash = false; netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n", netdev_mc_count(dev), netdev_uc_count(dev)); if (dev->flags & IFF_PROMISC) value |= XGMAC_FRAME_FILTER_PR; memset(hash_filter, 0, sizeof(hash_filter)); if (netdev_uc_count(dev) > priv->max_macs) { use_hash = true; value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF; } netdev_for_each_uc_addr(ha, dev) { if (use_hash) { u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23; /* The most significant 4 bits determine the register to * use (H/L) while the other 5 bits determine the bit * within the register. */ hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); } else { xgmac_set_mac_addr(ioaddr, ha->addr, reg); reg++; } } if (dev->flags & IFF_ALLMULTI) { value |= XGMAC_FRAME_FILTER_PM; goto out; } if ((netdev_mc_count(dev) + reg - 1) > priv->max_macs) { use_hash = true; value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF; } else { use_hash = false; } netdev_for_each_mc_addr(ha, dev) { if (use_hash) { u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23; /* The most significant 4 bits determine the register to * use (H/L) while the other 5 bits determine the bit * within the register. 
*/ hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); } else { xgmac_set_mac_addr(ioaddr, ha->addr, reg); reg++; } } out: for (i = reg; i <= priv->max_macs; i++) xgmac_set_mac_addr(ioaddr, NULL, i); for (i = 0; i < XGMAC_NUM_HASH; i++) writel(hash_filter[i], ioaddr + XGMAC_HASH(i)); writel(value, ioaddr + XGMAC_FRAME_FILTER); } /** * xgmac_change_mtu - entry point to change MTU size for the device. * @dev : device pointer. * @new_mtu : the new MTU size for the device. * Description: the Maximum Transfer Unit (MTU) is used by the network layer * to drive packet transmission. Ethernet has an MTU of 1500 octets * (ETH_DATA_LEN). This value can be changed with ifconfig. * Return value: * 0 on success and an appropriate (-)ve integer as defined in errno.h * file on failure. */ static int xgmac_change_mtu(struct net_device *dev, int new_mtu) { struct xgmac_priv *priv = netdev_priv(dev); int old_mtu; if ((new_mtu < 46) || (new_mtu > MAX_MTU)) { netdev_err(priv->dev, "invalid MTU, max MTU is: %d\n", MAX_MTU); return -EINVAL; } old_mtu = dev->mtu; /* return early if the buffer sizes will not change */ if (old_mtu == new_mtu) return 0; /* Stop everything, get ready to change the MTU */ if (!netif_running(dev)) return 0; /* Bring interface down, change mtu and bring interface back up */ xgmac_stop(dev); dev->mtu = new_mtu; return xgmac_open(dev); } static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id) { u32 intr_status; struct net_device *dev = (struct net_device *)dev_id; struct xgmac_priv *priv = netdev_priv(dev); void __iomem *ioaddr = priv->base; intr_status = __raw_readl(ioaddr + XGMAC_INT_STAT); if (intr_status & XGMAC_INT_STAT_PMT) { netdev_dbg(priv->dev, "received Magic frame\n"); /* clear the PMT bits 5 and 6 by reading the PMT */ readl(ioaddr + XGMAC_PMT); } return IRQ_HANDLED; } static irqreturn_t xgmac_interrupt(int irq, void *dev_id) { u32 intr_status; struct net_device *dev = (struct net_device *)dev_id; struct xgmac_priv *priv = netdev_priv(dev); struct 
xgmac_extra_stats *x = &priv->xstats; /* read the status register (CSR5) */ intr_status = __raw_readl(priv->base + XGMAC_DMA_STATUS); intr_status &= __raw_readl(priv->base + XGMAC_DMA_INTR_ENA); __raw_writel(intr_status, priv->base + XGMAC_DMA_STATUS); /* It displays the DMA process states (CSR5 register) */ /* ABNORMAL interrupts */ if (unlikely(intr_status & DMA_STATUS_AIS)) { if (intr_status & DMA_STATUS_TJT) { netdev_err(priv->dev, "transmit jabber\n"); x->tx_jabber++; } if (intr_status & DMA_STATUS_RU) x->rx_buf_unav++; if (intr_status & DMA_STATUS_RPS) { netdev_err(priv->dev, "receive process stopped\n"); x->rx_process_stopped++; } if (intr_status & DMA_STATUS_ETI) { netdev_err(priv->dev, "transmit early interrupt\n"); x->tx_early++; } if (intr_status & DMA_STATUS_TPS) { netdev_err(priv->dev, "transmit process stopped\n"); x->tx_process_stopped++; schedule_work(&priv->tx_timeout_work); } if (intr_status & DMA_STATUS_FBI) { netdev_err(priv->dev, "fatal bus error\n"); x->fatal_bus_error++; } } /* TX/RX NORMAL interrupts */ if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU | DMA_STATUS_TI)) { __raw_writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA); napi_schedule(&priv->napi); } return IRQ_HANDLED; } #ifdef CONFIG_NET_POLL_CONTROLLER /* Polling receive - used by NETCONSOLE and other diagnostic tools * to allow network I/O with interrupts disabled. 
*/ static void xgmac_poll_controller(struct net_device *dev) { disable_irq(dev->irq); xgmac_interrupt(dev->irq, dev); enable_irq(dev->irq); } #endif static struct rtnl_link_stats64 * xgmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage) { struct xgmac_priv *priv = netdev_priv(dev); void __iomem *base = priv->base; u32 count; spin_lock_bh(&priv->stats_lock); writel(XGMAC_MMC_CTRL_CNT_FRZ, base + XGMAC_MMC_CTRL); storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO); storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32; storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO); storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G); storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR); storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR); storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW); storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO); storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32; count = readl(base + XGMAC_MMC_TXFRAME_GB_LO); storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO); storage->tx_packets = count; storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW); writel(0, base + XGMAC_MMC_CTRL); spin_unlock_bh(&priv->stats_lock); return storage; } static int xgmac_set_mac_address(struct net_device *dev, void *p) { struct xgmac_priv *priv = netdev_priv(dev); void __iomem *ioaddr = priv->base; struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0); return 0; } static int xgmac_set_features(struct net_device *dev, netdev_features_t features) { u32 ctrl; struct xgmac_priv *priv = netdev_priv(dev); void __iomem *ioaddr = priv->base; netdev_features_t changed = dev->features ^ features; if (!(changed & NETIF_F_RXCSUM)) return 0; ctrl = readl(ioaddr + XGMAC_CONTROL); if (features & NETIF_F_RXCSUM) ctrl |= 
XGMAC_CONTROL_IPC; else ctrl &= ~XGMAC_CONTROL_IPC; writel(ctrl, ioaddr + XGMAC_CONTROL); return 0; } static const struct net_device_ops xgmac_netdev_ops = { .ndo_open = xgmac_open, .ndo_start_xmit = xgmac_xmit, .ndo_stop = xgmac_stop, .ndo_change_mtu = xgmac_change_mtu, .ndo_set_rx_mode = xgmac_set_rx_mode, .ndo_tx_timeout = xgmac_tx_timeout, .ndo_get_stats64 = xgmac_get_stats64, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = xgmac_poll_controller, #endif .ndo_set_mac_address = xgmac_set_mac_address, .ndo_set_features = xgmac_set_features, }; static int xgmac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) { cmd->autoneg = 0; cmd->duplex = DUPLEX_FULL; ethtool_cmd_speed_set(cmd, 10000); cmd->supported = 0; cmd->advertising = 0; cmd->transceiver = XCVR_INTERNAL; return 0; } static void xgmac_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct xgmac_priv *priv = netdev_priv(netdev); pause->rx_pause = priv->rx_pause; pause->tx_pause = priv->tx_pause; } static int xgmac_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct xgmac_priv *priv = netdev_priv(netdev); if (pause->autoneg) return -EINVAL; return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause); } struct xgmac_stats { char stat_string[ETH_GSTRING_LEN]; int stat_offset; bool is_reg; }; #define XGMAC_STAT(m) \ { #m, offsetof(struct xgmac_priv, xstats.m), false } #define XGMAC_HW_STAT(m, reg_offset) \ { #m, reg_offset, true } static const struct xgmac_stats xgmac_gstrings_stats[] = { XGMAC_STAT(tx_frame_flushed), XGMAC_STAT(tx_payload_error), XGMAC_STAT(tx_ip_header_error), XGMAC_STAT(tx_local_fault), XGMAC_STAT(tx_remote_fault), XGMAC_STAT(tx_early), XGMAC_STAT(tx_process_stopped), XGMAC_STAT(tx_jabber), XGMAC_STAT(rx_buf_unav), XGMAC_STAT(rx_process_stopped), XGMAC_STAT(rx_payload_error), XGMAC_STAT(rx_ip_header_error), XGMAC_STAT(rx_da_filter_fail), XGMAC_STAT(fatal_bus_error), XGMAC_HW_STAT(rx_watchdog, 
XGMAC_MMC_RXWATCHDOG), XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME), XGMAC_HW_STAT(rx_vlan, XGMAC_MMC_RXVLANFRAME), XGMAC_HW_STAT(tx_pause, XGMAC_MMC_TXPAUSEFRAME), XGMAC_HW_STAT(rx_pause, XGMAC_MMC_RXPAUSEFRAME), }; #define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats) static void xgmac_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 *data) { struct xgmac_priv *priv = netdev_priv(dev); void *p = priv; int i; for (i = 0; i < XGMAC_STATS_LEN; i++) { if (xgmac_gstrings_stats[i].is_reg) *data++ = readl(priv->base + xgmac_gstrings_stats[i].stat_offset); else *data++ = *(u32 *)(p + xgmac_gstrings_stats[i].stat_offset); } } static int xgmac_get_sset_count(struct net_device *netdev, int sset) { switch (sset) { case ETH_SS_STATS: return XGMAC_STATS_LEN; default: return -EINVAL; } } static void xgmac_get_strings(struct net_device *dev, u32 stringset, u8 *data) { int i; u8 *p = data; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < XGMAC_STATS_LEN; i++) { memcpy(p, xgmac_gstrings_stats[i].stat_string, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } break; default: WARN_ON(1); break; } } static void xgmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct xgmac_priv *priv = netdev_priv(dev); if (device_can_wakeup(priv->device)) { wol->supported = WAKE_MAGIC | WAKE_UCAST; wol->wolopts = priv->wolopts; } } static int xgmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct xgmac_priv *priv = netdev_priv(dev); u32 support = WAKE_MAGIC | WAKE_UCAST; if (!device_can_wakeup(priv->device)) return -ENOTSUPP; if (wol->wolopts & ~support) return -EINVAL; priv->wolopts = wol->wolopts; if (wol->wolopts) { device_set_wakeup_enable(priv->device, 1); enable_irq_wake(dev->irq); } else { device_set_wakeup_enable(priv->device, 0); disable_irq_wake(dev->irq); } return 0; } static const struct ethtool_ops xgmac_ethtool_ops = { .get_settings = xgmac_ethtool_getsettings, .get_link = ethtool_op_get_link, .get_pauseparam = 
xgmac_get_pauseparam, .set_pauseparam = xgmac_set_pauseparam, .get_ethtool_stats = xgmac_get_ethtool_stats, .get_strings = xgmac_get_strings, .get_wol = xgmac_get_wol, .set_wol = xgmac_set_wol, .get_sset_count = xgmac_get_sset_count, }; /** * xgmac_probe * @pdev: platform device pointer * Description: the driver is initialized through platform_device. */ static int xgmac_probe(struct platform_device *pdev) { int ret = 0; struct resource *res; struct net_device *ndev = NULL; struct xgmac_priv *priv = NULL; u32 uid; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; if (!request_mem_region(res->start, resource_size(res), pdev->name)) return -EBUSY; ndev = alloc_etherdev(sizeof(struct xgmac_priv)); if (!ndev) { ret = -ENOMEM; goto err_alloc; } SET_NETDEV_DEV(ndev, &pdev->dev); priv = netdev_priv(ndev); platform_set_drvdata(pdev, ndev); ether_setup(ndev); ndev->netdev_ops = &xgmac_netdev_ops; SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops); spin_lock_init(&priv->stats_lock); INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work); priv->device = &pdev->dev; priv->dev = ndev; priv->rx_pause = 1; priv->tx_pause = 1; priv->base = ioremap(res->start, resource_size(res)); if (!priv->base) { netdev_err(ndev, "ioremap failed\n"); ret = -ENOMEM; goto err_io; } uid = readl(priv->base + XGMAC_VERSION); netdev_info(ndev, "h/w version is 0x%x\n", uid); /* Figure out how many valid mac address filter registers we have */ writel(1, priv->base + XGMAC_ADDR_HIGH(31)); if (readl(priv->base + XGMAC_ADDR_HIGH(31)) == 1) priv->max_macs = 31; else priv->max_macs = 7; writel(0, priv->base + XGMAC_DMA_INTR_ENA); ndev->irq = platform_get_irq(pdev, 0); if (ndev->irq == -ENXIO) { netdev_err(ndev, "No irq resource\n"); ret = ndev->irq; goto err_irq; } ret = request_irq(ndev->irq, xgmac_interrupt, 0, dev_name(&pdev->dev), ndev); if (ret < 0) { netdev_err(ndev, "Could not request irq %d - ret %d)\n", ndev->irq, ret); goto err_irq; } priv->pmt_irq = platform_get_irq(pdev, 
1); if (priv->pmt_irq == -ENXIO) { netdev_err(ndev, "No pmt irq resource\n"); ret = priv->pmt_irq; goto err_pmt_irq; } ret = request_irq(priv->pmt_irq, xgmac_pmt_interrupt, 0, dev_name(&pdev->dev), ndev); if (ret < 0) { netdev_err(ndev, "Could not request irq %d - ret %d)\n", priv->pmt_irq, ret); goto err_pmt_irq; } device_set_wakeup_capable(&pdev->dev, 1); if (device_can_wakeup(priv->device)) priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA; if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL) ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; ndev->features |= ndev->hw_features; ndev->priv_flags |= IFF_UNICAST_FLT; /* Get the MAC address */ xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0); if (!is_valid_ether_addr(ndev->dev_addr)) netdev_warn(ndev, "MAC address %pM not valid", ndev->dev_addr); netif_napi_add(ndev, &priv->napi, xgmac_poll, 64); ret = register_netdev(ndev); if (ret) goto err_reg; return 0; err_reg: netif_napi_del(&priv->napi); free_irq(priv->pmt_irq, ndev); err_pmt_irq: free_irq(ndev->irq, ndev); err_irq: iounmap(priv->base); err_io: free_netdev(ndev); err_alloc: release_mem_region(res->start, resource_size(res)); return ret; } /** * xgmac_dvr_remove * @pdev: platform device pointer * Description: this function resets the TX/RX processes, disables the MAC RX/TX * changes the link status, releases the DMA descriptor rings, * unregisters the MDIO bus and unmaps the allocated memory. 
 */
static int xgmac_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct xgmac_priv *priv = netdev_priv(ndev);
	struct resource *res;

	/* Quiesce the MAC before tearing anything down */
	xgmac_mac_disable(priv->base);

	/* Free the IRQ lines */
	free_irq(ndev->irq, ndev);
	free_irq(priv->pmt_irq, ndev);

	/* NOTE(review): IRQs are freed before unregister_netdev(); confirm
	 * the device cannot raise an interrupt between these two steps. */
	unregister_netdev(ndev);
	netif_napi_del(&priv->napi);

	iounmap(priv->base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
/*
 * Program the PMT (power management) register with the wake-up modes
 * selected in @mode (WAKE_MAGIC and/or WAKE_UCAST); mode == 0 clears
 * all wake-up triggers.
 */
static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
{
	unsigned int pmt = 0;

	if (mode & WAKE_MAGIC)
		pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN;
	if (mode & WAKE_UCAST)
		pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;

	writel(pmt, ioaddr + XGMAC_PMT);
}

/*
 * System-sleep suspend hook: detach the interface, stop NAPI and mask
 * DMA interrupts.  If the device may wake the system, only the DMA
 * engines are halted and the PMT wake modes are armed; otherwise the
 * MAC core is fully disabled.
 */
static int xgmac_suspend(struct device *dev)
{
	struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
	struct xgmac_priv *priv = netdev_priv(ndev);
	u32 value;

	if (!ndev || !netif_running(ndev))
		return 0;

	netif_device_detach(ndev);
	napi_disable(&priv->napi);
	writel(0, priv->base + XGMAC_DMA_INTR_ENA);

	if (device_may_wakeup(priv->device)) {
		/* Stop TX/RX DMA Only */
		value = readl(priv->base + XGMAC_DMA_CONTROL);
		value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
		writel(value, priv->base + XGMAC_DMA_CONTROL);

		xgmac_pmt(priv->base, priv->wolopts);
	} else
		xgmac_mac_disable(priv->base);

	return 0;
}

/*
 * System-sleep resume hook: clear any armed PMT wake modes, re-enable
 * the MAC and DMA interrupts, then reattach the interface and restart
 * NAPI.
 */
static int xgmac_resume(struct device *dev)
{
	struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
	struct xgmac_priv *priv = netdev_priv(ndev);
	void __iomem *ioaddr = priv->base;

	if (!netif_running(ndev))
		return 0;

	xgmac_pmt(ioaddr, 0);

	/* Enable the MAC and DMA */
	xgmac_mac_enable(ioaddr);
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);

	netif_device_attach(ndev);
	napi_enable(&priv->napi);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume);

static const struct of_device_id xgmac_of_match[] = {
	{ .compatible = "calxeda,hb-xgmac", },
	{},
};
MODULE_DEVICE_TABLE(of, xgmac_of_match);

static struct platform_driver xgmac_driver = {
	.driver = {
		.name = "calxedaxgmac",
		.of_match_table = xgmac_of_match,
	},
	.probe = xgmac_probe,
	.remove = xgmac_remove,
	.driver.pm = &xgmac_pm_ops,
};

module_platform_driver(xgmac_driver);

MODULE_AUTHOR("Calxeda, Inc.");
MODULE_DESCRIPTION("Calxeda 10G XGMAC driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
billy-wang/smdkc110-Gingerbread-kernel
drivers/net/phy/micrel.c
760
2598
/*
 * drivers/net/phy/micrel.c
 *
 * Driver for Micrel PHYs
 *
 * Author: David J. Choi
 *
 * Copyright (c) 2010 Micrel, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Support : ksz9021 , vsc8201, ks8001
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/phy.h>

/* PHY identifier values as read from the MII PHY-ID registers */
#define PHY_ID_KSZ9021 0x00221611
#define PHY_ID_VSC8201 0x000FC413
#define PHY_ID_KS8001 0x0022161A

/*
 * These parts need no device-specific setup; the generic phylib
 * config_aneg/read_status handlers do all of the work.
 */
static int kszphy_config_init(struct phy_device *phydev)
{
	return 0;
}

static struct phy_driver ks8001_driver = {
	.phy_id		= PHY_ID_KS8001,
	.name		= "Micrel KS8001",
	.phy_id_mask	= 0x00fffff0,
	.features	= PHY_BASIC_FEATURES,
	.flags		= PHY_POLL,
	.config_init	= kszphy_config_init,
	.config_aneg	= genphy_config_aneg,
	.read_status	= genphy_read_status,
	.driver		= { .owner = THIS_MODULE, },
};

static struct phy_driver vsc8201_driver = {
	.phy_id		= PHY_ID_VSC8201,
	.name		= "Micrel VSC8201",
	.phy_id_mask	= 0x00fffff0,
	.features	= PHY_BASIC_FEATURES,
	.flags		= PHY_POLL,
	.config_init	= kszphy_config_init,
	.config_aneg	= genphy_config_aneg,
	.read_status	= genphy_read_status,
	.driver		= { .owner = THIS_MODULE, },
};

static struct phy_driver ksz9021_driver = {
	.phy_id		= PHY_ID_KSZ9021,
	/* NOTE(review): mask 0x000fff10 differs from the 0x00fffff0 used by
	 * the other entries — confirm against the KSZ9021 revision scheme. */
	.phy_id_mask	= 0x000fff10,
	.name		= "Micrel KSZ9021 Gigabit PHY",
	.features	= PHY_GBIT_FEATURES | SUPPORTED_Pause,
	.flags		= PHY_POLL,
	.config_init	= kszphy_config_init,
	.config_aneg	= genphy_config_aneg,
	.read_status	= genphy_read_status,
	.driver		= { .owner = THIS_MODULE, },
};

/*
 * Register all three PHY drivers; on failure, unwind the registrations
 * done so far in reverse order and return the error.
 */
static int __init ksphy_init(void)
{
	int ret;

	ret = phy_driver_register(&ks8001_driver);
	if (ret)
		goto err1;
	ret = phy_driver_register(&vsc8201_driver);
	if (ret)
		goto err2;
	ret = phy_driver_register(&ksz9021_driver);
	if (ret)
		goto err3;
	return 0;

err3:
	phy_driver_unregister(&vsc8201_driver);
err2:
	phy_driver_unregister(&ks8001_driver);
err1:
	return ret;
}

static void __exit ksphy_exit(void)
{
	phy_driver_unregister(&ks8001_driver);
	phy_driver_unregister(&vsc8201_driver);
	phy_driver_unregister(&ksz9021_driver);
}

module_init(ksphy_init);
module_exit(ksphy_exit);

MODULE_DESCRIPTION("Micrel PHY driver");
MODULE_AUTHOR("David J. Choi");
MODULE_LICENSE("GPL");

/* ID/mask pairs for MDIO bus auto-loading of this module */
static struct mdio_device_id micrel_tbl[] = {
	{ PHY_ID_KSZ9021, 0x000fff10 },
	{ PHY_ID_VSC8201, 0x00fffff0 },
	{ PHY_ID_KS8001, 0x00fffff0 },
	{ }
};

MODULE_DEVICE_TABLE(mdio, micrel_tbl);
gpl-2.0
cattleprod/XCeLL-XV
drivers/net/sfc/mcdi_mac.c
760
3751
/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2009 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "efx.h"
#include "mac.h"
#include "mcdi.h"
#include "mcdi_pcol.h"

/*
 * Push the current MAC configuration (address, MTU, unicast
 * accept/reject and flow control) to the MC via MC_CMD_SET_MAC.
 * Returns 0 or a negative error from the MCDI RPC.
 */
static int efx_mcdi_set_mac(struct efx_nic *efx)
{
	u32 reject, fcntl;
	u8 cmdbytes[MC_CMD_SET_MAC_IN_LEN];

	memcpy(cmdbytes + MC_CMD_SET_MAC_IN_ADDR_OFST,
	       efx->net_dev->dev_addr, ETH_ALEN);

	MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
		       EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
	MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);

	/* The MCDI command provides for controlling accept/reject
	 * of broadcast packets too, but the driver doesn't currently
	 * expose this. */
	reject = (efx->promiscuous) ? 0 :
		(1 << MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN);
	MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_REJECT, reject);

	/* Map the driver's flow-control setting to the MCDI encoding */
	switch (efx->wanted_fc) {
	case EFX_FC_RX | EFX_FC_TX:
		fcntl = MC_CMD_FCNTL_BIDIR;
		break;
	case EFX_FC_RX:
		fcntl = MC_CMD_FCNTL_RESPOND;
		break;
	default:
		fcntl = MC_CMD_FCNTL_OFF;
		break;
	}
	/* Autonegotiated flow control overrides the manual setting */
	if (efx->wanted_fc & EFX_FC_AUTO)
		fcntl = MC_CMD_FCNTL_AUTO;

	MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);

	return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
			    NULL, 0, NULL);
}

/*
 * Read the MAC fault word via MC_CMD_GET_LINK into *faults.
 * Returns 0 on success; *faults is only valid on success.
 */
static int efx_mcdi_get_mac_faults(struct efx_nic *efx, u32 *faults)
{
	u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
	size_t outlength;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
			  outbuf, sizeof(outbuf), &outlength);
	if (rc)
		goto fail;

	*faults = MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT);
	return 0;

fail:
	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

/*
 * Start (enable != 0) or stop (enable == 0) periodic DMA of MAC
 * statistics into the host buffer at @dma_addr of length @dma_len.
 * @clear requests that the MC reset its counters.  When enabling, the
 * DMA period is fixed at 1000 ms.
 */
int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
		       u32 dma_len, int enable, int clear)
{
	u8 inbuf[MC_CMD_MAC_STATS_IN_LEN];
	int rc;
	efx_dword_t *cmd_ptr;
	int period = enable ? 1000 : 0;
	u32 addr_hi;
	u32 addr_lo;

	BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_LEN != 0);

	/* Split the 64-bit DMA address into the two 32-bit MCDI fields */
	addr_lo = ((u64)dma_addr) >> 0;
	addr_hi = ((u64)dma_addr) >> 32;

	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_LO, addr_lo);
	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi);
	cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD);
	EFX_POPULATE_DWORD_7(*cmd_ptr,
			     MC_CMD_MAC_STATS_CMD_DMA, !!enable,
			     MC_CMD_MAC_STATS_CMD_CLEAR, clear,
			     MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
			     MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, !!enable,
			     MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0,
			     MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT, 1,
			     MC_CMD_MAC_STATS_CMD_PERIOD_MS, period);
	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);

	rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	EFX_ERR(efx, "%s: %s failed rc=%d\n",
		__func__, enable ? "enable" : "disable", rc);
	return rc;
}

/*
 * Reconfigure the MAC via MCDI and then restore the multicast hash,
 * which MC_CMD_SET_MAC clobbers.
 */
static int efx_mcdi_mac_reconfigure(struct efx_nic *efx)
{
	int rc;

	rc = efx_mcdi_set_mac(efx);
	if (rc != 0)
		return rc;

	/* Restore the multicast hash registers. */
	efx->type->push_multicast_hash(efx);

	return 0;
}

/*
 * Report a MAC fault if the fault word is non-zero or if the fault
 * query itself failed (treat an unreadable MAC as faulty).
 */
static bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
{
	u32 faults;
	int rc = efx_mcdi_get_mac_faults(efx, &faults);

	return (rc != 0) || (faults != 0);
}

struct efx_mac_operations efx_mcdi_mac_operations = {
	.reconfigure	= efx_mcdi_mac_reconfigure,
	.update_stats	= efx_port_dummy_op_void,
	.check_fault	= efx_mcdi_mac_check_fault,
};
gpl-2.0
groeck/linux
tools/perf/tests/open-syscall-tp-fields.c
760
2556
#include "perf.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "tests.h"
#include "debug.h"

/*
 * Check that the raw sample payload of the syscalls:sys_enter_open
 * tracepoint carries the syscall arguments: open() is invoked with a
 * known "flags" value and the value parsed back out of the recorded
 * sample must match it.
 *
 * Returns 0 on success, -1 (or a parse error code) on failure.
 */
int test__syscall_open_tp_fields(void)
{
	struct record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.no_buffering = true,
		.freq	      = 1,
		.mmap_pages   = 256,
		.raw_samples  = true,
	};
	const char *filename = "/etc/passwd";
	/* O_DIRECTORY makes the open fail, but sys_enter still fires */
	int flags = O_RDONLY | O_DIRECTORY;
	struct perf_evlist *evlist = perf_evlist__new();
	struct perf_evsel *evsel;
	int err = -1, i, nr_events = 0, nr_polls = 0;
	char sbuf[STRERR_BUFSIZE];

	if (evlist == NULL) {
		pr_debug("%s: perf_evlist__new\n", __func__);
		goto out;
	}

	evsel = perf_evsel__newtp("syscalls", "sys_enter_open");
	if (evsel == NULL) {
		pr_debug("%s: perf_evsel__newtp\n", __func__);
		goto out_delete_evlist;
	}

	perf_evlist__add(evlist, evsel);

	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("%s: perf_evlist__create_maps\n", __func__);
		goto out_delete_evlist;
	}

	perf_evsel__config(evsel, &opts);

	/* Monitor only this process */
	evlist->threads->map[0] = getpid();

	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n",
			 strerror_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	err = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (err < 0) {
		pr_debug("perf_evlist__mmap: %s\n",
			 strerror_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	perf_evlist__enable(evlist);

	/*
	 * Generate the event:
	 */
	open(filename, flags);

	/* Drain the ring buffers until the sample shows up */
	while (1) {
		int before = nr_events;

		for (i = 0; i < evlist->nr_mmaps; i++) {
			union perf_event *event;

			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
				const u32 type = event->header.type;
				int tp_flags;
				struct perf_sample sample;

				++nr_events;

				if (type != PERF_RECORD_SAMPLE) {
					perf_evlist__mmap_consume(evlist, i);
					continue;
				}

				err = perf_evsel__parse_sample(evsel, event, &sample);
				if (err) {
					pr_err("Can't parse sample, err = %d\n", err);
					goto out_delete_evlist;
				}

				tp_flags = perf_evsel__intval(evsel, &sample, "flags");

				if (flags != tp_flags) {
					pr_debug("%s: Expected flags=%#x, got %#x\n",
						 __func__, flags, tp_flags);
					goto out_delete_evlist;
				}

				goto out_ok;
			}
		}

		/* No progress this pass: wait briefly for more events */
		if (nr_events == before)
			perf_evlist__poll(evlist, 10);

		if (++nr_polls > 5) {
			pr_debug("%s: no events!\n", __func__);
			goto out_delete_evlist;
		}
	}
out_ok:
	err = 0;
out_delete_evlist:
	perf_evlist__delete(evlist);
out:
	return err;
}
gpl-2.0
CyanogenMod/lge-kernel-msm7x30
drivers/media/video/omap24xxcam.c
760
44576
/* * drivers/media/video/omap24xxcam.c * * OMAP 2 camera block driver. * * Copyright (C) 2004 MontaVista Software, Inc. * Copyright (C) 2004 Texas Instruments. * Copyright (C) 2007-2008 Nokia Corporation. * * Contact: Sakari Ailus <sakari.ailus@nokia.com> * * Based on code from Andy Lowe <source@mvista.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #include <linux/delay.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/videodev2.h> #include <linux/pci.h> /* needed for videobufs */ #include <linux/version.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/slab.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include "omap24xxcam.h" #define OMAP24XXCAM_VERSION KERNEL_VERSION(0, 0, 0) #define RESET_TIMEOUT_NS 10000 static void omap24xxcam_reset(struct omap24xxcam_device *cam); static int omap24xxcam_sensor_if_enable(struct omap24xxcam_device *cam); static void omap24xxcam_device_unregister(struct v4l2_int_device *s); static int omap24xxcam_remove(struct platform_device *pdev); /* module parameters */ static int video_nr = -1; /* video device minor (-1 ==> auto assign) */ /* * Maximum amount of memory to use for capture buffers. * Default is 4800KB, enough to double-buffer SXGA. */ static int capture_mem = 1280 * 960 * 2 * 2; static struct v4l2_int_device omap24xxcam; /* * * Clocks. 
* */ static void omap24xxcam_clock_put(struct omap24xxcam_device *cam) { if (cam->ick != NULL && !IS_ERR(cam->ick)) clk_put(cam->ick); if (cam->fck != NULL && !IS_ERR(cam->fck)) clk_put(cam->fck); cam->ick = cam->fck = NULL; } static int omap24xxcam_clock_get(struct omap24xxcam_device *cam) { int rval = 0; cam->fck = clk_get(cam->dev, "fck"); if (IS_ERR(cam->fck)) { dev_err(cam->dev, "can't get camera fck"); rval = PTR_ERR(cam->fck); omap24xxcam_clock_put(cam); return rval; } cam->ick = clk_get(cam->dev, "ick"); if (IS_ERR(cam->ick)) { dev_err(cam->dev, "can't get camera ick"); rval = PTR_ERR(cam->ick); omap24xxcam_clock_put(cam); } return rval; } static void omap24xxcam_clock_on(struct omap24xxcam_device *cam) { clk_enable(cam->fck); clk_enable(cam->ick); } static void omap24xxcam_clock_off(struct omap24xxcam_device *cam) { clk_disable(cam->fck); clk_disable(cam->ick); } /* * * Camera core * */ /* * Set xclk. * * To disable xclk, use value zero. */ static void omap24xxcam_core_xclk_set(const struct omap24xxcam_device *cam, u32 xclk) { if (xclk) { u32 divisor = CAM_MCLK / xclk; if (divisor == 1) omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_CTRL_XCLK, CC_CTRL_XCLK_DIV_BYPASS); else omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_CTRL_XCLK, divisor); } else omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_CTRL_XCLK, CC_CTRL_XCLK_DIV_STABLE_LOW); } static void omap24xxcam_core_hwinit(const struct omap24xxcam_device *cam) { /* * Setting the camera core AUTOIDLE bit causes problems with frame * synchronization, so we will clear the AUTOIDLE bit instead. 
*/ omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_SYSCONFIG, CC_SYSCONFIG_AUTOIDLE); /* program the camera interface DMA packet size */ omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_CTRL_DMA, CC_CTRL_DMA_EN | (DMA_THRESHOLD / 4 - 1)); /* enable camera core error interrupts */ omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_IRQENABLE, CC_IRQENABLE_FW_ERR_IRQ | CC_IRQENABLE_FSC_ERR_IRQ | CC_IRQENABLE_SSC_ERR_IRQ | CC_IRQENABLE_FIFO_OF_IRQ); } /* * Enable the camera core. * * Data transfer to the camera DMA starts from next starting frame. */ static void omap24xxcam_core_enable(const struct omap24xxcam_device *cam) { omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_CTRL, cam->cc_ctrl); } /* * Disable camera core. * * The data transfer will be stopped immediately (CC_CTRL_CC_RST). The * core internal state machines will be reset. Use * CC_CTRL_CC_FRAME_TRIG instead if you want to transfer the current * frame completely. */ static void omap24xxcam_core_disable(const struct omap24xxcam_device *cam) { omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_CTRL, CC_CTRL_CC_RST); } /* Interrupt service routine for camera core interrupts. */ static void omap24xxcam_core_isr(struct omap24xxcam_device *cam) { u32 cc_irqstatus; const u32 cc_irqstatus_err = CC_IRQSTATUS_FW_ERR_IRQ | CC_IRQSTATUS_FSC_ERR_IRQ | CC_IRQSTATUS_SSC_ERR_IRQ | CC_IRQSTATUS_FIFO_UF_IRQ | CC_IRQSTATUS_FIFO_OF_IRQ; cc_irqstatus = omap24xxcam_reg_in(cam->mmio_base + CC_REG_OFFSET, CC_IRQSTATUS); omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_IRQSTATUS, cc_irqstatus); if (cc_irqstatus & cc_irqstatus_err && !atomic_read(&cam->in_reset)) { dev_dbg(cam->dev, "resetting camera, cc_irqstatus 0x%x\n", cc_irqstatus); omap24xxcam_reset(cam); } } /* * * videobuf_buffer handling. * * Memory for mmapped videobuf_buffers is not allocated * conventionally, but by several kmalloc allocations and then * creating the scatterlist on our own. User-space buffers are handled * normally. 
 *
 */

/*
 * Free the memory-mapped buffer memory allocated for a
 * videobuf_buffer and the associated scatterlist.
 *
 * Each scatterlist entry holds a physically contiguous chunk whose
 * pages were marked reserved at allocation time; undo that before
 * freeing the pages.
 */
static void omap24xxcam_vbq_free_mmap_buffer(struct videobuf_buffer *vb)
{
	struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
	size_t alloc_size;
	struct page *page;
	int i;

	if (dma->sglist == NULL)
		return;

	i = dma->sglen;
	while (i) {
		i--;

		alloc_size = sg_dma_len(&dma->sglist[i]);
		page = sg_page(&dma->sglist[i]);
		do {
			ClearPageReserved(page++);
		} while (alloc_size -= PAGE_SIZE);
		__free_pages(sg_page(&dma->sglist[i]),
			     get_order(sg_dma_len(&dma->sglist[i])));
	}

	kfree(dma->sglist);
	dma->sglist = NULL;
}

/* Release all memory related to the videobuf_queue. */
static void omap24xxcam_vbq_free_mmap_buffers(struct videobuf_queue *vbq)
{
	int i;

	mutex_lock(&vbq->vb_lock);

	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == vbq->bufs[i])
			continue;
		if (V4L2_MEMORY_MMAP != vbq->bufs[i]->memory)
			continue;
		vbq->ops->buf_release(vbq, vbq->bufs[i]);
		omap24xxcam_vbq_free_mmap_buffer(vbq->bufs[i]);
		kfree(vbq->bufs[i]);
		vbq->bufs[i] = NULL;
	}

	mutex_unlock(&vbq->vb_lock);

	videobuf_mmap_free(vbq);
}

/*
 * Allocate physically as contiguous as possible buffer for video
 * frame and allocate and build DMA scatter-gather list for it.
 *
 * Returns 0 on success or -ENOMEM; on failure everything allocated so
 * far is freed again.
 */
static int omap24xxcam_vbq_alloc_mmap_buffer(struct videobuf_buffer *vb)
{
	unsigned int order;
	size_t alloc_size, size = vb->bsize; /* vb->bsize is page aligned */
	struct page *page;
	int max_pages, err = 0, i = 0;
	struct videobuf_dmabuf *dma = videobuf_to_dma(vb);

	/*
	 * allocate maximum size scatter-gather list. Note this is
	 * overhead. We may not use as many entries as we allocate
	 */
	max_pages = vb->bsize >> PAGE_SHIFT;
	dma->sglist = kcalloc(max_pages, sizeof(*dma->sglist), GFP_KERNEL);
	if (dma->sglist == NULL) {
		err = -ENOMEM;
		goto out;
	}

	while (size) {
		order = get_order(size);
		/*
		 * do not over-allocate even if we would get larger
		 * contiguous chunk that way
		 */
		if ((PAGE_SIZE << order) > size)
			order--;

		/* try to allocate as many contiguous pages as possible */
		page = alloc_pages(GFP_KERNEL | GFP_DMA, order);
		/*
		 * if allocation fails, try to allocate smaller amount
		 *
		 * NOTE(review): order is unsigned; if the very first
		 * attempt is at order 0 and fails, order-- wraps around
		 * before the !order check — verify this path.
		 */
		while (page == NULL) {
			order--;
			page = alloc_pages(GFP_KERNEL | GFP_DMA, order);
			if (page == NULL && !order) {
				err = -ENOMEM;
				goto out;
			}
		}
		size -= (PAGE_SIZE << order);

		/* append allocated chunk of pages into scatter-gather list */
		sg_set_page(&dma->sglist[i], page, PAGE_SIZE << order, 0);
		dma->sglen++;
		i++;

		alloc_size = (PAGE_SIZE << order);

		/* clear pages before giving them to user space */
		memset(page_address(page), 0, alloc_size);

		/* mark allocated pages reserved */
		do {
			SetPageReserved(page++);
		} while (alloc_size -= PAGE_SIZE);
	}
	/*
	 * REVISIT: not fully correct to assign nr_pages == sglen but
	 * video-buf is passing nr_pages for e.g. unmap_sg calls
	 */
	dma->nr_pages = dma->sglen;
	dma->direction = PCI_DMA_FROMDEVICE;

	return 0;

out:
	omap24xxcam_vbq_free_mmap_buffer(vb);
	return err;
}

/*
 * Allocate mmap buffers for the first @count queue slots, rolling
 * back all of them if any single allocation fails.
 */
static int omap24xxcam_vbq_alloc_mmap_buffers(struct videobuf_queue *vbq,
					      unsigned int count)
{
	int i, err = 0;
	struct omap24xxcam_fh *fh =
		container_of(vbq, struct omap24xxcam_fh, vbq);

	mutex_lock(&vbq->vb_lock);

	for (i = 0; i < count; i++) {
		err = omap24xxcam_vbq_alloc_mmap_buffer(vbq->bufs[i]);
		if (err)
			goto out;
		dev_dbg(fh->cam->dev, "sglen is %d for buffer %d\n",
			videobuf_to_dma(vbq->bufs[i])->sglen, i);
	}

	mutex_unlock(&vbq->vb_lock);

	return 0;
out:
	while (i) {
		i--;
		omap24xxcam_vbq_free_mmap_buffer(vbq->bufs[i]);
	}

	mutex_unlock(&vbq->vb_lock);

	return err;
}

/*
 * This routine is called from interrupt context when a scatter-gather DMA
 * transfer of a videobuf_buffer completes.
 *
 * Disables the core once the last queued transfer finishes, timestamps
 * the buffer and marks it done (or errored, triggering a camera reset
 * unless one is already in progress).
 */
static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
				     u32 csr, void *arg)
{
	struct omap24xxcam_device *cam =
		container_of(sgdma, struct omap24xxcam_device, sgdma);
	struct omap24xxcam_fh *fh = cam->streaming->private_data;
	struct videobuf_buffer *vb = (struct videobuf_buffer *)arg;
	const u32 csr_error = CAMDMA_CSR_MISALIGNED_ERR
		| CAMDMA_CSR_SUPERVISOR_ERR | CAMDMA_CSR_SECURE_ERR
		| CAMDMA_CSR_TRANS_ERR | CAMDMA_CSR_DROP;
	unsigned long flags;

	spin_lock_irqsave(&cam->core_enable_disable_lock, flags);
	if (--cam->sgdma_in_queue == 0)
		omap24xxcam_core_disable(cam);
	spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);

	do_gettimeofday(&vb->ts);
	vb->field_count = atomic_add_return(2, &fh->field_count);
	if (csr & csr_error) {
		vb->state = VIDEOBUF_ERROR;
		if (!atomic_read(&fh->cam->in_reset)) {
			dev_dbg(cam->dev, "resetting camera, csr 0x%x\n",
				csr);
			omap24xxcam_reset(cam);
		}
	} else
		vb->state = VIDEOBUF_DONE;
	wake_up(&vb->done);
}

/*
 * Release a buffer: wait for it to leave the sgdma queue, then undo
 * the DMA mapping (our own mapping for MMAP buffers, videobuf's for
 * user-space buffers).
 */
static void omap24xxcam_vbq_release(struct videobuf_queue *vbq,
				    struct videobuf_buffer *vb)
{
	struct videobuf_dmabuf *dma = videobuf_to_dma(vb);

	/* wait for buffer, especially to get out of the sgdma queue */
	videobuf_waiton(vb, 0, 0);
	if (vb->memory == V4L2_MEMORY_MMAP) {
		dma_unmap_sg(vbq->dev, dma->sglist, dma->sglen,
			     dma->direction);
		dma->direction = DMA_NONE;
	} else {
		videobuf_dma_unmap(vbq, videobuf_to_dma(vb));
		videobuf_dma_free(videobuf_to_dma(vb));
	}

	vb->state = VIDEOBUF_NEEDS_INIT;
}

/*
 * Limit the number of available kernel image capture buffers based on the
 * number requested, the currently selected image size, and the maximum
 * amount of memory permitted for kernel capture buffers.
 */
static int omap24xxcam_vbq_setup(struct videobuf_queue *vbq, unsigned int *cnt,
				 unsigned int *size)
{
	struct omap24xxcam_fh *fh = vbq->priv_data;

	/* NOTE(review): *cnt is unsigned, so "<= 0" is just "== 0" */
	if (*cnt <= 0)
		*cnt = VIDEO_MAX_FRAME;	/* supply a default number of buffers */

	if (*cnt > VIDEO_MAX_FRAME)
		*cnt = VIDEO_MAX_FRAME;

	*size = fh->pix.sizeimage;

	/* accessing fh->cam->capture_mem is ok, it's constant */
	while (*size * *cnt > fh->cam->capture_mem)
		(*cnt)--;

	return 0;
}

/*
 * Map the hand-built scatterlist for DMA. On failure the scatterlist
 * is freed and -EIO returned.
 */
static int omap24xxcam_dma_iolock(struct videobuf_queue *vbq,
				  struct videobuf_dmabuf *dma)
{
	int err = 0;

	dma->direction = PCI_DMA_FROMDEVICE;
	if (!dma_map_sg(vbq->dev, dma->sglist, dma->sglen, dma->direction)) {
		kfree(dma->sglist);
		dma->sglist = NULL;
		dma->sglen = 0;
		err = -EIO;
	}

	return err;
}

/*
 * Prepare a buffer for capture: validate/record its size against the
 * current pixel format, reallocate kernel bounce buffers grown stale
 * by a format change, and perform the DMA mapping.
 */
static int omap24xxcam_vbq_prepare(struct videobuf_queue *vbq,
				   struct videobuf_buffer *vb,
				   enum v4l2_field field)
{
	struct omap24xxcam_fh *fh = vbq->priv_data;
	int err = 0;

	/*
	 * Accessing pix here is okay since it's constant while
	 * streaming is on (and we only get called then).
	 */
	if (vb->baddr) {
		/* This is a userspace buffer. */
		if (fh->pix.sizeimage > vb->bsize) {
			/* The buffer isn't big enough. */
			err = -EINVAL;
		} else
			vb->size = fh->pix.sizeimage;
	} else {
		if (vb->state != VIDEOBUF_NEEDS_INIT) {
			/*
			 * We have a kernel bounce buffer that has
			 * already been allocated.
			 */
			if (fh->pix.sizeimage > vb->size) {
				/*
				 * The image size has been changed to
				 * a larger size since this buffer was
				 * allocated, so we need to free and
				 * reallocate it.
				 */
				omap24xxcam_vbq_release(vbq, vb);
				vb->size = fh->pix.sizeimage;
			}
		} else {
			/* We need to allocate a new kernel bounce buffer. */
			vb->size = fh->pix.sizeimage;
		}
	}

	if (err)
		return err;

	vb->width = fh->pix.width;
	vb->height = fh->pix.height;
	vb->field = field;

	if (vb->state == VIDEOBUF_NEEDS_INIT) {
		if (vb->memory == V4L2_MEMORY_MMAP)
			/*
			 * we have built the scatter-gather list by ourself so
			 * do the scatter-gather mapping as well
			 */
			err = omap24xxcam_dma_iolock(vbq, videobuf_to_dma(vb));
		else
			err = videobuf_iolock(vbq, vb, NULL);
	}

	if (!err)
		vb->state = VIDEOBUF_PREPARED;
	else
		omap24xxcam_vbq_release(vbq, vb);

	return err;
}

/*
 * Queue a prepared buffer for scatter-gather DMA; enable the camera
 * core when the queue goes from empty to non-empty (unless a reset is
 * in progress).
 */
static void omap24xxcam_vbq_queue(struct videobuf_queue *vbq,
				  struct videobuf_buffer *vb)
{
	struct omap24xxcam_fh *fh = vbq->priv_data;
	struct omap24xxcam_device *cam = fh->cam;
	enum videobuf_state state = vb->state;
	unsigned long flags;
	int err;

	/*
	 * FIXME: We're marking the buffer active since we have no
	 * pretty way of marking it active exactly when the
	 * scatter-gather transfer starts.
	 */
	vb->state = VIDEOBUF_ACTIVE;

	err = omap24xxcam_sgdma_queue(&fh->cam->sgdma,
				      videobuf_to_dma(vb)->sglist,
				      videobuf_to_dma(vb)->sglen, vb->size,
				      omap24xxcam_vbq_complete, vb);

	if (!err) {
		spin_lock_irqsave(&cam->core_enable_disable_lock, flags);
		if (++cam->sgdma_in_queue == 1
		    && !atomic_read(&cam->in_reset))
			omap24xxcam_core_enable(cam);
		spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
	} else {
		/*
		 * Oops. We're not supposed to get any errors here.
		 * The only way we could get an error is if we ran out
		 * of scatter-gather DMA slots, but we are supposed to
		 * have at least as many scatter-gather DMA slots as
		 * video buffers so that can't happen.
		 */
		dev_err(cam->dev, "failed to queue a video buffer for dma!\n");
		dev_err(cam->dev, "likely a bug in the driver!\n");
		vb->state = state;
	}
}

static struct videobuf_queue_ops omap24xxcam_vbq_ops = {
	.buf_setup   = omap24xxcam_vbq_setup,
	.buf_prepare = omap24xxcam_vbq_prepare,
	.buf_queue   = omap24xxcam_vbq_queue,
	.buf_release = omap24xxcam_vbq_release,
};

/*
 *
 * OMAP main camera system
 *
 */

/*
 * Reset camera block to power-on state.
 */
static void omap24xxcam_poweron_reset(struct omap24xxcam_device *cam)
{
	int max_loop = RESET_TIMEOUT_NS;

	/* Reset whole camera subsystem */
	omap24xxcam_reg_out(cam->mmio_base,
			    CAM_SYSCONFIG,
			    CAM_SYSCONFIG_SOFTRESET);

	/* Wait till it's finished */
	while (!(omap24xxcam_reg_in(cam->mmio_base, CAM_SYSSTATUS)
		 & CAM_SYSSTATUS_RESETDONE)
	       && --max_loop) {
		ndelay(1);
	}

	if (!(omap24xxcam_reg_in(cam->mmio_base, CAM_SYSSTATUS)
	      & CAM_SYSSTATUS_RESETDONE))
		dev_err(cam->dev, "camera soft reset timeout\n");
}

/*
 * (Re)initialise the camera block.
 */
static void omap24xxcam_hwinit(struct omap24xxcam_device *cam)
{
	omap24xxcam_poweron_reset(cam);

	/* set the camera subsystem autoidle bit */
	omap24xxcam_reg_out(cam->mmio_base, CAM_SYSCONFIG,
			    CAM_SYSCONFIG_AUTOIDLE);

	/* set the camera MMU autoidle bit */
	omap24xxcam_reg_out(cam->mmio_base,
			    CAMMMU_REG_OFFSET + CAMMMU_SYSCONFIG,
			    CAMMMU_SYSCONFIG_AUTOIDLE);

	omap24xxcam_core_hwinit(cam);

	omap24xxcam_dma_hwinit(&cam->sgdma.dma);
}

/*
 * Callback for dma transfer stalling.
 */
static void omap24xxcam_stalled_dma_reset(unsigned long data)
{
	struct omap24xxcam_device *cam = (struct omap24xxcam_device *)data;

	if (!atomic_read(&cam->in_reset)) {
		dev_dbg(cam->dev, "dma stalled, resetting camera\n");
		omap24xxcam_reset(cam);
	}
}

/*
 * Stop capture. Mark we're doing a reset, stop DMA transfers and
 * core. (No new scatter-gather transfers will be queued whilst
 * in_reset is non-zero.)
 *
 * If omap24xxcam_capture_stop is called from several places at
 * once, only the first call will have an effect. Similarly, the last
 * call omap24xxcam_streaming_cont will have effect.
 *
 * Serialisation is ensured by using cam->core_enable_disable_lock.
 */
static void omap24xxcam_capture_stop(struct omap24xxcam_device *cam)
{
	unsigned long flags;

	spin_lock_irqsave(&cam->core_enable_disable_lock, flags);

	if (atomic_inc_return(&cam->in_reset) != 1) {
		spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
		return;
	}

	omap24xxcam_core_disable(cam);

	spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);

	omap24xxcam_sgdma_sync(&cam->sgdma);
}

/*
 * Reset and continue streaming.
 *
 * Note: Resetting the camera FIFO via the CC_RST bit in the CC_CTRL
 * register is supposed to be sufficient to recover from a camera
 * interface error, but it doesn't seem to be enough. If we only do
 * that then subsequent image captures are out of sync by either one
 * or two times DMA_THRESHOLD bytes. Resetting and re-initializing the
 * entire camera subsystem prevents the problem with frame
 * synchronization.
 */
static void omap24xxcam_capture_cont(struct omap24xxcam_device *cam)
{
	unsigned long flags;

	spin_lock_irqsave(&cam->core_enable_disable_lock, flags);

	if (atomic_read(&cam->in_reset) != 1)
		goto out;

	omap24xxcam_hwinit(cam);

	omap24xxcam_sensor_if_enable(cam);

	omap24xxcam_sgdma_process(&cam->sgdma);

	if (cam->sgdma_in_queue)
		omap24xxcam_core_enable(cam);

out:
	atomic_dec(&cam->in_reset);
	spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
}

/* sysfs "streaming" attribute: "active" while a file handle streams. */
static ssize_t
omap24xxcam_streaming_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct omap24xxcam_device *cam = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", cam->streaming ? "active" : "inactive");
}
static DEVICE_ATTR(streaming, S_IRUGO, omap24xxcam_streaming_show, NULL);

/*
 * Stop capture and restart it. I.e. reset the camera during use.
 */
static void omap24xxcam_reset(struct omap24xxcam_device *cam)
{
	omap24xxcam_capture_stop(cam);
	omap24xxcam_capture_cont(cam);
}

/*
 * The main interrupt handler.
 */
/*
 * Top-level IRQ handler: dispatches camera DMA and camera core
 * interrupts; MMU interrupts are only logged.
 */
static irqreturn_t omap24xxcam_isr(int irq, void *arg)
{
	struct omap24xxcam_device *cam = (struct omap24xxcam_device *)arg;
	u32 irqstatus;
	unsigned int irqhandled = 0;

	irqstatus = omap24xxcam_reg_in(cam->mmio_base, CAM_IRQSTATUS);

	if (irqstatus &
	    (CAM_IRQSTATUS_DMA_IRQ2 | CAM_IRQSTATUS_DMA_IRQ1
	     | CAM_IRQSTATUS_DMA_IRQ0)) {
		omap24xxcam_dma_isr(&cam->sgdma.dma);
		irqhandled = 1;
	}
	if (irqstatus & CAM_IRQSTATUS_CC_IRQ) {
		omap24xxcam_core_isr(cam);
		irqhandled = 1;
	}
	if (irqstatus & CAM_IRQSTATUS_MMU_IRQ)
		dev_err(cam->dev, "unhandled camera MMU interrupt!\n");

	return IRQ_RETVAL(irqhandled);
}

/*
 *
 * Sensor handling.
 *
 */

/*
 * Enable the external sensor interface. Try to negotiate interface
 * parameters with the sensor and start using the new ones. The calls
 * to sensor_if_enable and sensor_if_disable need not to be balanced.
 */
static int omap24xxcam_sensor_if_enable(struct omap24xxcam_device *cam)
{
	int rval;
	struct v4l2_ifparm p;

	rval = vidioc_int_g_ifparm(cam->sdev, &p);
	if (rval) {
		dev_err(cam->dev, "vidioc_int_g_ifparm failed with %d\n",
			rval);
		return rval;
	}

	cam->if_type = p.if_type;

	cam->cc_ctrl = CC_CTRL_CC_EN;

	switch (p.if_type) {
	case V4L2_IF_TYPE_BT656:
		if (p.u.bt656.frame_start_on_rising_vs)
			cam->cc_ctrl |= CC_CTRL_NOBT_SYNCHRO;
		if (p.u.bt656.bt_sync_correct)
			cam->cc_ctrl |= CC_CTRL_BT_CORRECT;
		if (p.u.bt656.swap)
			cam->cc_ctrl |= CC_CTRL_PAR_ORDERCAM;
		if (p.u.bt656.latch_clk_inv)
			cam->cc_ctrl |= CC_CTRL_PAR_CLK_POL;
		if (p.u.bt656.nobt_hs_inv)
			cam->cc_ctrl |= CC_CTRL_NOBT_HS_POL;
		if (p.u.bt656.nobt_vs_inv)
			cam->cc_ctrl |= CC_CTRL_NOBT_VS_POL;

		switch (p.u.bt656.mode) {
		case V4L2_IF_TYPE_BT656_MODE_NOBT_8BIT:
			cam->cc_ctrl |= CC_CTRL_PAR_MODE_NOBT8;
			break;
		case V4L2_IF_TYPE_BT656_MODE_NOBT_10BIT:
			cam->cc_ctrl |= CC_CTRL_PAR_MODE_NOBT10;
			break;
		case V4L2_IF_TYPE_BT656_MODE_NOBT_12BIT:
			cam->cc_ctrl |= CC_CTRL_PAR_MODE_NOBT12;
			break;
		case V4L2_IF_TYPE_BT656_MODE_BT_8BIT:
			cam->cc_ctrl |= CC_CTRL_PAR_MODE_BT8;
			break;
		case V4L2_IF_TYPE_BT656_MODE_BT_10BIT:
			cam->cc_ctrl |= CC_CTRL_PAR_MODE_BT10;
			break;
		default:
			dev_err(cam->dev,
				"bt656 interface mode %d not supported\n",
				p.u.bt656.mode);
			return -EINVAL;
		}
		/*
		 * The clock rate that the sensor wants has changed.
		 * We have to adjust the xclk from OMAP 2 side to
		 * match the sensor's wish as closely as possible.
		 */
		if (p.u.bt656.clock_curr != cam->if_u.bt656.xclk) {
			u32 xclk = p.u.bt656.clock_curr;
			u32 divisor;

			if (xclk == 0)
				return -EINVAL;

			if (xclk > CAM_MCLK)
				xclk = CAM_MCLK;

			/* pick the divisor giving the closest rate not
			 * above CAM_MCLK/divisor, clamped to [1, 30] and
			 * kept within the sensor's min/max window */
			divisor = CAM_MCLK / xclk;
			if (divisor * xclk < CAM_MCLK)
				divisor++;
			if (CAM_MCLK / divisor < p.u.bt656.clock_min
			    && divisor > 1)
				divisor--;
			if (divisor > 30)
				divisor = 30;

			xclk = CAM_MCLK / divisor;

			if (xclk < p.u.bt656.clock_min
			    || xclk > p.u.bt656.clock_max)
				return -EINVAL;

			cam->if_u.bt656.xclk = xclk;
		}
		omap24xxcam_core_xclk_set(cam, cam->if_u.bt656.xclk);
		break;
	default:
		/* FIXME: how about other interfaces? */
		dev_err(cam->dev, "interface type %d not supported\n",
			p.if_type);
		return -EINVAL;
	}

	return 0;
}

/* Disable the sensor interface (currently: stop xclk for BT656). */
static void omap24xxcam_sensor_if_disable(const struct omap24xxcam_device *cam)
{
	switch (cam->if_type) {
	case V4L2_IF_TYPE_BT656:
		omap24xxcam_core_xclk_set(cam, 0);
		break;
	}
}

/*
 * Initialise the sensor hardware.
 *
 * Clocks and the interface are enabled only for the duration of the
 * probe; the sensor is powered down again before returning. If the
 * sensor cannot be initialised, cam->sdev is cleared so the rest of
 * the driver treats it as nonexistent.
 */
static int omap24xxcam_sensor_init(struct omap24xxcam_device *cam)
{
	int err = 0;
	struct v4l2_int_device *sdev = cam->sdev;

	omap24xxcam_clock_on(cam);
	err = omap24xxcam_sensor_if_enable(cam);
	if (err) {
		dev_err(cam->dev, "sensor interface could not be enabled at "
			"initialisation, %d\n", err);
		cam->sdev = NULL;
		goto out;
	}

	/* power up sensor during sensor initialization */
	vidioc_int_s_power(sdev, 1);

	err = vidioc_int_dev_init(sdev);
	if (err) {
		dev_err(cam->dev, "cannot initialize sensor, error %d\n",
			err);
		/* Sensor init failed --- it's nonexistent to us! */
		cam->sdev = NULL;
		goto out;
	}

	dev_info(cam->dev, "sensor is %s\n", sdev->name);

out:
	omap24xxcam_sensor_if_disable(cam);
	omap24xxcam_clock_off(cam);

	vidioc_int_s_power(sdev, 0);

	return err;
}

/* Detach the sensor (counterpart of vidioc_int_dev_init). */
static void omap24xxcam_sensor_exit(struct omap24xxcam_device *cam)
{
	if (cam->sdev)
		vidioc_int_dev_exit(cam->sdev);
}

/* Power down the sensor and stop its interface and clocks. */
static void omap24xxcam_sensor_disable(struct omap24xxcam_device *cam)
{
	omap24xxcam_sensor_if_disable(cam);
	omap24xxcam_clock_off(cam);
	vidioc_int_s_power(cam->sdev, 0);
}

/*
 * Power-up and configure camera sensor. It's ready for capturing now.
 */
static int omap24xxcam_sensor_enable(struct omap24xxcam_device *cam)
{
	int rval;

	omap24xxcam_clock_on(cam);

	omap24xxcam_sensor_if_enable(cam);

	rval = vidioc_int_s_power(cam->sdev, 1);
	if (rval)
		goto out;

	rval = vidioc_int_init(cam->sdev);
	if (rval)
		goto out;

	return 0;

out:
	omap24xxcam_sensor_disable(cam);

	return rval;
}

/*
 * Deferred sensor reset: stop capture, try a sensor-level reset and
 * fall back to a full power cycle, then continue capturing. Skipped
 * entirely while reset_disable is held (e.g. during streamoff).
 */
static void omap24xxcam_sensor_reset_work(struct work_struct *work)
{
	struct omap24xxcam_device *cam =
		container_of(work, struct omap24xxcam_device,
			     sensor_reset_work);

	if (atomic_read(&cam->reset_disable))
		return;

	omap24xxcam_capture_stop(cam);

	if (vidioc_int_reset(cam->sdev) == 0) {
		vidioc_int_init(cam->sdev);
	} else {
		/* Can't reset it by vidioc_int_reset. */
		omap24xxcam_sensor_disable(cam);
		omap24xxcam_sensor_enable(cam);
	}

	omap24xxcam_capture_cont(cam);
}

/*
 *
 * IOCTL interface.
 *
 */

/* VIDIOC_QUERYCAP: report driver/card names and capture capabilities. */
static int vidioc_querycap(struct file *file, void *fh,
			   struct v4l2_capability *cap)
{
	struct omap24xxcam_fh *ofh = fh;
	struct omap24xxcam_device *cam = ofh->cam;

	strlcpy(cap->driver, CAM_NAME, sizeof(cap->driver));
	strlcpy(cap->card, cam->vfd->name, sizeof(cap->card));
	cap->version = OMAP24XXCAM_VERSION;
	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;

	return 0;
}

/* VIDIOC_ENUM_FMT: delegated to the sensor driver. */
static int vidioc_enum_fmt_vid_cap(struct file *file, void *fh,
				   struct v4l2_fmtdesc *f)
{
	struct omap24xxcam_fh *ofh = fh;
	struct omap24xxcam_device *cam = ofh->cam;
	int rval;

	rval = vidioc_int_enum_fmt_cap(cam->sdev, f);

	return rval;
}

/* VIDIOC_G_FMT: delegated to the sensor driver under cam->mutex. */
static int vidioc_g_fmt_vid_cap(struct file *file, void *fh,
				struct v4l2_format *f)
{
	struct omap24xxcam_fh *ofh = fh;
	struct omap24xxcam_device *cam = ofh->cam;
	int rval;

	mutex_lock(&cam->mutex);
	rval = vidioc_int_g_fmt_cap(cam->sdev, f);
	mutex_unlock(&cam->mutex);

	return rval;
}

/*
 * VIDIOC_S_FMT: set the format on the sensor (rejected while
 * streaming) and cache the resulting pix format in the file handle.
 *
 * NOTE(review): f is cleared and re-filled via G_FMT even when S_FMT
 * failed, so the caller's struct is overwritten on the error path too
 * — confirm this is intended.
 */
static int vidioc_s_fmt_vid_cap(struct file *file, void *fh,
				struct v4l2_format *f)
{
	struct omap24xxcam_fh *ofh = fh;
	struct omap24xxcam_device *cam = ofh->cam;
	int rval;

	mutex_lock(&cam->mutex);
	if (cam->streaming) {
		rval = -EBUSY;
		goto out;
	}

	rval = vidioc_int_s_fmt_cap(cam->sdev, f);

out:
	mutex_unlock(&cam->mutex);

	if (!rval) {
		mutex_lock(&ofh->vbq.vb_lock);
		ofh->pix = f->fmt.pix;
		mutex_unlock(&ofh->vbq.vb_lock);
	}

	memset(f, 0, sizeof(*f));
	vidioc_g_fmt_vid_cap(file, fh, f);

	return rval;
}

/* VIDIOC_TRY_FMT: delegated to the sensor driver under cam->mutex. */
static int vidioc_try_fmt_vid_cap(struct file *file, void *fh,
				  struct v4l2_format *f)
{
	struct omap24xxcam_fh *ofh = fh;
	struct omap24xxcam_device *cam = ofh->cam;
	int rval;

	mutex_lock(&cam->mutex);
	rval = vidioc_int_try_fmt_cap(cam->sdev, f);
	mutex_unlock(&cam->mutex);

	return rval;
}

/*
 * VIDIOC_REQBUFS: free any old mmap buffers, let videobuf allocate
 * the queue slots, then allocate our own page-backed buffers for the
 * MMAP memory type.
 */
static int vidioc_reqbufs(struct file *file, void *fh,
			  struct v4l2_requestbuffers *b)
{
	struct omap24xxcam_fh *ofh = fh;
	struct omap24xxcam_device *cam = ofh->cam;
	int rval;

	mutex_lock(&cam->mutex);
	if (cam->streaming) {
		mutex_unlock(&cam->mutex);
		return -EBUSY;
	}

	omap24xxcam_vbq_free_mmap_buffers(&ofh->vbq);
	mutex_unlock(&cam->mutex);

	rval = videobuf_reqbufs(&ofh->vbq, b);

	/*
	 * Either videobuf_reqbufs failed or the buffers are not
	 * memory-mapped (which would need special attention).
	 */
	if (rval < 0 || b->memory != V4L2_MEMORY_MMAP)
		goto out;

	rval = omap24xxcam_vbq_alloc_mmap_buffers(&ofh->vbq, rval);
	if (rval)
		omap24xxcam_vbq_free_mmap_buffers(&ofh->vbq);

out:
	return rval;
}

/* VIDIOC_QUERYBUF: delegated to videobuf. */
static int vidioc_querybuf(struct file *file, void *fh,
			   struct v4l2_buffer *b)
{
	struct omap24xxcam_fh *ofh = fh;

	return videobuf_querybuf(&ofh->vbq, b);
}

/* VIDIOC_QBUF: delegated to videobuf. */
static int vidioc_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct omap24xxcam_fh *ofh = fh;

	return videobuf_qbuf(&ofh->vbq, b);
}

/*
 * VIDIOC_DQBUF: dequeue a buffer and ask the sensor whether a reset
 * is needed for this frame; if so, schedule the reset work, requeue
 * the buffer and (in blocking mode) retry so userspace never sees the
 * -EIO.
 */
static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct omap24xxcam_fh *ofh = fh;
	struct omap24xxcam_device *cam = ofh->cam;
	struct videobuf_buffer *vb;
	int rval;

videobuf_dqbuf_again:
	rval = videobuf_dqbuf(&ofh->vbq, b, file->f_flags & O_NONBLOCK);
	if (rval)
		goto out;

	vb = ofh->vbq.bufs[b->index];

	mutex_lock(&cam->mutex);
	/*
	 * _needs_reset returns -EIO if reset is required.
	 *
	 * NOTE(review): vb->baddr is a userspace address cast to a
	 * kernel pointer here — verify how the sensor callback uses it.
	 */
	rval = vidioc_int_g_needs_reset(cam->sdev, (void *)vb->baddr);
	mutex_unlock(&cam->mutex);
	if (rval == -EIO)
		schedule_work(&cam->sensor_reset_work);
	else
		rval = 0;

out:
	/*
	 * This is a hack. We don't want to show -EIO to the user
	 * space. Requeue the buffer and try again if we're not doing
	 * this in non-blocking mode.
	 */
	if (rval == -EIO) {
		videobuf_qbuf(&ofh->vbq, b);
		if (!(file->f_flags & O_NONBLOCK))
			goto videobuf_dqbuf_again;
		/*
		 * We don't have a videobuf_buffer now --- maybe next
		 * time...
		 */
		rval = -EAGAIN;
	}

	return rval;
}

/*
 * VIDIOC_STREAMON: enable the sensor interface, start videobuf
 * streaming and record the owning file handle.
 */
static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
{
	struct omap24xxcam_fh *ofh = fh;
	struct omap24xxcam_device *cam = ofh->cam;
	int rval;

	mutex_lock(&cam->mutex);
	if (cam->streaming) {
		rval = -EBUSY;
		goto out;
	}

	rval = omap24xxcam_sensor_if_enable(cam);
	if (rval) {
		dev_dbg(cam->dev, "vidioc_int_g_ifparm failed\n");
		goto out;
	}

	rval = videobuf_streamon(&ofh->vbq);
	if (!rval) {
		cam->streaming = file;
		sysfs_notify(&cam->dev->kobj, NULL, "streaming");
	}

out:
	mutex_unlock(&cam->mutex);

	return rval;
}

/*
 * VIDIOC_STREAMOFF: block sensor resets for the duration, flush any
 * pending reset work, then stop videobuf streaming and clear the
 * owner.
 */
static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
{
	struct omap24xxcam_fh *ofh = fh;
	struct omap24xxcam_device *cam = ofh->cam;
	struct videobuf_queue *q = &ofh->vbq;
	int rval;

	atomic_inc(&cam->reset_disable);

	flush_scheduled_work();

	rval = videobuf_streamoff(q);
	if (!rval) {
		mutex_lock(&cam->mutex);
		cam->streaming = NULL;
		mutex_unlock(&cam->mutex);
		sysfs_notify(&cam->dev->kobj, NULL, "streaming");
	}

	atomic_dec(&cam->reset_disable);

	return rval;
}

/* VIDIOC_ENUM_INPUT: single fixed "camera" input. */
static int vidioc_enum_input(struct file *file, void *fh,
			     struct v4l2_input *inp)
{
	if (inp->index > 0)
		return -EINVAL;

	strlcpy(inp->name, "camera", sizeof(inp->name));
	inp->type = V4L2_INPUT_TYPE_CAMERA;

	return 0;
}

/* VIDIOC_G_INPUT: always input 0. */
static int vidioc_g_input(struct file *file, void *fh, unsigned int *i)
{
	*i = 0;

	return 0;
}

/* VIDIOC_S_INPUT: only input 0 exists. */
static int vidioc_s_input(struct file *file, void *fh, unsigned int i)
{
	if (i > 0)
		return -EINVAL;

	return 0;
}

/* VIDIOC_QUERYCTRL: delegated to the sensor driver. */
static int vidioc_queryctrl(struct file *file, void *fh,
			    struct v4l2_queryctrl *a)
{
	struct omap24xxcam_fh *ofh = fh;
	struct omap24xxcam_device *cam = ofh->cam;
	int rval;

	rval = vidioc_int_queryctrl(cam->sdev, a);

	return rval;
}

/* VIDIOC_G_CTRL: delegated to the sensor driver under cam->mutex. */
static int vidioc_g_ctrl(struct file *file, void *fh,
			 struct v4l2_control *a)
{
	struct omap24xxcam_fh *ofh = fh;
	struct omap24xxcam_device *cam = ofh->cam;
	int rval;

	mutex_lock(&cam->mutex);
	rval = vidioc_int_g_ctrl(cam->sdev, a);
	mutex_unlock(&cam->mutex);

	return rval;
}

/* VIDIOC_S_CTRL: delegated to the sensor driver under cam->mutex. */
static int vidioc_s_ctrl(struct file *file, void *fh,
			 struct v4l2_control *a)
{
	struct omap24xxcam_fh *ofh = fh;
	struct omap24xxcam_device *cam = ofh->cam;
	int rval;

	mutex_lock(&cam->mutex);
	rval = vidioc_int_s_ctrl(cam->sdev, a);
	mutex_unlock(&cam->mutex);

	return rval;
}

/* VIDIOC_G_PARM: delegated to the sensor driver under cam->mutex. */
static int vidioc_g_parm(struct file *file, void *fh,
			 struct v4l2_streamparm *a)
{
	struct omap24xxcam_fh *ofh = fh;
	struct omap24xxcam_device *cam = ofh->cam;
	int rval;

	mutex_lock(&cam->mutex);
	rval = vidioc_int_g_parm(cam->sdev, a);
	mutex_unlock(&cam->mutex);

	return rval;
}

/*
 * VIDIOC_S_PARM: set new streaming parameters on the sensor (rejected
 * while streaming); if enabling the sensor interface with the new
 * parameters fails, the previous parameters are restored.
 */
static int vidioc_s_parm(struct file *file, void *fh,
			 struct v4l2_streamparm *a)
{
	struct omap24xxcam_fh *ofh = fh;
	struct omap24xxcam_device *cam = ofh->cam;
	struct v4l2_streamparm old_streamparm;
	int rval;

	mutex_lock(&cam->mutex);
	if (cam->streaming) {
		rval = -EBUSY;
		goto out;
	}

	old_streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	rval = vidioc_int_g_parm(cam->sdev, &old_streamparm);
	if (rval)
		goto out;

	rval = vidioc_int_s_parm(cam->sdev, a);
	if (rval)
		goto out;

	rval = omap24xxcam_sensor_if_enable(cam);
	/*
	 * Revert to old streaming parameters if enabling sensor
	 * interface with the new ones failed.
	 */
	if (rval)
		vidioc_int_s_parm(cam->sdev, &old_streamparm);

out:
	mutex_unlock(&cam->mutex);

	return rval;
}

/*
 *
 * File operations.
 *
 */

/*
 * poll(): only the streaming owner may poll; readiness is judged from
 * the first buffer on the videobuf stream list.
 */
static unsigned int omap24xxcam_poll(struct file *file,
				     struct poll_table_struct *wait)
{
	struct omap24xxcam_fh *fh = file->private_data;
	struct omap24xxcam_device *cam = fh->cam;
	struct videobuf_buffer *vb;

	mutex_lock(&cam->mutex);
	if (cam->streaming != file) {
		mutex_unlock(&cam->mutex);
		return POLLERR;
	}
	mutex_unlock(&cam->mutex);

	mutex_lock(&fh->vbq.vb_lock);
	if (list_empty(&fh->vbq.stream)) {
		mutex_unlock(&fh->vbq.vb_lock);
		return POLLERR;
	}
	vb = list_entry(fh->vbq.stream.next, struct videobuf_buffer, stream);
	mutex_unlock(&fh->vbq.vb_lock);

	poll_wait(file, &vb->done, wait);

	if (vb->state == VIDEOBUF_DONE || vb->state == VIDEOBUF_ERROR)
		return POLLIN | POLLRDNORM;

	return 0;
}

/*
 * Map our hand-allocated MMAP buffers into the calling process: find
 * the buffer range matching the vma offset/length and remap each
 * scatterlist chunk with remap_pfn_range().
 */
static int omap24xxcam_mmap_buffers(struct file *file,
				    struct vm_area_struct *vma)
{
	struct omap24xxcam_fh *fh = file->private_data;
	struct omap24xxcam_device *cam = fh->cam;
	struct videobuf_queue *vbq = &fh->vbq;
	unsigned int first, last, size, i, j;
	int err = 0;

	mutex_lock(&cam->mutex);
	if (cam->streaming) {
		mutex_unlock(&cam->mutex);
		return -EBUSY;
	}
	mutex_unlock(&cam->mutex);
	mutex_lock(&vbq->vb_lock);

	/* look for first buffer to map */
	for (first = 0; first < VIDEO_MAX_FRAME; first++) {
		if (NULL == vbq->bufs[first])
			continue;
		if (V4L2_MEMORY_MMAP != vbq->bufs[first]->memory)
			continue;
		if (vbq->bufs[first]->boff == (vma->vm_pgoff << PAGE_SHIFT))
			break;
	}

	/* look for last buffer to map */
	for (size = 0, last = first; last < VIDEO_MAX_FRAME; last++) {
		if (NULL == vbq->bufs[last])
			continue;
		if (V4L2_MEMORY_MMAP != vbq->bufs[last]->memory)
			continue;
		size += vbq->bufs[last]->bsize;
		if (size == (vma->vm_end - vma->vm_start))
			break;
	}

	size = 0;
	for (i = first; i <= last && i < VIDEO_MAX_FRAME; i++) {
		struct videobuf_dmabuf *dma = videobuf_to_dma(vbq->bufs[i]);

		for (j = 0; j < dma->sglen; j++) {
			err = remap_pfn_range(
				vma, vma->vm_start + size,
				page_to_pfn(sg_page(&dma->sglist[j])),
				sg_dma_len(&dma->sglist[j]),
				vma->vm_page_prot);
			if (err)
				goto out;
			size += sg_dma_len(&dma->sglist[j]);
		}
	}

out:
	mutex_unlock(&vbq->vb_lock);

	return err;
}

/*
 * mmap(): let videobuf validate the request and set up its private
 * data, then map our own buffers non-cached.
 */
static int omap24xxcam_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct omap24xxcam_fh *fh = file->private_data;
	int rval;

	/* let the video-buf mapper check arguments and set-up structures */
	rval = videobuf_mmap_mapper(&fh->vbq, vma);
	if (rval)
		return rval;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* do mapping to our allocated buffers */
	rval = omap24xxcam_mmap_buffers(file, vma);
	/*
	 * In case of error, free vma->vm_private_data allocated by
	 * videobuf_mmap_mapper.
	 */
	if (rval)
		kfree(vma->vm_private_data);

	return rval;
}

/*
 * open(): allocate a per-handle structure; the first user powers up
 * the hardware and the sensor. The handle caches the current pixel
 * format and owns its videobuf queue.
 */
static int omap24xxcam_open(struct file *file)
{
	struct omap24xxcam_device *cam = omap24xxcam.priv;
	struct omap24xxcam_fh *fh;
	struct v4l2_format format;

	if (!cam || !cam->vfd)
		return -ENODEV;

	fh = kzalloc(sizeof(*fh), GFP_KERNEL);
	if (fh == NULL)
		return -ENOMEM;

	mutex_lock(&cam->mutex);
	if (cam->sdev == NULL || !try_module_get(cam->sdev->module)) {
		mutex_unlock(&cam->mutex);
		goto out_try_module_get;
	}

	if (atomic_inc_return(&cam->users) == 1) {
		omap24xxcam_hwinit(cam);
		if (omap24xxcam_sensor_enable(cam)) {
			mutex_unlock(&cam->mutex);
			goto out_omap24xxcam_sensor_enable;
		}
	}
	mutex_unlock(&cam->mutex);

	fh->cam = cam;
	mutex_lock(&cam->mutex);
	vidioc_int_g_fmt_cap(cam->sdev, &format);
	mutex_unlock(&cam->mutex);
	/* FIXME: how about fh->pix when there are more users? */
	fh->pix = format.fmt.pix;

	file->private_data = fh;

	spin_lock_init(&fh->vbq_lock);

	videobuf_queue_sg_init(&fh->vbq, &omap24xxcam_vbq_ops, NULL,
			       &fh->vbq_lock, V4L2_BUF_TYPE_VIDEO_CAPTURE,
			       V4L2_FIELD_NONE,
			       sizeof(struct videobuf_buffer), fh);

	return 0;

out_omap24xxcam_sensor_enable:
	omap24xxcam_poweron_reset(cam);
	module_put(cam->sdev->module);

out_try_module_get:
	kfree(fh);

	return -ENODEV;
}

/*
 * release(): stop any streaming owned by this handle, free its mmap
 * buffers, and power the hardware down when the last user leaves.
 * Reset work is flushed while resets are disabled so nothing runs
 * against a half-torn-down device.
 */
static int omap24xxcam_release(struct file *file)
{
	struct omap24xxcam_fh *fh = file->private_data;
	struct omap24xxcam_device *cam = fh->cam;

	atomic_inc(&cam->reset_disable);

	flush_scheduled_work();

	/* stop streaming capture */
	videobuf_streamoff(&fh->vbq);

	mutex_lock(&cam->mutex);
	if (cam->streaming == file) {
		cam->streaming = NULL;
		mutex_unlock(&cam->mutex);
		sysfs_notify(&cam->dev->kobj, NULL, "streaming");
	} else {
		mutex_unlock(&cam->mutex);
	}

	atomic_dec(&cam->reset_disable);

	omap24xxcam_vbq_free_mmap_buffers(&fh->vbq);

	/*
	 * Make sure the reset work we might have scheduled is not
	 * pending! It may be run *only* if we have users. (And it may
	 * not be scheduled anymore since streaming is already
	 * disabled.)
	 */
	flush_scheduled_work();

	mutex_lock(&cam->mutex);
	if (atomic_dec_return(&cam->users) == 0) {
		omap24xxcam_sensor_disable(cam);
		omap24xxcam_poweron_reset(cam);
	}
	mutex_unlock(&cam->mutex);

	file->private_data = NULL;

	module_put(cam->sdev->module);
	kfree(fh);

	return 0;
}

static struct v4l2_file_operations omap24xxcam_fops = {
	.ioctl	 = video_ioctl2,
	.poll	 = omap24xxcam_poll,
	.mmap	 = omap24xxcam_mmap,
	.open	 = omap24xxcam_open,
	.release = omap24xxcam_release,
};

/*
 *
 * Power management.
* */ #ifdef CONFIG_PM static int omap24xxcam_suspend(struct platform_device *pdev, pm_message_t state) { struct omap24xxcam_device *cam = platform_get_drvdata(pdev); if (atomic_read(&cam->users) == 0) return 0; if (!atomic_read(&cam->reset_disable)) omap24xxcam_capture_stop(cam); omap24xxcam_sensor_disable(cam); omap24xxcam_poweron_reset(cam); return 0; } static int omap24xxcam_resume(struct platform_device *pdev) { struct omap24xxcam_device *cam = platform_get_drvdata(pdev); if (atomic_read(&cam->users) == 0) return 0; omap24xxcam_hwinit(cam); omap24xxcam_sensor_enable(cam); if (!atomic_read(&cam->reset_disable)) omap24xxcam_capture_cont(cam); return 0; } #endif /* CONFIG_PM */ static const struct v4l2_ioctl_ops omap24xxcam_ioctl_fops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, .vidioc_reqbufs = vidioc_reqbufs, .vidioc_querybuf = vidioc_querybuf, .vidioc_qbuf = vidioc_qbuf, .vidioc_dqbuf = vidioc_dqbuf, .vidioc_streamon = vidioc_streamon, .vidioc_streamoff = vidioc_streamoff, .vidioc_enum_input = vidioc_enum_input, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, .vidioc_queryctrl = vidioc_queryctrl, .vidioc_g_ctrl = vidioc_g_ctrl, .vidioc_s_ctrl = vidioc_s_ctrl, .vidioc_g_parm = vidioc_g_parm, .vidioc_s_parm = vidioc_s_parm, }; /* * * Camera device (i.e. /dev/video). * */ static int omap24xxcam_device_register(struct v4l2_int_device *s) { struct omap24xxcam_device *cam = s->u.slave->master->priv; struct video_device *vfd; int rval; /* We already have a slave. 
*/ if (cam->sdev) return -EBUSY; cam->sdev = s; if (device_create_file(cam->dev, &dev_attr_streaming) != 0) { dev_err(cam->dev, "could not register sysfs entry\n"); rval = -EBUSY; goto err; } /* initialize the video_device struct */ vfd = cam->vfd = video_device_alloc(); if (!vfd) { dev_err(cam->dev, "could not allocate video device struct\n"); rval = -ENOMEM; goto err; } vfd->release = video_device_release; vfd->parent = cam->dev; strlcpy(vfd->name, CAM_NAME, sizeof(vfd->name)); vfd->fops = &omap24xxcam_fops; vfd->ioctl_ops = &omap24xxcam_ioctl_fops; omap24xxcam_hwinit(cam); rval = omap24xxcam_sensor_init(cam); if (rval) goto err; if (video_register_device(vfd, VFL_TYPE_GRABBER, video_nr) < 0) { dev_err(cam->dev, "could not register V4L device\n"); rval = -EBUSY; goto err; } omap24xxcam_poweron_reset(cam); dev_info(cam->dev, "registered device %s\n", video_device_node_name(vfd)); return 0; err: omap24xxcam_device_unregister(s); return rval; } static void omap24xxcam_device_unregister(struct v4l2_int_device *s) { struct omap24xxcam_device *cam = s->u.slave->master->priv; omap24xxcam_sensor_exit(cam); if (cam->vfd) { if (!video_is_registered(cam->vfd)) { /* * The device was never registered, so release the * video_device struct directly. */ video_device_release(cam->vfd); } else { /* * The unregister function will release the * video_device struct as well as * unregistering it. */ video_unregister_device(cam->vfd); } cam->vfd = NULL; } device_remove_file(cam->dev, &dev_attr_streaming); cam->sdev = NULL; } static struct v4l2_int_master omap24xxcam_master = { .attach = omap24xxcam_device_register, .detach = omap24xxcam_device_unregister, }; static struct v4l2_int_device omap24xxcam = { .module = THIS_MODULE, .name = CAM_NAME, .type = v4l2_int_type_master, .u = { .master = &omap24xxcam_master }, }; /* * * Driver initialisation and deinitialisation. 
* */ static int __devinit omap24xxcam_probe(struct platform_device *pdev) { struct omap24xxcam_device *cam; struct resource *mem; int irq; cam = kzalloc(sizeof(*cam), GFP_KERNEL); if (!cam) { dev_err(&pdev->dev, "could not allocate memory\n"); goto err; } platform_set_drvdata(pdev, cam); cam->dev = &pdev->dev; /* * Impose a lower limit on the amount of memory allocated for * capture. We require at least enough memory to double-buffer * QVGA (300KB). */ if (capture_mem < 320 * 240 * 2 * 2) capture_mem = 320 * 240 * 2 * 2; cam->capture_mem = capture_mem; /* request the mem region for the camera registers */ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(cam->dev, "no mem resource?\n"); goto err; } if (!request_mem_region(mem->start, (mem->end - mem->start) + 1, pdev->name)) { dev_err(cam->dev, "cannot reserve camera register I/O region\n"); goto err; } cam->mmio_base_phys = mem->start; cam->mmio_size = (mem->end - mem->start) + 1; /* map the region */ cam->mmio_base = (unsigned long) ioremap_nocache(cam->mmio_base_phys, cam->mmio_size); if (!cam->mmio_base) { dev_err(cam->dev, "cannot map camera register I/O region\n"); goto err; } irq = platform_get_irq(pdev, 0); if (irq <= 0) { dev_err(cam->dev, "no irq for camera?\n"); goto err; } /* install the interrupt service routine */ if (request_irq(irq, omap24xxcam_isr, 0, CAM_NAME, cam)) { dev_err(cam->dev, "could not install interrupt service routine\n"); goto err; } cam->irq = irq; if (omap24xxcam_clock_get(cam)) goto err; INIT_WORK(&cam->sensor_reset_work, omap24xxcam_sensor_reset_work); mutex_init(&cam->mutex); spin_lock_init(&cam->core_enable_disable_lock); omap24xxcam_sgdma_init(&cam->sgdma, cam->mmio_base + CAMDMA_REG_OFFSET, omap24xxcam_stalled_dma_reset, (unsigned long)cam); omap24xxcam.priv = cam; if (v4l2_int_device_register(&omap24xxcam)) goto err; return 0; err: omap24xxcam_remove(pdev); return -ENODEV; } static int omap24xxcam_remove(struct platform_device *pdev) { struct 
omap24xxcam_device *cam = platform_get_drvdata(pdev); if (!cam) return 0; if (omap24xxcam.priv != NULL) v4l2_int_device_unregister(&omap24xxcam); omap24xxcam.priv = NULL; omap24xxcam_clock_put(cam); if (cam->irq) { free_irq(cam->irq, cam); cam->irq = 0; } if (cam->mmio_base) { iounmap((void *)cam->mmio_base); cam->mmio_base = 0; } if (cam->mmio_base_phys) { release_mem_region(cam->mmio_base_phys, cam->mmio_size); cam->mmio_base_phys = 0; } kfree(cam); return 0; } static struct platform_driver omap24xxcam_driver = { .probe = omap24xxcam_probe, .remove = omap24xxcam_remove, #ifdef CONFIG_PM .suspend = omap24xxcam_suspend, .resume = omap24xxcam_resume, #endif .driver = { .name = CAM_NAME, .owner = THIS_MODULE, }, }; /* * * Module initialisation and deinitialisation * */ static int __init omap24xxcam_init(void) { return platform_driver_register(&omap24xxcam_driver); } static void __exit omap24xxcam_cleanup(void) { platform_driver_unregister(&omap24xxcam_driver); } MODULE_AUTHOR("Sakari Ailus <sakari.ailus@nokia.com>"); MODULE_DESCRIPTION("OMAP24xx Video for Linux camera driver"); MODULE_LICENSE("GPL"); module_param(video_nr, int, 0); MODULE_PARM_DESC(video_nr, "Minor number for video device (-1 ==> auto assign)"); module_param(capture_mem, int, 0); MODULE_PARM_DESC(capture_mem, "Maximum amount of memory for capture " "buffers (default 4800kiB)"); module_init(omap24xxcam_init); module_exit(omap24xxcam_cleanup);
gpl-2.0
epic4g/samsung-kernel-c1spr-EK02
drivers/media/dvb/dvb-usb/dw2102.c
760
36616
/* DVB USB framework compliant Linux driver for the * DVBWorld DVB-S 2101, 2102, DVB-S2 2104, DVB-C 3101, * TeVii S600, S630, S650, * Prof 1100, 7500 Cards * Copyright (C) 2008,2009 Igor M. Liplianin (liplianin@me.by) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation, version 2. * * see Documentation/dvb/README.dvb-usb for more information */ #include "dw2102.h" #include "si21xx.h" #include "stv0299.h" #include "z0194a.h" #include "stv0288.h" #include "stb6000.h" #include "eds1547.h" #include "cx24116.h" #include "tda1002x.h" #include "mt312.h" #include "zl10039.h" #include "ds3000.h" #include "stv0900.h" #include "stv6110.h" #include "stb6100.h" #include "stb6100_proc.h" #ifndef USB_PID_DW2102 #define USB_PID_DW2102 0x2102 #endif #ifndef USB_PID_DW2104 #define USB_PID_DW2104 0x2104 #endif #ifndef USB_PID_DW3101 #define USB_PID_DW3101 0x3101 #endif #ifndef USB_PID_CINERGY_S #define USB_PID_CINERGY_S 0x0064 #endif #ifndef USB_PID_TEVII_S630 #define USB_PID_TEVII_S630 0xd630 #endif #ifndef USB_PID_TEVII_S650 #define USB_PID_TEVII_S650 0xd650 #endif #ifndef USB_PID_TEVII_S660 #define USB_PID_TEVII_S660 0xd660 #endif #ifndef USB_PID_PROF_1100 #define USB_PID_PROF_1100 0xb012 #endif #define DW210X_READ_MSG 0 #define DW210X_WRITE_MSG 1 #define REG_1F_SYMBOLRATE_BYTE0 0x1f #define REG_20_SYMBOLRATE_BYTE1 0x20 #define REG_21_SYMBOLRATE_BYTE2 0x21 /* on my own*/ #define DW2102_VOLTAGE_CTRL (0x1800) #define DW2102_RC_QUERY (0x1a00) #define err_str "did not find the firmware file. (%s) " \ "Please see linux/Documentation/dvb/ for more details " \ "on firmware-problems." struct ir_codes_dvb_usb_table_table { struct dvb_usb_rc_key *rc_keys; int rc_keys_size; }; /* debug */ static int dvb_usb_dw2102_debug; module_param_named(debug, dvb_usb_dw2102_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info 2=xfer 4=rc(or-able))." 
DVB_USB_DEBUG_STATUS); /* keymaps */ static int ir_keymap; module_param_named(keymap, ir_keymap, int, 0644); MODULE_PARM_DESC(keymap, "set keymap 0=default 1=dvbworld 2=tevii 3=tbs ..."); /* demod probe */ static int demod_probe = 1; module_param_named(demod, demod_probe, int, 0644); MODULE_PARM_DESC(demod, "demod to probe (1=cx24116 2=stv0903+stv6110 " "4=stv0903+stb6100(or-able))."); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); static int dw210x_op_rw(struct usb_device *dev, u8 request, u16 value, u16 index, u8 * data, u16 len, int flags) { int ret; u8 u8buf[len]; unsigned int pipe = (flags == DW210X_READ_MSG) ? usb_rcvctrlpipe(dev, 0) : usb_sndctrlpipe(dev, 0); u8 request_type = (flags == DW210X_READ_MSG) ? USB_DIR_IN : USB_DIR_OUT; if (flags == DW210X_WRITE_MSG) memcpy(u8buf, data, len); ret = usb_control_msg(dev, pipe, request, request_type | USB_TYPE_VENDOR, value, index , u8buf, len, 2000); if (flags == DW210X_READ_MSG) memcpy(data, u8buf, len); return ret; } /* I2C */ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int i = 0, ret = 0; u8 buf6[] = {0x2c, 0x05, 0xc0, 0, 0, 0, 0}; u16 value; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; switch (num) { case 2: /* read stv0299 register */ value = msg[0].buf[0];/* register */ for (i = 0; i < msg[1].len; i++) { value = value + i; ret = dw210x_op_rw(d->udev, 0xb5, value, 0, buf6, 2, DW210X_READ_MSG); msg[1].buf[i] = buf6[0]; } break; case 1: switch (msg[0].addr) { case 0x68: /* write to stv0299 register */ buf6[0] = 0x2a; buf6[1] = msg[0].buf[0]; buf6[2] = msg[0].buf[1]; ret = dw210x_op_rw(d->udev, 0xb2, 0, 0, buf6, 3, DW210X_WRITE_MSG); break; case 0x60: if (msg[0].flags == 0) { /* write to tuner pll */ buf6[0] = 0x2c; buf6[1] = 5; buf6[2] = 0xc0; buf6[3] = msg[0].buf[0]; buf6[4] = msg[0].buf[1]; buf6[5] = msg[0].buf[2]; buf6[6] = msg[0].buf[3]; ret = dw210x_op_rw(d->udev, 0xb2, 0, 
0, buf6, 7, DW210X_WRITE_MSG); } else { /* read from tuner */ ret = dw210x_op_rw(d->udev, 0xb5, 0, 0, buf6, 1, DW210X_READ_MSG); msg[0].buf[0] = buf6[0]; } break; case (DW2102_RC_QUERY): ret = dw210x_op_rw(d->udev, 0xb8, 0, 0, buf6, 2, DW210X_READ_MSG); msg[0].buf[0] = buf6[0]; msg[0].buf[1] = buf6[1]; break; case (DW2102_VOLTAGE_CTRL): buf6[0] = 0x30; buf6[1] = msg[0].buf[0]; ret = dw210x_op_rw(d->udev, 0xb2, 0, 0, buf6, 2, DW210X_WRITE_MSG); break; } break; } mutex_unlock(&d->i2c_mutex); return num; } static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int ret = 0; u8 buf6[] = {0, 0, 0, 0, 0, 0, 0}; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; switch (num) { case 2: /* read si2109 register by number */ buf6[0] = msg[0].addr << 1; buf6[1] = msg[0].len; buf6[2] = msg[0].buf[0]; ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, buf6, msg[0].len + 2, DW210X_WRITE_MSG); /* read si2109 register */ ret = dw210x_op_rw(d->udev, 0xc3, 0xd0, 0, buf6, msg[1].len + 2, DW210X_READ_MSG); memcpy(msg[1].buf, buf6 + 2, msg[1].len); break; case 1: switch (msg[0].addr) { case 0x68: /* write to si2109 register */ buf6[0] = msg[0].addr << 1; buf6[1] = msg[0].len; memcpy(buf6 + 2, msg[0].buf, msg[0].len); ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, buf6, msg[0].len + 2, DW210X_WRITE_MSG); break; case(DW2102_RC_QUERY): ret = dw210x_op_rw(d->udev, 0xb8, 0, 0, buf6, 2, DW210X_READ_MSG); msg[0].buf[0] = buf6[0]; msg[0].buf[1] = buf6[1]; break; case(DW2102_VOLTAGE_CTRL): buf6[0] = 0x30; buf6[1] = msg[0].buf[0]; ret = dw210x_op_rw(d->udev, 0xb2, 0, 0, buf6, 2, DW210X_WRITE_MSG); break; } break; } mutex_unlock(&d->i2c_mutex); return num; } static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int ret = 0; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) 
return -EAGAIN; switch (num) { case 2: { /* read */ /* first write first register number */ u8 ibuf[msg[1].len + 2], obuf[3]; obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; obuf[2] = msg[0].buf[0]; ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); /* second read registers */ ret = dw210x_op_rw(d->udev, 0xc3, 0xd1 , 0, ibuf, msg[1].len + 2, DW210X_READ_MSG); memcpy(msg[1].buf, ibuf + 2, msg[1].len); break; } case 1: switch (msg[0].addr) { case 0x68: { /* write to register */ u8 obuf[msg[0].len + 2]; obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; memcpy(obuf + 2, msg[0].buf, msg[0].len); ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); break; } case 0x61: { /* write to tuner */ u8 obuf[msg[0].len + 2]; obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; memcpy(obuf + 2, msg[0].buf, msg[0].len); ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); break; } case(DW2102_RC_QUERY): { u8 ibuf[2]; ret = dw210x_op_rw(d->udev, 0xb8, 0, 0, ibuf, 2, DW210X_READ_MSG); memcpy(msg[0].buf, ibuf , 2); break; } case(DW2102_VOLTAGE_CTRL): { u8 obuf[2]; obuf[0] = 0x30; obuf[1] = msg[0].buf[0]; ret = dw210x_op_rw(d->udev, 0xb2, 0, 0, obuf, 2, DW210X_WRITE_MSG); break; } } break; } mutex_unlock(&d->i2c_mutex); return num; } static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int ret = 0; int len, i, j; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (j = 0; j < num; j++) { switch (msg[j].addr) { case(DW2102_RC_QUERY): { u8 ibuf[2]; ret = dw210x_op_rw(d->udev, 0xb8, 0, 0, ibuf, 2, DW210X_READ_MSG); memcpy(msg[j].buf, ibuf , 2); break; } case(DW2102_VOLTAGE_CTRL): { u8 obuf[2]; obuf[0] = 0x30; obuf[1] = msg[j].buf[0]; ret = dw210x_op_rw(d->udev, 0xb2, 0, 0, obuf, 2, DW210X_WRITE_MSG); break; } /*case 0x55: cx24116 case 0x6a: stv0903 case 0x68: ds3000, stv0903 case 
0x60: ts2020, stv6110, stb6100 */ default: { if (msg[j].flags == I2C_M_RD) { /* read registers */ u8 ibuf[msg[j].len + 2]; ret = dw210x_op_rw(d->udev, 0xc3, (msg[j].addr << 1) + 1, 0, ibuf, msg[j].len + 2, DW210X_READ_MSG); memcpy(msg[j].buf, ibuf + 2, msg[j].len); mdelay(10); } else if (((msg[j].buf[0] == 0xb0) && (msg[j].addr == 0x68)) || ((msg[j].buf[0] == 0xf7) && (msg[j].addr == 0x55))) { /* write firmware */ u8 obuf[19]; obuf[0] = msg[j].addr << 1; obuf[1] = (msg[j].len > 15 ? 17 : msg[j].len); obuf[2] = msg[j].buf[0]; len = msg[j].len - 1; i = 1; do { memcpy(obuf + 3, msg[j].buf + i, (len > 16 ? 16 : len)); ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, (len > 16 ? 16 : len) + 3, DW210X_WRITE_MSG); i += 16; len -= 16; } while (len > 0); } else { /* write registers */ u8 obuf[msg[j].len + 2]; obuf[0] = msg[j].addr << 1; obuf[1] = msg[j].len; memcpy(obuf + 2, msg[j].buf, msg[j].len); ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[j].len + 2, DW210X_WRITE_MSG); } break; } } } mutex_unlock(&d->i2c_mutex); return num; } static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int ret = 0, i; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; switch (num) { case 2: { /* read */ /* first write first register number */ u8 ibuf[msg[1].len + 2], obuf[3]; obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; obuf[2] = msg[0].buf[0]; ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); /* second read registers */ ret = dw210x_op_rw(d->udev, 0xc3, 0x19 , 0, ibuf, msg[1].len + 2, DW210X_READ_MSG); memcpy(msg[1].buf, ibuf + 2, msg[1].len); break; } case 1: switch (msg[0].addr) { case 0x60: case 0x0c: { /* write to register */ u8 obuf[msg[0].len + 2]; obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; memcpy(obuf + 2, msg[0].buf, msg[0].len); ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); break; 
} case(DW2102_RC_QUERY): { u8 ibuf[2]; ret = dw210x_op_rw(d->udev, 0xb8, 0, 0, ibuf, 2, DW210X_READ_MSG); memcpy(msg[0].buf, ibuf , 2); break; } } break; } for (i = 0; i < num; i++) { deb_xfer("%02x:%02x: %s ", i, msg[i].addr, msg[i].flags == 0 ? ">>>" : "<<<"); debug_dump(msg[i].buf, msg[i].len, deb_xfer); } mutex_unlock(&d->i2c_mutex); return num; } static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); struct usb_device *udev; int ret = 0; int len, i, j; if (!d) return -ENODEV; udev = d->udev; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (j = 0; j < num; j++) { switch (msg[j].addr) { case (DW2102_RC_QUERY): { u8 ibuf[4]; ret = dw210x_op_rw(d->udev, 0xb8, 0, 0, ibuf, 4, DW210X_READ_MSG); memcpy(msg[j].buf, ibuf + 1, 2); break; } case (DW2102_VOLTAGE_CTRL): { u8 obuf[2]; obuf[0] = 1; obuf[1] = msg[j].buf[1];/* off-on */ ret = dw210x_op_rw(d->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG); obuf[0] = 3; obuf[1] = msg[j].buf[0];/* 13v-18v */ ret = dw210x_op_rw(d->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG); break; } /*case 0x55: cx24116 case 0x6a: stv0903 case 0x68: ds3000, stv0903 case 0x60: ts2020, stv6110, stb6100 case 0xa0: eeprom */ default: { if (msg[j].flags == I2C_M_RD) { /* read registers */ u8 ibuf[msg[j].len]; ret = dw210x_op_rw(d->udev, 0x91, 0, 0, ibuf, msg[j].len, DW210X_READ_MSG); memcpy(msg[j].buf, ibuf, msg[j].len); break; } else if ((msg[j].buf[0] == 0xb0) && (msg[j].addr == 0x68)) { /* write firmware */ u8 obuf[19]; obuf[0] = (msg[j].len > 16 ? 18 : msg[j].len + 1); obuf[1] = msg[j].addr << 1; obuf[2] = msg[j].buf[0]; len = msg[j].len - 1; i = 1; do { memcpy(obuf + 3, msg[j].buf + i, (len > 16 ? 16 : len)); ret = dw210x_op_rw(d->udev, 0x80, 0, 0, obuf, (len > 16 ? 
16 : len) + 3, DW210X_WRITE_MSG); i += 16; len -= 16; } while (len > 0); } else if ((udev->descriptor.idProduct == 0x7500) && (j < (num - 1))) { /* write register addr before read */ u8 obuf[msg[j].len + 2]; obuf[0] = msg[j + 1].len; obuf[1] = (msg[j].addr << 1); memcpy(obuf + 2, msg[j].buf, msg[j].len); ret = dw210x_op_rw(d->udev, 0x92, 0, 0, obuf, msg[j].len + 2, DW210X_WRITE_MSG); break; } else { /* write registers */ u8 obuf[msg[j].len + 2]; obuf[0] = msg[j].len + 1; obuf[1] = (msg[j].addr << 1); memcpy(obuf + 2, msg[j].buf, msg[j].len); ret = dw210x_op_rw(d->udev, (num > 1 ? 0x90 : 0x80), 0, 0, obuf, msg[j].len + 2, DW210X_WRITE_MSG); break; } break; } } msleep(3); } mutex_unlock(&d->i2c_mutex); return num; } static u32 dw210x_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static struct i2c_algorithm dw2102_i2c_algo = { .master_xfer = dw2102_i2c_transfer, .functionality = dw210x_i2c_func, }; static struct i2c_algorithm dw2102_serit_i2c_algo = { .master_xfer = dw2102_serit_i2c_transfer, .functionality = dw210x_i2c_func, }; static struct i2c_algorithm dw2102_earda_i2c_algo = { .master_xfer = dw2102_earda_i2c_transfer, .functionality = dw210x_i2c_func, }; static struct i2c_algorithm dw2104_i2c_algo = { .master_xfer = dw2104_i2c_transfer, .functionality = dw210x_i2c_func, }; static struct i2c_algorithm dw3101_i2c_algo = { .master_xfer = dw3101_i2c_transfer, .functionality = dw210x_i2c_func, }; static struct i2c_algorithm s6x0_i2c_algo = { .master_xfer = s6x0_i2c_transfer, .functionality = dw210x_i2c_func, }; static int dw210x_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) { int i; u8 ibuf[] = {0, 0}; u8 eeprom[256], eepromline[16]; for (i = 0; i < 256; i++) { if (dw210x_op_rw(d->udev, 0xb6, 0xa0 , i, ibuf, 2, DW210X_READ_MSG) < 0) { err("read eeprom failed."); return -1; } else { eepromline[i%16] = ibuf[0]; eeprom[i] = ibuf[0]; } if ((i % 16) == 15) { deb_xfer("%02x: ", i - 15); debug_dump(eepromline, 16, deb_xfer); } } memcpy(mac, eeprom + 
8, 6); return 0; }; static int s6x0_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) { int i, ret; u8 ibuf[] = { 0 }, obuf[] = { 0 }; u8 eeprom[256], eepromline[16]; struct i2c_msg msg[] = { { .addr = 0xa0 >> 1, .flags = 0, .buf = obuf, .len = 1, }, { .addr = 0xa0 >> 1, .flags = I2C_M_RD, .buf = ibuf, .len = 1, } }; for (i = 0; i < 256; i++) { obuf[0] = i; ret = s6x0_i2c_transfer(&d->i2c_adap, msg, 2); if (ret != 2) { err("read eeprom failed."); return -1; } else { eepromline[i % 16] = ibuf[0]; eeprom[i] = ibuf[0]; } if ((i % 16) == 15) { deb_xfer("%02x: ", i - 15); debug_dump(eepromline, 16, deb_xfer); } } memcpy(mac, eeprom + 16, 6); return 0; }; static int dw210x_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) { static u8 command_13v[] = {0x00, 0x01}; static u8 command_18v[] = {0x01, 0x01}; static u8 command_off[] = {0x00, 0x00}; struct i2c_msg msg = { .addr = DW2102_VOLTAGE_CTRL, .flags = 0, .buf = command_off, .len = 2, }; struct dvb_usb_adapter *udev_adap = (struct dvb_usb_adapter *)(fe->dvb->priv); if (voltage == SEC_VOLTAGE_18) msg.buf = command_18v; else if (voltage == SEC_VOLTAGE_13) msg.buf = command_13v; i2c_transfer(&udev_adap->dev->i2c_adap, &msg, 1); return 0; } static struct stv0299_config sharp_z0194a_config = { .demod_address = 0x68, .inittab = sharp_z0194a_inittab, .mclk = 88000000UL, .invert = 1, .skip_reinit = 0, .lock_output = STV0299_LOCKOUTPUT_1, .volt13_op0_op1 = STV0299_VOLT13_OP1, .min_delay_ms = 100, .set_symbol_rate = sharp_z0194a_set_symbol_rate, }; static struct cx24116_config dw2104_config = { .demod_address = 0x55, .mpg_clk_pos_pol = 0x01, }; static struct si21xx_config serit_sp1511lhb_config = { .demod_address = 0x68, .min_delay_ms = 100, }; static struct tda10023_config dw3101_tda10023_config = { .demod_address = 0x0c, .invert = 1, }; static struct mt312_config zl313_config = { .demod_address = 0x0e, }; static struct ds3000_config dw2104_ds3000_config = { .demod_address = 0x68, }; static struct stv0900_config 
dw2104a_stv0900_config = { .demod_address = 0x6a, .demod_mode = 0, .xtal = 27000000, .clkmode = 3,/* 0-CLKI, 2-XTALI, else AUTO */ .diseqc_mode = 2,/* 2/3 PWM */ .tun1_maddress = 0,/* 0x60 */ .tun1_adc = 0,/* 2 Vpp */ .path1_mode = 3, }; static struct stb6100_config dw2104a_stb6100_config = { .tuner_address = 0x60, .refclock = 27000000, }; static struct stv0900_config dw2104_stv0900_config = { .demod_address = 0x68, .demod_mode = 0, .xtal = 8000000, .clkmode = 3, .diseqc_mode = 2, .tun1_maddress = 0, .tun1_adc = 1,/* 1 Vpp */ .path1_mode = 3, }; static struct stv6110_config dw2104_stv6110_config = { .i2c_address = 0x60, .mclk = 16000000, .clk_div = 1, }; static struct stv0900_config prof_7500_stv0900_config = { .demod_address = 0x6a, .demod_mode = 0, .xtal = 27000000, .clkmode = 3,/* 0-CLKI, 2-XTALI, else AUTO */ .diseqc_mode = 2,/* 2/3 PWM */ .tun1_maddress = 0,/* 0x60 */ .tun1_adc = 0,/* 2 Vpp */ .path1_mode = 3, .tun1_type = 3, }; static int dw2104_frontend_attach(struct dvb_usb_adapter *d) { struct dvb_tuner_ops *tuner_ops = NULL; if (demod_probe & 4) { d->fe = dvb_attach(stv0900_attach, &dw2104a_stv0900_config, &d->dev->i2c_adap, 0); if (d->fe != NULL) { if (dvb_attach(stb6100_attach, d->fe, &dw2104a_stb6100_config, &d->dev->i2c_adap)) { tuner_ops = &d->fe->ops.tuner_ops; tuner_ops->set_frequency = stb6100_set_freq; tuner_ops->get_frequency = stb6100_get_freq; tuner_ops->set_bandwidth = stb6100_set_bandw; tuner_ops->get_bandwidth = stb6100_get_bandw; d->fe->ops.set_voltage = dw210x_set_voltage; info("Attached STV0900+STB6100!\n"); return 0; } } } if (demod_probe & 2) { d->fe = dvb_attach(stv0900_attach, &dw2104_stv0900_config, &d->dev->i2c_adap, 0); if (d->fe != NULL) { if (dvb_attach(stv6110_attach, d->fe, &dw2104_stv6110_config, &d->dev->i2c_adap)) { d->fe->ops.set_voltage = dw210x_set_voltage; info("Attached STV0900+STV6110A!\n"); return 0; } } } if (demod_probe & 1) { d->fe = dvb_attach(cx24116_attach, &dw2104_config, &d->dev->i2c_adap); if (d->fe != NULL) 
{ d->fe->ops.set_voltage = dw210x_set_voltage; info("Attached cx24116!\n"); return 0; } } d->fe = dvb_attach(ds3000_attach, &dw2104_ds3000_config, &d->dev->i2c_adap); if (d->fe != NULL) { d->fe->ops.set_voltage = dw210x_set_voltage; info("Attached DS3000!\n"); return 0; } return -EIO; } static struct dvb_usb_device_properties dw2102_properties; static struct dvb_usb_device_properties dw2104_properties; static struct dvb_usb_device_properties s6x0_properties; static int dw2102_frontend_attach(struct dvb_usb_adapter *d) { if (dw2102_properties.i2c_algo == &dw2102_serit_i2c_algo) { /*dw2102_properties.adapter->tuner_attach = NULL;*/ d->fe = dvb_attach(si21xx_attach, &serit_sp1511lhb_config, &d->dev->i2c_adap); if (d->fe != NULL) { d->fe->ops.set_voltage = dw210x_set_voltage; info("Attached si21xx!\n"); return 0; } } if (dw2102_properties.i2c_algo == &dw2102_earda_i2c_algo) { d->fe = dvb_attach(stv0288_attach, &earda_config, &d->dev->i2c_adap); if (d->fe != NULL) { if (dvb_attach(stb6000_attach, d->fe, 0x61, &d->dev->i2c_adap)) { d->fe->ops.set_voltage = dw210x_set_voltage; info("Attached stv0288!\n"); return 0; } } } if (dw2102_properties.i2c_algo == &dw2102_i2c_algo) { /*dw2102_properties.adapter->tuner_attach = dw2102_tuner_attach;*/ d->fe = dvb_attach(stv0299_attach, &sharp_z0194a_config, &d->dev->i2c_adap); if (d->fe != NULL) { d->fe->ops.set_voltage = dw210x_set_voltage; info("Attached stv0299!\n"); return 0; } } return -EIO; } static int dw3101_frontend_attach(struct dvb_usb_adapter *d) { d->fe = dvb_attach(tda10023_attach, &dw3101_tda10023_config, &d->dev->i2c_adap, 0x48); if (d->fe != NULL) { info("Attached tda10023!\n"); return 0; } return -EIO; } static int s6x0_frontend_attach(struct dvb_usb_adapter *d) { d->fe = dvb_attach(mt312_attach, &zl313_config, &d->dev->i2c_adap); if (d->fe != NULL) { if (dvb_attach(zl10039_attach, d->fe, 0x60, &d->dev->i2c_adap)) { d->fe->ops.set_voltage = dw210x_set_voltage; info("Attached zl100313+zl10039!\n"); return 0; } } 
d->fe = dvb_attach(stv0288_attach, &earda_config, &d->dev->i2c_adap); if (d->fe != NULL) { if (dvb_attach(stb6000_attach, d->fe, 0x61, &d->dev->i2c_adap)) { d->fe->ops.set_voltage = dw210x_set_voltage; info("Attached stv0288+stb6000!\n"); return 0; } } d->fe = dvb_attach(ds3000_attach, &dw2104_ds3000_config, &d->dev->i2c_adap); if (d->fe != NULL) { d->fe->ops.set_voltage = dw210x_set_voltage; info("Attached ds3000+ds2020!\n"); return 0; } return -EIO; } static int prof_7500_frontend_attach(struct dvb_usb_adapter *d) { d->fe = dvb_attach(stv0900_attach, &prof_7500_stv0900_config, &d->dev->i2c_adap, 0); if (d->fe == NULL) return -EIO; d->fe->ops.set_voltage = dw210x_set_voltage; info("Attached STV0900+STB6100A!\n"); return 0; } static int dw2102_tuner_attach(struct dvb_usb_adapter *adap) { dvb_attach(dvb_pll_attach, adap->fe, 0x60, &adap->dev->i2c_adap, DVB_PLL_OPERA1); return 0; } static int dw3101_tuner_attach(struct dvb_usb_adapter *adap) { dvb_attach(dvb_pll_attach, adap->fe, 0x60, &adap->dev->i2c_adap, DVB_PLL_TUA6034); return 0; } static struct dvb_usb_rc_key ir_codes_dw210x_table[] = { { 0xf80a, KEY_Q }, /*power*/ { 0xf80c, KEY_M }, /*mute*/ { 0xf811, KEY_1 }, { 0xf812, KEY_2 }, { 0xf813, KEY_3 }, { 0xf814, KEY_4 }, { 0xf815, KEY_5 }, { 0xf816, KEY_6 }, { 0xf817, KEY_7 }, { 0xf818, KEY_8 }, { 0xf819, KEY_9 }, { 0xf810, KEY_0 }, { 0xf81c, KEY_PAGEUP }, /*ch+*/ { 0xf80f, KEY_PAGEDOWN }, /*ch-*/ { 0xf81a, KEY_O }, /*vol+*/ { 0xf80e, KEY_Z }, /*vol-*/ { 0xf804, KEY_R }, /*rec*/ { 0xf809, KEY_D }, /*fav*/ { 0xf808, KEY_BACKSPACE }, /*rewind*/ { 0xf807, KEY_A }, /*fast*/ { 0xf80b, KEY_P }, /*pause*/ { 0xf802, KEY_ESC }, /*cancel*/ { 0xf803, KEY_G }, /*tab*/ { 0xf800, KEY_UP }, /*up*/ { 0xf81f, KEY_ENTER }, /*ok*/ { 0xf801, KEY_DOWN }, /*down*/ { 0xf805, KEY_C }, /*cap*/ { 0xf806, KEY_S }, /*stop*/ { 0xf840, KEY_F }, /*full*/ { 0xf81e, KEY_W }, /*tvmode*/ { 0xf81b, KEY_B }, /*recall*/ }; static struct dvb_usb_rc_key ir_codes_tevii_table[] = { { 0xf80a, KEY_POWER }, { 
0xf80c, KEY_MUTE }, { 0xf811, KEY_1 }, { 0xf812, KEY_2 }, { 0xf813, KEY_3 }, { 0xf814, KEY_4 }, { 0xf815, KEY_5 }, { 0xf816, KEY_6 }, { 0xf817, KEY_7 }, { 0xf818, KEY_8 }, { 0xf819, KEY_9 }, { 0xf810, KEY_0 }, { 0xf81c, KEY_MENU }, { 0xf80f, KEY_VOLUMEDOWN }, { 0xf81a, KEY_LAST }, { 0xf80e, KEY_OPEN }, { 0xf804, KEY_RECORD }, { 0xf809, KEY_VOLUMEUP }, { 0xf808, KEY_CHANNELUP }, { 0xf807, KEY_PVR }, { 0xf80b, KEY_TIME }, { 0xf802, KEY_RIGHT }, { 0xf803, KEY_LEFT }, { 0xf800, KEY_UP }, { 0xf81f, KEY_OK }, { 0xf801, KEY_DOWN }, { 0xf805, KEY_TUNER }, { 0xf806, KEY_CHANNELDOWN }, { 0xf840, KEY_PLAYPAUSE }, { 0xf81e, KEY_REWIND }, { 0xf81b, KEY_FAVORITES }, { 0xf81d, KEY_BACK }, { 0xf84d, KEY_FASTFORWARD }, { 0xf844, KEY_EPG }, { 0xf84c, KEY_INFO }, { 0xf841, KEY_AB }, { 0xf843, KEY_AUDIO }, { 0xf845, KEY_SUBTITLE }, { 0xf84a, KEY_LIST }, { 0xf846, KEY_F1 }, { 0xf847, KEY_F2 }, { 0xf85e, KEY_F3 }, { 0xf85c, KEY_F4 }, { 0xf852, KEY_F5 }, { 0xf85a, KEY_F6 }, { 0xf856, KEY_MODE }, { 0xf858, KEY_SWITCHVIDEOMODE }, }; static struct dvb_usb_rc_key ir_codes_tbs_table[] = { { 0xf884, KEY_POWER }, { 0xf894, KEY_MUTE }, { 0xf887, KEY_1 }, { 0xf886, KEY_2 }, { 0xf885, KEY_3 }, { 0xf88b, KEY_4 }, { 0xf88a, KEY_5 }, { 0xf889, KEY_6 }, { 0xf88f, KEY_7 }, { 0xf88e, KEY_8 }, { 0xf88d, KEY_9 }, { 0xf892, KEY_0 }, { 0xf896, KEY_CHANNELUP }, { 0xf891, KEY_CHANNELDOWN }, { 0xf893, KEY_VOLUMEUP }, { 0xf88c, KEY_VOLUMEDOWN }, { 0xf883, KEY_RECORD }, { 0xf898, KEY_PAUSE }, { 0xf899, KEY_OK }, { 0xf89a, KEY_SHUFFLE }, { 0xf881, KEY_UP }, { 0xf890, KEY_LEFT }, { 0xf882, KEY_RIGHT }, { 0xf888, KEY_DOWN }, { 0xf895, KEY_FAVORITES }, { 0xf897, KEY_SUBTITLE }, { 0xf89d, KEY_ZOOM }, { 0xf89f, KEY_EXIT }, { 0xf89e, KEY_MENU }, { 0xf89c, KEY_EPG }, { 0xf880, KEY_PREVIOUS }, { 0xf89b, KEY_MODE } }; static struct ir_codes_dvb_usb_table_table keys_tables[] = { { ir_codes_dw210x_table, ARRAY_SIZE(ir_codes_dw210x_table) }, { ir_codes_tevii_table, ARRAY_SIZE(ir_codes_tevii_table) }, { ir_codes_tbs_table, 
ARRAY_SIZE(ir_codes_tbs_table) }, }; static int dw2102_rc_query(struct dvb_usb_device *d, u32 *event, int *state) { struct dvb_usb_rc_key *keymap = d->props.rc_key_map; int keymap_size = d->props.rc_key_map_size; u8 key[2]; struct i2c_msg msg = { .addr = DW2102_RC_QUERY, .flags = I2C_M_RD, .buf = key, .len = 2 }; int i; /* override keymap */ if ((ir_keymap > 0) && (ir_keymap <= ARRAY_SIZE(keys_tables))) { keymap = keys_tables[ir_keymap - 1].rc_keys ; keymap_size = keys_tables[ir_keymap - 1].rc_keys_size; } *state = REMOTE_NO_KEY_PRESSED; if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) { for (i = 0; i < keymap_size ; i++) { if (rc5_data(&keymap[i]) == msg.buf[0]) { *state = REMOTE_KEY_PRESSED; *event = keymap[i].event; break; } } if ((*state) == REMOTE_KEY_PRESSED) deb_rc("%s: found rc key: %x, %x, event: %x\n", __func__, key[0], key[1], (*event)); else if (key[0] != 0xff) deb_rc("%s: unknown rc key: %x, %x\n", __func__, key[0], key[1]); } return 0; } static struct usb_device_id dw2102_table[] = { {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW2102)}, {USB_DEVICE(USB_VID_CYPRESS, 0x2101)}, {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW2104)}, {USB_DEVICE(0x9022, USB_PID_TEVII_S650)}, {USB_DEVICE(USB_VID_TERRATEC, USB_PID_CINERGY_S)}, {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW3101)}, {USB_DEVICE(0x9022, USB_PID_TEVII_S630)}, {USB_DEVICE(0x3011, USB_PID_PROF_1100)}, {USB_DEVICE(0x9022, USB_PID_TEVII_S660)}, {USB_DEVICE(0x3034, 0x7500)}, { } }; MODULE_DEVICE_TABLE(usb, dw2102_table); static int dw2102_load_firmware(struct usb_device *dev, const struct firmware *frmwr) { u8 *b, *p; int ret = 0, i; u8 reset; u8 reset16[] = {0, 0, 0, 0, 0, 0, 0}; const struct firmware *fw; const char *fw_2101 = "dvb-usb-dw2101.fw"; switch (dev->descriptor.idProduct) { case 0x2101: ret = request_firmware(&fw, fw_2101, &dev->dev); if (ret != 0) { err(err_str, fw_2101); return ret; } break; default: fw = frmwr; break; } info("start downloading DW210X firmware"); p = kmalloc(fw->size, 
GFP_KERNEL); reset = 1; /*stop the CPU*/ dw210x_op_rw(dev, 0xa0, 0x7f92, 0, &reset, 1, DW210X_WRITE_MSG); dw210x_op_rw(dev, 0xa0, 0xe600, 0, &reset, 1, DW210X_WRITE_MSG); if (p != NULL) { memcpy(p, fw->data, fw->size); for (i = 0; i < fw->size; i += 0x40) { b = (u8 *) p + i; if (dw210x_op_rw(dev, 0xa0, i, 0, b , 0x40, DW210X_WRITE_MSG) != 0x40) { err("error while transferring firmware"); ret = -EINVAL; break; } } /* restart the CPU */ reset = 0; if (ret || dw210x_op_rw(dev, 0xa0, 0x7f92, 0, &reset, 1, DW210X_WRITE_MSG) != 1) { err("could not restart the USB controller CPU."); ret = -EINVAL; } if (ret || dw210x_op_rw(dev, 0xa0, 0xe600, 0, &reset, 1, DW210X_WRITE_MSG) != 1) { err("could not restart the USB controller CPU."); ret = -EINVAL; } /* init registers */ switch (dev->descriptor.idProduct) { case USB_PID_PROF_1100: s6x0_properties.rc_key_map = ir_codes_tbs_table; s6x0_properties.rc_key_map_size = ARRAY_SIZE(ir_codes_tbs_table); break; case USB_PID_TEVII_S650: dw2104_properties.rc_key_map = ir_codes_tevii_table; dw2104_properties.rc_key_map_size = ARRAY_SIZE(ir_codes_tevii_table); case USB_PID_DW2104: reset = 1; dw210x_op_rw(dev, 0xc4, 0x0000, 0, &reset, 1, DW210X_WRITE_MSG); /* break omitted intentionally */ case USB_PID_DW3101: reset = 0; dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0, DW210X_WRITE_MSG); break; case USB_PID_CINERGY_S: case USB_PID_DW2102: dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0, DW210X_WRITE_MSG); dw210x_op_rw(dev, 0xb9, 0x0000, 0, &reset16[0], 2, DW210X_READ_MSG); /* check STV0299 frontend */ dw210x_op_rw(dev, 0xb5, 0, 0, &reset16[0], 2, DW210X_READ_MSG); if ((reset16[0] == 0xa1) || (reset16[0] == 0x80)) { dw2102_properties.i2c_algo = &dw2102_i2c_algo; dw2102_properties.adapter->tuner_attach = &dw2102_tuner_attach; break; } else { /* check STV0288 frontend */ reset16[0] = 0xd0; reset16[1] = 1; reset16[2] = 0; dw210x_op_rw(dev, 0xc2, 0, 0, &reset16[0], 3, DW210X_WRITE_MSG); dw210x_op_rw(dev, 0xc3, 0xd1, 0, &reset16[0], 3, DW210X_READ_MSG); 
if (reset16[2] == 0x11) { dw2102_properties.i2c_algo = &dw2102_earda_i2c_algo; break; } } case 0x2101: dw210x_op_rw(dev, 0xbc, 0x0030, 0, &reset16[0], 2, DW210X_READ_MSG); dw210x_op_rw(dev, 0xba, 0x0000, 0, &reset16[0], 7, DW210X_READ_MSG); dw210x_op_rw(dev, 0xba, 0x0000, 0, &reset16[0], 7, DW210X_READ_MSG); dw210x_op_rw(dev, 0xb9, 0x0000, 0, &reset16[0], 2, DW210X_READ_MSG); break; } msleep(100); kfree(p); } return ret; } static struct dvb_usb_device_properties dw2102_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-dw2102.fw", .no_reconnect = 1, .i2c_algo = &dw2102_serit_i2c_algo, .rc_key_map = ir_codes_dw210x_table, .rc_key_map_size = ARRAY_SIZE(ir_codes_dw210x_table), .rc_interval = 150, .rc_query = dw2102_rc_query, .generic_bulk_ctrl_endpoint = 0x81, /* parameter for the MPEG2-data transfer */ .num_adapters = 1, .download_firmware = dw2102_load_firmware, .read_mac_address = dw210x_read_mac_address, .adapter = { { .frontend_attach = dw2102_frontend_attach, .streaming_ctrl = NULL, .tuner_attach = NULL, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } }, } }, .num_device_descs = 3, .devices = { {"DVBWorld DVB-S 2102 USB2.0", {&dw2102_table[0], NULL}, {NULL}, }, {"DVBWorld DVB-S 2101 USB2.0", {&dw2102_table[1], NULL}, {NULL}, }, {"TerraTec Cinergy S USB", {&dw2102_table[4], NULL}, {NULL}, }, } }; static struct dvb_usb_device_properties dw2104_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-dw2104.fw", .no_reconnect = 1, .i2c_algo = &dw2104_i2c_algo, .rc_key_map = ir_codes_dw210x_table, .rc_key_map_size = ARRAY_SIZE(ir_codes_dw210x_table), .rc_interval = 150, .rc_query = dw2102_rc_query, .generic_bulk_ctrl_endpoint = 0x81, /* parameter for the MPEG2-data transfer */ .num_adapters = 1, .download_firmware = dw2102_load_firmware, .read_mac_address = dw210x_read_mac_address, .adapter = { { .frontend_attach = 
dw2104_frontend_attach, .streaming_ctrl = NULL, /*.tuner_attach = dw2104_tuner_attach,*/ .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } }, } }, .num_device_descs = 2, .devices = { { "DVBWorld DW2104 USB2.0", {&dw2102_table[2], NULL}, {NULL}, }, { "TeVii S650 USB2.0", {&dw2102_table[3], NULL}, {NULL}, }, } }; static struct dvb_usb_device_properties dw3101_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-dw3101.fw", .no_reconnect = 1, .i2c_algo = &dw3101_i2c_algo, .rc_key_map = ir_codes_dw210x_table, .rc_key_map_size = ARRAY_SIZE(ir_codes_dw210x_table), .rc_interval = 150, .rc_query = dw2102_rc_query, .generic_bulk_ctrl_endpoint = 0x81, /* parameter for the MPEG2-data transfer */ .num_adapters = 1, .download_firmware = dw2102_load_firmware, .read_mac_address = dw210x_read_mac_address, .adapter = { { .frontend_attach = dw3101_frontend_attach, .streaming_ctrl = NULL, .tuner_attach = dw3101_tuner_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } }, } }, .num_device_descs = 1, .devices = { { "DVBWorld DVB-C 3101 USB2.0", {&dw2102_table[5], NULL}, {NULL}, }, } }; static struct dvb_usb_device_properties s6x0_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-s630.fw", .no_reconnect = 1, .i2c_algo = &s6x0_i2c_algo, .rc_key_map = ir_codes_tevii_table, .rc_key_map_size = ARRAY_SIZE(ir_codes_tevii_table), .rc_interval = 150, .rc_query = dw2102_rc_query, .generic_bulk_ctrl_endpoint = 0x81, .num_adapters = 1, .download_firmware = dw2102_load_firmware, .read_mac_address = s6x0_read_mac_address, .adapter = { { .frontend_attach = s6x0_frontend_attach, .streaming_ctrl = NULL, .tuner_attach = NULL, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } }, } }, .num_device_descs = 3, .devices = { {"TeVii S630 USB", 
{&dw2102_table[6], NULL}, {NULL}, }, {"Prof 1100 USB ", {&dw2102_table[7], NULL}, {NULL}, }, {"TeVii S660 USB", {&dw2102_table[8], NULL}, {NULL}, }, } }; struct dvb_usb_device_properties *p7500; static struct dvb_usb_device_description d7500 = { "Prof 7500 USB DVB-S2", {&dw2102_table[9], NULL}, {NULL}, }; static int dw2102_probe(struct usb_interface *intf, const struct usb_device_id *id) { p7500 = kzalloc(sizeof(struct dvb_usb_device_properties), GFP_KERNEL); if (!p7500) return -ENOMEM; /* copy default structure */ memcpy(p7500, &s6x0_properties, sizeof(struct dvb_usb_device_properties)); /* fill only different fields */ p7500->firmware = "dvb-usb-p7500.fw"; p7500->devices[0] = d7500; p7500->rc_key_map = ir_codes_tbs_table; p7500->rc_key_map_size = ARRAY_SIZE(ir_codes_tbs_table); p7500->adapter->frontend_attach = prof_7500_frontend_attach; if (0 == dvb_usb_device_init(intf, &dw2102_properties, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &dw2104_properties, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &dw3101_properties, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &s6x0_properties, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, p7500, THIS_MODULE, NULL, adapter_nr)) return 0; return -ENODEV; } static struct usb_driver dw2102_driver = { .name = "dw2102", .probe = dw2102_probe, .disconnect = dvb_usb_device_exit, .id_table = dw2102_table, }; static int __init dw2102_module_init(void) { int ret = usb_register(&dw2102_driver); if (ret) err("usb_register failed. Error number %d", ret); return ret; } static void __exit dw2102_module_exit(void) { usb_deregister(&dw2102_driver); } module_init(dw2102_module_init); module_exit(dw2102_module_exit); MODULE_AUTHOR("Igor M. 
Liplianin (c) liplianin@me.by"); MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104," " DVB-C 3101 USB2.0," " TeVii S600, S630, S650, S660 USB2.0," " Prof 1100, 7500 USB2.0 devices"); MODULE_VERSION("0.1"); MODULE_LICENSE("GPL");
gpl-2.0
lizan/isw11sc-kernel
drivers/media/dvb/dvb-usb/dw2102.c
760
36616
/* DVB USB framework compliant Linux driver for the * DVBWorld DVB-S 2101, 2102, DVB-S2 2104, DVB-C 3101, * TeVii S600, S630, S650, * Prof 1100, 7500 Cards * Copyright (C) 2008,2009 Igor M. Liplianin (liplianin@me.by) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation, version 2. * * see Documentation/dvb/README.dvb-usb for more information */ #include "dw2102.h" #include "si21xx.h" #include "stv0299.h" #include "z0194a.h" #include "stv0288.h" #include "stb6000.h" #include "eds1547.h" #include "cx24116.h" #include "tda1002x.h" #include "mt312.h" #include "zl10039.h" #include "ds3000.h" #include "stv0900.h" #include "stv6110.h" #include "stb6100.h" #include "stb6100_proc.h" #ifndef USB_PID_DW2102 #define USB_PID_DW2102 0x2102 #endif #ifndef USB_PID_DW2104 #define USB_PID_DW2104 0x2104 #endif #ifndef USB_PID_DW3101 #define USB_PID_DW3101 0x3101 #endif #ifndef USB_PID_CINERGY_S #define USB_PID_CINERGY_S 0x0064 #endif #ifndef USB_PID_TEVII_S630 #define USB_PID_TEVII_S630 0xd630 #endif #ifndef USB_PID_TEVII_S650 #define USB_PID_TEVII_S650 0xd650 #endif #ifndef USB_PID_TEVII_S660 #define USB_PID_TEVII_S660 0xd660 #endif #ifndef USB_PID_PROF_1100 #define USB_PID_PROF_1100 0xb012 #endif #define DW210X_READ_MSG 0 #define DW210X_WRITE_MSG 1 #define REG_1F_SYMBOLRATE_BYTE0 0x1f #define REG_20_SYMBOLRATE_BYTE1 0x20 #define REG_21_SYMBOLRATE_BYTE2 0x21 /* on my own*/ #define DW2102_VOLTAGE_CTRL (0x1800) #define DW2102_RC_QUERY (0x1a00) #define err_str "did not find the firmware file. (%s) " \ "Please see linux/Documentation/dvb/ for more details " \ "on firmware-problems." struct ir_codes_dvb_usb_table_table { struct dvb_usb_rc_key *rc_keys; int rc_keys_size; }; /* debug */ static int dvb_usb_dw2102_debug; module_param_named(debug, dvb_usb_dw2102_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info 2=xfer 4=rc(or-able))." 
DVB_USB_DEBUG_STATUS); /* keymaps */ static int ir_keymap; module_param_named(keymap, ir_keymap, int, 0644); MODULE_PARM_DESC(keymap, "set keymap 0=default 1=dvbworld 2=tevii 3=tbs ..."); /* demod probe */ static int demod_probe = 1; module_param_named(demod, demod_probe, int, 0644); MODULE_PARM_DESC(demod, "demod to probe (1=cx24116 2=stv0903+stv6110 " "4=stv0903+stb6100(or-able))."); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); static int dw210x_op_rw(struct usb_device *dev, u8 request, u16 value, u16 index, u8 * data, u16 len, int flags) { int ret; u8 u8buf[len]; unsigned int pipe = (flags == DW210X_READ_MSG) ? usb_rcvctrlpipe(dev, 0) : usb_sndctrlpipe(dev, 0); u8 request_type = (flags == DW210X_READ_MSG) ? USB_DIR_IN : USB_DIR_OUT; if (flags == DW210X_WRITE_MSG) memcpy(u8buf, data, len); ret = usb_control_msg(dev, pipe, request, request_type | USB_TYPE_VENDOR, value, index , u8buf, len, 2000); if (flags == DW210X_READ_MSG) memcpy(data, u8buf, len); return ret; } /* I2C */ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int i = 0, ret = 0; u8 buf6[] = {0x2c, 0x05, 0xc0, 0, 0, 0, 0}; u16 value; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; switch (num) { case 2: /* read stv0299 register */ value = msg[0].buf[0];/* register */ for (i = 0; i < msg[1].len; i++) { value = value + i; ret = dw210x_op_rw(d->udev, 0xb5, value, 0, buf6, 2, DW210X_READ_MSG); msg[1].buf[i] = buf6[0]; } break; case 1: switch (msg[0].addr) { case 0x68: /* write to stv0299 register */ buf6[0] = 0x2a; buf6[1] = msg[0].buf[0]; buf6[2] = msg[0].buf[1]; ret = dw210x_op_rw(d->udev, 0xb2, 0, 0, buf6, 3, DW210X_WRITE_MSG); break; case 0x60: if (msg[0].flags == 0) { /* write to tuner pll */ buf6[0] = 0x2c; buf6[1] = 5; buf6[2] = 0xc0; buf6[3] = msg[0].buf[0]; buf6[4] = msg[0].buf[1]; buf6[5] = msg[0].buf[2]; buf6[6] = msg[0].buf[3]; ret = dw210x_op_rw(d->udev, 0xb2, 0, 
0, buf6, 7, DW210X_WRITE_MSG); } else { /* read from tuner */ ret = dw210x_op_rw(d->udev, 0xb5, 0, 0, buf6, 1, DW210X_READ_MSG); msg[0].buf[0] = buf6[0]; } break; case (DW2102_RC_QUERY): ret = dw210x_op_rw(d->udev, 0xb8, 0, 0, buf6, 2, DW210X_READ_MSG); msg[0].buf[0] = buf6[0]; msg[0].buf[1] = buf6[1]; break; case (DW2102_VOLTAGE_CTRL): buf6[0] = 0x30; buf6[1] = msg[0].buf[0]; ret = dw210x_op_rw(d->udev, 0xb2, 0, 0, buf6, 2, DW210X_WRITE_MSG); break; } break; } mutex_unlock(&d->i2c_mutex); return num; } static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int ret = 0; u8 buf6[] = {0, 0, 0, 0, 0, 0, 0}; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; switch (num) { case 2: /* read si2109 register by number */ buf6[0] = msg[0].addr << 1; buf6[1] = msg[0].len; buf6[2] = msg[0].buf[0]; ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, buf6, msg[0].len + 2, DW210X_WRITE_MSG); /* read si2109 register */ ret = dw210x_op_rw(d->udev, 0xc3, 0xd0, 0, buf6, msg[1].len + 2, DW210X_READ_MSG); memcpy(msg[1].buf, buf6 + 2, msg[1].len); break; case 1: switch (msg[0].addr) { case 0x68: /* write to si2109 register */ buf6[0] = msg[0].addr << 1; buf6[1] = msg[0].len; memcpy(buf6 + 2, msg[0].buf, msg[0].len); ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, buf6, msg[0].len + 2, DW210X_WRITE_MSG); break; case(DW2102_RC_QUERY): ret = dw210x_op_rw(d->udev, 0xb8, 0, 0, buf6, 2, DW210X_READ_MSG); msg[0].buf[0] = buf6[0]; msg[0].buf[1] = buf6[1]; break; case(DW2102_VOLTAGE_CTRL): buf6[0] = 0x30; buf6[1] = msg[0].buf[0]; ret = dw210x_op_rw(d->udev, 0xb2, 0, 0, buf6, 2, DW210X_WRITE_MSG); break; } break; } mutex_unlock(&d->i2c_mutex); return num; } static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int ret = 0; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) 
return -EAGAIN; switch (num) { case 2: { /* read */ /* first write first register number */ u8 ibuf[msg[1].len + 2], obuf[3]; obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; obuf[2] = msg[0].buf[0]; ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); /* second read registers */ ret = dw210x_op_rw(d->udev, 0xc3, 0xd1 , 0, ibuf, msg[1].len + 2, DW210X_READ_MSG); memcpy(msg[1].buf, ibuf + 2, msg[1].len); break; } case 1: switch (msg[0].addr) { case 0x68: { /* write to register */ u8 obuf[msg[0].len + 2]; obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; memcpy(obuf + 2, msg[0].buf, msg[0].len); ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); break; } case 0x61: { /* write to tuner */ u8 obuf[msg[0].len + 2]; obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; memcpy(obuf + 2, msg[0].buf, msg[0].len); ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); break; } case(DW2102_RC_QUERY): { u8 ibuf[2]; ret = dw210x_op_rw(d->udev, 0xb8, 0, 0, ibuf, 2, DW210X_READ_MSG); memcpy(msg[0].buf, ibuf , 2); break; } case(DW2102_VOLTAGE_CTRL): { u8 obuf[2]; obuf[0] = 0x30; obuf[1] = msg[0].buf[0]; ret = dw210x_op_rw(d->udev, 0xb2, 0, 0, obuf, 2, DW210X_WRITE_MSG); break; } } break; } mutex_unlock(&d->i2c_mutex); return num; } static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int ret = 0; int len, i, j; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (j = 0; j < num; j++) { switch (msg[j].addr) { case(DW2102_RC_QUERY): { u8 ibuf[2]; ret = dw210x_op_rw(d->udev, 0xb8, 0, 0, ibuf, 2, DW210X_READ_MSG); memcpy(msg[j].buf, ibuf , 2); break; } case(DW2102_VOLTAGE_CTRL): { u8 obuf[2]; obuf[0] = 0x30; obuf[1] = msg[j].buf[0]; ret = dw210x_op_rw(d->udev, 0xb2, 0, 0, obuf, 2, DW210X_WRITE_MSG); break; } /*case 0x55: cx24116 case 0x6a: stv0903 case 0x68: ds3000, stv0903 case 
0x60: ts2020, stv6110, stb6100 */ default: { if (msg[j].flags == I2C_M_RD) { /* read registers */ u8 ibuf[msg[j].len + 2]; ret = dw210x_op_rw(d->udev, 0xc3, (msg[j].addr << 1) + 1, 0, ibuf, msg[j].len + 2, DW210X_READ_MSG); memcpy(msg[j].buf, ibuf + 2, msg[j].len); mdelay(10); } else if (((msg[j].buf[0] == 0xb0) && (msg[j].addr == 0x68)) || ((msg[j].buf[0] == 0xf7) && (msg[j].addr == 0x55))) { /* write firmware */ u8 obuf[19]; obuf[0] = msg[j].addr << 1; obuf[1] = (msg[j].len > 15 ? 17 : msg[j].len); obuf[2] = msg[j].buf[0]; len = msg[j].len - 1; i = 1; do { memcpy(obuf + 3, msg[j].buf + i, (len > 16 ? 16 : len)); ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, (len > 16 ? 16 : len) + 3, DW210X_WRITE_MSG); i += 16; len -= 16; } while (len > 0); } else { /* write registers */ u8 obuf[msg[j].len + 2]; obuf[0] = msg[j].addr << 1; obuf[1] = msg[j].len; memcpy(obuf + 2, msg[j].buf, msg[j].len); ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[j].len + 2, DW210X_WRITE_MSG); } break; } } } mutex_unlock(&d->i2c_mutex); return num; } static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int ret = 0, i; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; switch (num) { case 2: { /* read */ /* first write first register number */ u8 ibuf[msg[1].len + 2], obuf[3]; obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; obuf[2] = msg[0].buf[0]; ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); /* second read registers */ ret = dw210x_op_rw(d->udev, 0xc3, 0x19 , 0, ibuf, msg[1].len + 2, DW210X_READ_MSG); memcpy(msg[1].buf, ibuf + 2, msg[1].len); break; } case 1: switch (msg[0].addr) { case 0x60: case 0x0c: { /* write to register */ u8 obuf[msg[0].len + 2]; obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; memcpy(obuf + 2, msg[0].buf, msg[0].len); ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); break; 
} case(DW2102_RC_QUERY): { u8 ibuf[2]; ret = dw210x_op_rw(d->udev, 0xb8, 0, 0, ibuf, 2, DW210X_READ_MSG); memcpy(msg[0].buf, ibuf , 2); break; } } break; } for (i = 0; i < num; i++) { deb_xfer("%02x:%02x: %s ", i, msg[i].addr, msg[i].flags == 0 ? ">>>" : "<<<"); debug_dump(msg[i].buf, msg[i].len, deb_xfer); } mutex_unlock(&d->i2c_mutex); return num; } static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); struct usb_device *udev; int ret = 0; int len, i, j; if (!d) return -ENODEV; udev = d->udev; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (j = 0; j < num; j++) { switch (msg[j].addr) { case (DW2102_RC_QUERY): { u8 ibuf[4]; ret = dw210x_op_rw(d->udev, 0xb8, 0, 0, ibuf, 4, DW210X_READ_MSG); memcpy(msg[j].buf, ibuf + 1, 2); break; } case (DW2102_VOLTAGE_CTRL): { u8 obuf[2]; obuf[0] = 1; obuf[1] = msg[j].buf[1];/* off-on */ ret = dw210x_op_rw(d->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG); obuf[0] = 3; obuf[1] = msg[j].buf[0];/* 13v-18v */ ret = dw210x_op_rw(d->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG); break; } /*case 0x55: cx24116 case 0x6a: stv0903 case 0x68: ds3000, stv0903 case 0x60: ts2020, stv6110, stb6100 case 0xa0: eeprom */ default: { if (msg[j].flags == I2C_M_RD) { /* read registers */ u8 ibuf[msg[j].len]; ret = dw210x_op_rw(d->udev, 0x91, 0, 0, ibuf, msg[j].len, DW210X_READ_MSG); memcpy(msg[j].buf, ibuf, msg[j].len); break; } else if ((msg[j].buf[0] == 0xb0) && (msg[j].addr == 0x68)) { /* write firmware */ u8 obuf[19]; obuf[0] = (msg[j].len > 16 ? 18 : msg[j].len + 1); obuf[1] = msg[j].addr << 1; obuf[2] = msg[j].buf[0]; len = msg[j].len - 1; i = 1; do { memcpy(obuf + 3, msg[j].buf + i, (len > 16 ? 16 : len)); ret = dw210x_op_rw(d->udev, 0x80, 0, 0, obuf, (len > 16 ? 
16 : len) + 3, DW210X_WRITE_MSG); i += 16; len -= 16; } while (len > 0); } else if ((udev->descriptor.idProduct == 0x7500) && (j < (num - 1))) { /* write register addr before read */ u8 obuf[msg[j].len + 2]; obuf[0] = msg[j + 1].len; obuf[1] = (msg[j].addr << 1); memcpy(obuf + 2, msg[j].buf, msg[j].len); ret = dw210x_op_rw(d->udev, 0x92, 0, 0, obuf, msg[j].len + 2, DW210X_WRITE_MSG); break; } else { /* write registers */ u8 obuf[msg[j].len + 2]; obuf[0] = msg[j].len + 1; obuf[1] = (msg[j].addr << 1); memcpy(obuf + 2, msg[j].buf, msg[j].len); ret = dw210x_op_rw(d->udev, (num > 1 ? 0x90 : 0x80), 0, 0, obuf, msg[j].len + 2, DW210X_WRITE_MSG); break; } break; } } msleep(3); } mutex_unlock(&d->i2c_mutex); return num; } static u32 dw210x_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static struct i2c_algorithm dw2102_i2c_algo = { .master_xfer = dw2102_i2c_transfer, .functionality = dw210x_i2c_func, }; static struct i2c_algorithm dw2102_serit_i2c_algo = { .master_xfer = dw2102_serit_i2c_transfer, .functionality = dw210x_i2c_func, }; static struct i2c_algorithm dw2102_earda_i2c_algo = { .master_xfer = dw2102_earda_i2c_transfer, .functionality = dw210x_i2c_func, }; static struct i2c_algorithm dw2104_i2c_algo = { .master_xfer = dw2104_i2c_transfer, .functionality = dw210x_i2c_func, }; static struct i2c_algorithm dw3101_i2c_algo = { .master_xfer = dw3101_i2c_transfer, .functionality = dw210x_i2c_func, }; static struct i2c_algorithm s6x0_i2c_algo = { .master_xfer = s6x0_i2c_transfer, .functionality = dw210x_i2c_func, }; static int dw210x_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) { int i; u8 ibuf[] = {0, 0}; u8 eeprom[256], eepromline[16]; for (i = 0; i < 256; i++) { if (dw210x_op_rw(d->udev, 0xb6, 0xa0 , i, ibuf, 2, DW210X_READ_MSG) < 0) { err("read eeprom failed."); return -1; } else { eepromline[i%16] = ibuf[0]; eeprom[i] = ibuf[0]; } if ((i % 16) == 15) { deb_xfer("%02x: ", i - 15); debug_dump(eepromline, 16, deb_xfer); } } memcpy(mac, eeprom + 
8, 6); return 0; }; static int s6x0_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) { int i, ret; u8 ibuf[] = { 0 }, obuf[] = { 0 }; u8 eeprom[256], eepromline[16]; struct i2c_msg msg[] = { { .addr = 0xa0 >> 1, .flags = 0, .buf = obuf, .len = 1, }, { .addr = 0xa0 >> 1, .flags = I2C_M_RD, .buf = ibuf, .len = 1, } }; for (i = 0; i < 256; i++) { obuf[0] = i; ret = s6x0_i2c_transfer(&d->i2c_adap, msg, 2); if (ret != 2) { err("read eeprom failed."); return -1; } else { eepromline[i % 16] = ibuf[0]; eeprom[i] = ibuf[0]; } if ((i % 16) == 15) { deb_xfer("%02x: ", i - 15); debug_dump(eepromline, 16, deb_xfer); } } memcpy(mac, eeprom + 16, 6); return 0; }; static int dw210x_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) { static u8 command_13v[] = {0x00, 0x01}; static u8 command_18v[] = {0x01, 0x01}; static u8 command_off[] = {0x00, 0x00}; struct i2c_msg msg = { .addr = DW2102_VOLTAGE_CTRL, .flags = 0, .buf = command_off, .len = 2, }; struct dvb_usb_adapter *udev_adap = (struct dvb_usb_adapter *)(fe->dvb->priv); if (voltage == SEC_VOLTAGE_18) msg.buf = command_18v; else if (voltage == SEC_VOLTAGE_13) msg.buf = command_13v; i2c_transfer(&udev_adap->dev->i2c_adap, &msg, 1); return 0; } static struct stv0299_config sharp_z0194a_config = { .demod_address = 0x68, .inittab = sharp_z0194a_inittab, .mclk = 88000000UL, .invert = 1, .skip_reinit = 0, .lock_output = STV0299_LOCKOUTPUT_1, .volt13_op0_op1 = STV0299_VOLT13_OP1, .min_delay_ms = 100, .set_symbol_rate = sharp_z0194a_set_symbol_rate, }; static struct cx24116_config dw2104_config = { .demod_address = 0x55, .mpg_clk_pos_pol = 0x01, }; static struct si21xx_config serit_sp1511lhb_config = { .demod_address = 0x68, .min_delay_ms = 100, }; static struct tda10023_config dw3101_tda10023_config = { .demod_address = 0x0c, .invert = 1, }; static struct mt312_config zl313_config = { .demod_address = 0x0e, }; static struct ds3000_config dw2104_ds3000_config = { .demod_address = 0x68, }; static struct stv0900_config 
dw2104a_stv0900_config = { .demod_address = 0x6a, .demod_mode = 0, .xtal = 27000000, .clkmode = 3,/* 0-CLKI, 2-XTALI, else AUTO */ .diseqc_mode = 2,/* 2/3 PWM */ .tun1_maddress = 0,/* 0x60 */ .tun1_adc = 0,/* 2 Vpp */ .path1_mode = 3, }; static struct stb6100_config dw2104a_stb6100_config = { .tuner_address = 0x60, .refclock = 27000000, }; static struct stv0900_config dw2104_stv0900_config = { .demod_address = 0x68, .demod_mode = 0, .xtal = 8000000, .clkmode = 3, .diseqc_mode = 2, .tun1_maddress = 0, .tun1_adc = 1,/* 1 Vpp */ .path1_mode = 3, }; static struct stv6110_config dw2104_stv6110_config = { .i2c_address = 0x60, .mclk = 16000000, .clk_div = 1, }; static struct stv0900_config prof_7500_stv0900_config = { .demod_address = 0x6a, .demod_mode = 0, .xtal = 27000000, .clkmode = 3,/* 0-CLKI, 2-XTALI, else AUTO */ .diseqc_mode = 2,/* 2/3 PWM */ .tun1_maddress = 0,/* 0x60 */ .tun1_adc = 0,/* 2 Vpp */ .path1_mode = 3, .tun1_type = 3, }; static int dw2104_frontend_attach(struct dvb_usb_adapter *d) { struct dvb_tuner_ops *tuner_ops = NULL; if (demod_probe & 4) { d->fe = dvb_attach(stv0900_attach, &dw2104a_stv0900_config, &d->dev->i2c_adap, 0); if (d->fe != NULL) { if (dvb_attach(stb6100_attach, d->fe, &dw2104a_stb6100_config, &d->dev->i2c_adap)) { tuner_ops = &d->fe->ops.tuner_ops; tuner_ops->set_frequency = stb6100_set_freq; tuner_ops->get_frequency = stb6100_get_freq; tuner_ops->set_bandwidth = stb6100_set_bandw; tuner_ops->get_bandwidth = stb6100_get_bandw; d->fe->ops.set_voltage = dw210x_set_voltage; info("Attached STV0900+STB6100!\n"); return 0; } } } if (demod_probe & 2) { d->fe = dvb_attach(stv0900_attach, &dw2104_stv0900_config, &d->dev->i2c_adap, 0); if (d->fe != NULL) { if (dvb_attach(stv6110_attach, d->fe, &dw2104_stv6110_config, &d->dev->i2c_adap)) { d->fe->ops.set_voltage = dw210x_set_voltage; info("Attached STV0900+STV6110A!\n"); return 0; } } } if (demod_probe & 1) { d->fe = dvb_attach(cx24116_attach, &dw2104_config, &d->dev->i2c_adap); if (d->fe != NULL) 
{ d->fe->ops.set_voltage = dw210x_set_voltage; info("Attached cx24116!\n"); return 0; } } d->fe = dvb_attach(ds3000_attach, &dw2104_ds3000_config, &d->dev->i2c_adap); if (d->fe != NULL) { d->fe->ops.set_voltage = dw210x_set_voltage; info("Attached DS3000!\n"); return 0; } return -EIO; } static struct dvb_usb_device_properties dw2102_properties; static struct dvb_usb_device_properties dw2104_properties; static struct dvb_usb_device_properties s6x0_properties; static int dw2102_frontend_attach(struct dvb_usb_adapter *d) { if (dw2102_properties.i2c_algo == &dw2102_serit_i2c_algo) { /*dw2102_properties.adapter->tuner_attach = NULL;*/ d->fe = dvb_attach(si21xx_attach, &serit_sp1511lhb_config, &d->dev->i2c_adap); if (d->fe != NULL) { d->fe->ops.set_voltage = dw210x_set_voltage; info("Attached si21xx!\n"); return 0; } } if (dw2102_properties.i2c_algo == &dw2102_earda_i2c_algo) { d->fe = dvb_attach(stv0288_attach, &earda_config, &d->dev->i2c_adap); if (d->fe != NULL) { if (dvb_attach(stb6000_attach, d->fe, 0x61, &d->dev->i2c_adap)) { d->fe->ops.set_voltage = dw210x_set_voltage; info("Attached stv0288!\n"); return 0; } } } if (dw2102_properties.i2c_algo == &dw2102_i2c_algo) { /*dw2102_properties.adapter->tuner_attach = dw2102_tuner_attach;*/ d->fe = dvb_attach(stv0299_attach, &sharp_z0194a_config, &d->dev->i2c_adap); if (d->fe != NULL) { d->fe->ops.set_voltage = dw210x_set_voltage; info("Attached stv0299!\n"); return 0; } } return -EIO; } static int dw3101_frontend_attach(struct dvb_usb_adapter *d) { d->fe = dvb_attach(tda10023_attach, &dw3101_tda10023_config, &d->dev->i2c_adap, 0x48); if (d->fe != NULL) { info("Attached tda10023!\n"); return 0; } return -EIO; } static int s6x0_frontend_attach(struct dvb_usb_adapter *d) { d->fe = dvb_attach(mt312_attach, &zl313_config, &d->dev->i2c_adap); if (d->fe != NULL) { if (dvb_attach(zl10039_attach, d->fe, 0x60, &d->dev->i2c_adap)) { d->fe->ops.set_voltage = dw210x_set_voltage; info("Attached zl100313+zl10039!\n"); return 0; } } 
d->fe = dvb_attach(stv0288_attach, &earda_config, &d->dev->i2c_adap); if (d->fe != NULL) { if (dvb_attach(stb6000_attach, d->fe, 0x61, &d->dev->i2c_adap)) { d->fe->ops.set_voltage = dw210x_set_voltage; info("Attached stv0288+stb6000!\n"); return 0; } } d->fe = dvb_attach(ds3000_attach, &dw2104_ds3000_config, &d->dev->i2c_adap); if (d->fe != NULL) { d->fe->ops.set_voltage = dw210x_set_voltage; info("Attached ds3000+ds2020!\n"); return 0; } return -EIO; } static int prof_7500_frontend_attach(struct dvb_usb_adapter *d) { d->fe = dvb_attach(stv0900_attach, &prof_7500_stv0900_config, &d->dev->i2c_adap, 0); if (d->fe == NULL) return -EIO; d->fe->ops.set_voltage = dw210x_set_voltage; info("Attached STV0900+STB6100A!\n"); return 0; } static int dw2102_tuner_attach(struct dvb_usb_adapter *adap) { dvb_attach(dvb_pll_attach, adap->fe, 0x60, &adap->dev->i2c_adap, DVB_PLL_OPERA1); return 0; } static int dw3101_tuner_attach(struct dvb_usb_adapter *adap) { dvb_attach(dvb_pll_attach, adap->fe, 0x60, &adap->dev->i2c_adap, DVB_PLL_TUA6034); return 0; } static struct dvb_usb_rc_key ir_codes_dw210x_table[] = { { 0xf80a, KEY_Q }, /*power*/ { 0xf80c, KEY_M }, /*mute*/ { 0xf811, KEY_1 }, { 0xf812, KEY_2 }, { 0xf813, KEY_3 }, { 0xf814, KEY_4 }, { 0xf815, KEY_5 }, { 0xf816, KEY_6 }, { 0xf817, KEY_7 }, { 0xf818, KEY_8 }, { 0xf819, KEY_9 }, { 0xf810, KEY_0 }, { 0xf81c, KEY_PAGEUP }, /*ch+*/ { 0xf80f, KEY_PAGEDOWN }, /*ch-*/ { 0xf81a, KEY_O }, /*vol+*/ { 0xf80e, KEY_Z }, /*vol-*/ { 0xf804, KEY_R }, /*rec*/ { 0xf809, KEY_D }, /*fav*/ { 0xf808, KEY_BACKSPACE }, /*rewind*/ { 0xf807, KEY_A }, /*fast*/ { 0xf80b, KEY_P }, /*pause*/ { 0xf802, KEY_ESC }, /*cancel*/ { 0xf803, KEY_G }, /*tab*/ { 0xf800, KEY_UP }, /*up*/ { 0xf81f, KEY_ENTER }, /*ok*/ { 0xf801, KEY_DOWN }, /*down*/ { 0xf805, KEY_C }, /*cap*/ { 0xf806, KEY_S }, /*stop*/ { 0xf840, KEY_F }, /*full*/ { 0xf81e, KEY_W }, /*tvmode*/ { 0xf81b, KEY_B }, /*recall*/ }; static struct dvb_usb_rc_key ir_codes_tevii_table[] = { { 0xf80a, KEY_POWER }, { 
0xf80c, KEY_MUTE }, { 0xf811, KEY_1 }, { 0xf812, KEY_2 }, { 0xf813, KEY_3 }, { 0xf814, KEY_4 }, { 0xf815, KEY_5 }, { 0xf816, KEY_6 }, { 0xf817, KEY_7 }, { 0xf818, KEY_8 }, { 0xf819, KEY_9 }, { 0xf810, KEY_0 }, { 0xf81c, KEY_MENU }, { 0xf80f, KEY_VOLUMEDOWN }, { 0xf81a, KEY_LAST }, { 0xf80e, KEY_OPEN }, { 0xf804, KEY_RECORD }, { 0xf809, KEY_VOLUMEUP }, { 0xf808, KEY_CHANNELUP }, { 0xf807, KEY_PVR }, { 0xf80b, KEY_TIME }, { 0xf802, KEY_RIGHT }, { 0xf803, KEY_LEFT }, { 0xf800, KEY_UP }, { 0xf81f, KEY_OK }, { 0xf801, KEY_DOWN }, { 0xf805, KEY_TUNER }, { 0xf806, KEY_CHANNELDOWN }, { 0xf840, KEY_PLAYPAUSE }, { 0xf81e, KEY_REWIND }, { 0xf81b, KEY_FAVORITES }, { 0xf81d, KEY_BACK }, { 0xf84d, KEY_FASTFORWARD }, { 0xf844, KEY_EPG }, { 0xf84c, KEY_INFO }, { 0xf841, KEY_AB }, { 0xf843, KEY_AUDIO }, { 0xf845, KEY_SUBTITLE }, { 0xf84a, KEY_LIST }, { 0xf846, KEY_F1 }, { 0xf847, KEY_F2 }, { 0xf85e, KEY_F3 }, { 0xf85c, KEY_F4 }, { 0xf852, KEY_F5 }, { 0xf85a, KEY_F6 }, { 0xf856, KEY_MODE }, { 0xf858, KEY_SWITCHVIDEOMODE }, }; static struct dvb_usb_rc_key ir_codes_tbs_table[] = { { 0xf884, KEY_POWER }, { 0xf894, KEY_MUTE }, { 0xf887, KEY_1 }, { 0xf886, KEY_2 }, { 0xf885, KEY_3 }, { 0xf88b, KEY_4 }, { 0xf88a, KEY_5 }, { 0xf889, KEY_6 }, { 0xf88f, KEY_7 }, { 0xf88e, KEY_8 }, { 0xf88d, KEY_9 }, { 0xf892, KEY_0 }, { 0xf896, KEY_CHANNELUP }, { 0xf891, KEY_CHANNELDOWN }, { 0xf893, KEY_VOLUMEUP }, { 0xf88c, KEY_VOLUMEDOWN }, { 0xf883, KEY_RECORD }, { 0xf898, KEY_PAUSE }, { 0xf899, KEY_OK }, { 0xf89a, KEY_SHUFFLE }, { 0xf881, KEY_UP }, { 0xf890, KEY_LEFT }, { 0xf882, KEY_RIGHT }, { 0xf888, KEY_DOWN }, { 0xf895, KEY_FAVORITES }, { 0xf897, KEY_SUBTITLE }, { 0xf89d, KEY_ZOOM }, { 0xf89f, KEY_EXIT }, { 0xf89e, KEY_MENU }, { 0xf89c, KEY_EPG }, { 0xf880, KEY_PREVIOUS }, { 0xf89b, KEY_MODE } }; static struct ir_codes_dvb_usb_table_table keys_tables[] = { { ir_codes_dw210x_table, ARRAY_SIZE(ir_codes_dw210x_table) }, { ir_codes_tevii_table, ARRAY_SIZE(ir_codes_tevii_table) }, { ir_codes_tbs_table, 
ARRAY_SIZE(ir_codes_tbs_table) }, }; static int dw2102_rc_query(struct dvb_usb_device *d, u32 *event, int *state) { struct dvb_usb_rc_key *keymap = d->props.rc_key_map; int keymap_size = d->props.rc_key_map_size; u8 key[2]; struct i2c_msg msg = { .addr = DW2102_RC_QUERY, .flags = I2C_M_RD, .buf = key, .len = 2 }; int i; /* override keymap */ if ((ir_keymap > 0) && (ir_keymap <= ARRAY_SIZE(keys_tables))) { keymap = keys_tables[ir_keymap - 1].rc_keys ; keymap_size = keys_tables[ir_keymap - 1].rc_keys_size; } *state = REMOTE_NO_KEY_PRESSED; if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) { for (i = 0; i < keymap_size ; i++) { if (rc5_data(&keymap[i]) == msg.buf[0]) { *state = REMOTE_KEY_PRESSED; *event = keymap[i].event; break; } } if ((*state) == REMOTE_KEY_PRESSED) deb_rc("%s: found rc key: %x, %x, event: %x\n", __func__, key[0], key[1], (*event)); else if (key[0] != 0xff) deb_rc("%s: unknown rc key: %x, %x\n", __func__, key[0], key[1]); } return 0; } static struct usb_device_id dw2102_table[] = { {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW2102)}, {USB_DEVICE(USB_VID_CYPRESS, 0x2101)}, {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW2104)}, {USB_DEVICE(0x9022, USB_PID_TEVII_S650)}, {USB_DEVICE(USB_VID_TERRATEC, USB_PID_CINERGY_S)}, {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW3101)}, {USB_DEVICE(0x9022, USB_PID_TEVII_S630)}, {USB_DEVICE(0x3011, USB_PID_PROF_1100)}, {USB_DEVICE(0x9022, USB_PID_TEVII_S660)}, {USB_DEVICE(0x3034, 0x7500)}, { } }; MODULE_DEVICE_TABLE(usb, dw2102_table); static int dw2102_load_firmware(struct usb_device *dev, const struct firmware *frmwr) { u8 *b, *p; int ret = 0, i; u8 reset; u8 reset16[] = {0, 0, 0, 0, 0, 0, 0}; const struct firmware *fw; const char *fw_2101 = "dvb-usb-dw2101.fw"; switch (dev->descriptor.idProduct) { case 0x2101: ret = request_firmware(&fw, fw_2101, &dev->dev); if (ret != 0) { err(err_str, fw_2101); return ret; } break; default: fw = frmwr; break; } info("start downloading DW210X firmware"); p = kmalloc(fw->size, 
GFP_KERNEL); reset = 1; /*stop the CPU*/ dw210x_op_rw(dev, 0xa0, 0x7f92, 0, &reset, 1, DW210X_WRITE_MSG); dw210x_op_rw(dev, 0xa0, 0xe600, 0, &reset, 1, DW210X_WRITE_MSG); if (p != NULL) { memcpy(p, fw->data, fw->size); for (i = 0; i < fw->size; i += 0x40) { b = (u8 *) p + i; if (dw210x_op_rw(dev, 0xa0, i, 0, b , 0x40, DW210X_WRITE_MSG) != 0x40) { err("error while transferring firmware"); ret = -EINVAL; break; } } /* restart the CPU */ reset = 0; if (ret || dw210x_op_rw(dev, 0xa0, 0x7f92, 0, &reset, 1, DW210X_WRITE_MSG) != 1) { err("could not restart the USB controller CPU."); ret = -EINVAL; } if (ret || dw210x_op_rw(dev, 0xa0, 0xe600, 0, &reset, 1, DW210X_WRITE_MSG) != 1) { err("could not restart the USB controller CPU."); ret = -EINVAL; } /* init registers */ switch (dev->descriptor.idProduct) { case USB_PID_PROF_1100: s6x0_properties.rc_key_map = ir_codes_tbs_table; s6x0_properties.rc_key_map_size = ARRAY_SIZE(ir_codes_tbs_table); break; case USB_PID_TEVII_S650: dw2104_properties.rc_key_map = ir_codes_tevii_table; dw2104_properties.rc_key_map_size = ARRAY_SIZE(ir_codes_tevii_table); case USB_PID_DW2104: reset = 1; dw210x_op_rw(dev, 0xc4, 0x0000, 0, &reset, 1, DW210X_WRITE_MSG); /* break omitted intentionally */ case USB_PID_DW3101: reset = 0; dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0, DW210X_WRITE_MSG); break; case USB_PID_CINERGY_S: case USB_PID_DW2102: dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0, DW210X_WRITE_MSG); dw210x_op_rw(dev, 0xb9, 0x0000, 0, &reset16[0], 2, DW210X_READ_MSG); /* check STV0299 frontend */ dw210x_op_rw(dev, 0xb5, 0, 0, &reset16[0], 2, DW210X_READ_MSG); if ((reset16[0] == 0xa1) || (reset16[0] == 0x80)) { dw2102_properties.i2c_algo = &dw2102_i2c_algo; dw2102_properties.adapter->tuner_attach = &dw2102_tuner_attach; break; } else { /* check STV0288 frontend */ reset16[0] = 0xd0; reset16[1] = 1; reset16[2] = 0; dw210x_op_rw(dev, 0xc2, 0, 0, &reset16[0], 3, DW210X_WRITE_MSG); dw210x_op_rw(dev, 0xc3, 0xd1, 0, &reset16[0], 3, DW210X_READ_MSG); 
if (reset16[2] == 0x11) { dw2102_properties.i2c_algo = &dw2102_earda_i2c_algo; break; } } case 0x2101: dw210x_op_rw(dev, 0xbc, 0x0030, 0, &reset16[0], 2, DW210X_READ_MSG); dw210x_op_rw(dev, 0xba, 0x0000, 0, &reset16[0], 7, DW210X_READ_MSG); dw210x_op_rw(dev, 0xba, 0x0000, 0, &reset16[0], 7, DW210X_READ_MSG); dw210x_op_rw(dev, 0xb9, 0x0000, 0, &reset16[0], 2, DW210X_READ_MSG); break; } msleep(100); kfree(p); } return ret; } static struct dvb_usb_device_properties dw2102_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-dw2102.fw", .no_reconnect = 1, .i2c_algo = &dw2102_serit_i2c_algo, .rc_key_map = ir_codes_dw210x_table, .rc_key_map_size = ARRAY_SIZE(ir_codes_dw210x_table), .rc_interval = 150, .rc_query = dw2102_rc_query, .generic_bulk_ctrl_endpoint = 0x81, /* parameter for the MPEG2-data transfer */ .num_adapters = 1, .download_firmware = dw2102_load_firmware, .read_mac_address = dw210x_read_mac_address, .adapter = { { .frontend_attach = dw2102_frontend_attach, .streaming_ctrl = NULL, .tuner_attach = NULL, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } }, } }, .num_device_descs = 3, .devices = { {"DVBWorld DVB-S 2102 USB2.0", {&dw2102_table[0], NULL}, {NULL}, }, {"DVBWorld DVB-S 2101 USB2.0", {&dw2102_table[1], NULL}, {NULL}, }, {"TerraTec Cinergy S USB", {&dw2102_table[4], NULL}, {NULL}, }, } }; static struct dvb_usb_device_properties dw2104_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-dw2104.fw", .no_reconnect = 1, .i2c_algo = &dw2104_i2c_algo, .rc_key_map = ir_codes_dw210x_table, .rc_key_map_size = ARRAY_SIZE(ir_codes_dw210x_table), .rc_interval = 150, .rc_query = dw2102_rc_query, .generic_bulk_ctrl_endpoint = 0x81, /* parameter for the MPEG2-data transfer */ .num_adapters = 1, .download_firmware = dw2102_load_firmware, .read_mac_address = dw210x_read_mac_address, .adapter = { { .frontend_attach = 
dw2104_frontend_attach, .streaming_ctrl = NULL, /*.tuner_attach = dw2104_tuner_attach,*/ .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } }, } }, .num_device_descs = 2, .devices = { { "DVBWorld DW2104 USB2.0", {&dw2102_table[2], NULL}, {NULL}, }, { "TeVii S650 USB2.0", {&dw2102_table[3], NULL}, {NULL}, }, } }; static struct dvb_usb_device_properties dw3101_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-dw3101.fw", .no_reconnect = 1, .i2c_algo = &dw3101_i2c_algo, .rc_key_map = ir_codes_dw210x_table, .rc_key_map_size = ARRAY_SIZE(ir_codes_dw210x_table), .rc_interval = 150, .rc_query = dw2102_rc_query, .generic_bulk_ctrl_endpoint = 0x81, /* parameter for the MPEG2-data transfer */ .num_adapters = 1, .download_firmware = dw2102_load_firmware, .read_mac_address = dw210x_read_mac_address, .adapter = { { .frontend_attach = dw3101_frontend_attach, .streaming_ctrl = NULL, .tuner_attach = dw3101_tuner_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } }, } }, .num_device_descs = 1, .devices = { { "DVBWorld DVB-C 3101 USB2.0", {&dw2102_table[5], NULL}, {NULL}, }, } }; static struct dvb_usb_device_properties s6x0_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-s630.fw", .no_reconnect = 1, .i2c_algo = &s6x0_i2c_algo, .rc_key_map = ir_codes_tevii_table, .rc_key_map_size = ARRAY_SIZE(ir_codes_tevii_table), .rc_interval = 150, .rc_query = dw2102_rc_query, .generic_bulk_ctrl_endpoint = 0x81, .num_adapters = 1, .download_firmware = dw2102_load_firmware, .read_mac_address = s6x0_read_mac_address, .adapter = { { .frontend_attach = s6x0_frontend_attach, .streaming_ctrl = NULL, .tuner_attach = NULL, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } }, } }, .num_device_descs = 3, .devices = { {"TeVii S630 USB", 
{&dw2102_table[6], NULL}, {NULL}, }, {"Prof 1100 USB ", {&dw2102_table[7], NULL}, {NULL}, }, {"TeVii S660 USB", {&dw2102_table[8], NULL}, {NULL}, }, } }; struct dvb_usb_device_properties *p7500; static struct dvb_usb_device_description d7500 = { "Prof 7500 USB DVB-S2", {&dw2102_table[9], NULL}, {NULL}, }; static int dw2102_probe(struct usb_interface *intf, const struct usb_device_id *id) { p7500 = kzalloc(sizeof(struct dvb_usb_device_properties), GFP_KERNEL); if (!p7500) return -ENOMEM; /* copy default structure */ memcpy(p7500, &s6x0_properties, sizeof(struct dvb_usb_device_properties)); /* fill only different fields */ p7500->firmware = "dvb-usb-p7500.fw"; p7500->devices[0] = d7500; p7500->rc_key_map = ir_codes_tbs_table; p7500->rc_key_map_size = ARRAY_SIZE(ir_codes_tbs_table); p7500->adapter->frontend_attach = prof_7500_frontend_attach; if (0 == dvb_usb_device_init(intf, &dw2102_properties, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &dw2104_properties, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &dw3101_properties, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &s6x0_properties, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, p7500, THIS_MODULE, NULL, adapter_nr)) return 0; return -ENODEV; } static struct usb_driver dw2102_driver = { .name = "dw2102", .probe = dw2102_probe, .disconnect = dvb_usb_device_exit, .id_table = dw2102_table, }; static int __init dw2102_module_init(void) { int ret = usb_register(&dw2102_driver); if (ret) err("usb_register failed. Error number %d", ret); return ret; } static void __exit dw2102_module_exit(void) { usb_deregister(&dw2102_driver); } module_init(dw2102_module_init); module_exit(dw2102_module_exit); MODULE_AUTHOR("Igor M. 
Liplianin (c) liplianin@me.by"); MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104," " DVB-C 3101 USB2.0," " TeVii S600, S630, S650, S660 USB2.0," " Prof 1100, 7500 USB2.0 devices"); MODULE_VERSION("0.1"); MODULE_LICENSE("GPL");
gpl-2.0
Fusion-Devices/android_kernel_samsung_klte
drivers/block/sunvdc.c
1528
19476
/* sunvdc.c: Sun LDOM Virtual Disk Client. * * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/blkdev.h> #include <linux/hdreg.h> #include <linux/genhd.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/list.h> #include <linux/scatterlist.h> #include <asm/vio.h> #include <asm/ldc.h> #define DRV_MODULE_NAME "sunvdc" #define PFX DRV_MODULE_NAME ": " #define DRV_MODULE_VERSION "1.0" #define DRV_MODULE_RELDATE "June 25, 2007" static char version[] __devinitdata = DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); MODULE_DESCRIPTION("Sun LDOM virtual disk client driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); #define VDC_TX_RING_SIZE 256 #define WAITING_FOR_LINK_UP 0x01 #define WAITING_FOR_TX_SPACE 0x02 #define WAITING_FOR_GEN_CMD 0x04 #define WAITING_FOR_ANY -1 struct vdc_req_entry { struct request *req; }; struct vdc_port { struct vio_driver_state vio; struct gendisk *disk; struct vdc_completion *cmp; u64 req_id; u64 seq; struct vdc_req_entry rq_arr[VDC_TX_RING_SIZE]; unsigned long ring_cookies; u64 max_xfer_size; u32 vdisk_block_size; /* The server fills these in for us in the disk attribute * ACK packet. 
*/ u64 operations; u32 vdisk_size; u8 vdisk_type; char disk_name[32]; struct vio_disk_geom geom; struct vio_disk_vtoc label; }; static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio) { return container_of(vio, struct vdc_port, vio); } /* Ordered from largest major to lowest */ static struct vio_version vdc_versions[] = { { .major = 1, .minor = 0 }, }; #define VDCBLK_NAME "vdisk" static int vdc_major; #define PARTITION_SHIFT 3 static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr) { return vio_dring_avail(dr, VDC_TX_RING_SIZE); } static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct gendisk *disk = bdev->bd_disk; struct vdc_port *port = disk->private_data; geo->heads = (u8) port->geom.num_hd; geo->sectors = (u8) port->geom.num_sec; geo->cylinders = port->geom.num_cyl; return 0; } static const struct block_device_operations vdc_fops = { .owner = THIS_MODULE, .getgeo = vdc_getgeo, }; static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for) { if (vio->cmp && (waiting_for == -1 || vio->cmp->waiting_for == waiting_for)) { vio->cmp->err = err; complete(&vio->cmp->com); vio->cmp = NULL; } } static void vdc_handshake_complete(struct vio_driver_state *vio) { vdc_finish(vio, 0, WAITING_FOR_LINK_UP); } static int vdc_handle_unknown(struct vdc_port *port, void *arg) { struct vio_msg_tag *pkt = arg; printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n", pkt->type, pkt->stype, pkt->stype_env, pkt->sid); printk(KERN_ERR PFX "Resetting connection.\n"); ldc_disconnect(port->vio.lp); return -ECONNRESET; } static int vdc_send_attr(struct vio_driver_state *vio) { struct vdc_port *port = to_vdc_port(vio); struct vio_disk_attr_info pkt; memset(&pkt, 0, sizeof(pkt)); pkt.tag.type = VIO_TYPE_CTRL; pkt.tag.stype = VIO_SUBTYPE_INFO; pkt.tag.stype_env = VIO_ATTR_INFO; pkt.tag.sid = vio_send_sid(vio); pkt.xfer_mode = VIO_DRING_MODE; pkt.vdisk_block_size = port->vdisk_block_size; pkt.max_xfer_size = 
port->max_xfer_size; viodbg(HS, "SEND ATTR xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n", pkt.xfer_mode, pkt.vdisk_block_size, pkt.max_xfer_size); return vio_ldc_send(&port->vio, &pkt, sizeof(pkt)); } static int vdc_handle_attr(struct vio_driver_state *vio, void *arg) { struct vdc_port *port = to_vdc_port(vio); struct vio_disk_attr_info *pkt = arg; viodbg(HS, "GOT ATTR stype[0x%x] ops[%llx] disk_size[%llu] disk_type[%x] " "xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n", pkt->tag.stype, pkt->operations, pkt->vdisk_size, pkt->vdisk_type, pkt->xfer_mode, pkt->vdisk_block_size, pkt->max_xfer_size); if (pkt->tag.stype == VIO_SUBTYPE_ACK) { switch (pkt->vdisk_type) { case VD_DISK_TYPE_DISK: case VD_DISK_TYPE_SLICE: break; default: printk(KERN_ERR PFX "%s: Bogus vdisk_type 0x%x\n", vio->name, pkt->vdisk_type); return -ECONNRESET; } if (pkt->vdisk_block_size > port->vdisk_block_size) { printk(KERN_ERR PFX "%s: BLOCK size increased " "%u --> %u\n", vio->name, port->vdisk_block_size, pkt->vdisk_block_size); return -ECONNRESET; } port->operations = pkt->operations; port->vdisk_size = pkt->vdisk_size; port->vdisk_type = pkt->vdisk_type; if (pkt->max_xfer_size < port->max_xfer_size) port->max_xfer_size = pkt->max_xfer_size; port->vdisk_block_size = pkt->vdisk_block_size; return 0; } else { printk(KERN_ERR PFX "%s: Attribute NACK\n", vio->name); return -ECONNRESET; } } static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc) { int err = desc->status; vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD); } static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr, unsigned int index) { struct vio_disk_desc *desc = vio_dring_entry(dr, index); struct vdc_req_entry *rqe = &port->rq_arr[index]; struct request *req; if (unlikely(desc->hdr.state != VIO_DESC_DONE)) return; ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies); desc->hdr.state = VIO_DESC_FREE; dr->cons = (index + 1) & (VDC_TX_RING_SIZE - 1); req = rqe->req; if (req == NULL) { 
vdc_end_special(port, desc); return; } rqe->req = NULL; __blk_end_request(req, (desc->status ? -EIO : 0), desc->size); if (blk_queue_stopped(port->disk->queue)) blk_start_queue(port->disk->queue); } static int vdc_ack(struct vdc_port *port, void *msgbuf) { struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; struct vio_dring_data *pkt = msgbuf; if (unlikely(pkt->dring_ident != dr->ident || pkt->start_idx != pkt->end_idx || pkt->start_idx >= VDC_TX_RING_SIZE)) return 0; vdc_end_one(port, dr, pkt->start_idx); return 0; } static int vdc_nack(struct vdc_port *port, void *msgbuf) { /* XXX Implement me XXX */ return 0; } static void vdc_event(void *arg, int event) { struct vdc_port *port = arg; struct vio_driver_state *vio = &port->vio; unsigned long flags; int err; spin_lock_irqsave(&vio->lock, flags); if (unlikely(event == LDC_EVENT_RESET || event == LDC_EVENT_UP)) { vio_link_state_change(vio, event); spin_unlock_irqrestore(&vio->lock, flags); return; } if (unlikely(event != LDC_EVENT_DATA_READY)) { printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event); spin_unlock_irqrestore(&vio->lock, flags); return; } err = 0; while (1) { union { struct vio_msg_tag tag; u64 raw[8]; } msgbuf; err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf)); if (unlikely(err < 0)) { if (err == -ECONNRESET) vio_conn_reset(vio); break; } if (err == 0) break; viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n", msgbuf.tag.type, msgbuf.tag.stype, msgbuf.tag.stype_env, msgbuf.tag.sid); err = vio_validate_sid(vio, &msgbuf.tag); if (err < 0) break; if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) { if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) err = vdc_ack(port, &msgbuf); else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) err = vdc_nack(port, &msgbuf); else err = vdc_handle_unknown(port, &msgbuf); } else if (msgbuf.tag.type == VIO_TYPE_CTRL) { err = vio_control_pkt_engine(vio, &msgbuf); } else { err = vdc_handle_unknown(port, &msgbuf); } if (err < 0) break; } if (err < 0) vdc_finish(&port->vio, err, 
WAITING_FOR_ANY); spin_unlock_irqrestore(&vio->lock, flags); } static int __vdc_tx_trigger(struct vdc_port *port) { struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; struct vio_dring_data hdr = { .tag = { .type = VIO_TYPE_DATA, .stype = VIO_SUBTYPE_INFO, .stype_env = VIO_DRING_DATA, .sid = vio_send_sid(&port->vio), }, .dring_ident = dr->ident, .start_idx = dr->prod, .end_idx = dr->prod, }; int err, delay; hdr.seq = dr->snd_nxt; delay = 1; do { err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); if (err > 0) { dr->snd_nxt++; break; } udelay(delay); if ((delay <<= 1) > 128) delay = 128; } while (err == -EAGAIN); return err; } static int __send_request(struct request *req) { struct vdc_port *port = req->rq_disk->private_data; struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; struct scatterlist sg[port->ring_cookies]; struct vdc_req_entry *rqe; struct vio_disk_desc *desc; unsigned int map_perm; int nsg, err, i; u64 len; u8 op; map_perm = LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO; if (rq_data_dir(req) == READ) { map_perm |= LDC_MAP_W; op = VD_OP_BREAD; } else { map_perm |= LDC_MAP_R; op = VD_OP_BWRITE; } sg_init_table(sg, port->ring_cookies); nsg = blk_rq_map_sg(req->q, req, sg); len = 0; for (i = 0; i < nsg; i++) len += sg[i].length; if (unlikely(vdc_tx_dring_avail(dr) < 1)) { blk_stop_queue(port->disk->queue); err = -ENOMEM; goto out; } desc = vio_dring_cur(dr); err = ldc_map_sg(port->vio.lp, sg, nsg, desc->cookies, port->ring_cookies, map_perm); if (err < 0) { printk(KERN_ERR PFX "ldc_map_sg() failure, err=%d.\n", err); return err; } rqe = &port->rq_arr[dr->prod]; rqe->req = req; desc->hdr.ack = VIO_ACK_ENABLE; desc->req_id = port->req_id; desc->operation = op; if (port->vdisk_type == VD_DISK_TYPE_DISK) { desc->slice = 0xff; } else { desc->slice = 0; } desc->status = ~0; desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size; desc->size = len; desc->ncookies = err; /* This has to be a non-SMP write barrier because we are 
writing * to memory which is shared with the peer LDOM. */ wmb(); desc->hdr.state = VIO_DESC_READY; err = __vdc_tx_trigger(port); if (err < 0) { printk(KERN_ERR PFX "vdc_tx_trigger() failure, err=%d\n", err); } else { port->req_id++; dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1); } out: return err; } static void do_vdc_request(struct request_queue *q) { while (1) { struct request *req = blk_fetch_request(q); if (!req) break; if (__send_request(req) < 0) __blk_end_request_all(req, -EIO); } } static int generic_request(struct vdc_port *port, u8 op, void *buf, int len) { struct vio_dring_state *dr; struct vio_completion comp; struct vio_disk_desc *desc; unsigned int map_perm; unsigned long flags; int op_len, err; void *req_buf; if (!(((u64)1 << (u64)op) & port->operations)) return -EOPNOTSUPP; switch (op) { case VD_OP_BREAD: case VD_OP_BWRITE: default: return -EINVAL; case VD_OP_FLUSH: op_len = 0; map_perm = 0; break; case VD_OP_GET_WCE: op_len = sizeof(u32); map_perm = LDC_MAP_W; break; case VD_OP_SET_WCE: op_len = sizeof(u32); map_perm = LDC_MAP_R; break; case VD_OP_GET_VTOC: op_len = sizeof(struct vio_disk_vtoc); map_perm = LDC_MAP_W; break; case VD_OP_SET_VTOC: op_len = sizeof(struct vio_disk_vtoc); map_perm = LDC_MAP_R; break; case VD_OP_GET_DISKGEOM: op_len = sizeof(struct vio_disk_geom); map_perm = LDC_MAP_W; break; case VD_OP_SET_DISKGEOM: op_len = sizeof(struct vio_disk_geom); map_perm = LDC_MAP_R; break; case VD_OP_SCSICMD: op_len = 16; map_perm = LDC_MAP_RW; break; case VD_OP_GET_DEVID: op_len = sizeof(struct vio_disk_devid); map_perm = LDC_MAP_W; break; case VD_OP_GET_EFI: case VD_OP_SET_EFI: return -EOPNOTSUPP; break; }; map_perm |= LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO; op_len = (op_len + 7) & ~7; req_buf = kzalloc(op_len, GFP_KERNEL); if (!req_buf) return -ENOMEM; if (len > op_len) len = op_len; if (map_perm & LDC_MAP_R) memcpy(req_buf, buf, len); spin_lock_irqsave(&port->vio.lock, flags); dr = &port->vio.drings[VIO_DRIVER_TX_RING]; /* XXX 
If we want to use this code generically we have to * XXX handle TX ring exhaustion etc. */ desc = vio_dring_cur(dr); err = ldc_map_single(port->vio.lp, req_buf, op_len, desc->cookies, port->ring_cookies, map_perm); if (err < 0) { spin_unlock_irqrestore(&port->vio.lock, flags); kfree(req_buf); return err; } init_completion(&comp.com); comp.waiting_for = WAITING_FOR_GEN_CMD; port->vio.cmp = &comp; desc->hdr.ack = VIO_ACK_ENABLE; desc->req_id = port->req_id; desc->operation = op; desc->slice = 0; desc->status = ~0; desc->offset = 0; desc->size = op_len; desc->ncookies = err; /* This has to be a non-SMP write barrier because we are writing * to memory which is shared with the peer LDOM. */ wmb(); desc->hdr.state = VIO_DESC_READY; err = __vdc_tx_trigger(port); if (err >= 0) { port->req_id++; dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1); spin_unlock_irqrestore(&port->vio.lock, flags); wait_for_completion(&comp.com); err = comp.err; } else { port->vio.cmp = NULL; spin_unlock_irqrestore(&port->vio.lock, flags); } if (map_perm & LDC_MAP_W) memcpy(buf, req_buf, len); kfree(req_buf); return err; } static int __devinit vdc_alloc_tx_ring(struct vdc_port *port) { struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; unsigned long len, entry_size; int ncookies; void *dring; entry_size = sizeof(struct vio_disk_desc) + (sizeof(struct ldc_trans_cookie) * port->ring_cookies); len = (VDC_TX_RING_SIZE * entry_size); ncookies = VIO_MAX_RING_COOKIES; dring = ldc_alloc_exp_dring(port->vio.lp, len, dr->cookies, &ncookies, (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW)); if (IS_ERR(dring)) return PTR_ERR(dring); dr->base = dring; dr->entry_size = entry_size; dr->num_entries = VDC_TX_RING_SIZE; dr->prod = dr->cons = 0; dr->pending = VDC_TX_RING_SIZE; dr->ncookies = ncookies; return 0; } static void vdc_free_tx_ring(struct vdc_port *port) { struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; if (dr->base) { ldc_free_exp_dring(port->vio.lp, dr->base, 
(dr->entry_size * dr->num_entries), dr->cookies, dr->ncookies); dr->base = NULL; dr->entry_size = 0; dr->num_entries = 0; dr->pending = 0; dr->ncookies = 0; } } static int probe_disk(struct vdc_port *port) { struct vio_completion comp; struct request_queue *q; struct gendisk *g; int err; init_completion(&comp.com); comp.err = 0; comp.waiting_for = WAITING_FOR_LINK_UP; port->vio.cmp = &comp; vio_port_up(&port->vio); wait_for_completion(&comp.com); if (comp.err) return comp.err; err = generic_request(port, VD_OP_GET_VTOC, &port->label, sizeof(port->label)); if (err < 0) { printk(KERN_ERR PFX "VD_OP_GET_VTOC returns error %d\n", err); return err; } err = generic_request(port, VD_OP_GET_DISKGEOM, &port->geom, sizeof(port->geom)); if (err < 0) { printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns " "error %d\n", err); return err; } port->vdisk_size = ((u64)port->geom.num_cyl * (u64)port->geom.num_hd * (u64)port->geom.num_sec); q = blk_init_queue(do_vdc_request, &port->vio.lock); if (!q) { printk(KERN_ERR PFX "%s: Could not allocate queue.\n", port->vio.name); return -ENOMEM; } g = alloc_disk(1 << PARTITION_SHIFT); if (!g) { printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n", port->vio.name); blk_cleanup_queue(q); return -ENOMEM; } port->disk = g; blk_queue_max_segments(q, port->ring_cookies); blk_queue_max_hw_sectors(q, port->max_xfer_size); g->major = vdc_major; g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT; strcpy(g->disk_name, port->disk_name); g->fops = &vdc_fops; g->queue = q; g->private_data = port; g->driverfs_dev = &port->vio.vdev->dev; set_capacity(g, port->vdisk_size); printk(KERN_INFO PFX "%s: %u sectors (%u MB)\n", g->disk_name, port->vdisk_size, (port->vdisk_size >> (20 - 9))); add_disk(g); return 0; } static struct ldc_channel_config vdc_ldc_cfg = { .event = vdc_event, .mtu = 64, .mode = LDC_MODE_UNRELIABLE, }; static struct vio_driver_ops vdc_vio_ops = { .send_attr = vdc_send_attr, .handle_attr = vdc_handle_attr, .handshake_complete = 
vdc_handshake_complete, }; static void __devinit print_version(void) { static int version_printed; if (version_printed++ == 0) printk(KERN_INFO "%s", version); } static int __devinit vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) { struct mdesc_handle *hp; struct vdc_port *port; int err; print_version(); hp = mdesc_grab(); err = -ENODEV; if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) { printk(KERN_ERR PFX "Port id [%llu] too large.\n", vdev->dev_no); goto err_out_release_mdesc; } port = kzalloc(sizeof(*port), GFP_KERNEL); err = -ENOMEM; if (!port) { printk(KERN_ERR PFX "Cannot allocate vdc_port.\n"); goto err_out_release_mdesc; } if (vdev->dev_no >= 26) snprintf(port->disk_name, sizeof(port->disk_name), VDCBLK_NAME "%c%c", 'a' + ((int)vdev->dev_no / 26) - 1, 'a' + ((int)vdev->dev_no % 26)); else snprintf(port->disk_name, sizeof(port->disk_name), VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26)); err = vio_driver_init(&port->vio, vdev, VDEV_DISK, vdc_versions, ARRAY_SIZE(vdc_versions), &vdc_vio_ops, port->disk_name); if (err) goto err_out_free_port; port->vdisk_block_size = 512; port->max_xfer_size = ((128 * 1024) / port->vdisk_block_size); port->ring_cookies = ((port->max_xfer_size * port->vdisk_block_size) / PAGE_SIZE) + 2; err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port); if (err) goto err_out_free_port; err = vdc_alloc_tx_ring(port); if (err) goto err_out_free_ldc; err = probe_disk(port); if (err) goto err_out_free_tx_ring; dev_set_drvdata(&vdev->dev, port); mdesc_release(hp); return 0; err_out_free_tx_ring: vdc_free_tx_ring(port); err_out_free_ldc: vio_ldc_free(&port->vio); err_out_free_port: kfree(port); err_out_release_mdesc: mdesc_release(hp); return err; } static int vdc_port_remove(struct vio_dev *vdev) { struct vdc_port *port = dev_get_drvdata(&vdev->dev); if (port) { del_timer_sync(&port->vio.timer); vdc_free_tx_ring(port); vio_ldc_free(&port->vio); dev_set_drvdata(&vdev->dev, NULL); kfree(port); } return 0; } static 
const struct vio_device_id vdc_port_match[] = { { .type = "vdc-port", }, {}, }; MODULE_DEVICE_TABLE(vio, vdc_port_match); static struct vio_driver vdc_port_driver = { .id_table = vdc_port_match, .probe = vdc_port_probe, .remove = vdc_port_remove, .name = "vdc_port", }; static int __init vdc_init(void) { int err; err = register_blkdev(0, VDCBLK_NAME); if (err < 0) goto out_err; vdc_major = err; err = vio_register_driver(&vdc_port_driver); if (err) goto out_unregister_blkdev; return 0; out_unregister_blkdev: unregister_blkdev(vdc_major, VDCBLK_NAME); vdc_major = 0; out_err: return err; } static void __exit vdc_exit(void) { vio_unregister_driver(&vdc_port_driver); unregister_blkdev(vdc_major, VDCBLK_NAME); } module_init(vdc_init); module_exit(vdc_exit);
gpl-2.0
jamison904/d2tmo_kernel
drivers/usb/host/ehci-s5p.c
1784
4631
/* * SAMSUNG S5P USB HOST EHCI Controller * * Copyright (C) 2011 Samsung Electronics Co.Ltd * Author: Jingoo Han <jg1.han@samsung.com> * Author: Joonyoung Shim <jy0922.shim@samsung.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/clk.h> #include <linux/platform_device.h> #include <mach/regs-pmu.h> #include <plat/cpu.h> #include <plat/ehci.h> #include <plat/usb-phy.h> struct s5p_ehci_hcd { struct device *dev; struct usb_hcd *hcd; struct clk *clk; }; static const struct hc_driver s5p_ehci_hc_driver = { .description = hcd_name, .product_desc = "S5P EHCI Host Controller", .hcd_priv_size = sizeof(struct ehci_hcd), .irq = ehci_irq, .flags = HCD_MEMORY | HCD_USB2, .reset = ehci_init, .start = ehci_run, .stop = ehci_stop, .shutdown = ehci_shutdown, .get_frame_number = ehci_get_frame, .urb_enqueue = ehci_urb_enqueue, .urb_dequeue = ehci_urb_dequeue, .endpoint_disable = ehci_endpoint_disable, .endpoint_reset = ehci_endpoint_reset, .hub_status_data = ehci_hub_status_data, .hub_control = ehci_hub_control, .bus_suspend = ehci_bus_suspend, .bus_resume = ehci_bus_resume, .relinquish_port = ehci_relinquish_port, .port_handed_over = ehci_port_handed_over, .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, }; static int __devinit s5p_ehci_probe(struct platform_device *pdev) { struct s5p_ehci_platdata *pdata; struct s5p_ehci_hcd *s5p_ehci; struct usb_hcd *hcd; struct ehci_hcd *ehci; struct resource *res; int irq; int err; pdata = pdev->dev.platform_data; if (!pdata) { dev_err(&pdev->dev, "No platform data defined\n"); return -EINVAL; } s5p_ehci = kzalloc(sizeof(struct s5p_ehci_hcd), GFP_KERNEL); if (!s5p_ehci) return -ENOMEM; s5p_ehci->dev = &pdev->dev; hcd = usb_create_hcd(&s5p_ehci_hc_driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) { 
dev_err(&pdev->dev, "Unable to create HCD\n"); err = -ENOMEM; goto fail_hcd; } s5p_ehci->hcd = hcd; s5p_ehci->clk = clk_get(&pdev->dev, "usbhost"); if (IS_ERR(s5p_ehci->clk)) { dev_err(&pdev->dev, "Failed to get usbhost clock\n"); err = PTR_ERR(s5p_ehci->clk); goto fail_clk; } err = clk_enable(s5p_ehci->clk); if (err) goto fail_clken; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "Failed to get I/O memory\n"); err = -ENXIO; goto fail_io; } hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); hcd->regs = ioremap(res->start, resource_size(res)); if (!hcd->regs) { dev_err(&pdev->dev, "Failed to remap I/O memory\n"); err = -ENOMEM; goto fail_io; } irq = platform_get_irq(pdev, 0); if (!irq) { dev_err(&pdev->dev, "Failed to get IRQ\n"); err = -ENODEV; goto fail; } if (pdata->phy_init) pdata->phy_init(pdev, S5P_USB_PHY_HOST); ehci = hcd_to_ehci(hcd); ehci->caps = hcd->regs; ehci->regs = hcd->regs + HC_LENGTH(ehci, readl(&ehci->caps->hc_capbase)); dbg_hcs_params(ehci, "reset"); dbg_hcc_params(ehci, "reset"); /* cache this readonly data; minimize chip reads */ ehci->hcs_params = readl(&ehci->caps->hcs_params); err = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED); if (err) { dev_err(&pdev->dev, "Failed to add USB HCD\n"); goto fail; } platform_set_drvdata(pdev, s5p_ehci); return 0; fail: iounmap(hcd->regs); fail_io: clk_disable(s5p_ehci->clk); fail_clken: clk_put(s5p_ehci->clk); fail_clk: usb_put_hcd(hcd); fail_hcd: kfree(s5p_ehci); return err; } static int __devexit s5p_ehci_remove(struct platform_device *pdev) { struct s5p_ehci_platdata *pdata = pdev->dev.platform_data; struct s5p_ehci_hcd *s5p_ehci = platform_get_drvdata(pdev); struct usb_hcd *hcd = s5p_ehci->hcd; usb_remove_hcd(hcd); if (pdata && pdata->phy_exit) pdata->phy_exit(pdev, S5P_USB_PHY_HOST); iounmap(hcd->regs); clk_disable(s5p_ehci->clk); clk_put(s5p_ehci->clk); usb_put_hcd(hcd); kfree(s5p_ehci); return 0; } static void s5p_ehci_shutdown(struct 
platform_device *pdev) { struct s5p_ehci_hcd *s5p_ehci = platform_get_drvdata(pdev); struct usb_hcd *hcd = s5p_ehci->hcd; if (hcd->driver->shutdown) hcd->driver->shutdown(hcd); } static struct platform_driver s5p_ehci_driver = { .probe = s5p_ehci_probe, .remove = __devexit_p(s5p_ehci_remove), .shutdown = s5p_ehci_shutdown, .driver = { .name = "s5p-ehci", .owner = THIS_MODULE, } }; MODULE_ALIAS("platform:s5p-ehci");
gpl-2.0
harunjo/galaxsih-kernel-JB-S3
crypto/algif_hash.c
2040
6895
/* * algif_hash: User-space interface for hash algorithms * * This file provides the user-space API for hash algorithms. * * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/hash.h> #include <crypto/if_alg.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/net.h> #include <net/sock.h> struct hash_ctx { struct af_alg_sgl sgl; u8 *result; struct af_alg_completion completion; unsigned int len; bool more; struct ahash_request req; }; static int hash_sendmsg(struct kiocb *unused, struct socket *sock, struct msghdr *msg, size_t ignored) { int limit = ALG_MAX_PAGES * PAGE_SIZE; struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct hash_ctx *ctx = ask->private; unsigned long iovlen; struct iovec *iov; long copied = 0; int err; if (limit > sk->sk_sndbuf) limit = sk->sk_sndbuf; lock_sock(sk); if (!ctx->more) { err = crypto_ahash_init(&ctx->req); if (err) goto unlock; } ctx->more = 0; for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0; iovlen--, iov++) { unsigned long seglen = iov->iov_len; char __user *from = iov->iov_base; while (seglen) { int len = min_t(unsigned long, seglen, limit); int newlen; newlen = af_alg_make_sg(&ctx->sgl, from, len, 0); if (newlen < 0) goto unlock; ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, newlen); err = af_alg_wait_for_completion( crypto_ahash_update(&ctx->req), &ctx->completion); af_alg_free_sg(&ctx->sgl); if (err) goto unlock; seglen -= newlen; from += newlen; copied += newlen; } } err = 0; ctx->more = msg->msg_flags & MSG_MORE; if (!ctx->more) { ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); err = 
/*
 * Tail of hash_sendmsg() — the start of the function lies before this
 * chunk.  Wait for the async final hash to complete, then drop the
 * socket lock and report the error or the number of bytes consumed.
 */
af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
			   &ctx->completion);
	}

unlock:
	release_sock(sk);

	return err ?: copied;
}

/*
 * Feed one page of data into the hash.  MSG_MORE in @flags means more
 * data will follow; its absence finalizes the digest (finup if a hash
 * is in progress, digest for a one-shot page).
 */
static ssize_t hash_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct hash_ctx *ctx = ask->private;
	int err;

	lock_sock(sk);
	sg_init_table(ctx->sgl.sg, 1);
	sg_set_page(ctx->sgl.sg, page, size, offset);

	ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size);

	if (!(flags & MSG_MORE)) {
		/* Last chunk: finish the hash into ctx->result. */
		if (ctx->more)
			err = crypto_ahash_finup(&ctx->req);
		else
			err = crypto_ahash_digest(&ctx->req);
	} else {
		/* First chunk of a multi-part hash needs an init. */
		if (!ctx->more) {
			err = crypto_ahash_init(&ctx->req);
			if (err)
				goto unlock;
		}

		err = crypto_ahash_update(&ctx->req);
	}

	/* Async backends complete via ctx->completion. */
	err = af_alg_wait_for_completion(err, &ctx->completion);
	if (err)
		goto unlock;

	/* Remember whether a partial hash is still in flight. */
	ctx->more = flags & MSG_MORE;

unlock:
	release_sock(sk);

	return err ?: size;
}

/*
 * Read the computed digest back to userspace.  If a partial hash is
 * still open (ctx->more), finalize it first with an empty update.
 * Short reads set MSG_TRUNC; at most digestsize bytes are returned.
 */
static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
			struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct hash_ctx *ctx = ask->private;
	unsigned ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
	int err;

	if (len > ds)
		len = ds;
	else if (len < ds)
		msg->msg_flags |= MSG_TRUNC;

	lock_sock(sk);
	if (ctx->more) {
		ctx->more = 0;
		ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
		err = af_alg_wait_for_completion(
			crypto_ahash_final(&ctx->req),
			&ctx->completion);
		if (err)
			goto unlock;
	}

	err = memcpy_toiovec(msg->msg_iov, ctx->result, len);

unlock:
	release_sock(sk);

	return err ?: len;
}

/*
 * accept(2) on a hash socket forks the current hash state: export the
 * partial state, create the new socket, and import the state into it,
 * so both sockets can be finalized independently.
 * NOTE(review): state[] is a VLA sized by the transform's statesize;
 * presumably bounded by the algorithm, but it does live on the stack.
 */
static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct hash_ctx *ctx = ask->private;
	struct ahash_request *req = &ctx->req;
	char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req))];
	struct sock *sk2;
	struct alg_sock *ask2;
	struct hash_ctx *ctx2;
	int err;

	err = crypto_ahash_export(req, state);
	if (err)
		return err;

	err = af_alg_accept(ask->parent, newsock);
	if (err)
		return err;

	sk2 = newsock->sk;
	ask2 = alg_sk(sk2);
	ctx2 = ask2->private;
	ctx2->more = 1;

	err = crypto_ahash_import(&ctx2->req, state);
	if (err) {
		/* Undo the accept: detach and drop the new socket. */
		sock_orphan(sk2);
		sock_put(sk2);
	}

	return err;
}

/* Socket operations for AF_ALG "hash" sockets; unsupported ops stubbed. */
static struct proto_ops algif_hash_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.setsockopt	=	sock_no_setsockopt,
	.poll		=	sock_no_poll,

	.release	=	af_alg_release,
	.sendmsg	=	hash_sendmsg,
	.sendpage	=	hash_sendpage,
	.recvmsg	=	hash_recvmsg,
	.accept		=	hash_accept,
};

/* Bind an algorithm name to an ahash transform for this socket type. */
static void *hash_bind(const char *name, u32 type, u32 mask)
{
	return crypto_alloc_ahash(name, type, mask);
}

static void hash_release(void *private)
{
	crypto_free_ahash(private);
}

static int hash_setkey(void *private, const u8 *key, unsigned int keylen)
{
	return crypto_ahash_setkey(private, key, keylen);
}

/* Free the per-socket result buffer and context, then drop the parent. */
static void hash_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct hash_ctx *ctx = ask->private;

	sock_kfree_s(sk, ctx->result,
		     crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)));
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

/*
 * Set up the per-socket hash context on accept: allocate the context
 * (with room for the ahash request) plus a digest-sized result buffer,
 * both charged to the socket's memory accounting.
 */
static int hash_accept_parent(void *private, struct sock *sk)
{
	struct hash_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(private);
	unsigned ds = crypto_ahash_digestsize(private);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL);
	if (!ctx->result) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}

	memset(ctx->result, 0, ds);

	ctx->len = len;
	ctx->more = 0;
	af_alg_init_completion(&ctx->completion);

	ask->private = ctx;

	ahash_request_set_tfm(&ctx->req, private);
	ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   af_alg_complete, &ctx->completion);

	sk->sk_destruct = hash_sock_destruct;

	return 0;
}

static const struct af_alg_type algif_type_hash = {
	.bind		=	hash_bind,
	.release	=	hash_release,
	.setkey		=	hash_setkey,
	.accept		=	hash_accept_parent,
	.ops		=	&algif_hash_ops,
	.name		=	"hash",
	.owner		=	THIS_MODULE
};

static int __init algif_hash_init(void)
{
	return af_alg_register_type(&algif_type_hash);
}

static void __exit algif_hash_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_hash);
	BUG_ON(err);
}

module_init(algif_hash_init);
module_exit(algif_hash_exit);
MODULE_LICENSE("GPL");
gpl-2.0
gototem/kernel
drivers/virtio/virtio_mmio.c
2296
17321
/* * Virtio memory mapped device driver * * Copyright 2011, ARM Ltd. * * This module allows virtio devices to be used over a virtual, memory mapped * platform device. * * The guest device(s) may be instantiated in one of three equivalent ways: * * 1. Static platform device in board's code, eg.: * * static struct platform_device v2m_virtio_device = { * .name = "virtio-mmio", * .id = -1, * .num_resources = 2, * .resource = (struct resource []) { * { * .start = 0x1001e000, * .end = 0x1001e0ff, * .flags = IORESOURCE_MEM, * }, { * .start = 42 + 32, * .end = 42 + 32, * .flags = IORESOURCE_IRQ, * }, * } * }; * * 2. Device Tree node, eg.: * * virtio_block@1e000 { * compatible = "virtio,mmio"; * reg = <0x1e000 0x100>; * interrupts = <42>; * } * * 3. Kernel module (or command line) parameter. Can be used more than once - * one device will be created for each one. Syntax: * * [virtio_mmio.]device=<size>@<baseaddr>:<irq>[:<id>] * where: * <size> := size (can use standard suffixes like K, M or G) * <baseaddr> := physical base address * <irq> := interrupt number (as passed to request_irq()) * <id> := (optional) platform device id * eg.: * virtio_mmio.device=0x100@0x100b0000:48 \ * virtio_mmio.device=1K@0x1001e000:74 * * * * Registers layout (all 32-bit wide): * * offset d. name description * ------ -- ---------------- ----------------- * * 0x000 R MagicValue Magic value "virt" * 0x004 R Version Device version (current max. 
1) * 0x008 R DeviceID Virtio device ID * 0x00c R VendorID Virtio vendor ID * * 0x010 R HostFeatures Features supported by the host * 0x014 W HostFeaturesSel Set of host features to access via HostFeatures * * 0x020 W GuestFeatures Features activated by the guest * 0x024 W GuestFeaturesSel Set of activated features to set via GuestFeatures * 0x028 W GuestPageSize Size of guest's memory page in bytes * * 0x030 W QueueSel Queue selector * 0x034 R QueueNumMax Maximum size of the currently selected queue * 0x038 W QueueNum Queue size for the currently selected queue * 0x03c W QueueAlign Used Ring alignment for the current queue * 0x040 RW QueuePFN PFN for the currently selected queue * * 0x050 W QueueNotify Queue notifier * 0x060 R InterruptStatus Interrupt status register * 0x064 W InterruptACK Interrupt acknowledge register * 0x070 RW Status Device status register * * 0x100+ RW Device-specific configuration space * * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007 * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. */ #define pr_fmt(fmt) "virtio-mmio: " fmt #include <linux/highmem.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/list.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/virtio.h> #include <linux/virtio_config.h> #include <linux/virtio_mmio.h> #include <linux/virtio_ring.h> /* The alignment to use between consumer and producer parts of vring. * Currently hardcoded to the page size. 
*/ #define VIRTIO_MMIO_VRING_ALIGN PAGE_SIZE #define to_virtio_mmio_device(_plat_dev) \ container_of(_plat_dev, struct virtio_mmio_device, vdev) struct virtio_mmio_device { struct virtio_device vdev; struct platform_device *pdev; void __iomem *base; unsigned long version; /* a list of queues so we can dispatch IRQs */ spinlock_t lock; struct list_head virtqueues; }; struct virtio_mmio_vq_info { /* the actual virtqueue */ struct virtqueue *vq; /* the number of entries in the queue */ unsigned int num; /* the virtual address of the ring queue */ void *queue; /* the list node for the virtqueues list */ struct list_head node; }; /* Configuration interface */ static u32 vm_get_features(struct virtio_device *vdev) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); /* TODO: Features > 32 bits */ writel(0, vm_dev->base + VIRTIO_MMIO_HOST_FEATURES_SEL); return readl(vm_dev->base + VIRTIO_MMIO_HOST_FEATURES); } static void vm_finalize_features(struct virtio_device *vdev) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); int i; /* Give virtio_ring a chance to accept features. 
*/ vring_transport_features(vdev); for (i = 0; i < ARRAY_SIZE(vdev->features); i++) { writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL); writel(vdev->features[i], vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES); } } static void vm_get(struct virtio_device *vdev, unsigned offset, void *buf, unsigned len) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); u8 *ptr = buf; int i; for (i = 0; i < len; i++) ptr[i] = readb(vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i); } static void vm_set(struct virtio_device *vdev, unsigned offset, const void *buf, unsigned len) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); const u8 *ptr = buf; int i; for (i = 0; i < len; i++) writeb(ptr[i], vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i); } static u8 vm_get_status(struct virtio_device *vdev) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); return readl(vm_dev->base + VIRTIO_MMIO_STATUS) & 0xff; } static void vm_set_status(struct virtio_device *vdev, u8 status) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); /* We should never be setting status to 0. */ BUG_ON(status == 0); writel(status, vm_dev->base + VIRTIO_MMIO_STATUS); } static void vm_reset(struct virtio_device *vdev) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); /* 0 status means a reset. */ writel(0, vm_dev->base + VIRTIO_MMIO_STATUS); } /* Transport interface */ /* the notify function used when creating a virt queue */ static void vm_notify(struct virtqueue *vq) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev); /* We write the queue's selector into the notification register to * signal the other end */ writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY); } /* Notify all virtqueues on an interrupt. 
*/ static irqreturn_t vm_interrupt(int irq, void *opaque) { struct virtio_mmio_device *vm_dev = opaque; struct virtio_mmio_vq_info *info; struct virtio_driver *vdrv = container_of(vm_dev->vdev.dev.driver, struct virtio_driver, driver); unsigned long status; unsigned long flags; irqreturn_t ret = IRQ_NONE; /* Read and acknowledge interrupts */ status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS); writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK); if (unlikely(status & VIRTIO_MMIO_INT_CONFIG) && vdrv && vdrv->config_changed) { vdrv->config_changed(&vm_dev->vdev); ret = IRQ_HANDLED; } if (likely(status & VIRTIO_MMIO_INT_VRING)) { spin_lock_irqsave(&vm_dev->lock, flags); list_for_each_entry(info, &vm_dev->virtqueues, node) ret |= vring_interrupt(irq, info->vq); spin_unlock_irqrestore(&vm_dev->lock, flags); } return ret; } static void vm_del_vq(struct virtqueue *vq) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev); struct virtio_mmio_vq_info *info = vq->priv; unsigned long flags, size; unsigned int index = vq->index; spin_lock_irqsave(&vm_dev->lock, flags); list_del(&info->node); spin_unlock_irqrestore(&vm_dev->lock, flags); vring_del_virtqueue(vq); /* Select and deactivate the queue */ writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL); writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN)); free_pages_exact(info->queue, size); kfree(info); } static void vm_del_vqs(struct virtio_device *vdev) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); struct virtqueue *vq, *n; list_for_each_entry_safe(vq, n, &vdev->vqs, list) vm_del_vq(vq); free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev); } static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index, void (*callback)(struct virtqueue *vq), const char *name) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); struct virtio_mmio_vq_info *info; struct virtqueue *vq; unsigned long 
flags, size; int err; if (!name) return NULL; /* Select the queue we're interested in */ writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL); /* Queue shouldn't already be set up. */ if (readl(vm_dev->base + VIRTIO_MMIO_QUEUE_PFN)) { err = -ENOENT; goto error_available; } /* Allocate and fill out our active queue description */ info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) { err = -ENOMEM; goto error_kmalloc; } /* Allocate pages for the queue - start with a queue as big as * possible (limited by maximum size allowed by device), drop down * to a minimal size, just big enough to fit descriptor table * and two rings (which makes it "alignment_size * 2") */ info->num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX); /* If the device reports a 0 entry queue, we won't be able to * use it to perform I/O, and vring_new_virtqueue() can't create * empty queues anyway, so don't bother to set up the device. */ if (info->num == 0) { err = -ENOENT; goto error_alloc_pages; } while (1) { size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN)); /* Did the last iter shrink the queue below minimum size? 
*/ if (size < VIRTIO_MMIO_VRING_ALIGN * 2) { err = -ENOMEM; goto error_alloc_pages; } info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); if (info->queue) break; info->num /= 2; } /* Activate the queue */ writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM); writel(VIRTIO_MMIO_VRING_ALIGN, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN); writel(virt_to_phys(info->queue) >> PAGE_SHIFT, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); /* Create the vring */ vq = vring_new_virtqueue(index, info->num, VIRTIO_MMIO_VRING_ALIGN, vdev, true, info->queue, vm_notify, callback, name); if (!vq) { err = -ENOMEM; goto error_new_virtqueue; } vq->priv = info; info->vq = vq; spin_lock_irqsave(&vm_dev->lock, flags); list_add(&info->node, &vm_dev->virtqueues); spin_unlock_irqrestore(&vm_dev->lock, flags); return vq; error_new_virtqueue: writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); free_pages_exact(info->queue, size); error_alloc_pages: kfree(info); error_kmalloc: error_available: return ERR_PTR(err); } static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char *names[]) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); unsigned int irq = platform_get_irq(vm_dev->pdev, 0); int i, err; err = request_irq(irq, vm_interrupt, IRQF_SHARED, dev_name(&vdev->dev), vm_dev); if (err) return err; for (i = 0; i < nvqs; ++i) { vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i]); if (IS_ERR(vqs[i])) { vm_del_vqs(vdev); return PTR_ERR(vqs[i]); } } return 0; } static const char *vm_bus_name(struct virtio_device *vdev) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); return vm_dev->pdev->name; } static const struct virtio_config_ops virtio_mmio_config_ops = { .get = vm_get, .set = vm_set, .get_status = vm_get_status, .set_status = vm_set_status, .reset = vm_reset, .find_vqs = vm_find_vqs, .del_vqs = vm_del_vqs, .get_features = vm_get_features, .finalize_features = vm_finalize_features, .bus_name 
= vm_bus_name, }; /* Platform device */ static int virtio_mmio_probe(struct platform_device *pdev) { struct virtio_mmio_device *vm_dev; struct resource *mem; unsigned long magic; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) return -EINVAL; if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem), pdev->name)) return -EBUSY; vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL); if (!vm_dev) return -ENOMEM; vm_dev->vdev.dev.parent = &pdev->dev; vm_dev->vdev.config = &virtio_mmio_config_ops; vm_dev->pdev = pdev; INIT_LIST_HEAD(&vm_dev->virtqueues); spin_lock_init(&vm_dev->lock); vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); if (vm_dev->base == NULL) return -EFAULT; /* Check magic value */ magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE); if (memcmp(&magic, "virt", 4) != 0) { dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic); return -ENODEV; } /* Check device version */ vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION); if (vm_dev->version != 1) { dev_err(&pdev->dev, "Version %ld not supported!\n", vm_dev->version); return -ENXIO; } vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID); vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID); writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE); platform_set_drvdata(pdev, vm_dev); return register_virtio_device(&vm_dev->vdev); } static int virtio_mmio_remove(struct platform_device *pdev) { struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev); unregister_virtio_device(&vm_dev->vdev); return 0; } /* Devices list parameter */ #if defined(CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES) static struct device vm_cmdline_parent = { .init_name = "virtio-mmio-cmdline", }; static int vm_cmdline_parent_registered; static int vm_cmdline_id; static int vm_cmdline_set(const char *device, const struct kernel_param *kp) { int err; struct resource resources[2] = {}; char *str; long long int base, size; 
unsigned int irq; int processed, consumed = 0; struct platform_device *pdev; /* Consume "size" part of the command line parameter */ size = memparse(device, &str); /* Get "@<base>:<irq>[:<id>]" chunks */ processed = sscanf(str, "@%lli:%u%n:%d%n", &base, &irq, &consumed, &vm_cmdline_id, &consumed); /* * sscanf() must processes at least 2 chunks; also there * must be no extra characters after the last chunk, so * str[consumed] must be '\0' */ if (processed < 2 || str[consumed]) return -EINVAL; resources[0].flags = IORESOURCE_MEM; resources[0].start = base; resources[0].end = base + size - 1; resources[1].flags = IORESOURCE_IRQ; resources[1].start = resources[1].end = irq; if (!vm_cmdline_parent_registered) { err = device_register(&vm_cmdline_parent); if (err) { pr_err("Failed to register parent device!\n"); return err; } vm_cmdline_parent_registered = 1; } pr_info("Registering device virtio-mmio.%d at 0x%llx-0x%llx, IRQ %d.\n", vm_cmdline_id, (unsigned long long)resources[0].start, (unsigned long long)resources[0].end, (int)resources[1].start); pdev = platform_device_register_resndata(&vm_cmdline_parent, "virtio-mmio", vm_cmdline_id++, resources, ARRAY_SIZE(resources), NULL, 0); if (IS_ERR(pdev)) return PTR_ERR(pdev); return 0; } static int vm_cmdline_get_device(struct device *dev, void *data) { char *buffer = data; unsigned int len = strlen(buffer); struct platform_device *pdev = to_platform_device(dev); snprintf(buffer + len, PAGE_SIZE - len, "0x%llx@0x%llx:%llu:%d\n", pdev->resource[0].end - pdev->resource[0].start + 1ULL, (unsigned long long)pdev->resource[0].start, (unsigned long long)pdev->resource[1].start, pdev->id); return 0; } static int vm_cmdline_get(char *buffer, const struct kernel_param *kp) { buffer[0] = '\0'; device_for_each_child(&vm_cmdline_parent, buffer, vm_cmdline_get_device); return strlen(buffer) + 1; } static struct kernel_param_ops vm_cmdline_param_ops = { .set = vm_cmdline_set, .get = vm_cmdline_get, }; device_param_cb(device, 
&vm_cmdline_param_ops, NULL, S_IRUSR); static int vm_unregister_cmdline_device(struct device *dev, void *data) { platform_device_unregister(to_platform_device(dev)); return 0; } static void vm_unregister_cmdline_devices(void) { if (vm_cmdline_parent_registered) { device_for_each_child(&vm_cmdline_parent, NULL, vm_unregister_cmdline_device); device_unregister(&vm_cmdline_parent); vm_cmdline_parent_registered = 0; } } #else static void vm_unregister_cmdline_devices(void) { } #endif /* Platform driver */ static struct of_device_id virtio_mmio_match[] = { { .compatible = "virtio,mmio", }, {}, }; MODULE_DEVICE_TABLE(of, virtio_mmio_match); static struct platform_driver virtio_mmio_driver = { .probe = virtio_mmio_probe, .remove = virtio_mmio_remove, .driver = { .name = "virtio-mmio", .owner = THIS_MODULE, .of_match_table = virtio_mmio_match, }, }; static int __init virtio_mmio_init(void) { return platform_driver_register(&virtio_mmio_driver); } static void __exit virtio_mmio_exit(void) { platform_driver_unregister(&virtio_mmio_driver); vm_unregister_cmdline_devices(); } module_init(virtio_mmio_init); module_exit(virtio_mmio_exit); MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>"); MODULE_DESCRIPTION("Platform bus driver for memory mapped virtio devices"); MODULE_LICENSE("GPL");
gpl-2.0
googyanas/GoogyMax-N5
fs/nfs/blocklayout/extents.c
2296
25857
/* * linux/fs/nfs/blocklayout/blocklayout.h * * Module for the NFSv4.1 pNFS block layout driver. * * Copyright (c) 2006 The Regents of the University of Michigan. * All rights reserved. * * Andy Adamson <andros@citi.umich.edu> * Fred Isaman <iisaman@umich.edu> * * permission is granted to use, copy, create derivative works and * redistribute this software and such derivative works for any purpose, * so long as the name of the university of michigan is not used in * any advertising or publicity pertaining to the use or distribution * of this software without specific, written prior authorization. if * the above copyright notice or any other identification of the * university of michigan is included in any copy of any portion of * this software, then the disclaimer below must also be included. * * this software is provided as is, without representation from the * university of michigan as to its fitness for any purpose, and without * warranty by the university of michigan of any kind, either express * or implied, including without limitation the implied warranties of * merchantability and fitness for a particular purpose. the regents * of the university of michigan shall not be liable for any damages, * including special, indirect, incidental, or consequential damages, * with respect to any claim arising out or in connection with the use * of the software, even if it has been or is hereafter advised of the * possibility of such damages. */ #include "blocklayout.h" #define NFSDBG_FACILITY NFSDBG_PNFS_LD /* Bit numbers */ #define EXTENT_INITIALIZED 0 #define EXTENT_WRITTEN 1 #define EXTENT_IN_COMMIT 2 #define INTERNAL_EXISTS MY_MAX_TAGS #define INTERNAL_MASK ((1 << INTERNAL_EXISTS) - 1) /* Returns largest t<=s s.t. 
t%base==0 */ static inline sector_t normalize(sector_t s, int base) { sector_t tmp = s; /* Since do_div modifies its argument */ return s - sector_div(tmp, base); } static inline sector_t normalize_up(sector_t s, int base) { return normalize(s + base - 1, base); } /* Complete stub using list while determine API wanted */ /* Returns tags, or negative */ static int32_t _find_entry(struct my_tree *tree, u64 s) { struct pnfs_inval_tracking *pos; dprintk("%s(%llu) enter\n", __func__, s); list_for_each_entry_reverse(pos, &tree->mtt_stub, it_link) { if (pos->it_sector > s) continue; else if (pos->it_sector == s) return pos->it_tags & INTERNAL_MASK; else break; } return -ENOENT; } static inline int _has_tag(struct my_tree *tree, u64 s, int32_t tag) { int32_t tags; dprintk("%s(%llu, %i) enter\n", __func__, s, tag); s = normalize(s, tree->mtt_step_size); tags = _find_entry(tree, s); if ((tags < 0) || !(tags & (1 << tag))) return 0; else return 1; } /* Creates entry with tag, or if entry already exists, unions tag to it. * If storage is not NULL, newly created entry will use it. * Returns number of entries added, or negative on error. 
*/ static int _add_entry(struct my_tree *tree, u64 s, int32_t tag, struct pnfs_inval_tracking *storage) { int found = 0; struct pnfs_inval_tracking *pos; dprintk("%s(%llu, %i, %p) enter\n", __func__, s, tag, storage); list_for_each_entry_reverse(pos, &tree->mtt_stub, it_link) { if (pos->it_sector > s) continue; else if (pos->it_sector == s) { found = 1; break; } else break; } if (found) { pos->it_tags |= (1 << tag); return 0; } else { struct pnfs_inval_tracking *new; new = storage; new->it_sector = s; new->it_tags = (1 << tag); list_add(&new->it_link, &pos->it_link); return 1; } } /* XXXX Really want option to not create */ /* Over range, unions tag with existing entries, else creates entry with tag */ static int _set_range(struct my_tree *tree, int32_t tag, u64 s, u64 length) { u64 i; dprintk("%s(%i, %llu, %llu) enter\n", __func__, tag, s, length); for (i = normalize(s, tree->mtt_step_size); i < s + length; i += tree->mtt_step_size) if (_add_entry(tree, i, tag, NULL)) return -ENOMEM; return 0; } /* Ensure that future operations on given range of tree will not malloc */ static int _preload_range(struct pnfs_inval_markings *marks, u64 offset, u64 length) { u64 start, end, s; int count, i, used = 0, status = -ENOMEM; struct pnfs_inval_tracking **storage; struct my_tree *tree = &marks->im_tree; dprintk("%s(%llu, %llu) enter\n", __func__, offset, length); start = normalize(offset, tree->mtt_step_size); end = normalize_up(offset + length, tree->mtt_step_size); count = (int)(end - start) / (int)tree->mtt_step_size; /* Pre-malloc what memory we might need */ storage = kcalloc(count, sizeof(*storage), GFP_NOFS); if (!storage) return -ENOMEM; for (i = 0; i < count; i++) { storage[i] = kmalloc(sizeof(struct pnfs_inval_tracking), GFP_NOFS); if (!storage[i]) goto out_cleanup; } spin_lock_bh(&marks->im_lock); for (s = start; s < end; s += tree->mtt_step_size) used += _add_entry(tree, s, INTERNAL_EXISTS, storage[used]); spin_unlock_bh(&marks->im_lock); status = 0; out_cleanup: 
for (i = used; i < count; i++) { if (!storage[i]) break; kfree(storage[i]); } kfree(storage); return status; } /* We are relying on page lock to serialize this */ int bl_is_sector_init(struct pnfs_inval_markings *marks, sector_t isect) { int rv; spin_lock_bh(&marks->im_lock); rv = _has_tag(&marks->im_tree, isect, EXTENT_INITIALIZED); spin_unlock_bh(&marks->im_lock); return rv; } /* Assume start, end already sector aligned */ static int _range_has_tag(struct my_tree *tree, u64 start, u64 end, int32_t tag) { struct pnfs_inval_tracking *pos; u64 expect = 0; dprintk("%s(%llu, %llu, %i) enter\n", __func__, start, end, tag); list_for_each_entry_reverse(pos, &tree->mtt_stub, it_link) { if (pos->it_sector >= end) continue; if (!expect) { if ((pos->it_sector == end - tree->mtt_step_size) && (pos->it_tags & (1 << tag))) { expect = pos->it_sector - tree->mtt_step_size; if (pos->it_sector < tree->mtt_step_size || expect < start) return 1; continue; } else { return 0; } } if (pos->it_sector != expect || !(pos->it_tags & (1 << tag))) return 0; expect -= tree->mtt_step_size; if (expect < start) return 1; } return 0; } static int is_range_written(struct pnfs_inval_markings *marks, sector_t start, sector_t end) { int rv; spin_lock_bh(&marks->im_lock); rv = _range_has_tag(&marks->im_tree, start, end, EXTENT_WRITTEN); spin_unlock_bh(&marks->im_lock); return rv; } /* Marks sectors in [offest, offset_length) as having been initialized. * All lengths are step-aligned, where step is min(pagesize, blocksize). 
* Currently assumes offset is page-aligned */ int bl_mark_sectors_init(struct pnfs_inval_markings *marks, sector_t offset, sector_t length) { sector_t start, end; dprintk("%s(offset=%llu,len=%llu) enter\n", __func__, (u64)offset, (u64)length); start = normalize(offset, marks->im_block_size); end = normalize_up(offset + length, marks->im_block_size); if (_preload_range(marks, start, end - start)) goto outerr; spin_lock_bh(&marks->im_lock); if (_set_range(&marks->im_tree, EXTENT_INITIALIZED, offset, length)) goto out_unlock; spin_unlock_bh(&marks->im_lock); return 0; out_unlock: spin_unlock_bh(&marks->im_lock); outerr: return -ENOMEM; } /* Marks sectors in [offest, offset+length) as having been written to disk. * All lengths should be block aligned. */ static int mark_written_sectors(struct pnfs_inval_markings *marks, sector_t offset, sector_t length) { int status; dprintk("%s(offset=%llu,len=%llu) enter\n", __func__, (u64)offset, (u64)length); spin_lock_bh(&marks->im_lock); status = _set_range(&marks->im_tree, EXTENT_WRITTEN, offset, length); spin_unlock_bh(&marks->im_lock); return status; } static void print_short_extent(struct pnfs_block_short_extent *be) { dprintk("PRINT SHORT EXTENT extent %p\n", be); if (be) { dprintk(" be_f_offset %llu\n", (u64)be->bse_f_offset); dprintk(" be_length %llu\n", (u64)be->bse_length); } } static void print_clist(struct list_head *list, unsigned int count) { struct pnfs_block_short_extent *be; unsigned int i = 0; ifdebug(FACILITY) { printk(KERN_DEBUG "****************\n"); printk(KERN_DEBUG "Extent list looks like:\n"); list_for_each_entry(be, list, bse_node) { i++; print_short_extent(be); } if (i != count) printk(KERN_DEBUG "\n\nExpected %u entries\n\n\n", count); printk(KERN_DEBUG "****************\n"); } } /* Note: In theory, we should do more checking that devid's match between * old and new, but if they don't, the lists are too corrupt to salvage anyway. 
*/ /* Note this is very similar to bl_add_merge_extent */ static void add_to_commitlist(struct pnfs_block_layout *bl, struct pnfs_block_short_extent *new) { struct list_head *clist = &bl->bl_commit; struct pnfs_block_short_extent *old, *save; sector_t end = new->bse_f_offset + new->bse_length; dprintk("%s enter\n", __func__); print_short_extent(new); print_clist(clist, bl->bl_count); bl->bl_count++; /* Scan for proper place to insert, extending new to the left * as much as possible. */ list_for_each_entry_safe(old, save, clist, bse_node) { if (new->bse_f_offset < old->bse_f_offset) break; if (end <= old->bse_f_offset + old->bse_length) { /* Range is already in list */ bl->bl_count--; kfree(new); return; } else if (new->bse_f_offset <= old->bse_f_offset + old->bse_length) { /* new overlaps or abuts existing be */ if (new->bse_mdev == old->bse_mdev) { /* extend new to fully replace old */ new->bse_length += new->bse_f_offset - old->bse_f_offset; new->bse_f_offset = old->bse_f_offset; list_del(&old->bse_node); bl->bl_count--; kfree(old); } } } /* Note that if we never hit the above break, old will not point to a * valid extent. However, in that case &old->bse_node==list. */ list_add_tail(&new->bse_node, &old->bse_node); /* Scan forward for overlaps. If we find any, extend new and * remove the overlapped extent. */ old = list_prepare_entry(new, clist, bse_node); list_for_each_entry_safe_continue(old, save, clist, bse_node) { if (end < old->bse_f_offset) break; /* new overlaps or abuts old */ if (new->bse_mdev == old->bse_mdev) { if (end < old->bse_f_offset + old->bse_length) { /* extend new to fully cover old */ end = old->bse_f_offset + old->bse_length; new->bse_length = end - new->bse_f_offset; } list_del(&old->bse_node); bl->bl_count--; kfree(old); } } dprintk("%s: after merging\n", __func__); print_clist(clist, bl->bl_count); } /* Note the range described by offset, length is guaranteed to be contained * within be. 
* new will be freed, either by this function or add_to_commitlist if they * decide not to use it, or after LAYOUTCOMMIT uses it in the commitlist. */ int bl_mark_for_commit(struct pnfs_block_extent *be, sector_t offset, sector_t length, struct pnfs_block_short_extent *new) { sector_t new_end, end = offset + length; struct pnfs_block_layout *bl = container_of(be->be_inval, struct pnfs_block_layout, bl_inval); mark_written_sectors(be->be_inval, offset, length); /* We want to add the range to commit list, but it must be * block-normalized, and verified that the normalized range has * been entirely written to disk. */ new->bse_f_offset = offset; offset = normalize(offset, bl->bl_blocksize); if (offset < new->bse_f_offset) { if (is_range_written(be->be_inval, offset, new->bse_f_offset)) new->bse_f_offset = offset; else new->bse_f_offset = offset + bl->bl_blocksize; } new_end = normalize_up(end, bl->bl_blocksize); if (end < new_end) { if (is_range_written(be->be_inval, end, new_end)) end = new_end; else end = new_end - bl->bl_blocksize; } if (end <= new->bse_f_offset) { kfree(new); return 0; } new->bse_length = end - new->bse_f_offset; new->bse_devid = be->be_devid; new->bse_mdev = be->be_mdev; spin_lock(&bl->bl_ext_lock); add_to_commitlist(bl, new); spin_unlock(&bl->bl_ext_lock); return 0; } static void print_bl_extent(struct pnfs_block_extent *be) { dprintk("PRINT EXTENT extent %p\n", be); if (be) { dprintk(" be_f_offset %llu\n", (u64)be->be_f_offset); dprintk(" be_length %llu\n", (u64)be->be_length); dprintk(" be_v_offset %llu\n", (u64)be->be_v_offset); dprintk(" be_state %d\n", be->be_state); } } static void destroy_extent(struct kref *kref) { struct pnfs_block_extent *be; be = container_of(kref, struct pnfs_block_extent, be_refcnt); dprintk("%s be=%p\n", __func__, be); kfree(be); } void bl_put_extent(struct pnfs_block_extent *be) { if (be) { dprintk("%s enter %p (%i)\n", __func__, be, atomic_read(&be->be_refcnt.refcount)); kref_put(&be->be_refcnt, destroy_extent); } 
} struct pnfs_block_extent *bl_alloc_extent(void) { struct pnfs_block_extent *be; be = kmalloc(sizeof(struct pnfs_block_extent), GFP_NOFS); if (!be) return NULL; INIT_LIST_HEAD(&be->be_node); kref_init(&be->be_refcnt); be->be_inval = NULL; return be; } static void print_elist(struct list_head *list) { struct pnfs_block_extent *be; dprintk("****************\n"); dprintk("Extent list looks like:\n"); list_for_each_entry(be, list, be_node) { print_bl_extent(be); } dprintk("****************\n"); } static inline int extents_consistent(struct pnfs_block_extent *old, struct pnfs_block_extent *new) { /* Note this assumes new->be_f_offset >= old->be_f_offset */ return (new->be_state == old->be_state) && ((new->be_state == PNFS_BLOCK_NONE_DATA) || ((new->be_v_offset - old->be_v_offset == new->be_f_offset - old->be_f_offset) && new->be_mdev == old->be_mdev)); } /* Adds new to appropriate list in bl, modifying new and removing existing * extents as appropriate to deal with overlaps. * * See bl_find_get_extent for list constraints. * * Refcount on new is already set. If end up not using it, or error out, * need to put the reference. * * bl->bl_ext_lock is held by caller. */ int bl_add_merge_extent(struct pnfs_block_layout *bl, struct pnfs_block_extent *new) { struct pnfs_block_extent *be, *tmp; sector_t end = new->be_f_offset + new->be_length; struct list_head *list; dprintk("%s enter with be=%p\n", __func__, new); print_bl_extent(new); list = &bl->bl_extents[bl_choose_list(new->be_state)]; print_elist(list); /* Scan for proper place to insert, extending new to the left * as much as possible. 
*/ list_for_each_entry_safe_reverse(be, tmp, list, be_node) { if (new->be_f_offset >= be->be_f_offset + be->be_length) break; if (new->be_f_offset >= be->be_f_offset) { if (end <= be->be_f_offset + be->be_length) { /* new is a subset of existing be*/ if (extents_consistent(be, new)) { dprintk("%s: new is subset, ignoring\n", __func__); bl_put_extent(new); return 0; } else { goto out_err; } } else { /* |<-- be -->| * |<-- new -->| */ if (extents_consistent(be, new)) { /* extend new to fully replace be */ new->be_length += new->be_f_offset - be->be_f_offset; new->be_f_offset = be->be_f_offset; new->be_v_offset = be->be_v_offset; dprintk("%s: removing %p\n", __func__, be); list_del(&be->be_node); bl_put_extent(be); } else { goto out_err; } } } else if (end >= be->be_f_offset + be->be_length) { /* new extent overlap existing be */ if (extents_consistent(be, new)) { /* extend new to fully replace be */ dprintk("%s: removing %p\n", __func__, be); list_del(&be->be_node); bl_put_extent(be); } else { goto out_err; } } else if (end > be->be_f_offset) { /* |<-- be -->| *|<-- new -->| */ if (extents_consistent(new, be)) { /* extend new to fully replace be */ new->be_length += be->be_f_offset + be->be_length - new->be_f_offset - new->be_length; dprintk("%s: removing %p\n", __func__, be); list_del(&be->be_node); bl_put_extent(be); } else { goto out_err; } } } /* Note that if we never hit the above break, be will not point to a * valid extent. However, in that case &be->be_node==list. */ list_add(&new->be_node, &be->be_node); dprintk("%s: inserting new\n", __func__); print_elist(list); /* FIXME - The per-list consistency checks have all been done, * should now check cross-list consistency. */ return 0; out_err: bl_put_extent(new); return -EIO; } /* Returns extent, or NULL. If a second READ extent exists, it is returned * in cow_read, if given. * * The extents are kept in two seperate ordered lists, one for READ and NONE, * one for READWRITE and INVALID. 
Within each list, we assume:
 * 1. Extents are ordered by file offset.
 * 2. For any given isect, there is at most one extent that matches.
 */
struct pnfs_block_extent *
bl_find_get_extent(struct pnfs_block_layout *bl, sector_t isect,
		   struct pnfs_block_extent **cow_read)
{
	struct pnfs_block_extent *be, *cow, *ret;
	int i;

	dprintk("%s enter with isect %llu\n", __func__, (u64)isect);
	cow = ret = NULL;
	spin_lock(&bl->bl_ext_lock);
	for (i = 0; i < EXTENT_LISTS; i++) {
		/* lists are sorted by be_f_offset; scan highest-first */
		list_for_each_entry_reverse(be, &bl->bl_extents[i], be_node) {
			if (isect >= be->be_f_offset + be->be_length)
				break;
			if (isect >= be->be_f_offset) {
				/* We have found an extent */
				dprintk("%s Get %p (%i)\n", __func__, be,
					atomic_read(&be->be_refcnt.refcount));
				kref_get(&be->be_refcnt);
				if (!ret)
					ret = be;
				else if (be->be_state != PNFS_BLOCK_READ_DATA)
					/* second match not useful as a
					 * cow source - drop its ref
					 */
					bl_put_extent(be);
				else
					cow = be;
				break;
			}
		}
		/* Keep scanning the second list only when the caller asked
		 * for a cow source and the first match is INVALID (i.e. it
		 * may be backed by a READ extent on the other list).
		 */
		if (ret &&
		    (!cow_read || ret->be_state != PNFS_BLOCK_INVALID_DATA))
			break;
	}
	spin_unlock(&bl->bl_ext_lock);
	if (cow_read)
		*cow_read = cow;
	print_bl_extent(ret);
	return ret;
}

/* Similar to bl_find_get_extent, but called with lock held, and ignores cow */
static struct pnfs_block_extent *
bl_find_get_extent_locked(struct pnfs_block_layout *bl, sector_t isect)
{
	struct pnfs_block_extent *be, *ret = NULL;
	int i;

	dprintk("%s enter with isect %llu\n", __func__, (u64)isect);
	for (i = 0; i < EXTENT_LISTS; i++) {
		if (ret)
			break;
		list_for_each_entry_reverse(be, &bl->bl_extents[i], be_node) {
			if (isect >= be->be_f_offset + be->be_length)
				break;
			if (isect >= be->be_f_offset) {
				/* We have found an extent */
				dprintk("%s Get %p (%i)\n", __func__, be,
					atomic_read(&be->be_refcnt.refcount));
				kref_get(&be->be_refcnt);
				ret = be;
				break;
			}
		}
	}
	print_bl_extent(ret);
	return ret;
}

/* XDR-encode the bl_commit ranges of @bl for LAYOUTCOMMIT.  Encoded
 * entries are moved from bl_commit to bl_committing; always returns 0
 * (running out of xdr space simply stops the encoding loop early).
 */
int
encode_pnfs_block_layoutupdate(struct pnfs_block_layout *bl,
			       struct xdr_stream *xdr,
			       const struct nfs4_layoutcommit_args *arg)
{
	struct pnfs_block_short_extent *lce, *save;
	unsigned int count = 0;
	__be32 *p, *xdr_start;

	dprintk("%s enter\n", __func__);
	/* BUG - creation of bl_commit is buggy - need to wait for
	 * entire block to be marked WRITTEN before it can be added.
	 */

	spin_lock(&bl->bl_ext_lock);
	/* Want to adjust for possible truncate */
	/* We now want to adjust argument range */

	/* XDR encode the ranges found */
	/* reserve two words up front: total length and entry count,
	 * back-patched below once we know how much was encoded
	 */
	xdr_start = xdr_reserve_space(xdr, 8);
	if (!xdr_start)
		goto out;
	list_for_each_entry_safe(lce, save, &bl->bl_commit, bse_node) {
		p = xdr_reserve_space(xdr, 7 * 4 + sizeof(lce->bse_devid.data));
		if (!p)
			break;
		p = xdr_encode_opaque_fixed(p, lce->bse_devid.data,
					    NFS4_DEVICEID4_SIZE);
		/* offsets/lengths are in 512-byte sectors; wire wants bytes */
		p = xdr_encode_hyper(p, lce->bse_f_offset << SECTOR_SHIFT);
		p = xdr_encode_hyper(p, lce->bse_length << SECTOR_SHIFT);
		p = xdr_encode_hyper(p, 0LL);
		*p++ = cpu_to_be32(PNFS_BLOCK_READWRITE_DATA);
		list_move_tail(&lce->bse_node, &bl->bl_committing);
		bl->bl_count--;
		count++;
	}
	xdr_start[0] = cpu_to_be32((xdr->p - xdr_start - 1) * 4);
	xdr_start[1] = cpu_to_be32(count);
out:
	spin_unlock(&bl->bl_ext_lock);

	dprintk("%s found %i ranges\n", __func__, count);
	return 0;
}

/* Helper function to set_to_rw that initializes a new extent @new covering
 * [@offset, @offset + @length) with state @state, inheriting device,
 * mapping and inval tracking from @orig.  Refcount starts at 1.
 */
static void
_prep_new_extent(struct pnfs_block_extent *new,
		 struct pnfs_block_extent *orig,
		 sector_t offset, sector_t length, int state)
{
	kref_init(&new->be_refcnt);
	/* don't need to INIT_LIST_HEAD(&new->be_node) */
	memcpy(&new->be_devid, &orig->be_devid, sizeof(struct nfs4_deviceid));
	new->be_mdev = orig->be_mdev;
	new->be_f_offset = offset;
	new->be_length = length;
	/* keep orig's logical-to-physical delta for the new file offset */
	new->be_v_offset = orig->be_v_offset - orig->be_f_offset + offset;
	new->be_state = state;
	new->be_inval = orig->be_inval;
}

/* Tries to merge be with extent in front of it in list.
 * Frees storage if not used.
*/ static struct pnfs_block_extent * _front_merge(struct pnfs_block_extent *be, struct list_head *head, struct pnfs_block_extent *storage) { struct pnfs_block_extent *prev; if (!storage) goto no_merge; if (&be->be_node == head || be->be_node.prev == head) goto no_merge; prev = list_entry(be->be_node.prev, struct pnfs_block_extent, be_node); if ((prev->be_f_offset + prev->be_length != be->be_f_offset) || !extents_consistent(prev, be)) goto no_merge; _prep_new_extent(storage, prev, prev->be_f_offset, prev->be_length + be->be_length, prev->be_state); list_replace(&prev->be_node, &storage->be_node); bl_put_extent(prev); list_del(&be->be_node); bl_put_extent(be); return storage; no_merge: kfree(storage); return be; } static u64 set_to_rw(struct pnfs_block_layout *bl, u64 offset, u64 length) { u64 rv = offset + length; struct pnfs_block_extent *be, *e1, *e2, *e3, *new, *old; struct pnfs_block_extent *children[3]; struct pnfs_block_extent *merge1 = NULL, *merge2 = NULL; int i = 0, j; dprintk("%s(%llu, %llu)\n", __func__, offset, length); /* Create storage for up to three new extents e1, e2, e3 */ e1 = kmalloc(sizeof(*e1), GFP_ATOMIC); e2 = kmalloc(sizeof(*e2), GFP_ATOMIC); e3 = kmalloc(sizeof(*e3), GFP_ATOMIC); /* BUG - we are ignoring any failure */ if (!e1 || !e2 || !e3) goto out_nosplit; spin_lock(&bl->bl_ext_lock); be = bl_find_get_extent_locked(bl, offset); rv = be->be_f_offset + be->be_length; if (be->be_state != PNFS_BLOCK_INVALID_DATA) { spin_unlock(&bl->bl_ext_lock); goto out_nosplit; } /* Add e* to children, bumping e*'s krefs */ if (be->be_f_offset != offset) { _prep_new_extent(e1, be, be->be_f_offset, offset - be->be_f_offset, PNFS_BLOCK_INVALID_DATA); children[i++] = e1; print_bl_extent(e1); } else merge1 = e1; _prep_new_extent(e2, be, offset, min(length, be->be_f_offset + be->be_length - offset), PNFS_BLOCK_READWRITE_DATA); children[i++] = e2; print_bl_extent(e2); if (offset + length < be->be_f_offset + be->be_length) { _prep_new_extent(e3, be, 
e2->be_f_offset + e2->be_length, be->be_f_offset + be->be_length - offset - length, PNFS_BLOCK_INVALID_DATA); children[i++] = e3; print_bl_extent(e3); } else merge2 = e3; /* Remove be from list, and insert the e* */ /* We don't get refs on e*, since this list is the base reference * set when init'ed. */ if (i < 3) children[i] = NULL; new = children[0]; list_replace(&be->be_node, &new->be_node); bl_put_extent(be); new = _front_merge(new, &bl->bl_extents[RW_EXTENT], merge1); for (j = 1; j < i; j++) { old = new; new = children[j]; list_add(&new->be_node, &old->be_node); } if (merge2) { /* This is a HACK, should just create a _back_merge function */ new = list_entry(new->be_node.next, struct pnfs_block_extent, be_node); new = _front_merge(new, &bl->bl_extents[RW_EXTENT], merge2); } spin_unlock(&bl->bl_ext_lock); /* Since we removed the base reference above, be is now scheduled for * destruction. */ bl_put_extent(be); dprintk("%s returns %llu after split\n", __func__, rv); return rv; out_nosplit: kfree(e1); kfree(e2); kfree(e3); dprintk("%s returns %llu without splitting\n", __func__, rv); return rv; } void clean_pnfs_block_layoutupdate(struct pnfs_block_layout *bl, const struct nfs4_layoutcommit_args *arg, int status) { struct pnfs_block_short_extent *lce, *save; dprintk("%s status %d\n", __func__, status); list_for_each_entry_safe(lce, save, &bl->bl_committing, bse_node) { if (likely(!status)) { u64 offset = lce->bse_f_offset; u64 end = offset + lce->bse_length; do { offset = set_to_rw(bl, offset, end - offset); } while (offset < end); list_del(&lce->bse_node); kfree(lce); } else { list_del(&lce->bse_node); spin_lock(&bl->bl_ext_lock); add_to_commitlist(bl, lce); spin_unlock(&bl->bl_ext_lock); } } } int bl_push_one_short_extent(struct pnfs_inval_markings *marks) { struct pnfs_block_short_extent *new; new = kmalloc(sizeof(*new), GFP_NOFS); if (unlikely(!new)) return -ENOMEM; spin_lock_bh(&marks->im_lock); list_add(&new->bse_node, &marks->im_extents); 
spin_unlock_bh(&marks->im_lock); return 0; } struct pnfs_block_short_extent * bl_pop_one_short_extent(struct pnfs_inval_markings *marks) { struct pnfs_block_short_extent *rv = NULL; spin_lock_bh(&marks->im_lock); if (!list_empty(&marks->im_extents)) { rv = list_entry((&marks->im_extents)->next, struct pnfs_block_short_extent, bse_node); list_del_init(&rv->bse_node); } spin_unlock_bh(&marks->im_lock); return rv; } void bl_free_short_extents(struct pnfs_inval_markings *marks, int num_to_free) { struct pnfs_block_short_extent *se = NULL, *tmp; if (num_to_free <= 0) return; spin_lock(&marks->im_lock); list_for_each_entry_safe(se, tmp, &marks->im_extents, bse_node) { list_del(&se->bse_node); kfree(se); if (--num_to_free == 0) break; } spin_unlock(&marks->im_lock); BUG_ON(num_to_free > 0); }
gpl-2.0